code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os
from typing import Any, Dict, List, Optional
import numpy as np
from OpenGL.GL import (
GL_BLEND,
GL_CULL_FACE,
GL_LINEAR,
GL_LINEAR_MIPMAP_LINEAR,
GL_ONE,
GL_ONE_MINUS_SRC_ALPHA,
GL_REPEAT,
GL_RGB,
GL_RGBA,
GL_SRC_ALPHA,
GL_TEXTURE0,
GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_MIN_FILTER,
GL_TEXTURE_WRAP_S,
GL_TEXTURE_WRAP_T,
GL_UNPACK_ALIGNMENT,
GL_UNSIGNED_BYTE,
glActiveTexture,
glBindTexture,
glBlendFunc,
glDisable,
glEnable,
glGenerateMipmap,
glGenTextures,
glPixelStorei,
glTexImage2D,
glTexParameterf,
)
from PIL import Image
from payton.math.vector import Vector3D
from payton.scene.shader import Shader
from payton.scene.types import IList
SOLID = 0 # type: int
WIREFRAME = 1 # type: int
POINTS = 2 # type: int
RED = [1.0, 0.0, 0.0] # type: Vector3D
GREEN = [0.0, 1.0, 0.0] # type: Vector3D
BLUE = [0.0, 0.0, 1.0] # type: Vector3D
CRIMSON = [220 / 255.0, 20 / 255.0, 60 / 255.0] # type: Vector3D
PINK = [1.0, 192 / 255.0, 203 / 255.0] # type: Vector3D
VIOLET_RED = [1.0, 62 / 255.0, 150 / 255.0] # type: Vector3D
DEEP_PINK = [1.0, 20 / 255.0, 147 / 255.0] # type: Vector3D
ORCHID = [218 / 255.0, 112 / 255.0, 214 / 255.0] # type: Vector3D
PURPLE = [128 / 255.0, 0.0, 128 / 255.0] # type: Vector3D
NAVY = [0.0, 0.0, 0.5] # type: Vector3D
ROYAL_BLUE = [65 / 255.0, 105 / 255.0, 225 / 255.0] # type: Vector3D
LIGHT_STEEL_BLUE = [176 / 255.0, 196 / 255.0, 222 / 255.0] # type: Vector3D
STEEL_BLUE = [70 / 255.0, 130 / 255.0, 180 / 255.0] # type: Vector3D
TURQUOISE = [0.0, 245 / 255.0, 1.0] # type: Vector3D
YELLOW = [1.0, 1.0, 0.0] # type: Vector3D
GOLD = [1.0, 225 / 255.0, 0.0] # type: Vector3D
ORANGE = [1.0, 165 / 255.0, 0.0] # type: Vector3D
WHITE = [1.0, 1.0, 1.0] # type: Vector3D
BLACK = [0.0, 0.0, 0.0] # type: Vector3D
DARK_GRAY = [0.2, 0.2, 0.2] # type: Vector3D
LIGHT_GRAY = [0.8, 0.8, 0.8] # type: Vector3D
DEFAULT = "default"
NO_VERTEX_ARRAY = -1
NO_INDICE = -2
EMPTY_VERTEX_ARRAY = -3
BASE_PARTICLE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "particle.png")
_IMAGE_CACHE: Dict[str, int] = {}
class Material:
def __init__(
self,
color: Optional[Vector3D] = None,
display: int = SOLID,
lights: bool = True,
texture: str = "",
opacity: float = 1.0,
**kwargs: Any,
):
"""Payton materials are quite simple and does not support some
functionalities like Game Engines or design softwares.
Keyword arguments:
color -- Material color
display -- Display type of the material (SOLID, WIREFRAME, POINTS)
lights -- Is this material effected by the light shading?
texture -- Texture filename
opacity -- Opacity of the material
"""
self._color = [1.0, 1.0, 1.0] if color is None else color
self._color_np: np.ndarray = np.array(list(self._color), dtype=np.float32)
self.display: int = display
self.lights: bool = lights
self.texture: str = texture
self.particle_texture: str = BASE_PARTICLE
self.opacity: float = opacity
self.particle_size: float = 0.16
self._image: Optional[Image.Image] = None
self._indices: IList = []
self._vao: int = NO_VERTEX_ARRAY
self._vbos: List[int] = []
self._vertex_count: int = 0
self._index_count: int = 0
self._initialized: bool = False
self._texture: Optional[int] = None
self._particle_texture: Optional[int] = None
def to_dict(self) -> Dict[str, Any]:
"""Convert the material into dictionary"""
return {
"color": self.color,
"display": self.display,
"texture": self.texture,
"opacity": self.opacity,
"indices": self._indices,
}
@property
def index_count(self) -> int:
"""Return the number of indexes for OpenGL"""
if self._index_count > 0:
return self._index_count
self._index_count = len(self._indices)
return self._index_count
@property
def color(self) -> Vector3D:
"""Return the material color"""
return self._color
@color.setter
def color(self, color: Vector3D) -> None:
"""Set the material color
Keyword arguments:
color -- Color to set
"""
self._color = color
self._color_np = np.array(list(self._color), dtype=np.float32)
@classmethod
def from_dict(cls, material_dictionary: Dict[str, Any]) -> "Material":
"""Import material from dictionary
material_dictionary -- Dictionary to import"""
res = cls()
res.color = material_dictionary["color"]
res.display = material_dictionary["display"]
res.texture = material_dictionary["texture"]
res.opacity = material_dictionary["opacity"]
res._indices = material_dictionary["indices"]
return res
def build(self) -> bool:
global _IMAGE_CACHE
"""Build the material"""
self._initialized = True
if os.path.isfile(self.texture):
if self.texture in _IMAGE_CACHE:
self._texture = _IMAGE_CACHE[self.texture]
else:
img = Image.open(self.texture)
_IMAGE_CACHE[self.texture] = self.load_texture(img)
if self._image is not None:
self.load_texture(self._image)
if os.path.isfile(self.particle_texture):
img = Image.open(self.particle_texture)
self.load_texture(img, particle=True)
return True
def load_texture(self, img: Image.Image, particle: bool = False) -> int:
"""Load texture directly from PIL Image object
Keyword arguments:
img -- Image to load
particle -- Is this a particle material?
"""
img_data = np.fromstring(img.tobytes(), np.uint8) # type: ignore
width, height = img.size
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
if particle:
self._particle_texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._particle_texture)
else:
self._texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._texture)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
mode = GL_RGBA
if img.mode == "RGB":
mode = GL_RGB
if img.mode == "P":
img = img.convert("RGB")
img_data = np.fromstring(img.tobytes(), np.uint8) # type: ignore
mode = GL_RGB
glTexImage2D(
GL_TEXTURE_2D,
0,
mode,
width,
height,
0,
mode,
GL_UNSIGNED_BYTE,
img_data,
)
glGenerateMipmap(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, 0)
return self._texture or -1
def refresh(self) -> None:
"""Refresh / apply the material changes into OpenGL context"""
self._initialized = False
def material_mode(self, lit: bool) -> int:
"""Return the material mode
Keyword argument:
lit -- Is this a lit material?
"""
if self.display == SOLID and lit and self.lights and self._texture is not None:
return Shader.LIGHT_TEXTURE
elif self.display == SOLID and lit and self.lights:
return Shader.LIGHT_COLOR
elif self.display == SOLID and self._texture is not None:
return Shader.NO_LIGHT_TEXTURE
else:
return Shader.NO_LIGHT_COLOR
def render(
self,
lit: bool,
shader: Shader,
mode: Optional[int] = None,
) -> None:
"""Render the material
Keyword arguments:
lit -- Is this a lit material?
shader -- Shader to use for rendering the material
mode -- Material mode
"""
if not self._initialized:
self.build()
_mode = mode or self.material_mode(lit)
glEnable(GL_BLEND)
glDisable(GL_CULL_FACE)
blend = GL_ONE_MINUS_SRC_ALPHA
if self.display == POINTS:
blend = GL_ONE
glBlendFunc(GL_SRC_ALPHA, blend)
if self._texture is not None and self.display != POINTS:
check = shader.get_location("tex_unit")
if check > -1:
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self._texture)
shader.set_int("tex_unit", 0)
if self._particle_texture is not None and self.display == POINTS:
check = shader.get_location("tex_unit")
shader.set_float("particle_size", self.particle_size)
if check > -1:
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self._particle_texture)
shader.set_int("tex_unit", 0)
if not shader._depth_shader:
shader.set_vector3_np("object_color", self._color_np)
shader.set_float("opacity", self.opacity)
shader.set_int("material_mode", _mode)
shader.set_int("lit", 1 if lit else 0)
if not self.lights:
shader.set_int("lit", 0) | payton/scene/material.py | import os
from typing import Any, Dict, List, Optional
import numpy as np
from OpenGL.GL import (
GL_BLEND,
GL_CULL_FACE,
GL_LINEAR,
GL_LINEAR_MIPMAP_LINEAR,
GL_ONE,
GL_ONE_MINUS_SRC_ALPHA,
GL_REPEAT,
GL_RGB,
GL_RGBA,
GL_SRC_ALPHA,
GL_TEXTURE0,
GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_MIN_FILTER,
GL_TEXTURE_WRAP_S,
GL_TEXTURE_WRAP_T,
GL_UNPACK_ALIGNMENT,
GL_UNSIGNED_BYTE,
glActiveTexture,
glBindTexture,
glBlendFunc,
glDisable,
glEnable,
glGenerateMipmap,
glGenTextures,
glPixelStorei,
glTexImage2D,
glTexParameterf,
)
from PIL import Image
from payton.math.vector import Vector3D
from payton.scene.shader import Shader
from payton.scene.types import IList
SOLID = 0 # type: int
WIREFRAME = 1 # type: int
POINTS = 2 # type: int
RED = [1.0, 0.0, 0.0] # type: Vector3D
GREEN = [0.0, 1.0, 0.0] # type: Vector3D
BLUE = [0.0, 0.0, 1.0] # type: Vector3D
CRIMSON = [220 / 255.0, 20 / 255.0, 60 / 255.0] # type: Vector3D
PINK = [1.0, 192 / 255.0, 203 / 255.0] # type: Vector3D
VIOLET_RED = [1.0, 62 / 255.0, 150 / 255.0] # type: Vector3D
DEEP_PINK = [1.0, 20 / 255.0, 147 / 255.0] # type: Vector3D
ORCHID = [218 / 255.0, 112 / 255.0, 214 / 255.0] # type: Vector3D
PURPLE = [128 / 255.0, 0.0, 128 / 255.0] # type: Vector3D
NAVY = [0.0, 0.0, 0.5] # type: Vector3D
ROYAL_BLUE = [65 / 255.0, 105 / 255.0, 225 / 255.0] # type: Vector3D
LIGHT_STEEL_BLUE = [176 / 255.0, 196 / 255.0, 222 / 255.0] # type: Vector3D
STEEL_BLUE = [70 / 255.0, 130 / 255.0, 180 / 255.0] # type: Vector3D
TURQUOISE = [0.0, 245 / 255.0, 1.0] # type: Vector3D
YELLOW = [1.0, 1.0, 0.0] # type: Vector3D
GOLD = [1.0, 225 / 255.0, 0.0] # type: Vector3D
ORANGE = [1.0, 165 / 255.0, 0.0] # type: Vector3D
WHITE = [1.0, 1.0, 1.0] # type: Vector3D
BLACK = [0.0, 0.0, 0.0] # type: Vector3D
DARK_GRAY = [0.2, 0.2, 0.2] # type: Vector3D
LIGHT_GRAY = [0.8, 0.8, 0.8] # type: Vector3D
DEFAULT = "default"
NO_VERTEX_ARRAY = -1
NO_INDICE = -2
EMPTY_VERTEX_ARRAY = -3
BASE_PARTICLE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "particle.png")
_IMAGE_CACHE: Dict[str, int] = {}
class Material:
def __init__(
self,
color: Optional[Vector3D] = None,
display: int = SOLID,
lights: bool = True,
texture: str = "",
opacity: float = 1.0,
**kwargs: Any,
):
"""Payton materials are quite simple and does not support some
functionalities like Game Engines or design softwares.
Keyword arguments:
color -- Material color
display -- Display type of the material (SOLID, WIREFRAME, POINTS)
lights -- Is this material effected by the light shading?
texture -- Texture filename
opacity -- Opacity of the material
"""
self._color = [1.0, 1.0, 1.0] if color is None else color
self._color_np: np.ndarray = np.array(list(self._color), dtype=np.float32)
self.display: int = display
self.lights: bool = lights
self.texture: str = texture
self.particle_texture: str = BASE_PARTICLE
self.opacity: float = opacity
self.particle_size: float = 0.16
self._image: Optional[Image.Image] = None
self._indices: IList = []
self._vao: int = NO_VERTEX_ARRAY
self._vbos: List[int] = []
self._vertex_count: int = 0
self._index_count: int = 0
self._initialized: bool = False
self._texture: Optional[int] = None
self._particle_texture: Optional[int] = None
def to_dict(self) -> Dict[str, Any]:
"""Convert the material into dictionary"""
return {
"color": self.color,
"display": self.display,
"texture": self.texture,
"opacity": self.opacity,
"indices": self._indices,
}
@property
def index_count(self) -> int:
"""Return the number of indexes for OpenGL"""
if self._index_count > 0:
return self._index_count
self._index_count = len(self._indices)
return self._index_count
@property
def color(self) -> Vector3D:
"""Return the material color"""
return self._color
@color.setter
def color(self, color: Vector3D) -> None:
"""Set the material color
Keyword arguments:
color -- Color to set
"""
self._color = color
self._color_np = np.array(list(self._color), dtype=np.float32)
@classmethod
def from_dict(cls, material_dictionary: Dict[str, Any]) -> "Material":
"""Import material from dictionary
material_dictionary -- Dictionary to import"""
res = cls()
res.color = material_dictionary["color"]
res.display = material_dictionary["display"]
res.texture = material_dictionary["texture"]
res.opacity = material_dictionary["opacity"]
res._indices = material_dictionary["indices"]
return res
def build(self) -> bool:
global _IMAGE_CACHE
"""Build the material"""
self._initialized = True
if os.path.isfile(self.texture):
if self.texture in _IMAGE_CACHE:
self._texture = _IMAGE_CACHE[self.texture]
else:
img = Image.open(self.texture)
_IMAGE_CACHE[self.texture] = self.load_texture(img)
if self._image is not None:
self.load_texture(self._image)
if os.path.isfile(self.particle_texture):
img = Image.open(self.particle_texture)
self.load_texture(img, particle=True)
return True
def load_texture(self, img: Image.Image, particle: bool = False) -> int:
"""Load texture directly from PIL Image object
Keyword arguments:
img -- Image to load
particle -- Is this a particle material?
"""
img_data = np.fromstring(img.tobytes(), np.uint8) # type: ignore
width, height = img.size
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
if particle:
self._particle_texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._particle_texture)
else:
self._texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._texture)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
mode = GL_RGBA
if img.mode == "RGB":
mode = GL_RGB
if img.mode == "P":
img = img.convert("RGB")
img_data = np.fromstring(img.tobytes(), np.uint8) # type: ignore
mode = GL_RGB
glTexImage2D(
GL_TEXTURE_2D,
0,
mode,
width,
height,
0,
mode,
GL_UNSIGNED_BYTE,
img_data,
)
glGenerateMipmap(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, 0)
return self._texture or -1
def refresh(self) -> None:
"""Refresh / apply the material changes into OpenGL context"""
self._initialized = False
def material_mode(self, lit: bool) -> int:
"""Return the material mode
Keyword argument:
lit -- Is this a lit material?
"""
if self.display == SOLID and lit and self.lights and self._texture is not None:
return Shader.LIGHT_TEXTURE
elif self.display == SOLID and lit and self.lights:
return Shader.LIGHT_COLOR
elif self.display == SOLID and self._texture is not None:
return Shader.NO_LIGHT_TEXTURE
else:
return Shader.NO_LIGHT_COLOR
def render(
self,
lit: bool,
shader: Shader,
mode: Optional[int] = None,
) -> None:
"""Render the material
Keyword arguments:
lit -- Is this a lit material?
shader -- Shader to use for rendering the material
mode -- Material mode
"""
if not self._initialized:
self.build()
_mode = mode or self.material_mode(lit)
glEnable(GL_BLEND)
glDisable(GL_CULL_FACE)
blend = GL_ONE_MINUS_SRC_ALPHA
if self.display == POINTS:
blend = GL_ONE
glBlendFunc(GL_SRC_ALPHA, blend)
if self._texture is not None and self.display != POINTS:
check = shader.get_location("tex_unit")
if check > -1:
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self._texture)
shader.set_int("tex_unit", 0)
if self._particle_texture is not None and self.display == POINTS:
check = shader.get_location("tex_unit")
shader.set_float("particle_size", self.particle_size)
if check > -1:
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self._particle_texture)
shader.set_int("tex_unit", 0)
if not shader._depth_shader:
shader.set_vector3_np("object_color", self._color_np)
shader.set_float("opacity", self.opacity)
shader.set_int("material_mode", _mode)
shader.set_int("lit", 1 if lit else 0)
if not self.lights:
shader.set_int("lit", 0) | 0.818918 | 0.291006 |
config={}
with open('etc/pkg/config', 'r') as config_file:
exec(config_file.read(), config)
print('pkg for FreeBSD %s %s' % (config['DIST'], config['ARCHITECTURE'][1]))
import os
import subprocess
import sys
import re
import io
import shutil
import copy
try:
import urllib2
except ImportError:
from urllib import request as urllib2
import bz2, lzma
try:
import cPickle as pickle
except ImportError:
import pickle
import tarfile
try:
from hashlib import md5
except ImportError:
from md5 import md5
desc="""
pkg {base | -h}
"""
available_package_list_file = 'var/cache/pkg/package_available.pkl'
installed_package_list_file = 'var/cache/pkg/package_installed.pkl'
link_package_list_file = 'var/cache/pkg/package_links.pkl'
package_folder = 'var/cache/pkg/archives'
def usage():
print(desc)
def download(url):
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write('\r[%0.2f%%] %s...'%(percent, url))
sys.stdout.flush()
else:
data_so_far = float(bytes_so_far)
unit = 'B'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'kB'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'MB'
sys.stdout.write('\r[%0.2f%s] %s...'%(data_so_far, unit, url))
sys.stdout.flush()
chunk_size = 8192
data = bytes()
response = urllib2.urlopen(url)
try:
total_size = response.info()['Content-length'].strip()
total_size = int(total_size)
except Exception as e:
print(e)
total_size = 0
bytes_so_far = 0
chunk_report(bytes_so_far, chunk_size, total_size)
while(1):
try:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
data += chunk
chunk_report(bytes_so_far, chunk_size, total_size)
except Exception as e:
print(e)
return None
print('')
return data
def base():
for pkg, required in [
('kernel.txz', True),
('base.txz', True),
('lib32.txz', False),
]:
base_url = config['MIRROR'] + config['ARCHITECTURE'][0] + '/' + config['ARCHITECTURE'][1] + '/' + config['DIST'] + '/' + pkg
if required or config['ARCHITECTURE'][2]:
try:
data = download(base_url)
except Exception as e:
if not optional:
raise e
else:
tar = tarfile.open(fileobj=io.BytesIO(data))
for tarinfo in tar:
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
try:
tar.extract(tarinfo, '.', set_attrs=False)
except ValueError as e:
print(e)
except OSError as e:
print(tarinfo.name)
os.unlink(tarinfo.name)
tar.extract(tarinfo, '.', set_attrs=False)
def fix_links(dir):
for l in os.listdir(dir):
p = os.path.join(dir, l)
if os.path.islink(p):
target = p
seen = set([target])
while os.path.islink(target):
real = os.readlink(target)
parent = os.path.split(target)[0]
if real[0] == '/':
target = '.' + real
else:
target = os.path.join(parent, real)
if target in seen:
print ('recursive link: %s => %s' % (p, target))
seen.add(target)
if os.path.exists(target):
print ('%s => %s' % (p, target))
os.unlink(p)
if os.path.isdir(target):
fix_links(target)
shutil.copytree(target, p)
else:
shutil.copy(target, p)
else:
print('broken link: %s => %s' % (p, target))
os.unlink(p)
elif os.path.isdir(p):
fix_links(p)
if sys.platform == 'win32':
fix_links('.')
if __name__ == '__main__':
command = sys.argv[1]
packages = sys.argv[2:]
try:
if command == '-h':
usage()
elif command == 'base':
if packages:
raise Exception(desc)
base()
else:
raise Exception('unknown command: %s\n\n%s' % (command, desc))
except Exception as e:
print(e.__class__, e)
exit(1)
else:
exit(0) | pc-freebsd-ppc64/sysroot/pkg.py | config={}
with open('etc/pkg/config', 'r') as config_file:
exec(config_file.read(), config)
print('pkg for FreeBSD %s %s' % (config['DIST'], config['ARCHITECTURE'][1]))
import os
import subprocess
import sys
import re
import io
import shutil
import copy
try:
import urllib2
except ImportError:
from urllib import request as urllib2
import bz2, lzma
try:
import cPickle as pickle
except ImportError:
import pickle
import tarfile
try:
from hashlib import md5
except ImportError:
from md5 import md5
desc="""
pkg {base | -h}
"""
available_package_list_file = 'var/cache/pkg/package_available.pkl'
installed_package_list_file = 'var/cache/pkg/package_installed.pkl'
link_package_list_file = 'var/cache/pkg/package_links.pkl'
package_folder = 'var/cache/pkg/archives'
def usage():
print(desc)
def download(url):
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write('\r[%0.2f%%] %s...'%(percent, url))
sys.stdout.flush()
else:
data_so_far = float(bytes_so_far)
unit = 'B'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'kB'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'MB'
sys.stdout.write('\r[%0.2f%s] %s...'%(data_so_far, unit, url))
sys.stdout.flush()
chunk_size = 8192
data = bytes()
response = urllib2.urlopen(url)
try:
total_size = response.info()['Content-length'].strip()
total_size = int(total_size)
except Exception as e:
print(e)
total_size = 0
bytes_so_far = 0
chunk_report(bytes_so_far, chunk_size, total_size)
while(1):
try:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
data += chunk
chunk_report(bytes_so_far, chunk_size, total_size)
except Exception as e:
print(e)
return None
print('')
return data
def base():
for pkg, required in [
('kernel.txz', True),
('base.txz', True),
('lib32.txz', False),
]:
base_url = config['MIRROR'] + config['ARCHITECTURE'][0] + '/' + config['ARCHITECTURE'][1] + '/' + config['DIST'] + '/' + pkg
if required or config['ARCHITECTURE'][2]:
try:
data = download(base_url)
except Exception as e:
if not optional:
raise e
else:
tar = tarfile.open(fileobj=io.BytesIO(data))
for tarinfo in tar:
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
try:
tar.extract(tarinfo, '.', set_attrs=False)
except ValueError as e:
print(e)
except OSError as e:
print(tarinfo.name)
os.unlink(tarinfo.name)
tar.extract(tarinfo, '.', set_attrs=False)
def fix_links(dir):
for l in os.listdir(dir):
p = os.path.join(dir, l)
if os.path.islink(p):
target = p
seen = set([target])
while os.path.islink(target):
real = os.readlink(target)
parent = os.path.split(target)[0]
if real[0] == '/':
target = '.' + real
else:
target = os.path.join(parent, real)
if target in seen:
print ('recursive link: %s => %s' % (p, target))
seen.add(target)
if os.path.exists(target):
print ('%s => %s' % (p, target))
os.unlink(p)
if os.path.isdir(target):
fix_links(target)
shutil.copytree(target, p)
else:
shutil.copy(target, p)
else:
print('broken link: %s => %s' % (p, target))
os.unlink(p)
elif os.path.isdir(p):
fix_links(p)
if sys.platform == 'win32':
fix_links('.')
if __name__ == '__main__':
command = sys.argv[1]
packages = sys.argv[2:]
try:
if command == '-h':
usage()
elif command == 'base':
if packages:
raise Exception(desc)
base()
else:
raise Exception('unknown command: %s\n\n%s' % (command, desc))
except Exception as e:
print(e.__class__, e)
exit(1)
else:
exit(0) | 0.087737 | 0.062245 |
__all__ = [
"MAX_RANGE",
"OrdinalError",
"__all__",
"__version__",
"decode",
"dump",
"encode",
"get_delimiter",
"load",
"parse",
"safeparse",
"set_delimiter",
"temporary_delimiter",
]
import contextlib
from typing import Literal, Generator, Optional
MAX_RANGE = 1114112
_delimiter = "-"
__version__ = "2.1.1"
class OrdinalError(ValueError):
pass
def __dir__():
return __all__
@contextlib.contextmanager
def temporary_delimiter(
delimiter: str, *, after: Optional[str] = None
) -> Generator[None, None, None]:
"""Set a temporary delimiter.
Ordinary's delimiter will be restored to
it's previous state after. Use this function
as a context manager.
"""
global _delimiter
current = _delimiter
set_delimiter(delimiter)
try:
yield
finally:
if after is None:
_delimiter = current
else:
# We want to clarify this is as a result
# of the after kwarg, so we make this amendment :)
try:
set_delimiter(after)
except (TypeError, ValueError) as exc:
raise exc.__class__(f"after {exc}") from None
def set_delimiter(delimiter: Optional[str] = None, /) -> None:
"""Sets the delimiter used by the encoder."""
if delimiter is None:
delimiter = "-"
else:
if not isinstance(delimiter, str):
raise TypeError("delimiter must be str")
if len(delimiter) != 1:
raise ValueError("delimiter length must be 1")
if delimiter.isdigit():
raise ValueError("delimeter must be a non numeric character")
global _delimiter
_delimiter = delimiter
def get_delimiter() -> str:
"""Gets the set Ordinary delimiter."""
return _delimiter
def parse(text: str) -> None:
"""Parses the given Ordinary to make sure it is syntactically correct."""
text = _delimiter.join(text.splitlines())
split = text.split(_delimiter)
for i in range(len(split)):
if not (n := split[i]).isdigit():
raise OrdinalError("value '%s' at position %s is not a digit" % (n, i))
if int(n) not in range(MAX_RANGE):
raise OrdinalError("value '%s' at position %s is not in range(%s)" % (n, i, MAX_RANGE))
def safeparse(text: str) -> bool:
"""Parses the given Ordinary, returning bool instead of raising."""
try:
parse(text)
except OrdinalError:
return False
else:
return True
def encode(text: str, *, cutoff: Optional[int] = None) -> str:
"""Encode a string into Ordinary.
Use the cutoff kwarg to control the number of ords per row.
"""
i = tuple(map(lambda x: str(ord(x)), text))
if not (cutoff is None or isinstance(cutoff, int)):
raise ValueError("cutoff kwarg must be None or int")
if cutoff is None or cutoff >= len(i):
return _delimiter.join(i)
ret = ""
for x in [i[x : x + cutoff] for x in range(0, len(i), cutoff)]:
ret += _delimiter.join(x) + "\n"
return ret
def decode(text: str) -> str:
"""Decode Ordinary into standard text."""
text = text.strip()
text = _delimiter.join(map(str.strip, text.splitlines()))
parse(text)
return "".join(map(lambda x: chr(int(x)), text.split(_delimiter)))
_mode_type = Literal["e", "d"]
def dump(text, fp, /, mode: _mode_type, **kwds) -> None:
"""Convert and write ordinary/text to a file-like object (.write()).
``text`` is the string to dump into the ``fp``.
``fp`` is a file-like object to write into.
``mode`` must be 'd' or 'e', 'e' standing for encode, 'd' standing for encode.
These modes decide whether encode() or decode() is used on the ``text`` when writing.
When using the mode 'e', add 'cutoff' as a keyword argument to be parsed into the
encode function.
"""
if mode == "e":
fp.write(encode(text, cutoff=kwds.get("cutoff", None)))
elif mode == "d":
fp.write(decode(text))
else:
raise ValueError(
"dump(mode=x): x must be 'd' for decode, or 'e' for encode, not '%s'" % mode
)
def load(fp, /, mode: _mode_type, **kwds) -> str:
"""Loads text from a file and converts, returning a string.
``fp`` is a file-like object to extract from.
``mode`` must be 'd' or 'e', 'e' standing for encode, 'd' standing for encode.
These modes decide whether encode() or decode() is used on the string
that is returned.
When using the mode 'e', add 'cutoff' as a keyword argument to be parsed into the
encode function.
"""
read = fp.read()
if mode == "e":
return encode(read, cutoff=kwds.get("cutoff", None))
elif mode == "d":
return decode(read)
else:
raise ValueError(
"load(mode=x): x must be 'd' for decode, or 'e' for encode, not '%s'" % mode
)
del contextlib
del Literal, Generator, Optional | ordinary.py | __all__ = [
"MAX_RANGE",
"OrdinalError",
"__all__",
"__version__",
"decode",
"dump",
"encode",
"get_delimiter",
"load",
"parse",
"safeparse",
"set_delimiter",
"temporary_delimiter",
]
import contextlib
from typing import Literal, Generator, Optional
MAX_RANGE = 1114112
_delimiter = "-"
__version__ = "2.1.1"
class OrdinalError(ValueError):
pass
def __dir__():
return __all__
@contextlib.contextmanager
def temporary_delimiter(
delimiter: str, *, after: Optional[str] = None
) -> Generator[None, None, None]:
"""Set a temporary delimiter.
Ordinary's delimiter will be restored to
it's previous state after. Use this function
as a context manager.
"""
global _delimiter
current = _delimiter
set_delimiter(delimiter)
try:
yield
finally:
if after is None:
_delimiter = current
else:
# We want to clarify this is as a result
# of the after kwarg, so we make this amendment :)
try:
set_delimiter(after)
except (TypeError, ValueError) as exc:
raise exc.__class__(f"after {exc}") from None
def set_delimiter(delimiter: Optional[str] = None, /) -> None:
"""Sets the delimiter used by the encoder."""
if delimiter is None:
delimiter = "-"
else:
if not isinstance(delimiter, str):
raise TypeError("delimiter must be str")
if len(delimiter) != 1:
raise ValueError("delimiter length must be 1")
if delimiter.isdigit():
raise ValueError("delimeter must be a non numeric character")
global _delimiter
_delimiter = delimiter
def get_delimiter() -> str:
"""Gets the set Ordinary delimiter."""
return _delimiter
def parse(text: str) -> None:
"""Parses the given Ordinary to make sure it is syntactically correct."""
text = _delimiter.join(text.splitlines())
split = text.split(_delimiter)
for i in range(len(split)):
if not (n := split[i]).isdigit():
raise OrdinalError("value '%s' at position %s is not a digit" % (n, i))
if int(n) not in range(MAX_RANGE):
raise OrdinalError("value '%s' at position %s is not in range(%s)" % (n, i, MAX_RANGE))
def safeparse(text: str) -> bool:
"""Parses the given Ordinary, returning bool instead of raising."""
try:
parse(text)
except OrdinalError:
return False
else:
return True
def encode(text: str, *, cutoff: Optional[int] = None) -> str:
"""Encode a string into Ordinary.
Use the cutoff kwarg to control the number of ords per row.
"""
i = tuple(map(lambda x: str(ord(x)), text))
if not (cutoff is None or isinstance(cutoff, int)):
raise ValueError("cutoff kwarg must be None or int")
if cutoff is None or cutoff >= len(i):
return _delimiter.join(i)
ret = ""
for x in [i[x : x + cutoff] for x in range(0, len(i), cutoff)]:
ret += _delimiter.join(x) + "\n"
return ret
def decode(text: str) -> str:
"""Decode Ordinary into standard text."""
text = text.strip()
text = _delimiter.join(map(str.strip, text.splitlines()))
parse(text)
return "".join(map(lambda x: chr(int(x)), text.split(_delimiter)))
_mode_type = Literal["e", "d"]
def dump(text, fp, /, mode: _mode_type, **kwds) -> None:
"""Convert and write ordinary/text to a file-like object (.write()).
``text`` is the string to dump into the ``fp``.
``fp`` is a file-like object to write into.
``mode`` must be 'd' or 'e', 'e' standing for encode, 'd' standing for encode.
These modes decide whether encode() or decode() is used on the ``text`` when writing.
When using the mode 'e', add 'cutoff' as a keyword argument to be parsed into the
encode function.
"""
if mode == "e":
fp.write(encode(text, cutoff=kwds.get("cutoff", None)))
elif mode == "d":
fp.write(decode(text))
else:
raise ValueError(
"dump(mode=x): x must be 'd' for decode, or 'e' for encode, not '%s'" % mode
)
def load(fp, /, mode: _mode_type, **kwds) -> str:
"""Loads text from a file and converts, returning a string.
``fp`` is a file-like object to extract from.
``mode`` must be 'd' or 'e', 'e' standing for encode, 'd' standing for encode.
These modes decide whether encode() or decode() is used on the string
that is returned.
When using the mode 'e', add 'cutoff' as a keyword argument to be parsed into the
encode function.
"""
read = fp.read()
if mode == "e":
return encode(read, cutoff=kwds.get("cutoff", None))
elif mode == "d":
return decode(read)
else:
raise ValueError(
"load(mode=x): x must be 'd' for decode, or 'e' for encode, not '%s'" % mode
)
del contextlib
del Literal, Generator, Optional | 0.890032 | 0.216632 |
from unittest import TestCase
from flowers.person import Person
from flowers.task import Task, OtherPhaseException
from tests import builder
from tests.builder import build_developer
class TestTask(TestCase):
    """Unit tests for applying Person effort to a Task."""

    def test_task_completed(self):
        """A person with exactly enough effort completes the task."""
        p: Person = build_developer()
        p.effort_available = 10
        self.assertEqual(p.effort_available, 10)
        t: Task = Task(p.role.phase, 10)
        self.assertEqual(t.current_effort, 10)
        t.apply_effort_from(p)
        self.assertEqual(t.current_effort, 0)
        self.assertEqual(p.effort_available, 0)

    def test_task_half_completed(self):
        """A person with less effort than required only reduces the task."""
        p: Person = build_developer()
        p.effort_available = 3
        self.assertEqual(p.effort_available, 3)
        t: Task = Task(p.role.phase, 10)
        self.assertEqual(t.current_effort, 10)
        t.apply_effort_from(p)
        self.assertEqual(t.current_effort, 7)
        self.assertEqual(p.effort_available, 0)

    def test_two_tasks_one_completed(self):
        """Leftover effort after finishing one task carries over to the next."""
        p: Person = build_developer()
        p.effort_available = 15
        self.assertEqual(p.effort_available, 15)
        t1: Task = Task(p.role.phase, 10)
        t1.apply_effort_from(p)
        self.assertEqual(t1.current_effort, 0)
        self.assertEqual(p.effort_available, 5)
        t2: Task = Task(p.role.phase, 10)
        self.assertEqual(t2.current_effort, 10)
        t2.apply_effort_from(p)
        self.assertEqual(t2.current_effort, 5)
        self.assertEqual(p.effort_available, 0)

    def test_person_exhausted(self):
        """A person with no effort left cannot advance a task."""
        p: Person = build_developer()
        p.effort_available = 10
        self.assertEqual(p.effort_available, 10)
        t1: Task = Task(p.role.phase, 10)
        t1.apply_effort_from(p)
        self.assertEqual(t1.current_effort, 0)
        self.assertEqual(p.effort_available, 0)
        t2: Task = Task(p.role.phase, 1)
        self.assertEqual(t2.current_effort, 1)
        t2.apply_effort_from(p)
        # nothing changes: the person has no effort to contribute
        self.assertEqual(t2.current_effort, 1)
        self.assertEqual(p.effort_available, 0)

    def test_two_person_one_task(self):
        """Two people contribute to the same task in sequence."""
        p1: Person = build_developer()
        p1.effort_available = 10
        self.assertEqual(p1.effort_available, 10)
        p2: Person = build_developer()
        p2.effort_available = 7
        self.assertEqual(p2.effort_available, 7)
        t: Task = Task(p1.role.phase, 20)
        t.apply_effort_from(p1)
        self.assertEqual(t.current_effort, 10)
        self.assertEqual(p1.effort_available, 0)
        t.apply_effort_from(p2)
        self.assertEqual(3, t.current_effort)
        self.assertEqual(0, p1.effort_available)
        # BUG FIX: the original only re-checked p1 here, but it is p2 whose
        # effort was just consumed — verify p2 is now exhausted as well.
        self.assertEqual(0, p2.effort_available)

    def test_effort_on_different_phase(self):
        """Applying effort to a task in a different phase raises."""
        p1: Person = build_developer()
        t: Task = Task(builder.Test_phase, 0)
        self.assertRaises(OtherPhaseException, lambda: t.apply_effort_from(p1))
from flowers.person import Person
from flowers.task import Task, OtherPhaseException
from tests import builder
from tests.builder import build_developer
class TestTask(TestCase):
    """Unit tests for applying Person effort to a Task."""

    def test_task_completed(self):
        """A person with exactly enough effort completes the task."""
        p: Person = build_developer()
        p.effort_available = 10
        self.assertEqual(p.effort_available, 10)
        t: Task = Task(p.role.phase, 10)
        self.assertEqual(t.current_effort, 10)
        t.apply_effort_from(p)
        self.assertEqual(t.current_effort, 0)
        self.assertEqual(p.effort_available, 0)

    def test_task_half_completed(self):
        """A person with less effort than required only reduces the task."""
        p: Person = build_developer()
        p.effort_available = 3
        self.assertEqual(p.effort_available, 3)
        t: Task = Task(p.role.phase, 10)
        self.assertEqual(t.current_effort, 10)
        t.apply_effort_from(p)
        self.assertEqual(t.current_effort, 7)
        self.assertEqual(p.effort_available, 0)

    def test_two_tasks_one_completed(self):
        """Leftover effort after finishing one task carries over to the next."""
        p: Person = build_developer()
        p.effort_available = 15
        self.assertEqual(p.effort_available, 15)
        t1: Task = Task(p.role.phase, 10)
        t1.apply_effort_from(p)
        self.assertEqual(t1.current_effort, 0)
        self.assertEqual(p.effort_available, 5)
        t2: Task = Task(p.role.phase, 10)
        self.assertEqual(t2.current_effort, 10)
        t2.apply_effort_from(p)
        self.assertEqual(t2.current_effort, 5)
        self.assertEqual(p.effort_available, 0)

    def test_person_exhausted(self):
        """A person with no effort left cannot advance a task."""
        p: Person = build_developer()
        p.effort_available = 10
        self.assertEqual(p.effort_available, 10)
        t1: Task = Task(p.role.phase, 10)
        t1.apply_effort_from(p)
        self.assertEqual(t1.current_effort, 0)
        self.assertEqual(p.effort_available, 0)
        t2: Task = Task(p.role.phase, 1)
        self.assertEqual(t2.current_effort, 1)
        t2.apply_effort_from(p)
        # nothing changes: the person has no effort to contribute
        self.assertEqual(t2.current_effort, 1)
        self.assertEqual(p.effort_available, 0)

    def test_two_person_one_task(self):
        """Two people contribute to the same task in sequence."""
        p1: Person = build_developer()
        p1.effort_available = 10
        self.assertEqual(p1.effort_available, 10)
        p2: Person = build_developer()
        p2.effort_available = 7
        self.assertEqual(p2.effort_available, 7)
        t: Task = Task(p1.role.phase, 20)
        t.apply_effort_from(p1)
        self.assertEqual(t.current_effort, 10)
        self.assertEqual(p1.effort_available, 0)
        t.apply_effort_from(p2)
        self.assertEqual(3, t.current_effort)
        self.assertEqual(0, p1.effort_available)
        # BUG FIX: the original only re-checked p1 here, but it is p2 whose
        # effort was just consumed — verify p2 is now exhausted as well.
        self.assertEqual(0, p2.effort_available)

    def test_effort_on_different_phase(self):
        """Applying effort to a task in a different phase raises."""
        p1: Person = build_developer()
        t: Task = Task(builder.Test_phase, 0)
        self.assertRaises(OtherPhaseException, lambda: t.apply_effort_from(p1))
import os
from abc import ABCMeta
from fnmatch import filter as fnfilter
from typing import Optional, Mapping, Union
from hbutils.model import get_repr_info
from hbutils.string import truncate
from ..base import _process_environ
from ...control.model import Identification, ResourceLimit
class _IGlobalConfig(metaclass=ABCMeta):
    """Shared base for global-config objects: stores the common fields and
    provides a compact ``repr`` that omits empty/default values."""

    def __init__(self, identification, resources, environ, use_sys_env):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        :param use_sys_env: use environment variables from local environ
        """
        self.__identification = identification
        self.__resources = resources
        self.__environ = environ
        self.__use_sys_env = use_sys_env

    def __repr__(self):
        """
        :return: get representation string
        """
        def _short(value, width):
            # Shorten a value's repr so long contents do not flood the output.
            return truncate(repr(value), width=width, show_length=True, tail_length=16)

        return get_repr_info(
            cls=self.__class__,
            args=[
                ('identification',
                 lambda: _short(self.__identification, 48),
                 # Shown only when set and different from the empty default.
                 lambda: self.__identification and self.__identification != Identification.loads({})),
                ('resources',
                 lambda: _short(self.__resources, 64),
                 lambda: self.__resources and self.__resources != ResourceLimit.loads({})),
                ('environ',
                 lambda: _short(self.__environ, 64),
                 lambda: self.__environ),
                ('use_sys_env',
                 lambda: _short(self.__use_sys_env, 64),
                 lambda: self.__use_sys_env is not None),
            ]
        )
def _process_use_sys_env(use_sys_env) -> Union[set, bool]:
if isinstance(use_sys_env, (list, tuple, set)):
return set(use_sys_env)
elif isinstance(use_sys_env, bool) or use_sys_env is None:
return not not use_sys_env
else:
raise TypeError(
'Bool or list expected but {actual} found for use_sys_env.'.format(actual=repr(type(use_sys_env).__name__)))
def _load_local_environ(use_sys_env) -> Mapping[str, str]:
    """
    Build the environment mapping imported from the local process environment.

    :param use_sys_env: bool / pattern collection, normalized via
        :func:`_process_use_sys_env`.
    :return: the selected environment variables as a plain dict.
    """
    use_sys_env = _process_use_sys_env(use_sys_env)
    _current_env = dict(os.environ)
    if isinstance(use_sys_env, set):
        # Collect every variable name matched by at least one pattern.
        _keys = set()
        for pattern in use_sys_env:
            # fnmatch.filter accepts any iterable of names; the original
            # list(_current_env.keys()) copy was unnecessary.
            _keys |= set(fnfilter(_current_env, pattern))
        return {key: value for key, value in _current_env.items() if key in _keys}
    else:
        # True -> whole environment, False -> nothing.
        return _current_env if use_sys_env else {}
class GlobalConfigTemplate(_IGlobalConfig):
    """Template for the global configuration; calling an instance resolves the
    environment layers and produces a concrete :class:`GlobalConfig`."""

    def __init__(self, identification=None, resources=None, environ=None, use_sys_env=None):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        :param use_sys_env: use environment variables from local environ
        """
        self.__identification = Identification.loads(identification)
        self.__resources = ResourceLimit.loads(resources)
        self.__environ = _process_environ(environ)
        self.__use_sys_env = _process_use_sys_env(use_sys_env)
        # Mirror the parsed values into the base class (note: name mangling
        # means the base keeps its own private copies for __repr__).
        _IGlobalConfig.__init__(self, self.__identification, self.__resources, self.__environ, self.__use_sys_env)

    @property
    def identification(self) -> Identification:
        # Parsed identification settings.
        return self.__identification

    @property
    def resources(self) -> ResourceLimit:
        # Parsed resource-limit settings.
        return self.__resources

    @property
    def environ(self) -> Mapping[str, str]:
        # Template-level environment variables (before call-time merging).
        return self.__environ

    @property
    def use_sys_env(self) -> Union[set, bool]:
        # Normalized system-environment import policy (set of patterns or bool).
        return self.__use_sys_env

    def __call__(self, environ: Optional[Mapping[str, str]] = None,
                 environ_after: Optional[Mapping[str, str]] = None, **kwargs) -> 'GlobalConfig':
        """
        generate global config
        :param environ: environment variable
        :param environ_after:
        :param kwargs: other arguments
        :return: global config
        """
        # The environment is built up in stages via ``_process_environ``:
        # local system environ, then call-time ``environ``, then the
        # template's own environ, then ``environ_after``.  (Which layer wins
        # on key collisions depends on ``_process_environ``'s merge
        # semantics — not visible here; confirm before relying on it.)
        _environ = _load_local_environ(self.__use_sys_env)
        _environ = _process_environ(environ, _environ, enable_ext=True)
        _environ = _process_environ(self.__environ, _environ, enable_ext=True)
        _environ = _process_environ(environ_after, _environ, enable_ext=True)
        return GlobalConfig(
            identification=self.__identification,
            resources=self.__resources, environ=_environ,
        )

    @classmethod
    def loads(cls, data) -> 'GlobalConfigTemplate':
        """
        load global config template from data
        :param data: raw data
        :return: global config template
        """
        # Accept None (empty template), an existing instance (pass-through),
        # or a dict of constructor keyword arguments.
        data = data or {}
        if isinstance(data, cls):
            return data
        elif isinstance(data, dict):
            return cls(**data)
        else:
            raise TypeError('Json or {type} expected but {actual} found.'.format(
                type=cls.__name__, actual=repr(type(data).__name__)))
class GlobalConfig(_IGlobalConfig):
    """Concrete, fully-resolved global configuration produced from a template."""

    def __init__(self, identification, resources, environ):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        """
        self.__identification = identification
        self.__resources = resources
        self.__environ = environ
        # use_sys_env is a template-only concern, so the base gets None here.
        super(GlobalConfig, self).__init__(identification, resources, environ, None)

    @property
    def identification(self) -> Identification:
        """Identification settings."""
        return self.__identification

    @property
    def resources(self) -> ResourceLimit:
        """Resource-limit settings."""
        return self.__resources

    @property
    def environ(self) -> Mapping[str, str]:
        """Resolved environment variables."""
        return self.__environ

    def __call__(self):
        """
        get global config information
        :return: tuple of (identification, resources, environ)
        """
        return self.__identification, self.__resources, self.__environ
from abc import ABCMeta
from fnmatch import filter as fnfilter
from typing import Optional, Mapping, Union
from hbutils.model import get_repr_info
from hbutils.string import truncate
from ..base import _process_environ
from ...control.model import Identification, ResourceLimit
class _IGlobalConfig(metaclass=ABCMeta):
    """Shared base for global-config objects: stores the common fields and
    provides a compact ``repr`` that omits empty/default values."""

    def __init__(self, identification, resources, environ, use_sys_env):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        :param use_sys_env: use environment variables from local environ
        """
        self.__identification = identification
        self.__resources = resources
        self.__environ = environ
        self.__use_sys_env = use_sys_env

    def __repr__(self):
        """
        :return: get representation string
        """
        def _short(value, width):
            # Shorten a value's repr so long contents do not flood the output.
            return truncate(repr(value), width=width, show_length=True, tail_length=16)

        return get_repr_info(
            cls=self.__class__,
            args=[
                ('identification',
                 lambda: _short(self.__identification, 48),
                 # Shown only when set and different from the empty default.
                 lambda: self.__identification and self.__identification != Identification.loads({})),
                ('resources',
                 lambda: _short(self.__resources, 64),
                 lambda: self.__resources and self.__resources != ResourceLimit.loads({})),
                ('environ',
                 lambda: _short(self.__environ, 64),
                 lambda: self.__environ),
                ('use_sys_env',
                 lambda: _short(self.__use_sys_env, 64),
                 lambda: self.__use_sys_env is not None),
            ]
        )
def _process_use_sys_env(use_sys_env) -> Union[set, bool]:
if isinstance(use_sys_env, (list, tuple, set)):
return set(use_sys_env)
elif isinstance(use_sys_env, bool) or use_sys_env is None:
return not not use_sys_env
else:
raise TypeError(
'Bool or list expected but {actual} found for use_sys_env.'.format(actual=repr(type(use_sys_env).__name__)))
def _load_local_environ(use_sys_env) -> Mapping[str, str]:
    """
    Build the environment mapping imported from the local process environment.

    :param use_sys_env: bool / pattern collection, normalized via
        :func:`_process_use_sys_env`.
    :return: the selected environment variables as a plain dict.
    """
    use_sys_env = _process_use_sys_env(use_sys_env)
    _current_env = dict(os.environ)
    if isinstance(use_sys_env, set):
        # Collect every variable name matched by at least one pattern.
        _keys = set()
        for pattern in use_sys_env:
            # fnmatch.filter accepts any iterable of names; the original
            # list(_current_env.keys()) copy was unnecessary.
            _keys |= set(fnfilter(_current_env, pattern))
        return {key: value for key, value in _current_env.items() if key in _keys}
    else:
        # True -> whole environment, False -> nothing.
        return _current_env if use_sys_env else {}
class GlobalConfigTemplate(_IGlobalConfig):
    """Template for the global configuration; calling an instance resolves the
    environment layers and produces a concrete :class:`GlobalConfig`."""

    def __init__(self, identification=None, resources=None, environ=None, use_sys_env=None):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        :param use_sys_env: use environment variables from local environ
        """
        self.__identification = Identification.loads(identification)
        self.__resources = ResourceLimit.loads(resources)
        self.__environ = _process_environ(environ)
        self.__use_sys_env = _process_use_sys_env(use_sys_env)
        # Mirror the parsed values into the base class (note: name mangling
        # means the base keeps its own private copies for __repr__).
        _IGlobalConfig.__init__(self, self.__identification, self.__resources, self.__environ, self.__use_sys_env)

    @property
    def identification(self) -> Identification:
        # Parsed identification settings.
        return self.__identification

    @property
    def resources(self) -> ResourceLimit:
        # Parsed resource-limit settings.
        return self.__resources

    @property
    def environ(self) -> Mapping[str, str]:
        # Template-level environment variables (before call-time merging).
        return self.__environ

    @property
    def use_sys_env(self) -> Union[set, bool]:
        # Normalized system-environment import policy (set of patterns or bool).
        return self.__use_sys_env

    def __call__(self, environ: Optional[Mapping[str, str]] = None,
                 environ_after: Optional[Mapping[str, str]] = None, **kwargs) -> 'GlobalConfig':
        """
        generate global config
        :param environ: environment variable
        :param environ_after:
        :param kwargs: other arguments
        :return: global config
        """
        # The environment is built up in stages via ``_process_environ``:
        # local system environ, then call-time ``environ``, then the
        # template's own environ, then ``environ_after``.  (Which layer wins
        # on key collisions depends on ``_process_environ``'s merge
        # semantics — not visible here; confirm before relying on it.)
        _environ = _load_local_environ(self.__use_sys_env)
        _environ = _process_environ(environ, _environ, enable_ext=True)
        _environ = _process_environ(self.__environ, _environ, enable_ext=True)
        _environ = _process_environ(environ_after, _environ, enable_ext=True)
        return GlobalConfig(
            identification=self.__identification,
            resources=self.__resources, environ=_environ,
        )

    @classmethod
    def loads(cls, data) -> 'GlobalConfigTemplate':
        """
        load global config template from data
        :param data: raw data
        :return: global config template
        """
        # Accept None (empty template), an existing instance (pass-through),
        # or a dict of constructor keyword arguments.
        data = data or {}
        if isinstance(data, cls):
            return data
        elif isinstance(data, dict):
            return cls(**data)
        else:
            raise TypeError('Json or {type} expected but {actual} found.'.format(
                type=cls.__name__, actual=repr(type(data).__name__)))
class GlobalConfig(_IGlobalConfig):
    """Concrete, fully-resolved global configuration produced from a template."""

    def __init__(self, identification, resources, environ):
        """
        :param identification: identification
        :param resources: resource limits
        :param environ: environment variable
        """
        self.__identification = identification
        self.__resources = resources
        self.__environ = environ
        # use_sys_env is a template-only concern, so the base gets None here.
        super(GlobalConfig, self).__init__(identification, resources, environ, None)

    @property
    def identification(self) -> Identification:
        """Identification settings."""
        return self.__identification

    @property
    def resources(self) -> ResourceLimit:
        """Resource-limit settings."""
        return self.__resources

    @property
    def environ(self) -> Mapping[str, str]:
        """Resolved environment variables."""
        return self.__environ

    def __call__(self):
        """
        get global config information
        :return: tuple of (identification, resources, environ)
        """
        return self.__identification, self.__resources, self.__environ
from pprint import pformat
from six import iteritems
class IscsiInterfaceChangeableProperties(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
IscsiInterfaceChangeableProperties - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'tcp_listen_port': 'list[int]', # (required parameter)
'ipv4_address': 'list[str]', # (required parameter)
'ipv4_subnet_mask': 'list[str]', # (required parameter)
'ipv4_gateway_address': 'list[str]', # (required parameter)
'ipv4_address_config_method': 'list[str]', # (required parameter)
'maximum_frame_payload_size': 'list[int]', # (required parameter)
'ipv4_vlan_id': 'list[SettingControl]', # (required parameter)
'ipv4_outbound_packet_priority': 'list[SettingControl]', # (required parameter)
'ipv4_enabled': 'list[bool]', # (required parameter)
'ipv6_enabled': 'list[bool]', # (required parameter)
'ipv6_local_addresses': 'list[IpV6AddressDataBundle]', # (required parameter)
'ipv6_routable_addresses': 'list[IpV6AddressDataBundle]', # (required parameter)
'ipv6_port_router_address': 'list[IpV6AddressData]', # (required parameter)
'ipv6_address_config_method': 'list[str]', # (required parameter)
'ipv6_outbound_packet_priority': 'list[SettingControl]', # (required parameter)
'ipv6_vlan_id': 'list[SettingControl]', # (required parameter)
'ipv6_hop_limit': 'list[int]', # (required parameter)
'ipv6_nd_reachable_time': 'list[int]', # (required parameter)
'ipv6_nd_retransmit_time': 'list[int]', # (required parameter)
'ipv6_nd_stale_timeout': 'list[int]', # (required parameter)
'ipv6_duplicate_address_detection_attempts': 'list[int]', # (required parameter)
'maximum_interface_speed': 'list[str]'
}
self.attribute_map = {
'tcp_listen_port': 'tcpListenPort', # (required parameter)
'ipv4_address': 'ipv4Address', # (required parameter)
'ipv4_subnet_mask': 'ipv4SubnetMask', # (required parameter)
'ipv4_gateway_address': 'ipv4GatewayAddress', # (required parameter)
'ipv4_address_config_method': 'ipv4AddressConfigMethod', # (required parameter)
'maximum_frame_payload_size': 'maximumFramePayloadSize', # (required parameter)
'ipv4_vlan_id': 'ipv4VlanId', # (required parameter)
'ipv4_outbound_packet_priority': 'ipv4OutboundPacketPriority', # (required parameter)
'ipv4_enabled': 'ipv4Enabled', # (required parameter)
'ipv6_enabled': 'ipv6Enabled', # (required parameter)
'ipv6_local_addresses': 'ipv6LocalAddresses', # (required parameter)
'ipv6_routable_addresses': 'ipv6RoutableAddresses', # (required parameter)
'ipv6_port_router_address': 'ipv6PortRouterAddress', # (required parameter)
'ipv6_address_config_method': 'ipv6AddressConfigMethod', # (required parameter)
'ipv6_outbound_packet_priority': 'ipv6OutboundPacketPriority', # (required parameter)
'ipv6_vlan_id': 'ipv6VlanId', # (required parameter)
'ipv6_hop_limit': 'ipv6HopLimit', # (required parameter)
'ipv6_nd_reachable_time': 'ipv6NdReachableTime', # (required parameter)
'ipv6_nd_retransmit_time': 'ipv6NdRetransmitTime', # (required parameter)
'ipv6_nd_stale_timeout': 'ipv6NdStaleTimeout', # (required parameter)
'ipv6_duplicate_address_detection_attempts': 'ipv6DuplicateAddressDetectionAttempts', # (required parameter)
'maximum_interface_speed': 'maximumInterfaceSpeed'
}
self._tcp_listen_port = None
self._ipv4_address = None
self._ipv4_subnet_mask = None
self._ipv4_gateway_address = None
self._ipv4_address_config_method = None
self._maximum_frame_payload_size = None
self._ipv4_vlan_id = None
self._ipv4_outbound_packet_priority = None
self._ipv4_enabled = None
self._ipv6_enabled = None
self._ipv6_local_addresses = None
self._ipv6_routable_addresses = None
self._ipv6_port_router_address = None
self._ipv6_address_config_method = None
self._ipv6_outbound_packet_priority = None
self._ipv6_vlan_id = None
self._ipv6_hop_limit = None
self._ipv6_nd_reachable_time = None
self._ipv6_nd_retransmit_time = None
self._ipv6_nd_stale_timeout = None
self._ipv6_duplicate_address_detection_attempts = None
self._maximum_interface_speed = None
@property
def tcp_listen_port(self):
"""
Gets the tcp_listen_port of this IscsiInterfaceChangeableProperties.
The tcp port number on which to listen for incoming connections.
:return: The tcp_listen_port of this IscsiInterfaceChangeableProperties.
:rtype: list[int]
:required/optional: required
"""
return self._tcp_listen_port
@tcp_listen_port.setter
def tcp_listen_port(self, tcp_listen_port):
"""
Sets the tcp_listen_port of this IscsiInterfaceChangeableProperties.
The tcp port number on which to listen for incoming connections.
:param tcp_listen_port: The tcp_listen_port of this IscsiInterfaceChangeableProperties.
:type: list[int]
"""
self._tcp_listen_port = tcp_listen_port
@property
def ipv4_address(self):
"""
Gets the ipv4_address of this IscsiInterfaceChangeableProperties.
The IPV4 address for the interface.
:return: The ipv4_address of this IscsiInterfaceChangeableProperties.
:rtype: list[str]
:required/optional: required
"""
return self._ipv4_address
@ipv4_address.setter
def ipv4_address(self, ipv4_address):
"""
Sets the ipv4_address of this IscsiInterfaceChangeableProperties.
The IPV4 address for the interface.
:param ipv4_address: The ipv4_address of this IscsiInterfaceChangeableProperties.
:type: list[str]
"""
self._ipv4_address = ipv4_address
@property
def ipv4_subnet_mask(self):
"""
Gets the ipv4_subnet_mask of this IscsiInterfaceChangeableProperties.
The IPV4 subnet mask for the interface.
:return: The ipv4_subnet_mask of this IscsiInterfaceChangeableProperties.
:rtype: list[str]
:required/optional: required
"""
return self._ipv4_subnet_mask
@ipv4_subnet_mask.setter
def ipv4_subnet_mask(self, ipv4_subnet_mask):
"""
Sets the ipv4_subnet_mask of this IscsiInterfaceChangeableProperties.
The IPV4 subnet mask for the interface.
:param ipv4_subnet_mask: The ipv4_subnet_mask of this IscsiInterfaceChangeableProperties.
:type: list[str]
"""
self._ipv4_subnet_mask = ipv4_subnet_mask
@property
def ipv4_gateway_address(self):
"""
Gets the ipv4_gateway_address of this IscsiInterfaceChangeableProperties.
The gateway IPV4 address for the interface.
:return: The ipv4_gateway_address of this IscsiInterfaceChangeableProperties.
:rtype: list[str]
:required/optional: required
"""
return self._ipv4_gateway_address
@ipv4_gateway_address.setter
def ipv4_gateway_address(self, ipv4_gateway_address):
"""
Sets the ipv4_gateway_address of this IscsiInterfaceChangeableProperties.
The gateway IPV4 address for the interface.
:param ipv4_gateway_address: The ipv4_gateway_address of this IscsiInterfaceChangeableProperties.
:type: list[str]
"""
self._ipv4_gateway_address = ipv4_gateway_address
@property
def ipv4_address_config_method(self):
"""
Gets the ipv4_address_config_method of this IscsiInterfaceChangeableProperties.
The IPV4 configuration method for the interface. The method is either by static setting of the IP address (IPV4_CONFIG_STATIC) or by use of the dynamic host configuration protocol (IPV4_CONFIG_DHCP). Whenever there is a transition of the configuration method from IPV4_CONFIG_STATIC to IPV4_CONFIG_DHCP, the storage array performs the equivalent of a refreshIscsiDhcpParameters operation.
:return: The ipv4_address_config_method of this IscsiInterfaceChangeableProperties.
:rtype: list[str]
:required/optional: required
"""
return self._ipv4_address_config_method
@ipv4_address_config_method.setter
def ipv4_address_config_method(self, ipv4_address_config_method):
"""
Sets the ipv4_address_config_method of this IscsiInterfaceChangeableProperties.
The IPV4 configuration method for the interface. The method is either by static setting of the IP address (IPV4_CONFIG_STATIC) or by use of the dynamic host configuration protocol (IPV4_CONFIG_DHCP). Whenever there is a transition of the configuration method from IPV4_CONFIG_STATIC to IPV4_CONFIG_DHCP, the storage array performs the equivalent of a refreshIscsiDhcpParameters operation.
:param ipv4_address_config_method: The ipv4_address_config_method of this IscsiInterfaceChangeableProperties.
:type: list[str]
"""
self._ipv4_address_config_method = ipv4_address_config_method
@property
def maximum_frame_payload_size(self):
"""
Gets the maximum_frame_payload_size of this IscsiInterfaceChangeableProperties.
The maximum size of the payload section in an Ethernet frame.
:return: The maximum_frame_payload_size of this IscsiInterfaceChangeableProperties.
:rtype: list[int]
:required/optional: required
"""
return self._maximum_frame_payload_size
@maximum_frame_payload_size.setter
def maximum_frame_payload_size(self, maximum_frame_payload_size):
"""
Sets the maximum_frame_payload_size of this IscsiInterfaceChangeableProperties.
The maximum size of the payload section in an Ethernet frame.
:param maximum_frame_payload_size: The maximum_frame_payload_size of this IscsiInterfaceChangeableProperties.
:type: list[int]
"""
self._maximum_frame_payload_size = maximum_frame_payload_size
@property
def ipv4_vlan_id(self):
"""
Gets the ipv4_vlan_id of this IscsiInterfaceChangeableProperties.
Settings that govern the value of the IPV4 VLAN identifier for the interface.
:return: The ipv4_vlan_id of this IscsiInterfaceChangeableProperties.
:rtype: list[SettingControl]
:required/optional: required
"""
return self._ipv4_vlan_id
@ipv4_vlan_id.setter
def ipv4_vlan_id(self, ipv4_vlan_id):
"""
Sets the ipv4_vlan_id of this IscsiInterfaceChangeableProperties.
Settings that govern the value of the IPV4 VLAN identifier for the interface.
:param ipv4_vlan_id: The ipv4_vlan_id of this IscsiInterfaceChangeableProperties.
:type: list[SettingControl]
"""
self._ipv4_vlan_id = ipv4_vlan_id
@property
def ipv4_outbound_packet_priority(self):
"""
Gets the ipv4_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
Settings that govern the priority to associate with outbound IPV4 packets sent over the interface.
:return: The ipv4_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
:rtype: list[SettingControl]
:required/optional: required
"""
return self._ipv4_outbound_packet_priority
@ipv4_outbound_packet_priority.setter
def ipv4_outbound_packet_priority(self, ipv4_outbound_packet_priority):
"""
Sets the ipv4_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
Settings that govern the priority to associate with outbound IPV4 packets sent over the interface.
:param ipv4_outbound_packet_priority: The ipv4_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
:type: list[SettingControl]
"""
self._ipv4_outbound_packet_priority = ipv4_outbound_packet_priority
@property
def ipv4_enabled(self):
"""
Gets the ipv4_enabled of this IscsiInterfaceChangeableProperties.
A boolean which, if set to true, indicates that IPV4 addressing should be enabled for the interface.
:return: The ipv4_enabled of this IscsiInterfaceChangeableProperties.
:rtype: list[bool]
:required/optional: required
"""
return self._ipv4_enabled
@ipv4_enabled.setter
def ipv4_enabled(self, ipv4_enabled):
"""
Sets the ipv4_enabled of this IscsiInterfaceChangeableProperties.
A boolean which, if set to true, indicates that IPV4 addressing should be enabled for the interface.
:param ipv4_enabled: The ipv4_enabled of this IscsiInterfaceChangeableProperties.
:type: list[bool]
"""
self._ipv4_enabled = ipv4_enabled
@property
def ipv6_enabled(self):
"""
Gets the ipv6_enabled of this IscsiInterfaceChangeableProperties.
A boolean which, if set to true, indicates that IPV6 addressing should be enabled for the interface.
:return: The ipv6_enabled of this IscsiInterfaceChangeableProperties.
:rtype: list[bool]
:required/optional: required
"""
return self._ipv6_enabled
@ipv6_enabled.setter
def ipv6_enabled(self, ipv6_enabled):
"""
Sets the ipv6_enabled of this IscsiInterfaceChangeableProperties.
A boolean which, if set to true, indicates that IPV6 addressing should be enabled for the interface.
:param ipv6_enabled: The ipv6_enabled of this IscsiInterfaceChangeableProperties.
:type: list[bool]
"""
self._ipv6_enabled = ipv6_enabled
@property
def ipv6_local_addresses(self):
"""
Gets the ipv6_local_addresses of this IscsiInterfaceChangeableProperties.
The set of IPV6 local addresses that are to be assigned to the interface. This set completely replaces the previous set.
:return: The ipv6_local_addresses of this IscsiInterfaceChangeableProperties.
:rtype: list[IpV6AddressDataBundle]
:required/optional: required
"""
return self._ipv6_local_addresses
@ipv6_local_addresses.setter
def ipv6_local_addresses(self, ipv6_local_addresses):
"""
Sets the ipv6_local_addresses of this IscsiInterfaceChangeableProperties.
The set of IPV6 local addresses that are to be assigned to the interface. This set completely replaces the previous set.
:param ipv6_local_addresses: The ipv6_local_addresses of this IscsiInterfaceChangeableProperties.
:type: list[IpV6AddressDataBundle]
"""
self._ipv6_local_addresses = ipv6_local_addresses
@property
def ipv6_routable_addresses(self):
"""
Gets the ipv6_routable_addresses of this IscsiInterfaceChangeableProperties.
The set of IPV6 routable addresses that are to be assigned to the interface. This set completely replaces the previous set.
:return: The ipv6_routable_addresses of this IscsiInterfaceChangeableProperties.
:rtype: list[IpV6AddressDataBundle]
:required/optional: required
"""
return self._ipv6_routable_addresses
@ipv6_routable_addresses.setter
def ipv6_routable_addresses(self, ipv6_routable_addresses):
"""
Sets the ipv6_routable_addresses of this IscsiInterfaceChangeableProperties.
The set of IPV6 routable addresses that are to be assigned to the interface. This set completely replaces the previous set.
:param ipv6_routable_addresses: The ipv6_routable_addresses of this IscsiInterfaceChangeableProperties.
:type: list[IpV6AddressDataBundle]
"""
self._ipv6_routable_addresses = ipv6_routable_addresses
@property
def ipv6_port_router_address(self):
"""
Gets the ipv6_port_router_address of this IscsiInterfaceChangeableProperties.
The address to set for the IPV6 port router.
:return: The ipv6_port_router_address of this IscsiInterfaceChangeableProperties.
:rtype: list[IpV6AddressData]
:required/optional: required
"""
return self._ipv6_port_router_address
@ipv6_port_router_address.setter
def ipv6_port_router_address(self, ipv6_port_router_address):
"""
Sets the ipv6_port_router_address of this IscsiInterfaceChangeableProperties.
The address to set for the IPV6 port router.
:param ipv6_port_router_address: The ipv6_port_router_address of this IscsiInterfaceChangeableProperties.
:type: list[IpV6AddressData]
"""
self._ipv6_port_router_address = ipv6_port_router_address
@property
def ipv6_address_config_method(self):
"""
Gets the ipv6_address_config_method of this IscsiInterfaceChangeableProperties.
The method to use in configuring IPV6 addresses for the interface.
:return: The ipv6_address_config_method of this IscsiInterfaceChangeableProperties.
:rtype: list[str]
:required/optional: required
"""
return self._ipv6_address_config_method
@ipv6_address_config_method.setter
def ipv6_address_config_method(self, ipv6_address_config_method):
"""
Sets the ipv6_address_config_method of this IscsiInterfaceChangeableProperties.
The method to use in configuring IPV6 addresses for the interface.
:param ipv6_address_config_method: The ipv6_address_config_method of this IscsiInterfaceChangeableProperties.
:type: list[str]
"""
self._ipv6_address_config_method = ipv6_address_config_method
@property
def ipv6_outbound_packet_priority(self):
"""
Gets the ipv6_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
Settings that govern priority assignment for packets sent over the interface.
:return: The ipv6_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
:rtype: list[SettingControl]
:required/optional: required
"""
return self._ipv6_outbound_packet_priority
@ipv6_outbound_packet_priority.setter
def ipv6_outbound_packet_priority(self, ipv6_outbound_packet_priority):
"""
Sets the ipv6_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
Settings that govern priority assignment for packets sent over the interface.
:param ipv6_outbound_packet_priority: The ipv6_outbound_packet_priority of this IscsiInterfaceChangeableProperties.
:type: list[SettingControl]
"""
self._ipv6_outbound_packet_priority = ipv6_outbound_packet_priority
@property
def ipv6_vlan_id(self):
"""
Gets the ipv6_vlan_id of this IscsiInterfaceChangeableProperties.
Settings that govern VLAN identifier assignment for packets sent over the interface.
:return: The ipv6_vlan_id of this IscsiInterfaceChangeableProperties.
:rtype: list[SettingControl]
:required/optional: required
"""
return self._ipv6_vlan_id
@ipv6_vlan_id.setter
def ipv6_vlan_id(self, ipv6_vlan_id):
"""
Sets the ipv6_vlan_id of this IscsiInterfaceChangeableProperties.
Settings that govern VLAN identifier assignment for packets sent over the interface.
:param ipv6_vlan_id: The ipv6_vlan_id of this IscsiInterfaceChangeableProperties.
:type: list[SettingControl]
"""
self._ipv6_vlan_id = ipv6_vlan_id
@property
def ipv6_hop_limit(self):
"""
Gets the ipv6_hop_limit of this IscsiInterfaceChangeableProperties.
The hop limit to use in IPV6 packets sent over the interface.
:return: The ipv6_hop_limit of this IscsiInterfaceChangeableProperties.
:rtype: list[int]
:required/optional: required
"""
return self._ipv6_hop_limit
@ipv6_hop_limit.setter
def ipv6_hop_limit(self, ipv6_hop_limit):
"""
Sets the ipv6_hop_limit of this IscsiInterfaceChangeableProperties.
The hop limit to use in IPV6 packets sent over the interface.
:param ipv6_hop_limit: The ipv6_hop_limit of this IscsiInterfaceChangeableProperties.
:type: list[int]
"""
self._ipv6_hop_limit = ipv6_hop_limit
@property
def ipv6_nd_reachable_time(self):
    """Milliseconds within which a neighbor is assumed to be reachable.

    :return: The current ``list[int]`` (None if unset).
    """
    return self._ipv6_nd_reachable_time

@ipv6_nd_reachable_time.setter
def ipv6_nd_reachable_time(self, value):
    """Set the neighbor-discovery reachable time.

    :param value: The new ``list[int]`` time in milliseconds within
        which a neighbor is assumed to be reachable.
    """
    self._ipv6_nd_reachable_time = value
@property
def ipv6_nd_retransmit_time(self):
    """Milliseconds between neighbor solicitation probes.

    :return: The current ``list[int]`` (None if unset).
    """
    return self._ipv6_nd_retransmit_time

@ipv6_nd_retransmit_time.setter
def ipv6_nd_retransmit_time(self, value):
    """Set the neighbor-discovery retransmit interval.

    :param value: The new ``list[int]`` number of milliseconds between
        neighbor solicitation probes.
    """
    self._ipv6_nd_retransmit_time = value
@property
def ipv6_nd_stale_timeout(self):
    """Milliseconds after which unverified neighbor info is considered stale.

    :return: The current ``list[int]`` (None if unset).
    """
    return self._ipv6_nd_stale_timeout

@ipv6_nd_stale_timeout.setter
def ipv6_nd_stale_timeout(self, value):
    """Set the neighbor-discovery stale timeout.

    :param value: The new ``list[int]`` time in milliseconds after which
        information for a neighbor that cannot be verified as reachable
        is considered stale.
    """
    self._ipv6_nd_stale_timeout = value
@property
def ipv6_duplicate_address_detection_attempts(self):
    """Number of neighbor-solicitation messages used to verify IP uniqueness.

    :return: The current ``list[int]`` (None if unset).
    """
    return self._ipv6_duplicate_address_detection_attempts

@ipv6_duplicate_address_detection_attempts.setter
def ipv6_duplicate_address_detection_attempts(self, value):
    """Set the duplicate-address-detection attempt count.

    :param value: The new ``list[int]`` number of neighbor-solicitation
        messages to send when checking IP address uniqueness.
    """
    self._ipv6_duplicate_address_detection_attempts = value
@property
def maximum_interface_speed(self):
    """Maximum interface speed setting.

    Ignored by the firmware when autoconfiguration is supported (see the
    autoconfigSupport field in the EthernetInterfaceData structure).

    :return: The current ``list[str]`` (None if unset).
    """
    return self._maximum_interface_speed

@maximum_interface_speed.setter
def maximum_interface_speed(self, value):
    """Set the maximum interface speed.

    :param value: The new ``list[str]`` speed setting; ignored when the
        interface supports autoconfiguration.
    """
    self._maximum_interface_speed = value
def to_dict(self):
    """Return the model properties as a plain dict.

    Nested swagger models (anything exposing ``to_dict``) are serialized
    recursively, including models found inside lists and dict values.
    Replaces the generated ``six.iteritems`` call with ``dict.items()``,
    removing the runtime dependency on ``six``, and recurses uniformly
    instead of handling only one level of list/dict nesting.

    :return: dict mapping attribute names to serialized values
    """
    def _serialize(value):
        # Depth-first conversion; plain values pass through unchanged.
        if isinstance(value, list):
            return [_serialize(v) for v in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: _serialize(val) for key, val in value.items()}
        return value

    return {attr: _serialize(getattr(self, attr)) for attr in self.swagger_types}
def to_str(self):
    """Return a human-readable, pretty-printed rendering of the model."""
    properties = self.to_dict()
    return pformat(properties)
def __repr__(self):
    """Return the printable representation used by `print` and `pprint`.

    The generated ``if self is None: return None`` guard was unreachable
    (``self`` is never None in a bound-method call) and, had it fired,
    would have violated the ``__repr__`` contract, which requires a str;
    it has been removed.
    """
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is the same model type with equal state.

    Fixes two generated defects: the method returned ``None`` (not a
    bool) for a None operand, and it compared ``__dict__`` without a
    type check, so unrelated classes with identical attributes compared
    equal.  Returning ``NotImplemented`` for foreign types lets Python
    fall back to identity comparison, which is still falsy for the
    previous None-operand case.
    """
    if not isinstance(other, self.__class__):
        return NotImplemented
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return True when the two objects are not equal.

    Delegates to ``__eq__`` so the two comparisons can never disagree.
    The original closing line carried extraction garbage (a dataset row
    separator and a stray import fused onto the return statement); it
    has been restored to the clean delegation.
    """
    return not self == other
from six import iteritems
class IscsiInterfaceChangeableProperties(object):
    """User-changeable properties of an iSCSI Ethernet interface.

    NOTE: This model was originally produced by the swagger code
    generator.  The generated accessor boilerplate (an identical
    getter/setter pair per attribute) has been collapsed into a small
    property factory, ``six.iteritems`` has been replaced by
    ``dict.items()``, and the comparison/representation dunders have
    been fixed.  The public interface -- attribute names, property
    semantics, ``to_dict``/``to_str`` behavior -- is unchanged.
    """

    def __init__(self):
        """Record swagger type / JSON-key metadata and initialize every
        backing attribute to None.
        """
        # Maps each python attribute to its swagger-declared type.
        self.swagger_types = {
            'tcp_listen_port': 'list[int]',
            'ipv4_address': 'list[str]',
            'ipv4_subnet_mask': 'list[str]',
            'ipv4_gateway_address': 'list[str]',
            'ipv4_address_config_method': 'list[str]',
            'maximum_frame_payload_size': 'list[int]',
            'ipv4_vlan_id': 'list[SettingControl]',
            'ipv4_outbound_packet_priority': 'list[SettingControl]',
            'ipv4_enabled': 'list[bool]',
            'ipv6_enabled': 'list[bool]',
            'ipv6_local_addresses': 'list[IpV6AddressDataBundle]',
            'ipv6_routable_addresses': 'list[IpV6AddressDataBundle]',
            'ipv6_port_router_address': 'list[IpV6AddressData]',
            'ipv6_address_config_method': 'list[str]',
            'ipv6_outbound_packet_priority': 'list[SettingControl]',
            'ipv6_vlan_id': 'list[SettingControl]',
            'ipv6_hop_limit': 'list[int]',
            'ipv6_nd_reachable_time': 'list[int]',
            'ipv6_nd_retransmit_time': 'list[int]',
            'ipv6_nd_stale_timeout': 'list[int]',
            'ipv6_duplicate_address_detection_attempts': 'list[int]',
            'maximum_interface_speed': 'list[str]',
        }
        # Maps each python attribute to its JSON key in the API payload.
        self.attribute_map = {
            'tcp_listen_port': 'tcpListenPort',
            'ipv4_address': 'ipv4Address',
            'ipv4_subnet_mask': 'ipv4SubnetMask',
            'ipv4_gateway_address': 'ipv4GatewayAddress',
            'ipv4_address_config_method': 'ipv4AddressConfigMethod',
            'maximum_frame_payload_size': 'maximumFramePayloadSize',
            'ipv4_vlan_id': 'ipv4VlanId',
            'ipv4_outbound_packet_priority': 'ipv4OutboundPacketPriority',
            'ipv4_enabled': 'ipv4Enabled',
            'ipv6_enabled': 'ipv6Enabled',
            'ipv6_local_addresses': 'ipv6LocalAddresses',
            'ipv6_routable_addresses': 'ipv6RoutableAddresses',
            'ipv6_port_router_address': 'ipv6PortRouterAddress',
            'ipv6_address_config_method': 'ipv6AddressConfigMethod',
            'ipv6_outbound_packet_priority': 'ipv6OutboundPacketPriority',
            'ipv6_vlan_id': 'ipv6VlanId',
            'ipv6_hop_limit': 'ipv6HopLimit',
            'ipv6_nd_reachable_time': 'ipv6NdReachableTime',
            'ipv6_nd_retransmit_time': 'ipv6NdRetransmitTime',
            'ipv6_nd_stale_timeout': 'ipv6NdStaleTimeout',
            'ipv6_duplicate_address_detection_attempts': 'ipv6DuplicateAddressDetectionAttempts',
            'maximum_interface_speed': 'maximumInterfaceSpeed',
        }
        # Every declared attribute starts unset.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)

    def _accessor(name, doc):  # class-body helper; removed with `del` below
        """Build a read/write property proxying the private '_<name>' slot."""
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            setattr(self, '_' + name, value)

        return property(_get, _set, doc=doc)

    tcp_listen_port = _accessor('tcp_listen_port', 'The tcp port number on which to listen for incoming connections.')
    ipv4_address = _accessor('ipv4_address', 'The IPV4 address for the interface.')
    ipv4_subnet_mask = _accessor('ipv4_subnet_mask', 'The IPV4 subnet mask for the interface.')
    ipv4_gateway_address = _accessor('ipv4_gateway_address', 'The gateway IPV4 address for the interface.')
    ipv4_address_config_method = _accessor('ipv4_address_config_method', 'The IPV4 configuration method for the interface (IPV4_CONFIG_STATIC or IPV4_CONFIG_DHCP).')
    maximum_frame_payload_size = _accessor('maximum_frame_payload_size', 'The maximum size of the payload section in an Ethernet frame.')
    ipv4_vlan_id = _accessor('ipv4_vlan_id', 'Settings that govern the value of the IPV4 VLAN identifier for the interface.')
    ipv4_outbound_packet_priority = _accessor('ipv4_outbound_packet_priority', 'Settings that govern the priority of outbound IPV4 packets sent over the interface.')
    ipv4_enabled = _accessor('ipv4_enabled', 'True if IPV4 addressing should be enabled for the interface.')
    ipv6_enabled = _accessor('ipv6_enabled', 'True if IPV6 addressing should be enabled for the interface.')
    ipv6_local_addresses = _accessor('ipv6_local_addresses', 'The set of IPV6 local addresses assigned to the interface; replaces the previous set.')
    ipv6_routable_addresses = _accessor('ipv6_routable_addresses', 'The set of IPV6 routable addresses assigned to the interface; replaces the previous set.')
    ipv6_port_router_address = _accessor('ipv6_port_router_address', 'The address to set for the IPV6 port router.')
    ipv6_address_config_method = _accessor('ipv6_address_config_method', 'The method to use in configuring IPV6 addresses for the interface.')
    ipv6_outbound_packet_priority = _accessor('ipv6_outbound_packet_priority', 'Settings that govern priority assignment for packets sent over the interface.')
    ipv6_vlan_id = _accessor('ipv6_vlan_id', 'Settings that govern VLAN identifier assignment for packets sent over the interface.')
    ipv6_hop_limit = _accessor('ipv6_hop_limit', 'The hop limit to use in IPV6 packets sent over the interface.')
    ipv6_nd_reachable_time = _accessor('ipv6_nd_reachable_time', 'Milliseconds within which a neighbor is assumed to be reachable.')
    ipv6_nd_retransmit_time = _accessor('ipv6_nd_retransmit_time', 'Milliseconds between neighbor solicitation probes.')
    ipv6_nd_stale_timeout = _accessor('ipv6_nd_stale_timeout', 'Milliseconds after which unverified neighbor information is considered stale.')
    ipv6_duplicate_address_detection_attempts = _accessor('ipv6_duplicate_address_detection_attempts', 'Number of neighbor-solicitation messages sent to check IP address uniqueness.')
    maximum_interface_speed = _accessor('maximum_interface_speed', 'The maximum interface speed; ignored when autoconfiguration is supported.')

    del _accessor  # not part of the public interface

    def to_dict(self):
        """Return the model properties as a dict, serializing nested
        models (anything exposing ``to_dict``) recursively.
        """
        def _serialize(value):
            # Depth-first conversion; plain values pass through unchanged.
            if isinstance(value, list):
                return [_serialize(v) for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: _serialize(v) for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string rendering of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Printable representation used by `print` and `pprint`.

        The generated ``self is None`` guard was unreachable and would
        have broken the ``__repr__`` str contract; it has been dropped.
        """
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is the same model type with equal state.

        Returning ``NotImplemented`` for foreign types (instead of the
        generated ``None``) lets Python fall back to identity comparison,
        which is still falsy for the previous None-operand case.
        """
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
import argparse
import datetime as dt
import json
from collections import defaultdict
from pathlib import Path
from typing import Dict
import pip._vendor.pkg_resources as pkg_resources
import pip._vendor.toml as toml
from dnevnik2 import Dnevnik2
def get_subject(item, subjects: Dict[str, str]) -> str:
    """Resolve the display name for *item*'s subject.

    Prefers the configured override in *subjects* (keyed by stringified
    subject id); falls back to the record's own ``subject_name``.
    """
    key = str(item['subject_id'])
    try:
        return subjects[key]
    except KeyError:
        return item['subject_name']
def to_date(text):
    """Parse a ``DD.MM.YYYY`` string into a :class:`datetime.date`."""
    parsed = dt.datetime.strptime(text, '%d.%m.%Y')
    return parsed.date()
def main():
    """Fetch current-quarter marks from dnevnik2 and render two reports.

    Side effects: writes the raw API response to ``last_res.txt`` and a
    dated, human-readable marks/averages report to ``marks.<today>.txt``
    inside the chosen output directory; exits with status 1 when no
    marks were found.
    """
    args = _parse_args()
    base_dir: Path = args.output_dir

    with args.config_path.open('r', encoding='utf-8') as config_file:
        config = toml.load(config_file)

    dnevnik = Dnevnik2.make_from_cookies_file(args.cookies_path)
    data = dnevnik.fetch_marks_for_current_quarter()

    # Keep the raw payload around for debugging / reprocessing.
    with (base_dir / 'last_res.txt').open('w', encoding='utf-8') as raw_file:
        print(json.dumps(data, ensure_ascii=False, indent=2), file=raw_file)

    out_lines, grouped = _collect_marks(data, config['subjects'])
    if not out_lines:
        # `exit()` is injected by the `site` module and may be absent
        # (e.g. under `python -S`); raising SystemExit is the portable
        # way to set the process exit status.
        raise SystemExit(1)

    with (base_dir / f'marks.{dt.date.today()}.txt').open('w', encoding='utf-8') as report:
        for date, line, comment in sorted(out_lines):
            print(f'{date} {line} {comment}', file=report)
        report.write('\n\n')
        for s_name in sorted(grouped):
            marks = grouped[s_name]
            avg = sum(marks) / len(marks)
            s_marks = ' '.join(str(mark) for mark in marks)
            print(f'{s_name:25s} : {avg:0.3f} {s_marks}', file=report)


def _parse_args():
    """Build and evaluate the command-line interface."""
    default_config_path = Path(pkg_resources.resource_filename('dnevnik2', 'app_config.toml')).resolve()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('cookies_path', type=Path)
    arg_parser.add_argument('--config_path', type=Path, default=default_config_path)
    arg_parser.add_argument('--output_dir', type=Path, default=Path('.').resolve())
    return arg_parser.parse_args()


def _collect_marks(data, subjects):
    """Sort mark items chronologically; return (report lines, marks per subject)."""
    out_lines = []
    grouped = defaultdict(list)
    items = sorted(data['data']['items'],
                   key=lambda x: (to_date(x['date']), x['estimate_value_name']))
    for item in items:
        s_name = item['subject_name'] = get_subject(item, subjects)
        mark = item['estimate_value_name']
        if mark.isdigit():
            grouped[s_name].append(int(mark))
        comment = ('# ' + item['estimate_comment']) if item['estimate_comment'] else ''
        out_lines.append((
            to_date(item['date']),
            "{subject_name:25s} {estimate_value_code:5s} {estimate_value_name:9s} {estimate_type_name:20s}".format(**item),
            comment,
        ))
    return out_lines, grouped


if __name__ == '__main__':
    main()
import datetime as dt
import json
from collections import defaultdict
from pathlib import Path
from typing import Dict
import pip._vendor.pkg_resources as pkg_resources
import pip._vendor.toml as toml
from dnevnik2 import Dnevnik2
def get_subject(item, subjects: Dict[str, str]) -> str:
subject_id = str(item['subject_id'])
return subjects.get(subject_id, item['subject_name'])
def to_date(text):
return dt.datetime.strptime(text, '%d.%m.%Y').date()
def main():
    """Fetch the current quarter's marks and render them to dated text reports.

    Reads cookies and a TOML config from the command line, dumps the raw API
    response to ``last_res.txt``, and writes a per-day mark listing followed
    by per-subject averages to ``marks.<today>.txt``.
    """
    default_config_path = Path(pkg_resources.resource_filename('dnevnik2', 'app_config.toml')).resolve()
    default_output_dir = Path('.').resolve()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('cookies_path', type=Path)
    arg_parser.add_argument('--config_path', type=Path, default=default_config_path)
    arg_parser.add_argument('--output_dir', type=Path, default=default_output_dir)
    args = arg_parser.parse_args()
    cookies_path: Path = args.cookies_path
    config_path: Path = args.config_path
    base_dir: Path = args.output_dir
    # Subject-name overrides are expected under the [subjects] table of the config.
    with config_path.open('r', encoding='utf-8') as f1:
        config = toml.load(f1)
    dnevnik = Dnevnik2.make_from_cookies_file(cookies_path)
    data = dnevnik.fetch_marks_for_current_quarter()
    # Keep the raw API response on disk for debugging / auditing.
    with (base_dir / 'last_res.txt').open('w', encoding='utf-8') as f1:
        print(json.dumps(data, ensure_ascii=False, indent=2), file=f1)
    out_lines = []
    grouped = defaultdict(list)  # subject name -> list of numeric marks
    # Sort chronologically, then by mark value, so the report is stable.
    for item in sorted(data['data']['items'], key=lambda x: (to_date(x['date']), x['estimate_value_name'])):
        s_name = item['subject_name'] = get_subject(item, config['subjects'])
        mark = item['estimate_value_name']
        # Non-numeric "marks" (e.g. attendance notes) are listed but not averaged.
        if mark.isdigit():
            grouped[s_name].append(int(mark))
        comment = ('# ' + item['estimate_comment']) if item['estimate_comment'] else ''
        out_lines.append((
            to_date(item['date']),
            "{subject_name:25s} {estimate_value_code:5s} {estimate_value_name:9s} {estimate_type_name:20s}".format(
                **item),
            comment
        ))
    # No marks this quarter: signal failure to the caller instead of writing an empty report.
    if not out_lines:
        exit(1)
    with (base_dir / f'marks.{dt.date.today()}.txt').open('w', encoding='utf-8') as f1:
        for date, mark, comment in sorted(out_lines):
            print(f'{date} {mark} {comment}', file=f1)
        f1.write('\n\n')
        # Per-subject averages over the numeric marks collected above.
        for s_name in sorted(grouped):
            avg = sum(grouped[s_name]) / len(grouped[s_name])
            s_marks = ' '.join(str(mark) for mark in grouped[s_name])
            print(f'{s_name:25s} : {avg:0.3f} {s_marks}', file=f1)
if __name__ == '__main__':
main() | 0.500732 | 0.103295 |
from torch.utils import data
import utils.utils as uu
import torch
import pandas as pd
class TabularDataset(data.Dataset):
def __init__(self, df, dep_var, cont_inputs, int_inputs, test_size, seed=None):
"""
Generates train/test and arr/tensor versions of the data.
Input data is raw.
After init, the data is scaled and transformed.
:param df: Original raw DataFrame
:param dep_var: Name of the dependent variable
:param cont_inputs: List of strings of names of continuous features
:param int_inputs: List of strings of names of integer features
:param test_size: Size of test set (number of rows)
:param seed: Random seed for reproducibility
"""
self.dep_var = dep_var
self.cont_inputs = cont_inputs
self.int_inputs = int_inputs
self.labels_list = list(df[dep_var].unique())
self.df_dtypes = df.dtypes
self.df_cols = df.columns
# Reorganize data set
df = uu.reorder_cols(df=df, dep_var=dep_var, cont_inputs=self.cont_inputs)
self.cat_inputs, self.cat_mask = uu.define_cat_inputs(df=df, dep_var=dep_var, cont_inputs=cont_inputs)
# Split data into train/test
x_train_arr, x_test_arr, y_train_arr, y_test_arr = uu.train_test_split(df.drop(columns=dep_var), df[dep_var],
test_size=test_size,
stratify=df[dep_var],
random_state=seed)
# Convert all categorical variables to dummies, and save two-way transformation
self.le_dict, self.ohe, x_train_arr, x_test_arr = uu.encode_categoricals_custom(df=df,
x_train=x_train_arr,
x_test=x_test_arr,
cat_inputs=self.cat_inputs,
cat_mask=self.cat_mask)
self.preprocessed_cat_mask = uu.create_preprocessed_cat_mask(le_dict=self.le_dict, x_train=x_train_arr)
# Scale continuous inputs
if len(self.cont_inputs) == 0:
self.scaler = None
else:
x_train_arr, self.scaler = uu.scale_cont_inputs(arr=x_train_arr,
preprocessed_cat_mask=self.preprocessed_cat_mask)
x_test_arr, _ = uu.scale_cont_inputs(arr=x_test_arr, preprocessed_cat_mask=self.preprocessed_cat_mask,
scaler=self.scaler)
# Convert to tensor-friendly format
self.x_train, self.x_test, self.y_train, self.y_test = self.preprocess_data(x_train_arr=x_train_arr,
y_train_arr=y_train_arr,
x_test_arr=x_test_arr,
y_test_arr=y_test_arr)
self.out_dim = self.x_train.shape[1]
self.eval_stratify = list(self.y_train.mean(0).detach().cpu().numpy())
# Set current device
self.device = self.get_dev()
def preprocess_data(self, x_train_arr, y_train_arr, x_test_arr, y_test_arr):
"""Converts input arrays of data into tensors ready for training"""
x_train = torch.tensor(x_train_arr, dtype=torch.float)
x_test = torch.tensor(x_test_arr, dtype=torch.float)
y_train_dummies = pd.get_dummies(y_train_arr)
y_train = torch.tensor(y_train_dummies.values, dtype=torch.float)
y_test_dummies = pd.get_dummies(y_test_arr)
y_test = torch.tensor(y_test_dummies.values, dtype=torch.float)
return x_train, x_test, y_train, y_test
def __len__(self):
return len(self.x_train)
def __getitem__(self, index):
return self.x_train[index], self.y_train[index]
def to_dev(self, device):
"""Moves entire data set to specified device. Can be helpful in speeding up training times for small data sets (~60-100x improvement in speed)."""
self.x_train, self.y_train, self.x_test, self.y_test = self.x_train.to(device), self.y_train.to(
device), self.x_test.to(device), self.y_test.to(device)
self.device = device
def get_dev(self):
return self.x_train.device | CSDGAN/classes/tabular/TabularDataset.py | from torch.utils import data
import utils.utils as uu
import torch
import pandas as pd
class TabularDataset(data.Dataset):
    """Wraps a raw tabular DataFrame as a PyTorch Dataset.

    Construction reorders columns, performs a stratified train/test split,
    label/one-hot encodes categorical features, scales continuous features
    (scaler fit on train only), and converts everything to float tensors.
    """

    def __init__(self, df, dep_var, cont_inputs, int_inputs, test_size, seed=None):
        """
        Generates train/test and arr/tensor versions of the data.
        Input data is raw.
        After init, the data is scaled and transformed.
        :param df: Original raw DataFrame
        :param dep_var: Name of the dependent variable
        :param cont_inputs: List of strings of names of continuous features
        :param int_inputs: List of strings of names of integer features
        :param test_size: Size of test set (number of rows)
        :param seed: Random seed for reproducibility
        """
        self.dep_var = dep_var
        self.cont_inputs = cont_inputs
        self.int_inputs = int_inputs
        self.labels_list = list(df[dep_var].unique())
        # Remember the original schema so generated data can be cast back later.
        self.df_dtypes = df.dtypes
        self.df_cols = df.columns
        # Reorganize data set
        df = uu.reorder_cols(df=df, dep_var=dep_var, cont_inputs=self.cont_inputs)
        self.cat_inputs, self.cat_mask = uu.define_cat_inputs(df=df, dep_var=dep_var, cont_inputs=cont_inputs)
        # Split data into train/test (stratified on the dependent variable)
        x_train_arr, x_test_arr, y_train_arr, y_test_arr = uu.train_test_split(df.drop(columns=dep_var), df[dep_var],
                                                                               test_size=test_size,
                                                                               stratify=df[dep_var],
                                                                               random_state=seed)
        # Convert all categorical variables to dummies, and save two-way transformation
        self.le_dict, self.ohe, x_train_arr, x_test_arr = uu.encode_categoricals_custom(df=df,
                                                                                       x_train=x_train_arr,
                                                                                       x_test=x_test_arr,
                                                                                       cat_inputs=self.cat_inputs,
                                                                                       cat_mask=self.cat_mask)
        self.preprocessed_cat_mask = uu.create_preprocessed_cat_mask(le_dict=self.le_dict, x_train=x_train_arr)
        # Scale continuous inputs: fit the scaler on train only, reuse it on test
        # to avoid leaking test statistics into training.
        if len(self.cont_inputs) == 0:
            self.scaler = None
        else:
            x_train_arr, self.scaler = uu.scale_cont_inputs(arr=x_train_arr,
                                                            preprocessed_cat_mask=self.preprocessed_cat_mask)
            x_test_arr, _ = uu.scale_cont_inputs(arr=x_test_arr, preprocessed_cat_mask=self.preprocessed_cat_mask,
                                                 scaler=self.scaler)
        # Convert to tensor-friendly format
        self.x_train, self.x_test, self.y_train, self.y_test = self.preprocess_data(x_train_arr=x_train_arr,
                                                                                    y_train_arr=y_train_arr,
                                                                                    x_test_arr=x_test_arr,
                                                                                    y_test_arr=y_test_arr)
        self.out_dim = self.x_train.shape[1]
        # Class proportions in the (one-hot) training labels, used for stratified evaluation downstream.
        self.eval_stratify = list(self.y_train.mean(0).detach().cpu().numpy())
        # Set current device
        self.device = self.get_dev()

    def preprocess_data(self, x_train_arr, y_train_arr, x_test_arr, y_test_arr):
        """Converts input arrays of data into tensors ready for training.

        Features become float tensors; labels are one-hot encoded via
        ``pd.get_dummies``.
        """
        x_train = torch.tensor(x_train_arr, dtype=torch.float)
        x_test = torch.tensor(x_test_arr, dtype=torch.float)
        # NOTE(review): get_dummies derives columns from each split separately;
        # if a class is absent from one split the one-hot widths could differ —
        # presumably the stratified split above guarantees all classes appear. Verify.
        y_train_dummies = pd.get_dummies(y_train_arr)
        y_train = torch.tensor(y_train_dummies.values, dtype=torch.float)
        y_test_dummies = pd.get_dummies(y_test_arr)
        y_test = torch.tensor(y_test_dummies.values, dtype=torch.float)
        return x_train, x_test, y_train, y_test

    def __len__(self):
        """Number of training examples."""
        return len(self.x_train)

    def __getitem__(self, index):
        """Return the (features, one-hot label) training pair at ``index``."""
        return self.x_train[index], self.y_train[index]

    def to_dev(self, device):
        """Moves entire data set to specified device. Can be helpful in speeding up training times for small data sets (~60-100x improvement in speed)."""
        self.x_train, self.y_train, self.x_test, self.y_test = self.x_train.to(device), self.y_train.to(
            device), self.x_test.to(device), self.y_test.to(device)
        self.device = device
def get_dev(self):
return self.x_train.device | 0.776581 | 0.514827 |
from initial import gen, undirected
from bijective import is_bijective
from itertools import permutations as per
from test import value_nonl
from datetime import datetime
from json import dumps
from data import limit1, limit
def getfilename():
    """Return the current timestamp as 'YYYY-MM-DD-HH:MM:SS', used as a file name.

    Uses an explicit strftime format. The previous str(datetime.now())[:-7]
    slicing silently corrupted the name whenever microsecond == 0, because
    str() then omits the fractional part and the slice chopped the time itself.
    """
    return datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
def travelling(all_perms, array, num):
    """Score every closed 8-vertex tour of block ``num`` of ``array``.

    Builds the undirected weight table for the block, closes each permutation
    into a cycle, and records the first tour achieving each distinct cost.

    :param all_perms: all permutations of range(8)
    :param array: the full S-box byte array
    :param num: index of the 8-byte block being processed
    :return: (list of distinct tour costs, {cost: first tour with that cost}, graph)
    """
    # The old `global graph` leaked module state; the graph is returned explicitly.
    graph = undirected(array, num)
    cost_path = []
    paths = {}
    for perm in all_perms:
        tour = perm + (perm[0],)  # close the cycle back to the start vertex
        path_val = cost(graph, tour)
        # One dict membership test replaces the previous duplicated
        # list + dict bookkeeping (both tracked "first occurrence").
        if path_val not in paths:
            cost_path.append(path_val)
            paths[path_val] = tour
    return cost_path, paths, graph
def dist(graph, i, j):
    """
    Return the weight of the edge between i and j, or 0 when i == j.
    The graph is undirected, stored as an upper-triangular table:
    graph[a] holds the weights from vertex a to vertices a+1..7.

    Parameters
    ----------
    i : int
    j : int
    graph : Dict
    """
    try:
        # Vertices are block-local: reduce to the 0..7 range first.
        i, j = i % 8, j % 8
        if i == j:
            return 0
        lo, hi = (i, j) if i < j else (j, i)
        return graph[lo][hi - lo - 1]
    # Narrowed from a bare `except:` — only lookup/type failures are expected;
    # behavior on failure (report the pair, abort) is unchanged.
    except (IndexError, KeyError, TypeError):
        print(i, j)
        exit()
def substitution(all_perms, array, array_mod, num):
    """Append block ``num``'s bytes to ``array_mod`` in cheapest-tour order.

    Evaluates all closed tours over the block's 8 vertices, picks the one
    with minimal cost, and appends the corresponding bytes of ``array``,
    skipping values already present in ``array_mod``.
    """
    cost_path, paths, graph = travelling(all_perms, array, num)
    best_tour = paths[min(cost_path)]
    base = 8 * num
    for index in best_tour:
        value = array[base + index]
        if value not in array_mod:
            array_mod.append(value)
    return array_mod, graph
def cost(graph, set_Vertices):
    """Total edge weight of walking ``set_Vertices`` in order.

    Parameters
    ----------
    graph : Dict
        Undirected weight table (see ``dist``).
    set_Vertices : sequence of int
        Ordered vertices of the walk; weights of consecutive pairs are summed.
    """
    return sum(dist(graph, u, v) for u, v in zip(set_Vertices, set_Vertices[1:]))
if __name__ == '__main__':
    graphs = []
    initial_non = value_nonl(gen())
    # dict of candidate S-boxes keyed by their non-linearity value.
    # NOTE(review): gen() is called again here, so the stored S-box is not
    # necessarily the one whose non-linearity was just measured — verify
    # whether gen() is deterministic.
    non_sbox = {initial_non: gen()}
    all_perms = list(per(range(8)))
    array = gen()
    if is_bijective(array):
        for var in range(10):
            array_mod = []
            # Rebuild the S-box one 8-byte block at a time (32 blocks = 256 bytes).
            for num in range(32):
                array_mod, graph = substitution(
                    all_perms, array, array_mod, num)
                graphs.append(graph)
            # calculate non-linearity of modified Sbox
            nn_array_mod = value_nonl(array_mod)
            if nn_array_mod > limit1:
                non_sbox[nn_array_mod] = [array_mod, graphs]
            print(var, value_nonl(array), nn_array_mod)
            # NOTE(review): `array` never changes inside this loop, so each of
            # the 10 iterations appears to recompute the same `array_mod` —
            # confirm whether `array` was meant to be regenerated per iteration.
    else:
        print('Is not bijective!')
    # Persist the findings only if something beat the configured threshold.
    if max(non_sbox) > limit:
        with open('data/part-1/'+getfilename(), 'a') as f:
            f.write(dumps(non_sbox))
print(non_sbox.keys(), max(non_sbox.keys())) | travel.py | from initial import gen, undirected
from bijective import is_bijective
from itertools import permutations as per
from test import value_nonl
from datetime import datetime
from json import dumps
from data import limit1, limit
def getfilename():
"""
Returns the current timestamp and will be used as filename.
"""
timestamp = str(datetime.now())[:-7]
return timestamp.replace(' ', '-')
def travelling(all_perms, array, num):
"""
"""
global graph
graph = undirected(array, num)
cost_path = []
paths = {}
for perm in all_perms:
tupl = perm + (perm[0], )
path_val = cost(graph, tupl)
if path_val not in cost_path:
cost_path.append(path_val)
if path_val not in paths:
paths[path_val] = tupl
return cost_path, paths, graph
def dist(graph, i, j):
"""
Returns the weight of edge from i -> j or vice-versa.
The graph is undirected.
Paramters
---------
i : int
j : int
graph : Dict
"""
try:
i, j = i % 8, j % 8
if i < j:
return graph[i][j-i-1]
elif j < i:
return graph[j][i-j-1]
else:
return 0
except:
print(i, j)
exit()
def substitution(all_perms, array, array_mod, num):
"""
"""
cost_path, paths, graph = travelling(all_perms, array, num)
for index in paths[min(cost_path)]:
if array[8*num + index] not in array_mod:
array_mod.append(array[8*num + index])
return array_mod, graph
def cost(graph, set_Vertices):
"""
Returns cost of the minimum cost path visiting each vertex in set
set_Vertices exactly once, starting at 0 and ending at node.
Paramters
---------
set_Vertices : set
A set of vertices of graph.
node : int
The vertex to which we need minimum cost.
"""
cost_path = 0
for node in range(len(set_Vertices)-1):
cost_path += dist(graph, set_Vertices[node], set_Vertices[node+1])
return cost_path
if __name__ == '__main__':
graphs = []
initial_non = value_nonl(gen())
# dict conatining sbox
non_sbox = {initial_non: gen()}
all_perms = list(per(range(8)))
array = gen()
if is_bijective(array):
for var in range(10):
array_mod = []
for num in range(32):
array_mod, graph = substitution(
all_perms, array, array_mod, num)
graphs.append(graph)
# calculate non-linearity of modified Sbox
nn_array_mod = value_nonl(array_mod)
if nn_array_mod > limit1:
non_sbox[nn_array_mod] = [array_mod, graphs]
print(var, value_nonl(array), nn_array_mod)
else:
print('Is not bijective!')
if max(non_sbox) > limit:
with open('data/part-1/'+getfilename(), 'a') as f:
f.write(dumps(non_sbox))
print(non_sbox.keys(), max(non_sbox.keys())) | 0.540681 | 0.419291 |
import argparse
import os
import random
import sys
from enum import Enum
from collections import deque
from ezcode.heap import PriorityMap
class Square:
    """One maze cell; renders either as a colored block or a two-character glyph."""

    class State(Enum):
        Void = 0
        Obstacle = 1
        Path = 2
        Searched = 3

    # ANSI background colors indexed by State.value; the final entry resets.
    colors = [
        "\033[107m",  # White   0 - Void
        "\033[41m",   # Red     1 - Obstacle
        "\033[42m",   # Green   2 - Path
        "\033[43m",   # Yellow  3 - Searched
        "\033[0m",    # Reset   4
    ]
    # Plain-text glyphs indexed by State.value (for terminals without color).
    characters = [
        ". ",  # 0 - Void
        "@ ",  # 1 - Obstacle
        "+ ",  # 2 - Path
        "S ",  # 3 - Searched
    ]

    def __init__(self, state, size: int = 2, text_only=False):
        self.state = state        # a Square.State value
        self.size = size          # width in spaces of the colored block
        self.text_only = text_only  # render glyphs instead of ANSI colors

    def __str__(self):
        if self.text_only:
            return Square.characters[self.state.value]
        paint = Square.colors[self.state.value]
        return f"{paint}{' ' * self.size}{Square.colors[-1]}"
class Maze:
    """Grid maze with random obstacles and four interactive path-finding algorithms."""

    def __init__(self, row: int = 10, col: int = 10, obstacle_percentage=0.1, text_only=False, show_searched=False):
        """
        :param row: number of rows
        :param col: number of columns
        :param obstacle_percentage: expected fraction of cells that are obstacles
        :param text_only: render with characters instead of ANSI colors
        :param show_searched: also paint the cells each algorithm expanded
        """
        self.row_len = row
        self.col_len = col
        self.maze = None
        self.obstacle_percentage = obstacle_percentage
        self.text_only = text_only
        self.show_searched = show_searched

    def build_maze(self, maze=None):
        """Randomly populate self.maze, or load ``maze`` (a 2-D list of 0/1 ints)."""
        if maze is None:
            self.maze = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
            obstacles = self.row_len * self.col_len * self.obstacle_percentage
            for row in range(self.row_len):
                for col in range(self.col_len):
                    # Each cell becomes an obstacle with probability ~obstacle_percentage.
                    rand = random.randrange(self.row_len * self.col_len)
                    self.maze[row][col] = Square.State.Obstacle if rand < obstacles else Square.State.Void
        else:
            # Explicit maze: adopt its dimensions and convert ints to State values.
            self.row_len, self.col_len = len(maze), len(maze[0])
            self.maze = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
            for row in range(self.row_len):
                for col in range(self.col_len):
                    self.maze[row][col] = Square.State(maze[row][col])

    def copy_maze(self):
        """Return a per-cell copy of the grid (cells are immutable State values)."""
        maze_copy = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
        for row in range(self.row_len):
            for col in range(self.col_len):
                maze_copy[row][col] = self.maze[row][col]
        return maze_copy

    def print_maze(self, maze, clear=False):
        """Render ``maze`` to stdout; optionally clear the terminal first."""
        if clear:
            os.system("clear")
        print()
        for row in range(len(maze)):
            print(" ", end="")
            for col in range(len(maze[row])):
                print(Square(state=maze[row][col], text_only=self.text_only), end="")
            print()
        print()

    def validate_selection(self, selection: str):
        """Parse a 'row,col' selection; raise ValueError on bad input, exit on 'exit'.

        :return: the validated (row, col) tuple
        :raises ValueError: malformed input, out-of-range cell, or an occupied cell
        """
        if selection == "exit":
            sys.exit()
        numbers = selection.split(",")
        if len(numbers) != 2:
            raise ValueError(f"[Error] Invalid delimiter: \"{selection}\"")
        try:
            row, col = int(numbers[0]), int(numbers[1])
        except ValueError:
            raise ValueError(f"[Error] Invalid selection: \"{selection}\"")
        if row < 0 or row >= self.row_len:
            raise ValueError(f"[Error] Invalid row: \"{row}\"")
        if col < 0 or col >= self.col_len:
            raise ValueError(f"[Error] Invalid column: \"{col}\"")
        if self.maze[row][col] == Square.State.Obstacle:
            raise ValueError(f"[Error] [{row}][{col}] is occupied!")
        return (row, col)

    def prompt_for_selection(self, name):
        """Prompt repeatedly until the user enters a valid, unoccupied cell."""
        while True:
            prompt = f"Select {name} ([0 ~ {self.row_len - 1}],[0 ~ {self.col_len - 1}]): "
            try:
                return self.validate_selection(input(prompt))
            except ValueError as e:
                print(e)

    def approachable_neighbors(self, node) -> list:
        """Return the 4-connected neighbors of ``node`` that are Void cells."""
        row, col = node
        neighbor_list = list()
        if row > 0 and self.maze[row - 1][col] == Square.State.Void:
            neighbor_list.append((row - 1, col))
        if col > 0 and self.maze[row][col - 1] == Square.State.Void:
            neighbor_list.append((row, col - 1))
        if row + 1 < self.row_len and self.maze[row + 1][col] == Square.State.Void:
            neighbor_list.append((row + 1, col))
        if col + 1 < self.col_len and self.maze[row][col + 1] == Square.State.Void:
            neighbor_list.append((row, col + 1))
        return neighbor_list

    def path_dict_to_path_list(self, path_dict, destination):
        """Walk {child: parent} links back from ``destination``; return source->destination."""
        path_list = list([destination])
        parent = path_dict[destination]
        while parent:
            path_list.append(parent)
            parent = path_dict[parent] if parent in path_dict else None
        return path_list[::-1]

    """
    Path finding algorithms Summary:
                 Shortest Path    Searched Area    f_value
    dfs          no               largest          N/A
    bfs          yes              larger           h_value >> g_value
    dijkstra     yes              larger           h_value = 0
    A*           yes              small            g_value + h_value
    (Fixed: the previous table had the dfs/bfs "Shortest Path" entries swapped —
     on an unweighted grid BFS finds shortest paths, DFS does not.)
    Notes:
        A* f_value = g_value + h_value
        The more accurate we can estimate the path length from a node to destination (h_value), the faster A* can run.
        If h_value = 0, which means we don't give any estimation, it becomes Dijkstra, the lower h_value the more nodes to expand
        If h_value is the same as real value, A* won't expand any node and only follow the shortest path
        If h_value is larger than real value, A* won't guarantee the shortest path but it can run faster
        If h_value >> g_value, which means we trust the heuristic path length, it becomes bfs and does not guarantee the shortest path
        The heuristic path length must keep the same order as the real ones
            e.g. if a > b then h_a > h_b
    """
    def dfs(self, source, destination):
        """
        candidates is a Stack
        searched nodes will not be revisited
        does not guarantee the shortest path
        """
        path_dict, searched, candidates = dict(), set([source]), list()  # path_dict = {child: parent}
        candidates.append(source)
        while len(candidates) > 0:
            node = candidates.pop()
            for neighbor in self.approachable_neighbors(node):
                if neighbor == destination:
                    searched.add(neighbor)
                    path_dict[destination] = node
                    return self.path_dict_to_path_list(path_dict, destination), searched
                elif neighbor not in searched:
                    searched.add(neighbor)
                    candidates.append(neighbor)
                    path_dict[neighbor] = node
        return self.path_dict_to_path_list(path_dict, destination), searched

    def bfs(self, source, destination):
        """
        candidates is a Queue
        searched nodes will not be revisited
        finds a shortest path on this unweighted grid
        """
        path_dict, searched, candidates = dict(), set([source]), deque()  # path_dict = {child: parent}
        candidates.append(source)
        while len(candidates) > 0:
            node = candidates.popleft()
            for neighbor in self.approachable_neighbors(node):
                if neighbor == destination:
                    searched.add(neighbor)
                    path_dict[destination] = node
                    return self.path_dict_to_path_list(path_dict, destination), searched
                elif neighbor not in searched:
                    searched.add(neighbor)
                    candidates.append(neighbor)
                    path_dict[neighbor] = node
        return self.path_dict_to_path_list(path_dict, destination), searched

    def dijkstra(self, source, destination):
        """
        candidates is a Priority Map keyed by path cost to source
        searched nodes can be put into candidates again
        """
        path_dict, visited, searched, candidates = dict(), set(), set([source]), PriorityMap(min_heap=True)  # path_dict = {child: parent}
        g_values = {source: 0}  # g_value: path cost to source
        candidates.push(0, source)  # priority = g_value
        while len(candidates) > 0:
            _, node = candidates.pop()
            visited.add(node)
            for neighbor in self.approachable_neighbors(node):
                if neighbor == destination:
                    searched.add(neighbor)
                    path_dict[destination] = node
                    return self.path_dict_to_path_list(path_dict, destination), searched
                elif neighbor not in visited:
                    searched.add(neighbor)
                    if neighbor not in g_values:
                        g_values[neighbor] = float("inf")
                    g_values[neighbor] = min(g_values[neighbor], g_values[node] + 1)
                    candidates.push(g_values[neighbor], neighbor)
                    path_dict[neighbor] = node
        return self.path_dict_to_path_list(path_dict, destination), searched

    def a_star(self, source, destination):
        """
        candidates is a Priority Map keyed by f_value = g_value + h_value
        searched nodes can be put into candidates again
        h_value = 0, it becomes dijkstra which is slower than A*
        h_value >> g_value, it becomes bfs which does not guarantee the shortest path
        """
        def manhattan_distance(source, destination):
            return abs(source[0] - destination[0]) + abs(source[1] - destination[1])

        path_dict, visited, searched, candidates = dict(), set(), set([source]), PriorityMap(min_heap=True)  # path_dict = {child: parent}
        g_values = {source: 0}  # g_value: path cost to source
        h_value = manhattan_distance(source, destination)  # h_value: heuristic estimate of the path cost to destination
        f_value = g_values[source] + h_value  # f_value: g_value + h_value
        candidates.push(f_value, source)  # priority = f_value
        while len(candidates) > 0:
            _, node = candidates.pop()
            visited.add(node)
            for neighbor in self.approachable_neighbors(node):
                if neighbor == destination:
                    searched.add(neighbor)
                    path_dict[destination] = node
                    return self.path_dict_to_path_list(path_dict, destination), searched
                elif neighbor not in visited:
                    searched.add(neighbor)
                    if neighbor not in g_values:
                        g_values[neighbor] = float("inf")
                    g_values[neighbor] = min(g_values[neighbor], g_values[node] + 1)
                    # FIX: the heuristic must be measured from `neighbor`, not from
                    # `source` — a constant h degenerates A* into Dijkstra.
                    f_value = g_values[neighbor] + manhattan_distance(neighbor, destination)
                    candidates.push(f_value, neighbor)
                    path_dict[neighbor] = node
        return self.path_dict_to_path_list(path_dict, destination), searched

    def update_maze(self, maze, path, searched):
        """Paint ``searched`` (optionally) and ``path`` cells onto ``maze``; return it."""
        if self.show_searched:
            for node in searched:
                maze[node[0]][node[1]] = Square.State.Searched
        for node in path:
            maze[node[0]][node[1]] = Square.State.Path
        return maze

    def run(self, maze=None):
        """Interactive driver: build and print the maze, prompt for both endpoints,
        then run and display all four algorithms on identical copies of the grid."""
        self.build_maze(maze)
        self.print_maze(self.maze)
        source = self.prompt_for_selection("start point")
        destination = self.prompt_for_selection(" end point")
        # FIX: the labels previously mislabeled the DFS run as "BFS" and vice versa.
        path, searched = self.dfs(source, destination)
        print(f"DFS - path: {len(path)}, searched area: {len(searched)}")
        self.print_maze(self.update_maze(self.copy_maze(), path, searched))
        path, searched = self.bfs(source, destination)
        print(f"BFS - path: {len(path)}, searched area: {len(searched)}")
        self.print_maze(self.update_maze(self.copy_maze(), path, searched))
        path, searched = self.dijkstra(source, destination)
        print(f"Dijkstra - path: {len(path)}, searched area: {len(searched)}")
        self.print_maze(self.update_maze(self.copy_maze(), path, searched))
        path, searched = self.a_star(source, destination)
        print(f"A* - path: {len(path)}, searched area: {len(searched)}")
        self.print_maze(self.update_maze(self.copy_maze(), path, searched))
# Command-line options for the maze demo.
# NOTE(review): parsing happens at import time (outside the __main__ guard),
# so merely importing this module consumes sys.argv — consider moving the
# parse under the guard.
parser = argparse.ArgumentParser(description="Maze")
parser.add_argument("-r", "--row", dest="row", type=int, default=10, help="Number of rows")
parser.add_argument("-c", "--column", dest="col", type=int, default=10, help="Number of columns")
parser.add_argument("-t", "--text-only", dest="text_only", action="store_true", default=False, help="Print Map in Text")
parser.add_argument("-s", "--show-searched-area", dest="show_searched", action="store_true", default=False)  # also paint expanded cells
parser.add_argument("-o", "--obstacles-percentage", dest="op", type=float, default=0.1)  # fraction of cells that are obstacles
args = parser.parse_args()
if __name__ == "__main__":
Maze(row=args.row, col=args.col, obstacle_percentage=args.op, text_only=args.text_only, show_searched=args.show_searched).run() | src/ezcode/matrix/maze.py | import argparse
import os
import random
import sys
from enum import Enum
from collections import deque
from ezcode.heap import PriorityMap
class Square:
class State(Enum):
Void = 0
Obstacle = 1
Path = 2
Searched = 3
colors = [
"\033[107m", # White 0 - Void
"\033[41m", # Red 1 - Obstacle
"\033[42m", # Green 2 - Path
"\033[43m", # Yellow 3 - Searched
"\033[0m", # Reset 4
]
characters = [
". ", # 0 - Void
"@ ", # 1 - Obstacle
"+ ", # 2 - Path
"S ", # 3 - Searched
]
def __init__(self, state, size: int = 2, text_only=False):
self.state = state
self.size = size
self.text_only = text_only
def __str__(self):
if self.text_only:
return Square.characters[self.state.value]
return Square.colors[self.state.value] + " " * self.size + Square.colors[-1]
class Maze:
def __init__(self, row: int = 10, col: int = 10, obstacle_percentage=0.1, text_only=False, show_searched=False):
self.row_len = row
self.col_len = col
self.maze = None
self.obstacle_percentage = obstacle_percentage
self.text_only = text_only
self.show_searched = show_searched
def build_maze(self, maze=None):
if maze is None:
self.maze = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
obstacles = self.row_len * self.col_len * self.obstacle_percentage
for row in range(self.row_len):
for col in range(self.col_len):
rand = random.randrange(self.row_len * self.col_len)
self.maze[row][col] = Square.State.Obstacle if rand < obstacles else Square.State.Void
else:
self.row_len, self.col_len = len(maze), len(maze[0])
self.maze = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
for row in range(self.row_len):
for col in range(self.col_len):
self.maze[row][col] = Square.State(maze[row][col])
def copy_maze(self):
maze_copy = [[None for _ in range(self.col_len)] for _ in range(self.row_len)]
for row in range(self.row_len):
for col in range(self.col_len):
maze_copy[row][col] = self.maze[row][col]
return maze_copy
def print_maze(self, maze, clear=False):
if clear:
os.system("clear")
print()
for row in range(len(maze)):
print(" ", end="")
for col in range(len(maze[row])):
print(Square(state=maze[row][col], text_only=self.text_only), end="")
print()
print()
def validate_selection(self, selection: str):
if selection == "exit":
sys.exit()
numbers = selection.split(",")
if len(numbers) != 2:
raise ValueError(f"[Error] Invalid delimiter: \"{selection}\"")
try:
row, col = int(numbers[0]), int(numbers[1])
except ValueError:
raise ValueError(f"[Error] Invalid selection: \"{selection}\"")
if row < 0 or row >= self.row_len:
raise ValueError(f"[Error] Invalid row: \"{row}\"")
if col < 0 or col >= self.col_len:
raise ValueError(f"[Error] Invalid column: \"{col}\"")
if self.maze[row][col] == Square.State.Obstacle:
raise ValueError(f"[Error] [{row}][{col}] is occupied!")
return (row, col)
def prompt_for_selection(self, name):
while True:
prompt = f"Select {name} ([0 ~ {self.row_len - 1}],[0 ~ {self.col_len - 1}]): "
try:
return self.validate_selection(input(prompt))
except ValueError as e:
print(e)
def approachable_neighbors(self, node) -> list:
row, col = node
neighbor_list = list()
if row > 0 and self.maze[row - 1][col] == Square.State.Void:
neighbor_list.append((row - 1, col))
if col > 0 and self.maze[row][col - 1] == Square.State.Void:
neighbor_list.append((row, col - 1))
if row + 1 < self.row_len and self.maze[row + 1][col] == Square.State.Void:
neighbor_list.append((row + 1, col))
if col + 1 < self.col_len and self.maze[row][col + 1] == Square.State.Void:
neighbor_list.append((row, col + 1))
return neighbor_list
def path_dict_to_path_list(self, path_dict, destination):
path_list = list([destination])
parent = path_dict[destination]
while parent:
path_list.append(parent)
parent = path_dict[parent] if parent in path_dict else None
return path_list[::-1]
"""
Path finding algorithms Summary:
Shortest Path Searched Area f_value
bfs no larger h_value >> g_value
dfs yes largest N/A
dijkstra yes larger h_value =0
A* yes small g_value + h_value
Notes:
A* f_value = g_value + h_value
The more accurate we can estimate the path length from a node to destination (h_value), the faster A* can run.
If h_value = 0, which means we don't give any estimation, it becomes Dijkstra, the lower h_value the more nodes to expand
If h_value is the same as real value, A* won't expand any node and only follow the shortest path
If h_value is larger than real value, A* won't guarantee the shortest path but it can run faster
If h_value >> g_value, which means we trust the heuristic path length, it becomes bfs and does not guarantee the shortest path
The heuristic path length must keep the same order as the real ones
e.g. if a > b then h_a > h_b
"""
def dfs(self, source, destination):
"""
candidates is a Stack
searched nodes will not be revisited
does not guarantee the shortest path
"""
path_dict, searched, candidates = dict(), set([source]), list() # path_dict = {child: parent}
candidates.append(source)
while len(candidates) > 0:
node = candidates.pop()
for neighbor in self.approachable_neighbors(node):
if neighbor == destination:
searched.add(neighbor)
path_dict[destination] = node
return self.path_dict_to_path_list(path_dict, destination), searched
elif neighbor not in searched:
searched.add(neighbor)
candidates.append(neighbor)
path_dict[neighbor] = node
return self.path_dict_to_path_list(path_dict, destination), searched
def bfs(self, source, destination):
"""
candidates is a Queue
searched nodes will not be revisited
"""
path_dict, searched, candidates = dict(), set([source]), deque() # path_dict = {child: parent}
candidates.append(source)
while len(candidates) > 0:
node = candidates.popleft()
for neighbor in self.approachable_neighbors(node):
if neighbor == destination:
searched.add(neighbor)
path_dict[destination] = node
return self.path_dict_to_path_list(path_dict, destination), searched
elif neighbor not in searched:
searched.add(neighbor)
candidates.append(neighbor)
path_dict[neighbor] = node
return self.path_dict_to_path_list(path_dict, destination), searched
def dijkstra(self, source, destination):
"""
candidates is a Priority Map
searched nodes can be put into candidates again
"""
path_dict, visited, searched, candidates = dict(), set(), set([source]), PriorityMap(min_heap=True) # path_dict = {child: parent}
g_values = {source: 0} # g_value: path cost to source
candidates.push(0, source) # priority = g_value
while len(candidates) > 0:
_, node = candidates.pop()
visited.add(node)
for neighbor in self.approachable_neighbors(node):
if neighbor == destination:
searched.add(neighbor)
path_dict[destination] = node
return self.path_dict_to_path_list(path_dict, destination), searched
elif neighbor not in visited:
searched.add(neighbor)
if neighbor not in g_values:
g_values[neighbor] = float("inf")
g_values[neighbor] = min(g_values[neighbor], g_values[node] + 1)
candidates.push(g_values[neighbor], neighbor)
path_dict[neighbor] = node
return self.path_dict_to_path_list(path_dict, destination), searched
def a_star(self, source, destination):
"""
candidates is a Priority Map
searched nodes can be put into candidates again
h_value = 0, it becomes dijkstra which is slower than A*
h_value >> g_value, it becomes bfs which does not guarantee the shortest path
"""
def manhattan_distance(source, destination):
return abs(source[0] - destination[0]) + abs(source[1] - destination[1])
path_dict, visited, searched, candidates = dict(), set(), set([source]), PriorityMap(min_heap=True) # path_dict = {child: parent}
g_values = {source: 0} # g_value: path cost to source
h_value = manhattan_distance(source, destination) # h_value: huristic estimate of the path cost to destination
f_value = g_values[source] + h_value # f_value: g_value + h_value
candidates.push(f_value, source) # priority = f_value
while len(candidates) > 0:
_, node = candidates.pop()
visited.add(node)
for neighbor in self.approachable_neighbors(node):
if neighbor == destination:
searched.add(neighbor)
path_dict[destination] = node
return self.path_dict_to_path_list(path_dict, destination), searched
elif neighbor not in visited:
searched.add(neighbor)
if neighbor not in g_values:
g_values[neighbor] = float("inf")
g_values[neighbor] = min(g_values[neighbor], g_values[node] + 1)
f_value = g_values[neighbor] + manhattan_distance(source, destination)
candidates.push(f_value, neighbor)
path_dict[neighbor] = node
return self.path_dict_to_path_list(path_dict, destination), searched
def update_maze(self, maze, path, searched):
if self.show_searched:
for node in searched:
maze[node[0]][node[1]] = Square.State.Searched
for node in path:
maze[node[0]][node[1]] = Square.State.Path
return maze
def run(self, maze=None):
# maze = [
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# ]
# source, destination = (35,29), (33,29)
self.build_maze(maze)
self.print_maze(self.maze)
source = self.prompt_for_selection("start point")
destination = self.prompt_for_selection(" end point")
path, searched = self.dfs(source, destination)
print(f"BFS - path: {len(path)}, searched area: {len(searched)}")
self.print_maze(self.update_maze(self.copy_maze(), path, searched))
path, searched = self.bfs(source, destination)
print(f"DFS - path: {len(path)}, searched area: {len(searched)}")
self.print_maze(self.update_maze(self.copy_maze(), path, searched))
path, searched = self.dijkstra(source, destination)
print(f"Dijkstra - path: {len(path)}, searched area: {len(searched)}")
self.print_maze(self.update_maze(self.copy_maze(), path, searched))
path, searched = self.a_star(source, destination)
print(f"A* - path: {len(path)}, searched area: {len(searched)}")
self.print_maze(self.update_maze(self.copy_maze(), path, searched))
parser = argparse.ArgumentParser(description="Maze")
parser.add_argument("-r", "--row", dest="row", type=int, default=10, help="Number of rows")
parser.add_argument("-c", "--column", dest="col", type=int, default=10, help="Number of columns")
parser.add_argument("-t", "--text-only", dest="text_only", action="store_true", default=False, help="Print Map in Text")
parser.add_argument("-s", "--show-searched-area", dest="show_searched", action="store_true", default=False)
parser.add_argument("-o", "--obstacles-percentage", dest="op", type=float, default=0.1)
args = parser.parse_args()
if __name__ == "__main__":
Maze(row=args.row, col=args.col, obstacle_percentage=args.op, text_only=args.text_only, show_searched=args.show_searched).run() | 0.415136 | 0.194291 |
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
import datetime
import psycopg2
import psycopg2.extras
from collections import OrderedDict
from yuntu.core.datastore.utils import hashDict
def datastoreGetSpec(ds):
    """Assemble the complete specification dict for a datastore instance."""
    return {
        "hash": ds.getHash(),
        "type": ds.getType(),
        "conf": ds.getConf(),
        "metadata": ds.getMetadata(),
    }
def datastoreGetType(ds):
    """Return the declared type of the datastore's input specification."""
    input_spec = ds.inputSpec
    return input_spec["type"]
def datastoreGetConf(ds):
    """Return a shallow copy of the datastore's configuration mapping.

    A copy is returned (as in the original key-by-key loop) so callers can
    mutate it without affecting ``ds.inputSpec``.
    """
    # dict() replaces the original manual key-copy loop with the idiomatic
    # shallow-copy constructor.
    return dict(ds.inputSpec["conf"])
def datastoreGetMetadata(ds):
    """Return the metadata section of the datastore's input specification."""
    input_spec = ds.inputSpec
    return input_spec["metadata"]
def datastoreGetHash(ds):
    """Return a hash of the datastore's formatted configuration (via hashDict)."""
    return hashDict(ds.getConf())
def datastorePostgresqlGetData(ds):
    """Yield metadata records from a PostgreSQL datastore.

    Executes the configured query (``conf["target"]``) and yields one dict
    per row of the shape
    {"datastore": <spec>, "source": {"fkey": <unique key>}, "metadata": <row>}.
    """
    def f(dsSpec):
        dsConf = dsSpec["conf"]
        # Pass credentials as keyword arguments instead of interpolating
        # them into a DSN string — avoids quoting/injection problems when a
        # value contains spaces or quotes.
        conn = psycopg2.connect(
            dbname=dsConf["datastore"],
            user=dsConf["user"],
            host=dsConf["host"],
            password=dsConf["password"],
        )
        try:
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(dsConf["target"])
            for row in cur:
                fkey = str(row[dsConf["ukey"]])
                obj = dict(row)
                obj[dsConf["ukey"]] = fkey
                yield {"datastore": dsSpec, "source": {"fkey": fkey}, "metadata": obj}
        finally:
            # BUG FIX: the connection was never closed; closing in finally
            # releases it even if the consumer abandons the generator.
            conn.close()
    return f(ds.getSpec())
def datastoreMongodbGetData(ds):
    """Yield metadata records from a MongoDB datastore.

    ``conf["filter"]`` may be either a list of record ids (each fetched
    individually by _id) or a regular Mongo query document used with
    ``find``. ObjectId values are converted to strings so the yielded
    records are JSON-serializable.
    """
    def stringifyObjectIds(value):
        # Recursively replace ObjectId instances inside (nested) dicts with
        # their string form. The original hand-unrolled this to three
        # levels of nesting; recursion generalizes it to any depth while
        # behaving identically on shallower documents.
        if isinstance(value, ObjectId):
            return str(value)
        if isinstance(value, dict):
            return {key: stringifyObjectIds(val) for key, val in value.items()}
        return value
    def buildRecord(dsSpec, dsConf, obj):
        # The unique-key field doubles as the foreign key of the record.
        fkey = str(obj[dsConf["ukey"]])
        obj[dsConf["ukey"]] = fkey
        obj = stringifyObjectIds(obj)
        return {"datastore": dsSpec, "source": {"fkey": fkey}, "metadata": obj}
    def f(dsSpec):
        dsConf = dsSpec["conf"]
        client = MongoClient(dsConf["host"], maxPoolSize=30)
        collection = client[dsConf["datastore"]][dsConf["target"]]
        if isinstance(dsConf["filter"], list):
            for rId in dsConf["filter"]:
                obj = collection.find_one({"_id": ObjectId(rId)})
                yield buildRecord(dsSpec, dsConf, obj)
        else:
            for obj in collection.find(dsConf["filter"], dsConf["fields"]):
                yield buildRecord(dsSpec, dsConf, obj)
    return f(ds.getSpec())
def datastoreAudioMothGetData(ds):
    """Yield metadata records for AudioMoth .wav recordings in a directory.

    Recording metadata (battery voltage, timestamp, timezone, device id,
    gain) is read from fixed byte offsets in the first 200 bytes of each
    file. Two header layouts are supported: the alternate layout shifts the
    voltage/device/gain fields by two bytes, so parsing falls back to the
    alternate offsets when the primary ones fail.
    """
    def parseHeader(buf_header, voltage_slice, device_slice, gain_slice):
        # Layout-dependent fields are passed in as slices so both known
        # header layouts share one parser.
        obj = {}
        obj["voltage"] = float(buf_header[voltage_slice])
        obj["time"] = buf_header[68:87].decode("utf-8")
        obj["tZone"] = buf_header[89:92].decode("utf-8")
        # A "-" in this wider window indicates a numeric UTC-offset
        # timezone, which needs the longer field.
        if "-" in buf_header[84:94].decode("utf-8"):
            obj["tZone"] = buf_header[84:94].decode("utf-8")
        obj["device_id"] = buf_header[device_slice].decode("utf-8")
        obj["gain"] = float(buf_header[gain_slice])
        return obj
    def f(dsSpec):
        dsConf = dsSpec["conf"]
        dataDir = dsConf["dataDir"]
        wavFiles = [name for name in os.listdir(dataDir) if name.endswith((".wav", ".WAV"))]
        for fkey in wavFiles:
            path = os.path.join(dataDir, fkey)
            # BUG FIX: the original called file.close() inside the with
            # block (redundant); the context manager alone now handles it.
            with open(path, "rb") as wav_file:
                buf_header = wav_file.read(200)
            try:
                obj = parseHeader(buf_header, slice(166, 169), slice(107, 123), slice(140, 141))
            except (ValueError, UnicodeDecodeError):
                # Narrowed from a bare except: only float() and decode()
                # failures signal the alternate (2-byte-shifted) layout.
                obj = parseHeader(buf_header, slice(168, 171), slice(109, 125), slice(142, 143))
            obj["path"] = path
            yield {"datastore": dsSpec, "source": {"fkey": fkey}, "metadata": obj}
    return f(ds.getSpec())
def datastoreDirectGetData(ds):
    """Yield records directly from the in-memory array attached to the datastore.

    The array index serves as each record's foreign key.
    """
    def f(dsSpec, dataArr):
        # enumerate replaces the original range(len(...)) index loop.
        for index, item in enumerate(dataArr):
            yield {"datastore": dsSpec, "source": {"fkey": index}, "metadata": item}
    return f(ds.getSpec(), ds.dataArr)
from pymongo import MongoClient
from bson.objectid import ObjectId
import datetime
import psycopg2
import psycopg2.extras
from collections import OrderedDict
from yuntu.core.datastore.utils import hashDict
def datastoreGetSpec(ds):
dSpec = {}
dSpec["hash"] = ds.getHash()
dSpec["type"] = ds.getType()
dSpec["conf"] = ds.getConf()
dSpec["metadata"] = ds.getMetadata()
return dSpec
def datastoreGetType(ds):
return ds.inputSpec["type"]
def datastoreGetConf(ds):
dConf = {}
for key in ds.inputSpec["conf"]:
dConf[key] = ds.inputSpec["conf"][key]
return dConf
def datastoreGetMetadata(ds):
return ds.inputSpec["metadata"]
def datastoreGetHash(ds):
formatedConf = ds.getConf()
return hashDict(formatedConf)
def datastorePostgresqlGetData(ds):
def f(dsSpec):
dsConf = dsSpec["conf"]
conn = psycopg2.connect("dbname='"+dsConf["datastore"]+"' user='"+dsConf["user"]+"' host='"+dsConf["host"]+"' password='"+dsConf["password"]+"'")
cur = conn.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute(dsConf["target"])
for row in cur:
obj = {}
fkey = str(row[dsConf["ukey"]])
for key in row.keys():
obj[key] = row[key]
obj[dsConf["ukey"]] = fkey
yield {"datastore":dsSpec, "source":{"fkey":fkey},"metadata":obj}
return f(ds.getSpec())
def datastoreMongodbGetData(ds):
def f(dsSpec):
dsConf = dsSpec["conf"]
client = MongoClient(dsConf["host"],maxPoolSize = 30)
mDb = client[dsConf["datastore"]]
collection = mDb[dsConf["target"]]
if isinstance(dsConf["filter"],list):
for rId in dsConf["filter"]:
obj = collection.find_one({"_id":ObjectId(rId)})
fkey = str(obj[dsConf["ukey"]])
obj[dsConf["ukey"]] = fkey
for key in obj:
if isinstance(obj[key],ObjectId):
obj[key] = str(obj[key])
elif isinstance(obj[key],dict):
for dkey in obj[key]:
if isinstance(obj[key][dkey],ObjectId):
obj[key][dkey] = str(obj[key][dkey])
elif isinstance(obj[key][dkey],dict):
for tkey in obj[key][dkey]:
if isinstance(obj[key][dkey][tkey],ObjectId):
obj[key][dkey][tkey] = str(obj[key][dkey][tkey])
yield {"datastore":dsSpec, "source":{"fkey":fkey},"metadata":obj}
else:
for obj in collection.find(dsConf["filter"],dsConf["fields"]):
fkey = str(obj[dsConf["ukey"]])
obj[dsConf["ukey"]] = fkey
for key in obj:
if isinstance(obj[key],ObjectId):
obj[key] = str(obj[key])
elif isinstance(obj[key],dict):
for dkey in obj[key]:
if isinstance(obj[key][dkey],ObjectId):
obj[key][dkey] = str(obj[key][dkey])
elif isinstance(obj[key][dkey],dict):
for tkey in obj[key][dkey]:
if isinstance(obj[key][dkey][tkey],ObjectId):
obj[key][dkey][tkey] = str(obj[key][dkey][tkey])
yield {"datastore":dsSpec, "source":{"fkey":fkey},"metadata":obj}
return f(ds.getSpec())
def datastoreAudioMothGetData(ds):
def f(dsSpec):
dsConf = dsSpec["conf"]
dataDir = dsConf["dataDir"]
allFiles = []
for filename in os.listdir(dsConf["dataDir"]):
if filename.endswith(".wav") or filename.endswith(".WAV"):
allFiles.append(filename)
for i in range(len(allFiles)):
fkey = allFiles[i]
obj = {}
obj["path"] = os.path.join(dsConf["dataDir"],fkey)
with open(obj["path"], 'rb') as file:
buf_header = file.read(200)
try:
obj["voltage"] = float(buf_header[166:169])
obj["time"] = buf_header[68:87].decode("utf-8")
obj["tZone"] = buf_header[89:92].decode("utf-8")
if "-" in buf_header[84:94].decode("utf-8"):
obj["tZone"] = buf_header[84:94].decode("utf-8")
obj["device_id"] = buf_header[107:123].decode("utf-8")
obj["gain"] = float(buf_header[140:141])
except:
obj["voltage"] = float(buf_header[168:171])
obj["time"] = buf_header[68:87].decode("utf-8")
obj["tZone"] = buf_header[89:92].decode("utf-8")
if "-" in buf_header[84:94].decode("utf-8"):
obj["tZone"] = buf_header[84:94].decode("utf-8")
obj["device_id"] = buf_header[109:125].decode("utf-8")
obj["gain"] = float(buf_header[142:143])
file.close()
yield {"datastore":dsSpec, "source":{"fkey":fkey},"metadata":obj}
return f(ds.getSpec())
def datastoreDirectGetData(ds):
def f(dsSpec,dataArr):
for i in range(len(dataArr)):
yield {"datastore":dsSpec,"source":{"fkey":i},"metadata":dataArr[i]}
return f(ds.getSpec(),ds.dataArr) | 0.312685 | 0.122418 |
from manual_test.manual_test_base import \
ManualTestBase, \
handle_command_line, \
CLEAN_SERVER_RECORD_TYPE, \
POPULATED_SERVER_RECORD_TYPE
from manual_test.utilities.notification_utilities import NotificationUtilities
from manual_test.utilities.workspace_utilities import WorkspaceUtilities
from typing import Any, Dict, List, Optional
# Identifiers used when recording/reading migration snapshots for this service.
SERVICE_NAME = 'TagRuleEngine'
TAG_RULE_DATABASE_NAME = 'nitagrule'
TEST_NAME = 'TagRuleMigrationTest'
# Tag Rule Engine HTTP routes (relative paths handed to self.post).
CREATE_TAG_RULE_ROUTE = 'nitagrule/v1/rules'
QUERY_TAG_RULES_ROUTE = 'nitagrule/v1/query-rules'
class TestTagRule(ManualTestBase):
    """Manual migration test for the Tag Rule Engine service.

    Populates a server with known tag rules, records JSON snapshots of the
    rule database, and after migration validates that every recorded rule
    arrived intact with valid workspace and notification-strategy links.
    """
    def populate_data(self) -> None:
        """Create test rules in every workspace, then record the populated state."""
        notification_strategy_id = self.__create_test_notification_strategy()
        workspace_utilities = WorkspaceUtilities()
        workspace_utilities.create_workspace_for_test(self)
        for workspace_id in workspace_utilities.get_workspaces(self):
            self.__create_test_rules(workspace_id, notification_strategy_id)
        self.record_json_data(
            SERVICE_NAME,
            TAG_RULE_DATABASE_NAME,
            POPULATED_SERVER_RECORD_TYPE,
            self.__get_all_rules()
        )
    def record_initial_data(self) -> None:
        """Record the rule set present on a clean (pre-population) server."""
        self.record_json_data(SERVICE_NAME, TAG_RULE_DATABASE_NAME, CLEAN_SERVER_RECORD_TYPE, self.__get_all_rules())
    def validate_data(self) -> None:
        """Compare the current server state against the recorded snapshots.

        Every rule recorded on the populated source server must appear
        unchanged; rules absent from the source snapshot must match the
        clean target snapshot by display name (i.e. be target-generated).
        """
        source_service_snapshot = self.read_recorded_json_data(
            SERVICE_NAME,
            TAG_RULE_DATABASE_NAME,
            POPULATED_SERVER_RECORD_TYPE,
            required=True)
        # NOTE(review): "snaphot" is a pre-existing typo for "snapshot" in
        # this local name (left unchanged in this documentation-only pass).
        target_service_snaphot = self.read_recorded_json_data(
            SERVICE_NAME,
            TAG_RULE_DATABASE_NAME,
            CLEAN_SERVER_RECORD_TYPE,
            required=False)
        current_snapshot = self.__get_all_rules()
        workspaces = WorkspaceUtilities().get_workspaces(self)
        notification_strategies = NotificationUtilities().get_all_notification_strategies(self)
        migrated_record_count = 0
        for rule in current_snapshot:
            expected_rule = self.find_record_with_matching_id(rule, source_service_snapshot)
            if expected_rule is not None:
                self.__assert_rules_equal(expected_rule, rule)
                self.__assert_rule_has_valid_workspace(rule, workspaces)
                self.__assert_rule_has_valid_notification_strategies(rule, notification_strategies)
                migrated_record_count = migrated_record_count + 1
            else:
                # Verify items that are generated by the target version and not present in the source.
                expected_rule = self.__find_rule_by_display_name(rule, target_service_snaphot)
                assert expected_rule is not None
                self.__assert_rules_equal(expected_rule, rule)
                self.__assert_rule_has_valid_workspace(rule, workspaces)
                self.__assert_rule_has_valid_notification_strategies(rule, notification_strategies)
        # Every rule recorded on the source must have been matched above.
        assert len(source_service_snapshot) == migrated_record_count
    def __get_all_rules(self) -> List[Dict[str, Any]]:
        """Query and return all tag rules across all workspaces."""
        # NOTE: workspace="*" does not work as normal for the tag rule API. No value is used for all workspaces.
        query: Dict[str, str] = {}
        response = self.post(QUERY_TAG_RULES_ROUTE, json=query)
        response.raise_for_status()
        return response.json()['rules']
    def __create_test_rules(self, workspace_id: str, notification_strategy_id: str):
        """Create one enabled and one disabled test rule in the workspace."""
        self.__create_test_rule(workspace_id, notification_strategy_id, enabled=True)
        self.__create_test_rule(workspace_id, notification_strategy_id, enabled=False)
    def __create_test_rule(self, workspace_id: str, notification_strategy_id: str, enabled: bool):
        """POST a single test tag rule with the given enabled state."""
        state_description = 'Enabled' if enabled else 'Disabled'
        rule = {
            'searchPath': 'test.tag.for.tag.rule.migration',
            'workspace': workspace_id,
            'tagDataType': 'DOUBLE',
            'conditions': [self.__build_test_rule_condition(notification_strategy_id)],
            'disabled': not enabled,
            'displayName': f'{state_description} Test Tag Rule',
            'description': f'Test tag rule with state set to {state_description} for workspace {workspace_id}',
            'alarmInstanceDisplayNameTemplate': 'Test Tag Rule Alarm',
            # NOTE(review): "Tempalte" looks like a typo, but it may mirror the
            # field name the service actually accepts — confirm against the
            # API schema before renaming.
            'alarmInstanceDescriptionTempalte': 'Alarm created for testing migration of the Tag Rule Engine',
            'keywords': [TEST_NAME],
            'properties': {'forTest': 'True'}
        }
        response = self.post(CREATE_TAG_RULE_ROUTE, retries=self.build_default_400_retry(), json=rule)
        response.raise_for_status()
    def __build_test_rule_condition(self, notification_strategy_id: str) -> Dict[str, Any]:
        """Return a condition that alarms below setpoint 0 via the given strategy."""
        return {
            'setPoints': ['0'],
            'comparator': 'LESS_THAN',
            'deadband': '0',
            'securityLevel': '2',
            'notificationStrategyIds': [notification_strategy_id]
        }
    def __create_test_notification_strategy(self) -> str:
        """Create an SMTP notification strategy for this test; return its id."""
        result = NotificationUtilities().create_simple_smtp_notification_strategy(
            self,
            f'Notification strategy for {TEST_NAME}',
            'Test notification strategy')
        return result['notification_strategy']['id']
    def __assert_rules_equal(self, expected: Dict[str, Any], actual: Dict[str, Any]):
        """Assert two rules match (full equality only for rules this test created)."""
        if self.__is_test_rule(expected):
            assert expected == actual
        else:
            # Minimal checks for a rule we didn't create.
            assert expected['displayName'] == actual['displayName']
    def __assert_rule_has_valid_workspace(self, rule: Dict[str, Any], workspaces: List[str]):
        """Assert the rule's workspace id exists on the server."""
        matching_workspace = next((workspace for workspace in workspaces if workspace == rule['workspace']), None)
        assert matching_workspace is not None
    def __assert_rule_has_valid_notification_strategies(
        self,
        rule: Dict[str, Any],
        notification_strategies: List[Dict[str, Any]]
    ):
        """Assert every strategy id referenced by the rule's conditions exists."""
        for condition in rule['conditions']:
            if self.__is_test_rule(rule):
                assert len(condition['notificationStrategyIds']) > 0
            for strategy_id in condition['notificationStrategyIds']:
                matches = (strategy for strategy in notification_strategies if strategy['id'] == strategy_id)
                assert next(matches, None) is not None
    def __find_rule_by_display_name(
        self,
        rule: Dict[str, Any],
        collection: List[Dict[str, Any]]
    ) -> Optional[Dict[str, Any]]:
        """Find a rule in ``collection`` whose displayName matches ``rule``'s."""
        return self.find_record_with_matching_property_value(rule, collection, 'displayName')
    def __is_test_rule(self, rule: Dict[str, Any]) -> bool:
        """Return True when the rule carries this test's marker property."""
        return 'forTest' in rule['properties']
# Allow running this migration test directly from the command line.
# (Also strips dataset-extraction residue that was fused onto this line.)
if __name__ == '__main__':
    handle_command_line(TestTagRule)
ManualTestBase, \
handle_command_line, \
CLEAN_SERVER_RECORD_TYPE, \
POPULATED_SERVER_RECORD_TYPE
from manual_test.utilities.notification_utilities import NotificationUtilities
from manual_test.utilities.workspace_utilities import WorkspaceUtilities
from typing import Any, Dict, List, Optional
SERVICE_NAME = 'TagRuleEngine'
TAG_RULE_DATABASE_NAME = 'nitagrule'
TEST_NAME = 'TagRuleMigrationTest'
CREATE_TAG_RULE_ROUTE = 'nitagrule/v1/rules'
QUERY_TAG_RULES_ROUTE = 'nitagrule/v1/query-rules'
class TestTagRule(ManualTestBase):
def populate_data(self) -> None:
notification_strategy_id = self.__create_test_notification_strategy()
workspace_utilities = WorkspaceUtilities()
workspace_utilities.create_workspace_for_test(self)
for workspace_id in workspace_utilities.get_workspaces(self):
self.__create_test_rules(workspace_id, notification_strategy_id)
self.record_json_data(
SERVICE_NAME,
TAG_RULE_DATABASE_NAME,
POPULATED_SERVER_RECORD_TYPE,
self.__get_all_rules()
)
def record_initial_data(self) -> None:
self.record_json_data(SERVICE_NAME, TAG_RULE_DATABASE_NAME, CLEAN_SERVER_RECORD_TYPE, self.__get_all_rules())
def validate_data(self) -> None:
source_service_snapshot = self.read_recorded_json_data(
SERVICE_NAME,
TAG_RULE_DATABASE_NAME,
POPULATED_SERVER_RECORD_TYPE,
required=True)
target_service_snaphot = self.read_recorded_json_data(
SERVICE_NAME,
TAG_RULE_DATABASE_NAME,
CLEAN_SERVER_RECORD_TYPE,
required=False)
current_snapshot = self.__get_all_rules()
workspaces = WorkspaceUtilities().get_workspaces(self)
notification_strategies = NotificationUtilities().get_all_notification_strategies(self)
migrated_record_count = 0
for rule in current_snapshot:
expected_rule = self.find_record_with_matching_id(rule, source_service_snapshot)
if expected_rule is not None:
self.__assert_rules_equal(expected_rule, rule)
self.__assert_rule_has_valid_workspace(rule, workspaces)
self.__assert_rule_has_valid_notification_strategies(rule, notification_strategies)
migrated_record_count = migrated_record_count + 1
else:
# Verify items that are generated by the target version and not present in the source.
expected_rule = self.__find_rule_by_display_name(rule, target_service_snaphot)
assert expected_rule is not None
self.__assert_rules_equal(expected_rule, rule)
self.__assert_rule_has_valid_workspace(rule, workspaces)
self.__assert_rule_has_valid_notification_strategies(rule, notification_strategies)
assert len(source_service_snapshot) == migrated_record_count
def __get_all_rules(self) -> List[Dict[str, Any]]:
# NOTE: workspace="*" does not work as normal for the tag rule API. No value is used for all workspaces.
query: Dict[str, str] = {}
response = self.post(QUERY_TAG_RULES_ROUTE, json=query)
response.raise_for_status()
return response.json()['rules']
def __create_test_rules(self, workspace_id: str, notification_strategy_id: str):
self.__create_test_rule(workspace_id, notification_strategy_id, enabled=True)
self.__create_test_rule(workspace_id, notification_strategy_id, enabled=False)
def __create_test_rule(self, workspace_id: str, notification_strategy_id: str, enabled: bool):
state_description = 'Enabled' if enabled else 'Disabled'
rule = {
'searchPath': 'test.tag.for.tag.rule.migration',
'workspace': workspace_id,
'tagDataType': 'DOUBLE',
'conditions': [self.__build_test_rule_condition(notification_strategy_id)],
'disabled': not enabled,
'displayName': f'{state_description} Test Tag Rule',
'description': f'Test tag rule with state set to {state_description} for workspace {workspace_id}',
'alarmInstanceDisplayNameTemplate': 'Test Tag Rule Alarm',
'alarmInstanceDescriptionTempalte': 'Alarm created for testing migration of the Tag Rule Engine',
'keywords': [TEST_NAME],
'properties': {'forTest': 'True'}
}
response = self.post(CREATE_TAG_RULE_ROUTE, retries=self.build_default_400_retry(), json=rule)
response.raise_for_status()
def __build_test_rule_condition(self, notification_strategy_id: str) -> Dict[str, Any]:
return {
'setPoints': ['0'],
'comparator': 'LESS_THAN',
'deadband': '0',
'securityLevel': '2',
'notificationStrategyIds': [notification_strategy_id]
}
def __create_test_notification_strategy(self) -> str:
result = NotificationUtilities().create_simple_smtp_notification_strategy(
self,
f'Notification strategy for {TEST_NAME}',
'Test notification strategy')
return result['notification_strategy']['id']
def __assert_rules_equal(self, expected: Dict[str, Any], actual: Dict[str, Any]):
if self.__is_test_rule(expected):
assert expected == actual
else:
# Minimal checks for a rule we didn't create.
assert expected['displayName'] == actual['displayName']
def __assert_rule_has_valid_workspace(self, rule: Dict[str, Any], workspaces: List[str]):
matching_workspace = next((workspace for workspace in workspaces if workspace == rule['workspace']), None)
assert matching_workspace is not None
def __assert_rule_has_valid_notification_strategies(
self,
rule: Dict[str, Any],
notification_strategies: List[Dict[str, Any]]
):
for condition in rule['conditions']:
if self.__is_test_rule(rule):
assert len(condition['notificationStrategyIds']) > 0
for strategy_id in condition['notificationStrategyIds']:
matches = (strategy for strategy in notification_strategies if strategy['id'] == strategy_id)
assert next(matches, None) is not None
def __find_rule_by_display_name(
self,
rule: Dict[str, Any],
collection: List[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
return self.find_record_with_matching_property_value(rule, collection, 'displayName')
def __is_test_rule(self, rule: Dict[str, Any]) -> bool:
return 'forTest' in rule['properties']
if __name__ == '__main__':
handle_command_line(TestTagRule) | 0.740174 | 0.291516 |
import sys, os, getopt, json
def main(argv):
layer_names = ['**conv1**',
'**relu1**',
'**pool1**',
'**lrn1**',
'**conv2**',
'**relu2**',
'**pool2**',
'**lrn2**',
'**conv3**',
'**relu3**',
'**conv4**',
'**relu4**',
'**conv5**',
'**relu5**',
'**pool5**',
'**fc6**',
'**relu6**',
'**drop6**',
'**fc7**',
'**relu7**',
'**drop7**',
'**fc8**',
'**prob**']
# Parse command line arguments
script_name = argv[0]
in_file_path = ''
out_file_path = ''
try:
opts, args = getopt.getopt(argv[1:],"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print script_name, '-i <in_file_path> -o <out_file_path>'
sys.exit(2)
if len(opts) < 2:
print script_name, '-i <in_file_path> -o <out_file_path>'
sys.exit()
for opt, arg in opts:
if opt == '-h':
print script_name, '-i <in_file_path> -o <out_file_path>'
sys.exit()
elif opt in ("-i", "--ifile"):
if not os.path.isfile(arg):
print 'Input file not found or not a regular file:', arg
sys.exit(2)
in_file_path = arg
elif opt in ("-o", "--ofile"):
out_dir = os.path.dirname(arg)
if out_dir != '' and not os.path.exists(out_dir):
print 'Output dir not found:', os.path.dirname(arg)
sys.exit(2)
out_file_path = arg
else:
print 'Unknown option', opt
sys.exit(2)
print 'Converting', in_file_path,'to', out_file_path
# Parse json input
with open(in_file_path) as in_file:
data = json.load(in_file)
# Open md file output
out_file = open(out_file_path, 'w')
# Separate caffe benchmarks from tiny-dnn
caffe_benchmarks = []
tiny_dnn_benchmarks = []
for benchmark in data['benchmarks']:
if 'CaffeLayerTest' in benchmark['name']:
caffe_benchmarks.append(benchmark)
elif 'TinyDNNLayerTest' in benchmark['name']:
tiny_dnn_benchmarks.append(benchmark)
# Validate number of Caffe and tiny-dnn benchmarks matches
if len(caffe_benchmarks) != len(tiny_dnn_benchmarks):
print 'Error: number of Caffe and tiny-dnn benchmarks must match'
print 'Caffe =', len(caffe_benchmarks), 'tiny-dnn =', len(tiny_dnn_benchmarks)
sys.exit(2)
# Write header
c = data['context']
out_file.write('### ' + caffe_benchmarks[0]['name'].split('/')[0] + ':\n-\n')
out_file.write('Date: **' + c['date'] + '** \n')
out_file.write('Threads: ' + "{0:.4f}".format(c['num_cpus']) + ' @ ' + "{0:.4f}".format(c['mhz_per_cpu']) + ' Mhz \n')
out_file.write('Build: ' + c['library_build_type'] + ' \n\n')
# Write benchmarks into a markdown table
out_file.write('| Layer | Caffe CPU | tiny-dnn CPU | Caffe time | tiny-dnn time |\n')
out_file.write(':---:| ---:| ---:| ---:| ---:\n')
for c, t in zip(caffe_benchmarks, tiny_dnn_benchmarks):
caffe_layer_idx = int(c['name'].split('/')[-1])
tiny_dnn_layer_idx = int(t['name'].split('/')[-1])
if caffe_layer_idx != tiny_dnn_layer_idx:
print 'Error: layer index of Caffe and tiny-dnn must match'
print 'Caffe =', caffe_layer_idx, 'tiny-dnn =', tiny_dnn_layer_idx
sys.exit(2)
out_file.write(layer_names[caffe_layer_idx-1] + ' | ' + "{0:.4f}".format(c['cpu_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(t['cpu_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(c['real_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(t['real_time'] / 1000000.0) + ' ms\n')
# Entry point: the full argv is passed so argv[0] can be used in usage messages.
if __name__ == '__main__':
    main(sys.argv)
# NOTE(review): duplicate (parsed-code) copy of the json2md conversion script.
# The code below is Python 2 (print statements) and is left byte-identical;
# only comments and docstrings were added.
import sys, os, getopt, json
def main(argv):
    """Convert Google Benchmark JSON results into a markdown comparison table."""
    # Markdown labels for each benchmarked layer, addressed by 1-based index.
    layer_names = ['**conv1**',
                   '**relu1**',
                   '**pool1**',
                   '**lrn1**',
                   '**conv2**',
                   '**relu2**',
                   '**pool2**',
                   '**lrn2**',
                   '**conv3**',
                   '**relu3**',
                   '**conv4**',
                   '**relu4**',
                   '**conv5**',
                   '**relu5**',
                   '**pool5**',
                   '**fc6**',
                   '**relu6**',
                   '**drop6**',
                   '**fc7**',
                   '**relu7**',
                   '**drop7**',
                   '**fc8**',
                   '**prob**']
    # Parse command line arguments
    script_name = argv[0]
    in_file_path = ''
    out_file_path = ''
    try:
        opts, args = getopt.getopt(argv[1:],"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print script_name, '-i <in_file_path> -o <out_file_path>'
        sys.exit(2)
    if len(opts) < 2:
        print script_name, '-i <in_file_path> -o <out_file_path>'
        sys.exit()
    for opt, arg in opts:
        if opt == '-h':
            print script_name, '-i <in_file_path> -o <out_file_path>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            if not os.path.isfile(arg):
                print 'Input file not found or not a regular file:', arg
                sys.exit(2)
            in_file_path = arg
        elif opt in ("-o", "--ofile"):
            out_dir = os.path.dirname(arg)
            if out_dir != '' and not os.path.exists(out_dir):
                print 'Output dir not found:', os.path.dirname(arg)
                sys.exit(2)
            out_file_path = arg
        else:
            print 'Unknown option', opt
            sys.exit(2)
    print 'Converting', in_file_path,'to', out_file_path
    # Parse json input
    with open(in_file_path) as in_file:
        data = json.load(in_file)
    # Open md file output
    # NOTE(review): out_file is never closed; a 'with' block would be safer.
    out_file = open(out_file_path, 'w')
    # Separate caffe benchmarks from tiny-dnn
    caffe_benchmarks = []
    tiny_dnn_benchmarks = []
    for benchmark in data['benchmarks']:
        if 'CaffeLayerTest' in benchmark['name']:
            caffe_benchmarks.append(benchmark)
        elif 'TinyDNNLayerTest' in benchmark['name']:
            tiny_dnn_benchmarks.append(benchmark)
    # Validate number of Caffe and tiny-dnn benchmarks matches
    if len(caffe_benchmarks) != len(tiny_dnn_benchmarks):
        print 'Error: number of Caffe and tiny-dnn benchmarks must match'
        print 'Caffe =', len(caffe_benchmarks), 'tiny-dnn =', len(tiny_dnn_benchmarks)
        sys.exit(2)
    # Write header
    # NOTE(review): 'c' is later shadowed by the table loop variable below.
    c = data['context']
    out_file.write('### ' + caffe_benchmarks[0]['name'].split('/')[0] + ':\n-\n')
    out_file.write('Date: **' + c['date'] + '** \n')
    out_file.write('Threads: ' + "{0:.4f}".format(c['num_cpus']) + ' @ ' + "{0:.4f}".format(c['mhz_per_cpu']) + ' Mhz \n')
    out_file.write('Build: ' + c['library_build_type'] + ' \n\n')
    # Write benchmarks into a markdown table
    out_file.write('| Layer | Caffe CPU | tiny-dnn CPU | Caffe time | tiny-dnn time |\n')
    out_file.write(':---:| ---:| ---:| ---:| ---:\n')
    for c, t in zip(caffe_benchmarks, tiny_dnn_benchmarks):
        caffe_layer_idx = int(c['name'].split('/')[-1])
        tiny_dnn_layer_idx = int(t['name'].split('/')[-1])
        if caffe_layer_idx != tiny_dnn_layer_idx:
            print 'Error: layer index of Caffe and tiny-dnn must match'
            print 'Caffe =', caffe_layer_idx, 'tiny-dnn =', tiny_dnn_layer_idx
            sys.exit(2)
        # Times are scaled by 1e6 for display (ns -> ms, presumably -- confirm units).
        out_file.write(layer_names[caffe_layer_idx-1] + ' | ' + "{0:.4f}".format(c['cpu_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(t['cpu_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(c['real_time'] / 1000000.0) + ' ms | ' + "{0:.4f}".format(t['real_time'] / 1000000.0) + ' ms\n')
if __name__ == '__main__':
    main(sys.argv)
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from styleframe import StyleFrame, Styler, Container, Series, utils
class SeriesTest(unittest.TestCase):
    """Checks styleframe's Series/Container parity with pandas and the
    boolean-filtering behavior of the ``.style`` accessor."""

    @classmethod
    def setUpClass(cls):
        cls.pandas_series = pd.Series((None, 1))
        cls.sf_series = Series((Container(None), Container(1)))

    def test_isnull(self):
        for pandas_value, sf_value in zip(self.pandas_series.isnull(),
                                          self.sf_series.isnull()):
            self.assertEqual(pandas_value, sf_value)

    def test_notnull(self):
        for pandas_value, sf_value in zip(self.pandas_series.notnull(),
                                          self.sf_series.notnull()):
            self.assertEqual(pandas_value, sf_value)

    def test_style_accessor(self):
        sf = StyleFrame({'a': list(range(10))})
        sf.apply_style_by_indexes(sf[sf['a'] % 2 == 0],
                                  styler_obj=Styler(bold=True, bg_color=utils.colors.yellow),
                                  complement_style=Styler(bold=False, font=utils.fonts.calibri))
        evens = list(range(0, 10, 2))
        odds = list(range(1, 10, 2))

        def assert_filtered_equals(expected_values, mask):
            # Filter by a style-based boolean mask and compare the surviving
            # column against the expected values.
            expected = StyleFrame({'a': expected_values})
            actual = StyleFrame(sf.loc[mask].reset_index(drop=True))
            assert_frame_equal(expected.data_df, actual.data_df)

        assert_filtered_equals(evens, sf['a'].style.font == utils.fonts.arial)
        assert_filtered_equals(evens, sf['a'].style.bg_color == utils.colors.yellow)
        assert_filtered_equals(evens, (sf['a'].style.bg_color == utils.colors.yellow)
                               & sf['a'].style.font)
        assert_filtered_equals(odds, sf['a'].style.font == utils.fonts.calibri)
        assert_filtered_equals(odds, ~sf['a'].style.bold)
        assert_filtered_equals(odds, ~sf['a'].style.bold
                               & (sf['a'].style.font == utils.fonts.calibri))
# NOTE(review): duplicate (parsed-code) copy of the series tests.  The leading
# 'import unittest' line was lost during extraction, so this copy does not run
# on its own.  Code left byte-identical; only comments/docstrings added.
import pandas as pd
from pandas.testing import assert_frame_equal
from styleframe import StyleFrame, Styler, Container, Series, utils
class SeriesTest(unittest.TestCase):
    """Series/Container parity with pandas plus .style accessor filtering."""
    @classmethod
    def setUpClass(cls):
        cls.pandas_series = pd.Series((None, 1))
        cls.sf_series = Series((Container(None), Container(1)))
    def test_isnull(self):
        self.assertTrue(all(p_val == sf_val
                            for p_val, sf_val in zip(self.pandas_series.isnull(), self.sf_series.isnull())))
    def test_notnull(self):
        self.assertTrue(all(p_val == sf_val
                            for p_val, sf_val in zip(self.pandas_series.notnull(), self.sf_series.notnull())))
    def test_style_accessor(self):
        sf = StyleFrame({'a': list(range(10))})
        # Even rows get bold/yellow, odd rows get the calibri complement style;
        # the filters below select rows back out by those styles.
        sf.apply_style_by_indexes(sf[sf['a'] % 2 == 0], styler_obj=Styler(bold=True, bg_color=utils.colors.yellow),
                                  complement_style=Styler(bold=False, font=utils.fonts.calibri))
        control_sf = StyleFrame({'a': list(range(0, 10, 2))})
        test_sf = StyleFrame(sf.loc[sf['a'].style.font == utils.fonts.arial].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
        control_sf = StyleFrame({'a': list(range(0, 10, 2))})
        test_sf = StyleFrame(sf.loc[sf['a'].style.bg_color == utils.colors.yellow].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
        control_sf = StyleFrame({'a': list(range(0, 10, 2))})
        test_sf = StyleFrame(sf.loc[(sf['a'].style.bg_color == utils.colors.yellow)
                                    &
                                    sf['a'].style.font].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
        control_sf = StyleFrame({'a': list(range(1, 10, 2))})
        test_sf = StyleFrame(sf.loc[sf['a'].style.font == utils.fonts.calibri].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
        control_sf = StyleFrame({'a': list(range(1, 10, 2))})
        test_sf = StyleFrame(sf.loc[~sf['a'].style.bold].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
        control_sf = StyleFrame({'a': list(range(1, 10, 2))})
        test_sf = StyleFrame(sf.loc[~sf['a'].style.bold
                                    &
                                    (sf['a'].style.font == utils.fonts.calibri)].reset_index(drop=True))
        assert_frame_equal(control_sf.data_df, test_sf.data_df)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__maintainer__ = "<NAME>"
__status__ = "Production"
import time
from rpi_ws281x import *
from control.ledstrip import set_brightness_depending_on_daytime
from functions.effects import clear
from logger import LOGGER
COLOR_HOUR = Color(200, 0, 0)
COLOR_HOUR_DIMMED = Color(50, 0, 0)
COLOR_MINUTE = Color(0, 0, 200)
COLOR_MINUTE_DIMMED = Color(0, 0, 40)
def run_clock2(strip):
    """Drive an analog-style LED clock on *strip* until the stop flag is set.

    Draws the hour hand at pixel ``hour`` (with a dimmed trailing pixel in the
    second half of the dial) and the minute hand at ``minute``; colliding
    hands are resolved by shifting a pixel.  When the minute slot advances, a
    wipe animation runs and the strip is cleared for the next frame.
    """
    LOGGER.debug("running...")
    # Imported here rather than at module level — presumably to avoid a
    # circular import with the control package; confirm before moving.
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            hour, minute, next_minute = _get_pointer(strip)
            # Redraw until the minute pointer advances to the next slot.
            while not minute == next_minute:
                # hour hand
                if 12 < minute <= 23:
                    strip.setPixelColor(hour, COLOR_HOUR)
                    strip.setPixelColor(hour + 1,
                                        COLOR_HOUR_DIMMED)
                else:
                    strip.setPixelColor(hour, COLOR_HOUR)
                # minute hand (shift a pixel when it collides with the hour hand)
                if minute == hour:
                    if 12 < minute < strip.numPixels():
                        if hour <= 23:
                            strip.setPixelColor(hour + 1, COLOR_HOUR)
                            strip.setPixelColor(minute, COLOR_MINUTE)
                        else:
                            strip.setPixelColor(0, COLOR_HOUR)
                            strip.setPixelColor(minute - 1, COLOR_MINUTE)
                    else:
                        strip.setPixelColor(minute + 1, COLOR_MINUTE)
                else:
                    strip.setPixelColor(minute, COLOR_MINUTE)
                strip.show()
                time.sleep(0.2)
                minute = _get_pointer(strip)[1]
            _wipe_second(strip, COLOR_MINUTE_DIMMED, minute - 1,
                         backward=True)
            clear(strip)
        except KeyboardInterrupt:
            print()
            # BUGFIX: Logger.warn is a deprecated alias of Logger.warning.
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            # BUGFIX: clearer message, lazy %-formatting per logging convention.
            LOGGER.error("Unexpected error: %s", e)
            exit()
    clear(strip)
def _get_pointer(strip):
    """Map the current wall-clock time onto pixel indices.

    Returns ``(hour_pixel, minute_pixel, next_minute_pixel)``.  The mapping
    (two pixels per hour, one minute slot per 2.5 minutes, wrap after 23)
    appears to assume a 24-pixel ring — confirm against the strip config.
    """
    now = set_brightness_depending_on_daytime(strip)[0]
    hour_pixel = (now.hour % 12) * 2
    minute_pixel = int(now.minute // 2.5)
    if minute_pixel <= 22:
        upcoming = minute_pixel + 1
    else:
        upcoming = 0
    return hour_pixel, minute_pixel, upcoming
def _wipe_second(stripe, color: Color, begin=0, backward=False):
    """Sweep *color* once around the strip starting after pixel *begin*.

    With ``backward=True`` the sweep runs at double speed and is followed by
    a reverse sweep that blanks the pixels again.
    """
    pixel_count = stripe.numPixels()
    if backward:
        step_seconds = ((1000.0 // pixel_count) // 2) / 1000.0
    else:
        step_seconds = (1000.0 // pixel_count) / 1000.0

    def paint(index, paint_color):
        # Wrap indices that run past the end of the strip, then show the pixel.
        if index >= pixel_count:
            index -= pixel_count
        stripe.setPixelColor(index, paint_color)
        stripe.show()
        time.sleep(step_seconds)

    for i in range(begin + 1, pixel_count + begin):
        paint(i, color)
    if backward:
        off = Color(0, 0, 0)
        for i in range(pixel_count + begin - 1, begin, -1):
            paint(i, off)
# This module is meant to be imported; nothing happens when run directly.
if __name__ == '__main__':
    pass
# NOTE(review): duplicate (parsed-code) copy of the clock module.  Code is
# left byte-identical; only comments/docstrings were added.
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__maintainer__ = "<NAME>"
__status__ = "Production"
import time
# NOTE(review): wildcard import — Color comes from rpi_ws281x.
from rpi_ws281x import *
from control.ledstrip import set_brightness_depending_on_daytime
from functions.effects import clear
from logger import LOGGER
# Hand colours (bright + dimmed variants).
COLOR_HOUR = Color(200, 0, 0)
COLOR_HOUR_DIMMED = Color(50, 0, 0)
COLOR_MINUTE = Color(0, 0, 200)
COLOR_MINUTE_DIMMED = Color(0, 0, 40)
def run_clock2(strip):
    """Drive an analog-style LED clock on *strip* until the stop flag is set."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            hour, minute, next_minute = _get_pointer(strip)
            # Redraw until the minute pointer advances to the next slot.
            while not minute == next_minute:
                # hour
                if 12 < minute <= 23:
                    strip.setPixelColor(hour, COLOR_HOUR)
                    strip.setPixelColor(hour + 1,
                                        COLOR_HOUR_DIMMED)
                else:
                    strip.setPixelColor(hour, COLOR_HOUR)
                # minute
                if minute == hour:
                    if 12 < minute < strip.numPixels():
                        if hour <= 23:
                            strip.setPixelColor(hour + 1, COLOR_HOUR)
                            strip.setPixelColor(minute, COLOR_MINUTE)
                        else:
                            strip.setPixelColor(0, COLOR_HOUR)
                            strip.setPixelColor(minute - 1, COLOR_MINUTE)
                    else:
                        strip.setPixelColor(minute + 1, COLOR_MINUTE)
                else:
                    strip.setPixelColor(minute, COLOR_MINUTE)
                strip.show()
                time.sleep(0.2)
                minute = _get_pointer(strip)[1]
            _wipe_second(strip, COLOR_MINUTE_DIMMED, minute - 1,
                         backward=True)
            clear(strip)
        except KeyboardInterrupt:
            print()
            # NOTE(review): Logger.warn is a deprecated alias of Logger.warning.
            LOGGER.warn("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def _get_pointer(strip):
    """Map current time to (hour_pixel, minute_pixel, next_minute_pixel)."""
    now = set_brightness_depending_on_daytime(strip)[0]
    hour = int(int(now.hour) % 12 * 2)
    minute = int(now.minute // 2.5)
    next_minute = minute + 1 if minute <= 22 else 0
    return hour, minute, next_minute
def _wipe_second(stripe, color: Color, begin=0, backward=False):
    """Sweep *color* around the strip; optionally wipe it off backwards."""
    wait_ms = ((1000.0 // stripe.numPixels()) // 2) / 1000.0 \
        if backward else (1000.0 // stripe.numPixels()) / 1000.0
    for i in range(begin + 1, stripe.numPixels() + begin):
        if i >= stripe.numPixels():
            i -= stripe.numPixels()  # wrap past the end of the strip
        stripe.setPixelColor(i, color)
        stripe.show()
        time.sleep(wait_ms)
    if backward:
        for i in range(stripe.numPixels() + begin - 1, begin, -1):
            if i >= stripe.numPixels():
                i -= stripe.numPixels()
            stripe.setPixelColor(i, Color(0, 0, 0))
            stripe.show()
            time.sleep(wait_ms)
if __name__ == '__main__':
    pass
from dcf_test_app.models import Brand, Product
from django.test import TestCase
from rest_framework.test import APIClient
from django_client_framework import permissions as p
from django_client_framework.models import get_user_model
class TestPostPerms(TestCase):
    """POSTing to the related collection api creates new relations.

    BUGFIX: ``assertEquals`` and ``assertDictContainsSubset`` are deprecated
    aliases that were removed from unittest in Python 3.12; replaced with
    ``assertEqual`` throughout.
    """

    def setUp(self) -> None:
        # One user (with an authenticated client), one brand and one product;
        # each test grants only the permissions it exercises.
        User = get_user_model()
        self.user = User.objects.create(username="testuser")
        self.user_client = APIClient()
        self.user_client.force_authenticate(self.user)
        self.brand = Brand.objects.create(name="brand")
        self.product = Product.objects.create(barcode="product")

    def test_full_permission_post(self) -> None:
        """
        Post with read and field write permissions.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEqual(200, resp.status_code)
        self.product.refresh_from_db()
        self.assertEqual(self.brand, self.product.brand)
        self.assertEqual(1, data["objects_count"])
        # assertDictContainsSubset is deprecated; check the key directly.
        self.assertEqual(str(self.product.id), data["objects"][0]["id"])

    def test_no_child_read(self) -> None:
        """
        If product has no read permission, post should be successful but hidden.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.product.refresh_from_db()
        self.assertEqual(self.brand, self.product.brand, "product should be updated")
        self.assertEqual(1, self.brand.products.count(), "product should be updated")
        self.assertEqual(200, resp.status_code)
        self.assertEqual(0, data["objects_count"])

    def test_no_child_write(self) -> None:
        """
        Has no product write permission, post should be 403.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEqual(403, resp.status_code)
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
        self.assertEqual(
            data,
            f"You have no write permission on product({self.product.id})'s brand field.",
        )

    def test_no_child_perm(self) -> None:
        """
        Has no product read / write permission, post should be 404.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEqual(404, resp.status_code)
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
        self.assertEqual(data, f"Not Found: product({self.product.id})")

    def test_no_parent_write(self) -> None:
        """
        Has no brand write perm, should 403.
        """
        p.add_perms_shortcut(self.user, self.brand, "r", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEqual(403, resp.status_code)
        self.assertEqual(
            data,
            f"You have no write permission on brand({self.brand.id})'s products field.",
        )
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated

    def test_no_parent_read(self) -> None:
        """
        Has no brand read perm, but since can write to brand, the response is
        200.
        """
        p.add_perms_shortcut(self.user, self.brand, "w", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEqual(200, resp.status_code)
        data = resp.json()
        self.assertEqual(
            data["message"],
            "Action was successful but you have no permission to view the result.",
        )
        self.product.refresh_from_db()
        self.assertEqual(self.brand, self.product.brand, "product should be updated")
        self.assertEqual(1, self.brand.products.count(), "product should be updated")

    def test_no_parent_perm(self) -> None:
        """
        Has no brand perm, should 404.
        """
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEqual(404, resp.status_code)
        self.assertEqual(f"Not Found: brand({self.brand.id})", resp.json())
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated

    def test_post_no_permissions(self) -> None:
        # No permissions at all: the brand itself is invisible.
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEqual(404, resp.status_code)

    def test_post_correct_parent_perms(self) -> None:
        # Write on the parent field alone is not enough; the product is hidden.
        p.add_perms_shortcut(self.user, Brand, "w", field_name="products")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEqual(404, resp.status_code)
# NOTE(review): duplicate (parsed-code) copy of the related-collection POST
# permission tests.  The leading 'from dcf_test_app.models import Brand,
# Product' line was lost during extraction.  Code left byte-identical; only
# comments were added.  NOTE(review): assertEquals / assertDictContainsSubset
# are deprecated unittest aliases (removed in Python 3.12).
from django.test import TestCase
from rest_framework.test import APIClient
from django_client_framework import permissions as p
from django_client_framework.models import get_user_model
class TestPostPerms(TestCase):
    """POSTing to the related collection api creates new relations."""
    def setUp(self) -> None:
        # One user (with an authenticated client), one brand and one product;
        # each test grants only the permissions it exercises.
        User = get_user_model()
        self.user = User.objects.create(username="testuser")
        self.user_client = APIClient()
        self.user_client.force_authenticate(self.user)
        self.brand = Brand.objects.create(name="brand")
        self.product = Product.objects.create(barcode="product")
    def test_full_permission_post(self) -> None:
        """
        Post with read and field write permissions.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEquals(200, resp.status_code)
        self.product.refresh_from_db()
        self.assertEquals(self.brand, self.product.brand)
        self.assertEquals(1, data["objects_count"])
        self.assertDictContainsSubset({"id": str(self.product.id)}, data["objects"][0])
    def test_no_child_read(self) -> None:
        """
        If product has no read permission, post should be successful but hidden.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.product.refresh_from_db()
        self.assertEquals(self.brand, self.product.brand, "product should be updated")
        self.assertEqual(1, self.brand.products.count(), "product should be updated")
        self.assertEquals(200, resp.status_code)
        self.assertEquals(0, data["objects_count"])
    def test_no_child_write(self) -> None:
        """
        Has no product write permission, post should be 403.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEquals(403, resp.status_code)
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
        self.assertEquals(
            data,
            f"You have no write permission on product({self.product.id})'s brand field.",
        )
    def test_no_child_perm(self) -> None:
        """
        Has no product read / write permission, post should be 404.
        """
        p.add_perms_shortcut(self.user, self.brand, "rw", field_name="products")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEquals(404, resp.status_code)
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
        self.assertEquals(data, f"Not Found: product({self.product.id})")
    def test_no_parent_write(self) -> None:
        """
        Has no brand write perm, should 403.
        """
        p.add_perms_shortcut(self.user, self.brand, "r", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        data = resp.json()
        self.assertEquals(403, resp.status_code)
        self.assertEquals(
            data,
            f"You have no write permission on brand({self.brand.id})'s products field.",
        )
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
    def test_no_parent_read(self) -> None:
        """
        Has no brand read perm, but since can write to brand, the response is
        200.
        """
        p.add_perms_shortcut(self.user, self.brand, "w", field_name="products")
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEquals(200, resp.status_code)
        data = resp.json()
        self.assertEqual(
            data["message"],
            "Action was successful but you have no permission to view the result.",
        )
        self.product.refresh_from_db()
        self.assertEquals(self.brand, self.product.brand, "product should be updated")
        self.assertEqual(1, self.brand.products.count(), "product should be updated")
    def test_no_parent_perm(self) -> None:
        """
        Has no brand perm, should 404.
        """
        p.add_perms_shortcut(self.user, self.product, "w", field_name="brand")
        p.add_perms_shortcut(self.user, self.product, "r")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEquals(404, resp.status_code)
        self.assertEqual(f"Not Found: brand({self.brand.id})", resp.json())
        self.assertIsNone(self.product.brand_id)  # product is not updated
        self.assertEqual(0, self.brand.products.count())  # product is not updated
    def test_post_no_permissions(self) -> None:
        # No permissions at all: the brand itself is invisible.
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEquals(404, resp.status_code)
    def test_post_correct_parent_perms(self) -> None:
        # Write on the parent field alone is not enough; the product is hidden.
        p.add_perms_shortcut(self.user, Brand, "w", field_name="products")
        resp = self.user_client.post(
            f"/brand/{self.brand.id}/products",
            data=[self.product.id],
            format="json",
        )
        self.assertEquals(404, resp.status_code)
from le_utils.constants import format_presets, licenses, exercises
from le_utils.constants.languages import getlang # see also getlang_by_name, getlang_by_alpha2
from ricecooker.chefs import SushiChef
from ricecooker.classes.nodes import TopicNode
from ricecooker.classes.nodes import DocumentNode, AudioNode, VideoNode, HTML5AppNode
from ricecooker.classes.files import DocumentFile, AudioFile, VideoFile, HTMLZipFile
from ricecooker.classes.nodes import ExerciseNode
from ricecooker.classes.questions import SingleSelectQuestion, MultipleSelectQuestion, InputQuestion, PerseusQuestion
from ricecooker.classes.licenses import get_license
from ricecooker.exceptions import raise_for_invalid_channel
from ricecooker.config import LOGGER
import logging
LOGGER.setLevel(logging.INFO)
class ContentNodeDependencyChef(SushiChef):
    """
    The chef class that takes care of uploading channel to Kolibri Studio.
    We'll call its `main()` method from the command line script.
    """
    channel_info = {
        'CHANNEL_SOURCE_DOMAIN': 'learningequality.org',        # content provider's domain
        'CHANNEL_SOURCE_ID': 'content-node-dependency-test',    # an alphanumeric channel ID
        'CHANNEL_TITLE': 'ContentNode Dependency Test Channel',  # a human-readable title
        'CHANNEL_LANGUAGE': getlang('en').id,                   # language code of channel
        'CHANNEL_THUMBNAIL': 'https://s3-us-west-2.amazonaws.com/testdrivenlearningbucket/htmlcss.jpg',  # (optional) local path or url to image file
        # BUGFIX: 'dependecies' -> 'dependencies' (user-visible description typo)
        'CHANNEL_DESCRIPTION': 'Test if content node dependencies work given iframe sandboxing'
    }

    def _make_html5_app(self, source_id, title, zip_path):
        """Build an HTML5AppNode for a local zip file; both test apps share
        the same author/license/description metadata."""
        return HTML5AppNode(
            source_id=source_id,
            title=title,
            author='<NAME> (author\'s name)',
            description='Put file description here',
            language=getlang('en').id,
            license=get_license(licenses.CC_BY, copyright_holder='Copyright holder name'),
            thumbnail=None,
            files=[HTMLZipFile(
                path=zip_path,
                language=getlang('en').id
            )]
        )

    def construct_channel(self, *args, **kwargs):
        """
        Create ChannelNode and build topic tree.
        """
        channel = self.get_channel(*args, **kwargs)  # create ChannelNode from data in self.channel_info
        topic1 = TopicNode(
            source_id='121232ms',
            title='Content Nodes',
            description='Put folder description here',
            author=None,
            language=getlang('en').id,
            thumbnail=None,
        )
        channel.add_child(topic1)
        # HTML5 APPS
        topic13 = TopicNode(
            source_id='asasa331',
            title='HTML5App Nodes',
            description='Put folder description here',
            author=None,
            language=getlang('en').id,
            thumbnail=None,
        )
        topic1.add_child(topic13)
        # Two apps built from local zip files (deduplicated via _make_html5_app).
        topic13.add_child(self._make_html5_app(
            '302723b4', 'Shared Zip File app', './content/zipfiles/shared.zip'))
        topic13.add_child(self._make_html5_app(
            '302723b5', 'Thin app 1', './content/zipfiles/thinapp1.zip'))
        raise_for_invalid_channel(channel)
        return channel
if __name__ == '__main__':
    # This code runs when the sushi chef script is invoked on the command
    # line.  (Was a misused bare string — not a docstring inside an `if` —
    # with a "scripy" typo; replaced with a real comment.)
    chef = ContentNodeDependencyChef()
    chef.main()
# NOTE(review): duplicate (parsed-code) copy of the sushichef script.  Code
# left byte-identical; only comments/docstrings were touched.
from le_utils.constants import format_presets, licenses, exercises
from le_utils.constants.languages import getlang  # see also getlang_by_name, getlang_by_alpha2
from ricecooker.chefs import SushiChef
from ricecooker.classes.nodes import TopicNode
from ricecooker.classes.nodes import DocumentNode, AudioNode, VideoNode, HTML5AppNode
from ricecooker.classes.files import DocumentFile, AudioFile, VideoFile, HTMLZipFile
from ricecooker.classes.nodes import ExerciseNode
from ricecooker.classes.questions import SingleSelectQuestion, MultipleSelectQuestion, InputQuestion, PerseusQuestion
from ricecooker.classes.licenses import get_license
from ricecooker.exceptions import raise_for_invalid_channel
from ricecooker.config import LOGGER
import logging
LOGGER.setLevel(logging.INFO)
class ContentNodeDependencyChef(SushiChef):
    """
    The chef class that takes care of uploading channel to Kolibri Studio.
    We'll call its `main()` method from the command line script.
    """
    channel_info = {
        'CHANNEL_SOURCE_DOMAIN': 'learningequality.org',  # content provider's domain
        'CHANNEL_SOURCE_ID': 'content-node-dependency-test',  # an alphanumeric channel ID
        'CHANNEL_TITLE': 'ContentNode Dependency Test Channel',  # a human-readable title
        'CHANNEL_LANGUAGE': getlang('en').id,  # language code of channel
        'CHANNEL_THUMBNAIL': 'https://s3-us-west-2.amazonaws.com/testdrivenlearningbucket/htmlcss.jpg',  # (optional) local path or url to image file
        # NOTE(review): 'dependecies' typo below is user-visible; left as-is here.
        'CHANNEL_DESCRIPTION': 'Test if content node dependecies work given iframe sandboxing'
    }
    def construct_channel(self, *args, **kwargs):
        """
        Create ChannelNode and build topic tree.
        """
        channel = self.get_channel(*args, **kwargs)  # create ChannelNode from data in self.channel_info
        topic1 = TopicNode(
            source_id='121232ms',
            title='Content Nodes',
            description='Put folder description here',
            author=None,
            language=getlang('en').id,
            thumbnail=None,
        )
        channel.add_child(topic1)
        # HTML5 APPS
        topic13 = TopicNode(
            source_id='asasa331',
            title='HTML5App Nodes',
            description='Put folder description here',
            author=None,
            language=getlang('en').id,
            thumbnail=None,
        )
        topic1.add_child(topic13)
        content13a = HTML5AppNode(
            source_id='302723b4',
            title='Shared Zip File app',
            author='<NAME> (author\'s name)',
            description='Put file description here',
            language=getlang('en').id,
            license=get_license(licenses.CC_BY, copyright_holder='Copyright holder name'),
            thumbnail=None,
            files=[HTMLZipFile(
                path='./content/zipfiles/shared.zip',
                language=getlang('en').id
            )]
        )
        topic13.add_child(content13a)
        content13b = HTML5AppNode(
            source_id='302723b5',
            title='Thin app 1',
            author='<NAME> (author\'s name)',
            description='Put file description here',
            language=getlang('en').id,
            license=get_license(licenses.CC_BY, copyright_holder='Copyright holder name'),
            thumbnail=None,
            files=[HTMLZipFile(
                path='./content/zipfiles/thinapp1.zip',
                language=getlang('en').id
            )]
        )
        topic13.add_child(content13b)
        raise_for_invalid_channel(channel)
        return channel
if __name__ == '__main__':
    """
    This code will run when the sushi chef scripy is called on the command line.
    """
    chef = ContentNodeDependencyChef()
    chef.main()
from blinkenlights import setup, cleanup
from fourleds import light, clear
from time import sleep
import random
# Board-numbered GPIO pins of the eight LEDs, in display order.
pins = [37, 33, 31, 29, 36, 32, 22, 18]
# yp ym gp gm rp rm bp bm
setup(pins)
### Test pattern
# Flash each LED once in sequence, then switch everything off.
clear(pins)
for i in pins:
    light(i)
    sleep(0.1)
clear(pins)
#### Definitions
class Ball:
    """The pong 'ball': one lit LED in a 2x2 field of four.

    field[x][y] maps side x (0/1) and height y (0/1) to a pin.
    """

    def __init__(self, LL, UL, LR, UR):
        self.field = [[LL, UL], [LR, UR]]
        self.field_pins = [LL, UL, LR, UR]
        # Start on a random side at a random height and show it.
        self.x = random.randint(0, 1)
        self.y = random.randint(0, 1)
        clear(self.field_pins)
        light(self.field[self.x][self.y])

    def hit(self):
        """Bounce the ball: opposite side, random height."""
        self.x ^= 1  # always go to opposite side
        self.y = random.randint(0, 1)
        clear(self.field_pins)
        light(self.field[self.x][self.y])
        sleep(1)

    def miss(self):
        """Blink the whole field to signal a missed ball."""
        clear(self.field_pins)
        for _ in range(4):  ### blink the whole field
            light(self.field_pins)
            sleep(0.2)
            clear(self.field_pins)
            sleep(0.2)

    def swing_by(self, player):
        """Let *player* swing; return True on a hit, False on a miss."""
        aligned = player.y == self.y
        if aligned:
            self.hit()
        else:
            self.miss()
        return aligned
class Player:
    """A paddle occupying one of two LEDs (positions 0 and 1)."""

    def __init__(self, low, high):
        self.range = [low, high]
        self.y = random.randint(0, 1)
        clear(self.range)
        # Announce the starting position with a short blink sequence,
        # then leave it lit.
        for _ in range(6):
            light(self.range[self.y])
            sleep(0.1)
            clear(self.range)
            sleep(0.1)
        light(self.range[self.y])

    def move(self, direction):
        """Move the paddle to position 0 or 1 and update the LEDs."""
        assert direction in (0, 1)
        self.y = direction
        clear(self.range)
        light(self.range[self.y])
def imperfect(player, ball, success_rate=0.99):
    """Move *player* toward *ball*, failing occasionally.

    With probability *success_rate* the player moves to the ball's
    height; otherwise it moves to the opposite (wrong) position.

    Raises:
        ValueError: if success_rate is outside [0, 1].  (Was an assert,
        which is stripped under ``python -O``.)
    """
    if not 0 <= success_rate <= 1:
        raise ValueError('success_rate must be within [0, 1], got %r' % (success_rate,))
    if random.uniform(0, 1) < success_rate:
        player.move(ball.y)        # track the ball correctly
    else:
        player.move(ball.y ^ 1)    # whiff: go to the wrong position
#### Set up the global variables
# Ball on the four inner LEDs; each player on its own pair of pins.
myball = Ball(33, 29, 32, 18)
p1 = Player(37, 31)
p2 = Player(36, 22)
winner = None
order = []
# The player on the ball's starting side swings first.
if myball.x == 0:
    order = [p1, p2]
else:
    order = [p2, p1]
#### Main game loop
# Alternate swings until one player misses; the other wins.
while True:
    imperfect(order[0], myball)
    sleep(0.5)
    inplay = myball.swing_by(order[0])
    if not inplay:
        winner = order[1]
        break
    imperfect(order[1], myball)
    sleep(0.5)
    inplay = myball.swing_by(order[1])
    if not inplay:
        winner = order[0]
        break
### A little victory dance
# Wiggle the winner's paddle between its two positions.
clear(pins)
for i in range(25):
    winner.move(0)
    sleep(0.2)
    winner.move(1)
    sleep(0.2)
cleanup() | pong.py | from blinkenlights import setup, cleanup
from fourleds import light, clear
from time import sleep
import random
pins = [37, 33, 31, 29, 36, 32, 22, 18]
# yp ym gp gm rp rm bp bm
setup(pins)
### Test pattern
clear(pins)
for i in pins:
light(i)
sleep(0.1)
clear(pins)
#### Definitions
class Ball:
def __init__(self, LL, UL, LR, UR):
self.field = [[LL, UL], [LR, UR]]
self.field_pins = [LL, UL, LR, UR]
self.x = random.randint(0,1)
self.y = random.randint(0,1)
clear(self.field_pins)
light(self.field[self.x][self.y])
def hit(self):
self.x = self.x ^ 1 # always go to opposite side
self.y = random.randint(0,1)
clear(self.field_pins)
light(self.field[self.x][self.y])
sleep(1)
def miss(self):
clear(self.field_pins)
for i in range(4): ### blink the whole field
light(self.field_pins)
sleep(0.2)
clear(self.field_pins)
sleep(0.2)
def swing_by(self, player):
if player.y == self.y:
self.hit()
return True
else:
self.miss()
return False
class Player:
def __init__(self, low, high):
self.range = [low, high]
self.y = random.randint(0,1)
clear(self.range)
for i in range(6):
light(self.range[self.y])
sleep(0.1)
clear(self.range)
sleep(0.1)
light(self.range[self.y])
def move(self, direction):
assert (direction==0 or direction==1)
self.y = direction
clear(self.range)
light(self.range[self.y])
def imperfect(player, ball, success_rate=0.99):
assert 0 <= success_rate <= 1
if random.uniform(0,1) < success_rate:
player.move(ball.y)
else:
player.move(ball.y ^ 1)
#### Set up the global variables
myball = Ball(33, 29, 32, 18)
p1 = Player(37, 31)
p2 = Player(36, 22)
winner = None
order = []
if myball.x == 0:
order = [p1, p2]
else:
order = [p2, p1]
#### Main game loop
while True:
imperfect(order[0], myball)
sleep(0.5)
inplay = myball.swing_by(order[0])
if not inplay:
winner = order[1]
break
imperfect(order[1], myball)
sleep(0.5)
inplay = myball.swing_by(order[1])
if not inplay:
winner = order[0]
break
### A little victory dance
clear(pins)
for i in range(25):
winner.move(0)
sleep(0.2)
winner.move(1)
sleep(0.2)
cleanup() | 0.302803 | 0.271674 |
import json
from datetime import datetime, timedelta
import pathlib
import pandas as pd
import networkx as nx
from statistics import median, mean
from itertools import combinations
from minepy import MINE
import warnings
warnings.simplefilter("ignore", UserWarning)
from sklearn.metrics import mutual_info_score
def loadTraces(pathToTraces):
    """Load every JSON trace below *pathToTraces*, grouped by operation.

    Each subdirectory name (skipping the root itself) is one operation;
    returns (sorted operation names, {operation: {'id': [...], 'data': [...]}}).
    """
    def _read(path):
        with open(path) as fh:
            return json.load(fh)

    operations = sorted(p.name for p in list(pathToTraces.glob('**'))[1:])
    traces = {}
    for operation in operations:
        files = sorted((pathToTraces / operation).glob('*.json'))
        traces[operation] = {
            # Trace id = file name without its '.json' suffix.
            'id': [f.name[:f.name.find('.json')] for f in files],
            'data': [_read(f) for f in files],
        }
    return operations, traces
def loadMetrics(pathToData):
    """Load per-node metric CSVs from <pathToData>/fixed_metrics.

    Returns (sorted node names, sorted metric column names, {node: {'data': DataFrame}}).
    """
    pathToMetrics = pathToData / 'fixed_metrics'
    csv_files = list(pathToMetrics.glob('*.csv'))
    # Node name = file name up to the first underscore ('<node>_metrics.csv').
    nodeNames = sorted(f.name[:f.name.find('_')] for f in csv_files)
    nodes = {}
    for name in nodeNames:
        nodes[name] = {'data': pd.read_csv(pathToMetrics / (name + '_metrics.csv'))}
    # Parse the 'now' column into datetime objects.
    for name in nodeNames:
        nodes[name]['data']['now'] = nodes[name]['data']['now'].map(
            lambda ts: datetime.strptime(str(ts), '%Y-%m-%d %H:%M:%S CEST'))
    metrics = list(nodes[nodeNames[0]]['data'].keys())
    metrics.remove('now')
    metrics.remove('load.cpucore')  # always == 8
    metrics = sorted(metrics)
    return nodeNames, metrics, nodes
def parseTrace(operation, df, graph):
    """Recursively flatten an osprofiler-style trace into *graph*.

    Every entry of df['children'] becomes a graph node keyed by its
    trace_id, carrying the span attributes; non-root spans also get an
    edge parent_id -> trace_id.  Spans without a stop payload get the
    sentinel 'Null' for endTimestamp/duration (patched by fixTraces()).
    Returns the (mutated) graph.

    Improvement over the original: the repeated ``item.get('info')`` /
    raw-payload chain lookups are hoisted into locals.
    """
    G = graph
    for item in df['children']:
        info = item['info']
        name = info['name']
        start_payload = info['meta.raw_payload.' + name + '-start']
        stop_payload = info.get('meta.raw_payload.' + name + '-stop',
                                {'timestamp': 'Null'})
        trace = {
            'operation': operation,
            'host': info.get('host'),
            'name': name,
            'service': info.get('service'),
            'project': info.get('project'),
            'startTimestamp': datetime.strptime(start_payload.get('timestamp'),
                                                '%Y-%m-%dT%H:%M:%S.%f'),
        }
        endTimestamp = stop_payload.get('timestamp')
        if endTimestamp != 'Null':
            trace['endTimestamp'] = datetime.strptime(endTimestamp, '%Y-%m-%dT%H:%M:%S.%f')
            trace['duration'] = trace['endTimestamp'] - trace['startTimestamp']
        else:
            trace['endTimestamp'] = 'Null'
            trace['duration'] = 'Null'
        trace['trace_id'] = item.get('trace_id')
        trace['parent_id'] = item.get('parent_id')
        trace['base_id'] = start_payload.get('base_id')
        # A span is a root when its parent id equals the trace base id.
        trace['isRoot'] = trace['parent_id'] == trace['base_id']
        G.add_nodes_from([(trace['trace_id'], trace)])
        if not trace['isRoot']:
            G.add_edge(trace['parent_id'], trace['trace_id'])
        if len(item['children']) != 0:
            G = parseTrace(operation, item, G)
    return G
# fix non-endTimestamp problem
def fixTraces(operations, traces):
    """Patch spans whose endTimestamp is the sentinel 'Null'.

    The replacement is the maximum known endTimestamp among the span's
    descendants, falling back to its own startTimestamp; the duration is
    recomputed from the patched value.  Leaf spans with no end stay 'Null'.
    """
    for operation in operations:
        for trace in traces[operation]['graph']:
            spans = trace.nodes(data=True)
            for span in spans:
                if span[1]['endTimestamp'] == 'Null':
                    children = list(nx.descendants(trace, span[0]))
                    if children == []:
                        # Nothing to infer from: leave the sentinel in place.
                        continue
                    endTimestamp = span[1]['startTimestamp']
                    for child in children:
                        time = spans[child]['endTimestamp']
                        if time != 'Null':
                            endTimestamp = max(endTimestamp, time)
                    span[1]['endTimestamp'] = endTimestamp
                    span[1]['duration'] = span[1]['endTimestamp'] - span[1]['startTimestamp']
    return traces
def createWindowing(windowSize, overlapping, timeStart=None, timeEnd=None):
    """Build (possibly overlapping) time windows over an experiment interval.

    Args:
        windowSize: window length in seconds.
        overlapping: overlap fraction in [0, 1) between consecutive windows.
        timeStart, timeEnd: optional datetime bounds.  Default to the
            recorded experiment interval (2019-11-19 17:38:38 ..
            2019-11-20 01:30:00), preserving the original behavior.

    Returns:
        (windows, ds): windows is a list of [start, end] datetime pairs
        (second-granular, inclusive); ds is a DataFrame with one row per
        window.
    """
    # Step between successive window starts, in seconds.
    n_s = int(windowSize * (1 - overlapping))
    if timeStart is None:
        timeStart = datetime.strptime('2019-11-19 17:38:38', '%Y-%m-%d %H:%M:%S')
    if timeEnd is None:
        timeEnd = datetime.strptime('2019-11-20 01:30:00', '%Y-%m-%d %H:%M:%S')
    windows = []
    time = timeStart
    while time + timedelta(seconds=windowSize) <= timeEnd:
        windows.append([time + timedelta(seconds=1), time + timedelta(seconds=windowSize)])
        time += timedelta(seconds=n_s)
    ds = pd.DataFrame({'window': windows})
    return windows, ds
# create label from features
def combineLabel(features, combination):
    """Join features[0] with the features at the given indices using '_'."""
    selected = [features[0]] + [features[i] for i in combination]
    return '_'.join(selected)
def createModes():
    """Enumerate the aggregation modes (column-name families).

    Column templates alternate: plain span features (host, then every
    non-empty proper combination of {operation, name, service, project}
    appended to host) and the matching parent->child communication
    features.  Even positions map to mode keys 1..16, odd positions to
    17..32.
    """
    features_p = ['host_1', 'operation_1', 'name_1', 'service_1', 'project_1']
    features = ['host_2', 'operation_2', 'name_2', 'service_2', 'project_2']
    featuresNonCommunication = ['host', 'operation', 'name', 'service', 'project']
    columns = [featuresNonCommunication[0], features_p[0] + '->' + features[0]]
    for size in range(1, len(features)):
        for combination in combinations(range(1, len(features)), size):
            idx = list(combination)
            columns.append(combineLabel(featuresNonCommunication, idx))
            right = combineLabel(features, idx)
            if len(features_p) != 0:
                left = combineLabel(features_p, idx)
                columns.append(left + '->' + right)
    modes = {}
    for position, column in enumerate(columns):
        key = position // 2 + 17 if position % 2 else position // 2 + 1
        modes[key] = {'name': column, 'combinations': []}
    return modes
def createColumns(pathToTraces, operations, nodeNames, metrics, traces, modes, ds):
    """Create all dataset columns and register them on their modes.

    Walks every span of every trace, generating a count column plus a
    '__duration' twin for each feature-label combination (and the
    parent->child communication variants), then adds the metric and
    mutual-information columns.  Mutates *ds* and *modes*; also dumps the
    modes mapping to <pathToTraces>/modes.json.
    """
    def addCombinationToMode(i, label):
        # Same even/odd mapping as createModes: even i -> modes 1..16
        # (non-communication), odd i -> modes 17..32 (communication).
        k = (i // 2 + 1, i // 2 + 17)[i % 2]
        if label not in modes.get(k).get('combinations'):
            modes[k]['combinations'].append(label)
            modes[k]['combinations'].append(label + '__duration')

    def addCombintaionToColumns(label):
        # Add the count column and its '__duration' twin, once.
        if label not in list(ds.keys()):
            ds[label] = 0
            ds[label + '__duration'] = 0

    # get all possible combinations of two types of aggregation
    for operation in operations:
        for trace in traces[operation]['graph']:
            spans = trace.nodes(data=True)
            for span in spans:
                i = 0
                features_p = []
                if not (span[1]['isRoot']):
                    # The parent span supplies the left side of '->' labels.
                    span_p = spans[list(trace.predecessors(span[0]))[0]]
                    features_p = [span_p['host'], span_p['operation'], span_p['name'], span_p['service'],
                                  span_p['project']]
                features = [span[1]['host'], span[1]['operation'], span[1]['name'], span[1]['service'],
                            span[1]['project']]
                addCombintaionToColumns(features[0])
                addCombinationToMode(i, features[0])
                i += 1
                if len(features_p) != 0:
                    addCombintaionToColumns(features_p[0] + '->' + features[0])
                    addCombinationToMode(i, features_p[0] + '->' + features[0])
                # NOTE(review): i advances even when the communication label
                # is skipped (root span) so the even/odd mode parity holds.
                i += 1
                for l in range(1, len(features)):
                    for combination in combinations(list(range(1, len(features))), l):
                        label_r = combineLabel(features, list(combination))
                        addCombintaionToColumns(label_r)
                        addCombinationToMode(i, label_r)
                        i += 1
                        if len(features_p) != 0:
                            label_l = combineLabel(features_p, list(combination))
                            addCombintaionToColumns(label_l + '->' + label_r)
                            addCombinationToMode(i, label_l + '->' + label_r)
                        i += 1
    # save JSON of modes
    with open(pathToTraces / 'modes.json', 'w') as f:
        json.dump(modes, f)
    # Metrics columns
    for metric in metrics:
        for name in nodeNames:
            ds[name + '_' + metric] = 0.0
    # MI columns: one per unordered (node, metric) pair; skip the diagonal
    # (same node, same metric) via t.
    for p in range(len(metrics)):
        for l in range(p, len(metrics)):
            for i in range(len(nodeNames)):
                t = (0, 1)[p == l]
                for j in range(i + t, len(nodeNames)):
                    ds['MI' + '_' + nodeNames[i] + '_' + metrics[p] + '_' + nodeNames[j] + '_' + metrics[l]] = 0.0
    return ds
def computeMedianOfMetric(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds):
    """Fill ds with the per-window median of every node/metric series.

    A window of *windowSize* seconds covers *windowSize* consecutive
    samples; consecutive windows start int(windowSize * (1 - overlapping))
    samples apart.  Mutates and returns *ds*.
    """
    step = int(windowSize * (1 - overlapping))
    for row in range(len(windows)):
        offset = row * step
        for metric in metrics:
            for name in nodeNames:
                samples = list(nodes[name]['data'][metric])[offset:offset + windowSize]
                ds.at[row, name + '_' + metric] = median(samples)
    return ds
def computeMI(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds):
    """Fill ds with pairwise mutual information between metric series.

    For each window, computes sklearn's mutual_info_score between every
    unordered (node, metric) pair of sample slices.  Mutates and returns
    *ds*.
    """
    n_s = int(windowSize * (1 - overlapping))  # step between window starts
    f = 0  # window (row) index
    k = 0  # sample offset of the current window
    while f < len(windows):
        for p in range(len(metrics)):
            for l in range(p, len(metrics)):
                for i in range(len(nodeNames)):
                    # Skip the diagonal (same node, same metric) when p == l.
                    t = (0, 1)[p == l]
                    for j in range(i + t, len(nodeNames)):
                        mi = mutual_info_score(list(nodes[nodeNames[i]]['data'][metrics[p]])[k:k + windowSize],
                                               list(nodes[nodeNames[j]]['data'][metrics[l]])[k:k + windowSize])
                        # Alternative estimator (disabled): MIC via minepy.
                        # mine = MINE(alpha=0.6, c=15, est="mic_approx")
                        # mine.compute_score(list(nodes[nodeNames[i]]['data'][metrics[p]])[k:k + windowSize],
                        #                    list(nodes[nodeNames[j]]['data'][metrics[l]])[k:k + windowSize])
                        # mi = mine.mic()
                        ds.at[f, 'MI' + '_' + nodeNames[i] + '_' + metrics[p] + '_' + nodeNames[j] + '_' + metrics[
                            l]] = mi
        k += n_s
        f += 1
    return ds
def collectData(operations, windows, traces, ds):
    """Distribute each span's count and duration over the windows it overlaps.

    For every span and every feature-label combination (plain and
    parent->child), increments the label's count column and adds the
    overlapped microseconds to the '__duration' column of each window the
    span touches.  Mutates and returns *ds*.
    """
    # find index of window
    def findIndex(time):
        # Windows are [start, end] inclusive at 1 s granularity; -1 means
        # *time* falls outside every window.
        for i in range(len(windows)):
            if windows[i][0] <= time < (windows[i][1] + timedelta(seconds=1)):
                return i
        return -1

    def increaseNumberAndDuration(row, column, duration):
        # *duration* is in microseconds.
        # NOTE(review): row can be -1 when findIndex() misses; with a
        # label-based .at this writes a row labelled -1 — confirm spans
        # always start inside the windowed interval.
        ds.at[row, column + '__duration'] += duration
        ds.at[row, column] += 1

    def fillWindow(i_s, i_e, span, column):
        # Split the span across the windows it covers: the first window is
        # credited start..window-end, the last window-start..end, and the
        # fully covered windows in between their whole length.
        if (i_s == i_e):
            increaseNumberAndDuration(i_s, column,
                                      (span['endTimestamp'] - span['startTimestamp']) // timedelta(microseconds=1))
        else:
            if (i_e == -1):
                # End unknown/outside range: only credit the remainder of
                # the first window.
                increaseNumberAndDuration(i_s, column, (
                    windows[i_s][1] + timedelta(seconds=1) - span['startTimestamp']) // timedelta(
                    microseconds=1))
            else:
                increaseNumberAndDuration(i_s, column, (
                    windows[i_s][1] + timedelta(seconds=1) - span['startTimestamp']) // timedelta(
                    microseconds=1))
                increaseNumberAndDuration(i_e, column,
                                          (span['endTimestamp'] - windows[i_e][0]) // timedelta(microseconds=1))
                for i in range(1, i_e - i_s):
                    increaseNumberAndDuration(i_s + i, column, (
                        windows[i_s + i][1] + timedelta(seconds=1) - windows[i_s + i][0]) // timedelta(
                        microseconds=1))

    for operation in operations:
        for trace in traces[operation]['graph']:
            spans = trace.nodes(data=True)
            for span in spans:
                i_s, i_e = findIndex(span[1]['startTimestamp']), -1
                if span[1]['endTimestamp'] != 'Null':
                    i_e = findIndex(span[1]['endTimestamp'])
                features = [span[1]['host'], span[1]['operation'], span[1]['name'], span[1]['service'],
                            span[1]['project']]
                fillWindow(i_s, i_e, span[1], features[0])
                features_p = []
                if not (span[1]['isRoot']):
                    # Parent span supplies the left side of '->' labels.
                    span_p = spans[list(trace.predecessors(span[0]))[0]]
                    features_p = [span_p['host'], span_p['operation'], span_p['name'], span_p['service'],
                                  span_p['project']]
                if len(features_p) != 0:
                    fillWindow(i_s, i_e, span[1], features_p[0] + '->' + features[0])
                for l in range(1, len(features)):
                    for combination in combinations(list(range(1, len(features))), l):
                        label_r = combineLabel(features, list(combination))
                        fillWindow(i_s, i_e, span[1], label_r)
                        if len(features_p) != 0:
                            label_l = combineLabel(features_p, list(combination))
                            fillWindow(i_s, i_e, span[1], label_l + '->' + label_r)
    return ds
def saveData(overlapping, pathToTraces, ds):
    """Write the assembled window dataset next to the traces as CSV."""
    if overlapping != 0:
        title = str(int(overlapping * 100)) + '%'
    else:
        title = 'non'
    ds.to_csv(pathToTraces / ('parsed_traces_with_' + title + '_overlapping.csv'), index=False)
def main(windowSize=60, overlapping=0):
    """Run the full trace/metric parsing pipeline and write the CSV.

    Args:
        windowSize: window length in seconds; must fit inside the
            ~28282-second experiment.
        overlapping: overlap fraction in [0, 1).

    Raises:
        ValueError: for out-of-range arguments.  (Was an assert, which is
        stripped under ``python -O``.)
    """
    if not 0 < windowSize < 28282:
        raise ValueError('windowSize must be in (0, 28282), got %r' % (windowSize,))
    if not 0 <= overlapping < 1:
        raise ValueError('overlapping must be in [0, 1), got %r' % (overlapping,))
    relativePathToData = 'data/sequential_data'
    pathToData = pathlib.Path().absolute().parent / relativePathToData
    pathToTraces = pathToData / 'traces'
    operations, traces = loadTraces(pathToTraces)
    nodeNames, metrics, nodes = loadMetrics(pathToData)
    # Convert each raw JSON trace into a DiGraph of spans.
    for operation in operations:
        traces[operation]['graph'] = list(
            map(lambda x: parseTrace(operation, x, nx.DiGraph()), traces[operation]['data']))
    traces = fixTraces(operations, traces)
    windows, ds = createWindowing(windowSize, overlapping)
    modes = createModes()
    ds = createColumns(pathToTraces, operations, nodeNames, metrics, traces, modes, ds)
    ds = computeMedianOfMetric(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds)
    ds = computeMI(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds)
    ds = collectData(operations, windows, traces, ds)
    saveData(overlapping, pathToTraces, ds)
main() | src/Parsing.py | import json
from datetime import datetime, timedelta
import pathlib
import pandas as pd
import networkx as nx
from statistics import median, mean
from itertools import combinations
from minepy import MINE
import warnings
warnings.simplefilter("ignore", UserWarning)
from sklearn.metrics import mutual_info_score
def loadTraces(pathToTraces):
def loadJson(link):
with open(link) as f:
data = json.load(f)
return data
operations = sorted(list(map(lambda x: x.name, list(pathToTraces.glob('**'))[1:])))
traces = {}
for operation in operations:
pathToOperation = pathToTraces / operation
pathes = sorted(list(pathToOperation.glob('*.json')))
traces[operation] = {}
traces[operation]['id'] = list(map(lambda x: x.name[:x.name.find('.json')], pathes))
traces[operation]['data'] = list(map(lambda x: loadJson(x), pathes))
return operations, traces
def loadMetrics(pathToData):
pathToMetrics = pathToData / 'fixed_metrics'
nodeNames = sorted(list(map(lambda x: x.name[:x.name.find('_')], list(pathToMetrics.glob('*.csv')))))
nodes = {}
for name in nodeNames:
nodes[name] = {}
nodes[name]['data'] = pd.read_csv(pathToMetrics / (name + '_metrics.csv'))
for name in nodeNames:
nodes[name]['data']['now'] = nodes[name]['data']['now'].map(
lambda x: datetime.strptime(str(x), '%Y-%m-%d %H:%M:%S CEST'))
metrics = list(nodes[nodeNames[0]]['data'].keys())
metrics.remove('now')
metrics.remove('load.cpucore') # always == 8
metrics = sorted(metrics)
return nodeNames, metrics, nodes
def parseTrace(operation, df, graph):
G = graph
for item in df['children']:
trace = {}
trace['operation'] = operation
trace['host'] = item.get('info').get('host')
trace['name'] = item.get('info').get('name')
trace['service'] = item.get('info').get('service')
trace['project'] = item.get('info').get('project')
trace['startTimestamp'] = datetime.strptime(
item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('timestamp'),
'%Y-%m-%dT%H:%M:%S.%f')
endTimestamp = item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop',
{'timestamp': 'Null'}).get('timestamp')
if endTimestamp != 'Null':
trace['endTimestamp'] = datetime.strptime(endTimestamp, '%Y-%m-%dT%H:%M:%S.%f')
trace['duration'] = trace['endTimestamp'] - trace['startTimestamp']
else:
trace['endTimestamp'] = 'Null'
trace['duration'] = 'Null'
trace['trace_id'] = item.get('trace_id')
trace['parent_id'] = item.get('parent_id')
trace['base_id'] = item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id')
trace['isRoot'] = trace['parent_id'] == trace['base_id']
G.add_nodes_from([(trace['trace_id'], trace)])
if not (trace['isRoot']):
G.add_edge(trace['parent_id'], trace['trace_id'])
if len(item['children']) != 0:
G = parseTrace(operation, item, G)
return G
# fix non-endTimestamp problem
def fixTraces(operations, traces):
for operation in operations:
for trace in traces[operation]['graph']:
spans = trace.nodes(data=True)
for span in spans:
if span[1]['endTimestamp'] == 'Null':
children = list(nx.descendants(trace, span[0]))
if children == []:
continue
endTimestamp = span[1]['startTimestamp']
for child in children:
time = spans[child]['endTimestamp']
if time != 'Null':
endTimestamp = max(endTimestamp, time)
span[1]['endTimestamp'] = endTimestamp
span[1]['duration'] = span[1]['endTimestamp'] - span[1]['startTimestamp']
return traces
def createWindowing(windowSize, overlapping):
n_s = int(windowSize * (1 - overlapping))
windows = []
timeStart = datetime.strptime('2019-11-19 17:38:38', '%Y-%m-%d %H:%M:%S')
timeEnd = datetime.strptime('2019-11-20 01:30:00', '%Y-%m-%d %H:%M:%S')
time = timeStart
while time + timedelta(seconds=windowSize) <= timeEnd:
windows.append([time + timedelta(seconds=1), time + timedelta(seconds=windowSize)])
time += timedelta(seconds=n_s)
ds = pd.DataFrame({'window': windows})
return windows, ds
# create label from features
def combineLabel(features, combination):
label = features[0]
for i in combination:
label = label + '_' + features[i]
return label
def createModes():
features_p = ['host_1', 'operation_1', 'name_1', 'service_1', 'project_1']
features = ['host_2', 'operation_2', 'name_2', 'service_2', 'project_2']
featuresNonCommunication = ['host', 'operation', 'name', 'service', 'project']
columns = []
columns.append(featuresNonCommunication[0])
columns.append(features_p[0] + '->' + features[0])
for l in range(1, len(features)):
for combination in combinations(list(range(1, len(features))), l):
label_r = combineLabel(featuresNonCommunication, list(combination))
columns.append(label_r)
label_r = combineLabel(features, list(combination))
if len(features_p) != 0:
label_l = combineLabel(features_p, list(combination))
columns.append(label_l + '->' + label_r)
modes = {}
for i in range(len(columns)):
k = (i // 2 + 1, i // 2 + 17)[i % 2]
modes[k] = {'name': columns[i], 'combinations': []}
return modes
def createColumns(pathToTraces, operations, nodeNames, metrics, traces, modes, ds):
def addCombinationToMode(i, label):
k = (i // 2 + 1, i // 2 + 17)[i % 2]
if label not in modes.get(k).get('combinations'):
modes[k]['combinations'].append(label)
modes[k]['combinations'].append(label + '__duration')
def addCombintaionToColumns(label):
if label not in list(ds.keys()):
ds[label] = 0
ds[label + '__duration'] = 0
# get all possible combinations of two types of aggregation
for operation in operations:
for trace in traces[operation]['graph']:
spans = trace.nodes(data=True)
for span in spans:
i = 0
features_p = []
if not (span[1]['isRoot']):
span_p = spans[list(trace.predecessors(span[0]))[0]]
features_p = [span_p['host'], span_p['operation'], span_p['name'], span_p['service'],
span_p['project']]
features = [span[1]['host'], span[1]['operation'], span[1]['name'], span[1]['service'],
span[1]['project']]
addCombintaionToColumns(features[0])
addCombinationToMode(i, features[0])
i += 1
if len(features_p) != 0:
addCombintaionToColumns(features_p[0] + '->' + features[0])
addCombinationToMode(i, features_p[0] + '->' + features[0])
i += 1
for l in range(1, len(features)):
for combination in combinations(list(range(1, len(features))), l):
label_r = combineLabel(features, list(combination))
addCombintaionToColumns(label_r)
addCombinationToMode(i, label_r)
i += 1
if len(features_p) != 0:
label_l = combineLabel(features_p, list(combination))
addCombintaionToColumns(label_l + '->' + label_r)
addCombinationToMode(i, label_l + '->' + label_r)
i += 1
# save JSON of modes
with open(pathToTraces / 'modes.json', 'w') as f:
json.dump(modes, f)
# Metrics columns
for metric in metrics:
for name in nodeNames:
ds[name + '_' + metric] = 0.0
# MI columns
for p in range(len(metrics)):
for l in range(p, len(metrics)):
for i in range(len(nodeNames)):
t = (0, 1)[p == l]
for j in range(i + t, len(nodeNames)):
ds['MI' + '_' + nodeNames[i] + '_' + metrics[p] + '_' + nodeNames[j] + '_' + metrics[l]] = 0.0
return ds
def computeMedianOfMetric(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds):
n_s = int(windowSize * (1 - overlapping))
f = 0
k = 0
while f < len(windows):
for metric in metrics:
for name in nodeNames:
m = median(list(nodes[name]['data'][metric])[k:k + windowSize])
# m = mean(list(nodes[name]['data'][metric])[k:k + windowSize])
ds.at[f, name + '_' + metric] = m
k += n_s
f += 1
return ds
def computeMI(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds):
n_s = int(windowSize * (1 - overlapping))
f = 0
k = 0
while f < len(windows):
for p in range(len(metrics)):
for l in range(p, len(metrics)):
for i in range(len(nodeNames)):
t = (0, 1)[p == l]
for j in range(i + t, len(nodeNames)):
mi = mutual_info_score(list(nodes[nodeNames[i]]['data'][metrics[p]])[k:k + windowSize],
list(nodes[nodeNames[j]]['data'][metrics[l]])[k:k + windowSize])
# mine = MINE(alpha=0.6, c=15, est="mic_approx")
# mine.compute_score(list(nodes[nodeNames[i]]['data'][metrics[p]])[k:k + windowSize],
# list(nodes[nodeNames[j]]['data'][metrics[l]])[k:k + windowSize])
# mi = mine.mic()
ds.at[f, 'MI' + '_' + nodeNames[i] + '_' + metrics[p] + '_' + nodeNames[j] + '_' + metrics[
l]] = mi
k += n_s
f += 1
return ds
def collectData(operations, windows, traces, ds):
# find index of window
def findIndex(time):
for i in range(len(windows)):
if windows[i][0] <= time < (windows[i][1] + timedelta(seconds=1)):
return i
return -1
def increaseNumberAndDuration(row, column, duration):
ds.at[row, column + '__duration'] += duration
ds.at[row, column] += 1
def fillWindow(i_s, i_e, span, column):
if (i_s == i_e):
increaseNumberAndDuration(i_s, column,
(span['endTimestamp'] - span['startTimestamp']) // timedelta(microseconds=1))
else:
if (i_e == -1):
increaseNumberAndDuration(i_s, column, (
windows[i_s][1] + timedelta(seconds=1) - span['startTimestamp']) // timedelta(
microseconds=1))
else:
increaseNumberAndDuration(i_s, column, (
windows[i_s][1] + timedelta(seconds=1) - span['startTimestamp']) // timedelta(
microseconds=1))
increaseNumberAndDuration(i_e, column,
(span['endTimestamp'] - windows[i_e][0]) // timedelta(microseconds=1))
for i in range(1, i_e - i_s):
increaseNumberAndDuration(i_s + i, column, (
windows[i_s + i][1] + timedelta(seconds=1) - windows[i_s + i][0]) // timedelta(
microseconds=1))
for operation in operations:
for trace in traces[operation]['graph']:
spans = trace.nodes(data=True)
for span in spans:
i_s, i_e = findIndex(span[1]['startTimestamp']), -1
if span[1]['endTimestamp'] != 'Null':
i_e = findIndex(span[1]['endTimestamp'])
features = [span[1]['host'], span[1]['operation'], span[1]['name'], span[1]['service'],
span[1]['project']]
fillWindow(i_s, i_e, span[1], features[0])
features_p = []
if not (span[1]['isRoot']):
span_p = spans[list(trace.predecessors(span[0]))[0]]
features_p = [span_p['host'], span_p['operation'], span_p['name'], span_p['service'],
span_p['project']]
if len(features_p) != 0:
fillWindow(i_s, i_e, span[1], features_p[0] + '->' + features[0])
for l in range(1, len(features)):
for combination in combinations(list(range(1, len(features))), l):
label_r = combineLabel(features, list(combination))
fillWindow(i_s, i_e, span[1], label_r)
if len(features_p) != 0:
label_l = combineLabel(features_p, list(combination))
fillWindow(i_s, i_e, span[1], label_l + '->' + label_r)
return ds
def saveData(overlapping, pathToTraces, ds):
title = ('non', str(int(overlapping * 100)) + '%')[overlapping != 0]
ds.to_csv(pathToTraces / ('parsed_traces_with_' + title + '_overlapping.csv'), index=False)
def main(windowSize=60, overlapping=0):
assert 0 < windowSize < 28282
assert 0 <= overlapping < 1
relativePathToData = 'data/sequential_data'
pathToData = pathlib.Path().absolute().parent / relativePathToData
pathToTraces = pathToData / 'traces'
operations, traces = loadTraces(pathToTraces)
nodeNames, metrics, nodes = loadMetrics(pathToData)
for operation in operations:
traces[operation]['graph'] = list(
map(lambda x: parseTrace(operation, x, nx.DiGraph()), traces[operation]['data']))
traces = fixTraces(operations, traces)
windows, ds = createWindowing(windowSize, overlapping)
modes = createModes()
ds = createColumns(pathToTraces, operations, nodeNames, metrics, traces, modes, ds)
ds = computeMedianOfMetric(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds)
ds = computeMI(windowSize, overlapping, nodeNames, metrics, windows, nodes, ds)
ds = collectData(operations, windows, traces, ds)
saveData(overlapping, pathToTraces, ds)
main() | 0.425844 | 0.187765 |
from typing import List, Union, Optional, Dict, Tuple, Iterable
from requests import Session, Response
from datetime import datetime, timezone, timedelta
from .model import Alarm, AlarmLevel, AlarmKind, AlarmDetail
import ast
from bidict import bidict
import json
__all__ = ('AlarmCrawler')
class AlarmCrawler():
    "Weather-alert crawler for weather.com.cn."
    # Index endpoint listing all active alerts as a JS variable assignment.
    url: str = 'https://product.weather.com.cn/alarm/grepalarm_cn.php'
    session: Session
    # All timestamps on the site are China Standard Time (UTC+8).
    TIMEZONE: timezone = timezone(timedelta(hours=8), "Asia/Shanghai")
    #cache_IDs: bidict
    #cache_levels: bidict

    def __init__(self, session: Optional[Session] = None):
        # Reuse the caller's HTTP session when given, else create our own.
        self.session: Session = session if session else Session()
        #self.cache_IDs: bidict = bidict()
        #self.cache_levels: bidict = bidict()

    def getAlarms(self) -> List[Alarm]:
        """Fetch the alert index and parse every entry into an Alarm."""
        resp: Response = self.session.get(self.url)
        resp.encoding = resp.apparent_encoding
        alarms_list: List[List[str]] = self._paramJsVar(resp.text)['data']
        alarms: List[Alarm] = []
        for alarm_l in alarms_list:
            short_url: str = alarm_l[1]
            # Drop the 5-char suffix (presumably '.html') and split on '-':
            # '<location_id>-<YYYYmmddHHMMSS>-<kkll>' where kk = kind code,
            # ll = level code.
            url_info: List[str] = short_url[:-5].split('-')
            time: datetime = datetime(int(url_info[1][:4]),
                                      int(url_info[1][4:6]),
                                      int(url_info[1][6:8]),
                                      int(url_info[1][8:10]),
                                      int(url_info[1][10:12]),
                                      int(url_info[1][12:]),
                                      tzinfo=self.TIMEZONE)
            alarms.append(
                Alarm(location=alarm_l[0],
                      lng_E=float(alarm_l[2]),
                      lat_N=float(alarm_l[3]),
                      location_id=int(url_info[0]),
                      short_url=short_url,
                      time=time,
                      kind=AlarmKind(int(url_info[2][:2])),
                      level=AlarmLevel(int(url_info[2][2:]))))
        return alarms

    def getAlarmDetail(self, short_url: str) -> AlarmDetail:
        """Fetch and parse the detail page of one alarm."""
        def timeStrToUTC8(text: str) -> datetime:
            # The page gives naive ISO timestamps; attach the UTC+8 zone.
            time_tzless: datetime = datetime.fromisoformat(text)
            return datetime.combine(time_tzless.date(),
                                    time_tzless.time(),
                                    tzinfo=self.TIMEZONE)

        resp: Response = self.session.get(self.shortUrlToCompleted(short_url))
        resp.encoding = resp.apparent_encoding
        info: Dict[str, str] = self._paramJsVar(resp.text)
        return AlarmDetail(title=info['head'],
                           alarm_id=info['ALERTID'],
                           province_name=info['PROVINCE'],
                           city_name=info["CITY"],
                           time=timeStrToUTC8(info["ISSUETIME"]),
                           content=info["ISSUECONTENT"],
                           relieve_time=timeStrToUTC8(info["RELIEVETIME"]),
                           kind=AlarmKind(int(info["TYPECODE"])),
                           level=AlarmLevel(int(info["LEVELCODE"])),
                           raw_info=info)

    @staticmethod
    def shortUrlToCompleted(url: str) -> str:
        """Expand a short alarm path into the machine-readable data URL."""
        return f"http://product.weather.com.cn/alarm/webdata/{url}"

    @staticmethod
    def shortUrlToHuman(url: str) -> str:
        """Expand a short alarm path into the human-readable page URL."""
        return f"http://www.weather.com.cn/alarm/newalarmcontent.shtml?file={url}"

    @staticmethod
    def _paramJsVar(data: str) -> Union[list, dict]:
        'Parse a JS variable definition used as a data payload on weather.com.cn.'
        info: List[str] = data.strip().split('=', maxsplit=1)
        # Strip a trailing ';' before evaluating the literal.
        return ast.literal_eval(
            info[1] if info[1][-1] != ';' else info[1][:-1])
def getSession(self) -> Session:
return self.session | weather_com_cn/alarm.py | from typing import List, Union, Optional, Dict, Tuple, Iterable
from requests import Session, Response
from datetime import datetime, timezone, timedelta
from .model import Alarm, AlarmLevel, AlarmKind, AlarmDetail
import ast
from bidict import bidict
import json
__all__ = ('AlarmCrawler')
class AlarmCrawler():
"气象预警爬虫"
url: str = 'https://product.weather.com.cn/alarm/grepalarm_cn.php'
session: Session
TIMEZONE: timezone = timezone(timedelta(hours=8), "Asia/Shanghai")
#cache_IDs: bidict
#cache_levels: bidict
def __init__(self, session: Optional[Session] = None):
self.session: Session = session if session else Session()
#self.cache_IDs: bidict = bidict()
#self.cache_levels: bidict = bidict()
def getAlarms(self) -> List[Alarm]:
resp: Response = self.session.get(self.url)
resp.encoding = resp.apparent_encoding
alarms_list: List[List[str]] = self._paramJsVar(resp.text)['data']
alarms: List[Alarm] = []
for alarm_l in alarms_list:
short_url: str = alarm_l[1]
url_info: List[str] = short_url[:-5].split('-')
time: datetime = datetime(int(url_info[1][:4]),
int(url_info[1][4:6]),
int(url_info[1][6:8]),
int(url_info[1][8:10]),
int(url_info[1][10:12]),
int(url_info[1][12:]),
tzinfo=self.TIMEZONE)
alarms.append(
Alarm(location=alarm_l[0],
lng_E=float(alarm_l[2]),
lat_N=float(alarm_l[3]),
location_id=int(url_info[0]),
short_url=short_url,
time=time,
kind=AlarmKind(int(url_info[2][:2])),
level=AlarmLevel(int(url_info[2][2:]))))
return alarms
def getAlarmDetail(self, short_url: str) -> AlarmDetail:
def timeStrToUTC8(text: str) -> datetime:
time_tzless: datetime = datetime.fromisoformat(text)
return datetime.combine(time_tzless.date(),
time_tzless.time(),
tzinfo=self.TIMEZONE)
resp: Response = self.session.get(self.shortUrlToCompleted(short_url))
resp.encoding = resp.apparent_encoding
info: Dict[str, str] = self._paramJsVar(resp.text)
return AlarmDetail(title=info['head'],
alarm_id=info['ALERTID'],
province_name=info['PROVINCE'],
city_name=info["CITY"],
time=timeStrToUTC8(info["ISSUETIME"]),
content=info["ISSUECONTENT"],
relieve_time=timeStrToUTC8(info["RELIEVETIME"]),
kind=AlarmKind(int(info["TYPECODE"])),
level=AlarmLevel(int(info["LEVELCODE"])),
raw_info=info)
@staticmethod
def shortUrlToCompleted(url: str) -> str:
return f"http://product.weather.com.cn/alarm/webdata/{url}"
@staticmethod
def shortUrlToHuman(url: str) -> str:
return f"http://www.weather.com.cn/alarm/newalarmcontent.shtml?file={url}"
@staticmethod
def _paramJsVar(data: str) -> Union[list, dict]:
    """Parse a JS variable assignment used as a data payload on weather.com.cn.

    The payload looks like ``var name = <literal>;``. Everything after the
    first ``=`` — minus one optional trailing ``;`` — is evaluated with
    :func:`ast.literal_eval`, which is safe on untrusted input.
    """
    info: List[str] = data.strip().split('=', maxsplit=1)
    payload = info[1]
    if payload[-1] == ';':
        payload = payload[:-1]
    return ast.literal_eval(payload)
def getSession(self) -> Session:
    """Expose the underlying requests Session (e.g. for cookie reuse)."""
    return self.session
from qtpy import QtCore
from qtpy.QtWidgets import *
class ROIItemWidget(QWidget):
    """
    Item in the ROI list, takes care of everything except for color part which is
    handled by ROIItemModule
    """
    def __init__(self, roi_tab, color, roi_list, id, roi_num, parent=None,
                 display_time=True):
        # roi_tab: owning tab; provides image_view and selection callbacks.
        # color: unused here — the color swatch is rendered by ROIItemModule.
        # roi_list: container widget tracking selection state for all items.
        # id: display number (shadows builtin `id`; kept for API compatibility).
        # roi_num: index used when talking to image_view / time-trace plots.
        # display_time: when True, show the time-trace checkbox column.
        self.roi_tab = roi_tab
        self.roi_list = roi_list
        self.display_time = display_time
        self.roi_num = roi_num
        self.id = id
        super(ROIItemWidget, self).__init__(parent)
        # Dark-theme styling; transparent backgrounds let the row highlight
        # of the enclosing list show through.
        self.setStyleSheet("""QPushButton {background-color: rgba(0,0,0,0%);
            padding-left:3px;
            padding-right:3px;
            color: #CCCCCC;}
            QPushButton:hover {
            border: 1px solid #148CD2;
            background-color: #505F69;
            color: #F0F0F0;
            }
            QPushButton:pressed {
            background-color: #19232D;
            border: 1px solid #19232D;
            }
            QPushButton:pressed:hover {
            border: 1px solid #148CD2;
            }
            QPushButton:selected {
            background-color: rgba(0,0,0,0%);
            color: #32414B;
            }
            QLabel {
            background-color: rgba(0,0,0,0%)
            }QCheckBox {
            background-color: rgba(0,0,0,0%)
            }""")
        self.zoom_button = QPushButton("Zoom To")
        self.zoom_button.clicked.connect(
            lambda x: self.roi_tab.image_view.zoomRoi(self.id, input_key=True))
        self.check_box = QCheckBox()
        self.check_box.toggled.connect(lambda: self.check_box_toggled())
        self.check_box_time_trace = QCheckBox()
        self.check_box_time_trace.toggled.connect(lambda: self.time_check_box_toggled())
        lay = QHBoxLayout(self)
        lay.addWidget(self.check_box, alignment=QtCore.Qt.AlignLeft)
        lay.addWidget(QLabel(text="#" + str(id)), alignment=QtCore.Qt.AlignLeft)
        if display_time:
            # Empty spacer labels keep the columns aligned with the header.
            lay.addWidget(QLabel())
            lay.addWidget(QLabel())
            lay.addWidget(QLabel())
        # lay.addWidget(
        #     QLabel(str(round(self.roi_tab.data_handler.roi_circ_list[roi_num - 1], 3))))
        lay.addWidget(self.zoom_button)
        if display_time:
            lay.addWidget(self.check_box_time_trace, alignment=QtCore.Qt.AlignRight)
        lay.setContentsMargins(0, 0, 0, 0)
    def keyPressEvent(self, event):
        # Forward key presses so tab-level shortcuts still work when a list
        # item has keyboard focus.
        self.roi_tab.keyPressEvent(event)
    def select_check_box(self, force_on=False):
        # Toggle this item's selection; with force_on=True it only selects.
        if not self.check_box.checkState() or force_on:
            if not self.roi_list.select_multiple:
                # Single-select mode: clear every other row first.
                for x in self.roi_list.roi_item_list:
                    if x != self:
                        x.check_box.setChecked(False)
            self.check_box.setChecked(True)
            # NOTE(review): syncing the time checkbox only when display_time
            # is False looks inverted relative to __init__ — confirm intent.
            if not self.display_time:
                self.check_box_time_trace.setChecked(True)
            self.roi_list.current_selected_roi = self.roi_num
            try:
                self.roi_tab.update_current_roi_selected()
            except AttributeError:
                # Not every tab implements this hook.
                pass
        else:
            self.check_box.setChecked(False)
            if not self.display_time:
                self.check_box_time_trace.setChecked(False)
            self.roi_list.current_selected_roi = None
            try:
                self.roi_tab.update_current_roi_selected()
            except AttributeError:
                pass
        self.roi_list.update_select_number()
    def selected(self):
        # Returns the Qt CheckState (truthy when checked).
        return self.check_box.checkState()
    def select_time_check_box(self):
        # Flip the time-trace checkbox programmatically.
        self.check_box_time_trace.setChecked(not self.check_box_time_trace.checkState())
    def check_box_toggled(self):
        # Slot for self.check_box.toggled: keep list selection state,
        # image view highlighting and the time-trace checkbox in sync.
        if self.check_box.checkState():
            if not self.roi_list.select_multiple:
                for x in self.roi_list.roi_item_list:
                    if x != self:
                        x.check_box.setChecked(False)
            self.roi_list.current_selected_roi = self.roi_num
            try:
                self.roi_tab.update_current_roi_selected()
            except AttributeError:
                pass
            self.check_box_time_trace.setChecked(True)
            # NOTE(review): highlighting in the image view only happens when
            # display_time is False — verify this guard is intended.
            if not self.display_time:
                self.roi_tab.image_view.selectRoi(self.roi_num)
        else:
            self.roi_list.current_selected_roi = None
            try:
                self.roi_tab.update_current_roi_selected()
            except AttributeError:
                pass
            if not self.display_time:
                self.check_box_time_trace.setChecked(False)
            self.roi_tab.image_view.deselectRoi(self.roi_num, other_selected=self.roi_list.currently_selected_rois_list)
        self.roi_list.update_select_number()
    def time_check_box_toggled(self):
        # Record the time-trace visibility flag and update the trace plot.
        self.roi_list.roi_time_check_list[
            self.roi_num] = self.check_box_time_trace.checkState()
        try:
            if self.check_box_time_trace.checkState():
                self.roi_tab.selectRoiTime(self.roi_num)
            else:
                self.roi_tab.deselectRoiTime()
        except AttributeError:
            # Tabs without time-trace support lack these methods.
            pass
from qtpy.QtWidgets import *
class ROIItemWidget(QWidget):
"""
Item in the ROI list, takes care of everything except for color part which is
handled by ROIItemModule
"""
def __init__(self, roi_tab, color, roi_list, id, roi_num, parent=None,
display_time=True):
self.roi_tab = roi_tab
self.roi_list = roi_list
self.display_time = display_time
self.roi_num = roi_num
self.id = id
super(ROIItemWidget, self).__init__(parent)
self.setStyleSheet("""QPushButton {background-color: rgba(0,0,0,0%);
padding-left:3px;
padding-right:3px;
color: #CCCCCC;}
QPushButton:hover {
border: 1px solid #148CD2;
background-color: #505F69;
color: #F0F0F0;
}
QPushButton:pressed {
background-color: #19232D;
border: 1px solid #19232D;
}
QPushButton:pressed:hover {
border: 1px solid #148CD2;
}
QPushButton:selected {
background-color: rgba(0,0,0,0%);
color: #32414B;
}
QLabel {
background-color: rgba(0,0,0,0%)
}QCheckBox {
background-color: rgba(0,0,0,0%)
}""")
self.zoom_button = QPushButton("Zoom To")
self.zoom_button.clicked.connect(
lambda x: self.roi_tab.image_view.zoomRoi(self.id, input_key=True))
self.check_box = QCheckBox()
self.check_box.toggled.connect(lambda: self.check_box_toggled())
self.check_box_time_trace = QCheckBox()
self.check_box_time_trace.toggled.connect(lambda: self.time_check_box_toggled())
lay = QHBoxLayout(self)
lay.addWidget(self.check_box, alignment=QtCore.Qt.AlignLeft)
lay.addWidget(QLabel(text="#" + str(id)), alignment=QtCore.Qt.AlignLeft)
if display_time:
lay.addWidget(QLabel())
lay.addWidget(QLabel())
lay.addWidget(QLabel())
# lay.addWidget(
# QLabel(str(round(self.roi_tab.data_handler.roi_circ_list[roi_num - 1], 3))))
lay.addWidget(self.zoom_button)
if display_time:
lay.addWidget(self.check_box_time_trace, alignment=QtCore.Qt.AlignRight)
lay.setContentsMargins(0, 0, 0, 0)
def keyPressEvent(self, event):
self.roi_tab.keyPressEvent(event)
def select_check_box(self, force_on=False):
if not self.check_box.checkState() or force_on:
if not self.roi_list.select_multiple:
for x in self.roi_list.roi_item_list:
if x != self:
x.check_box.setChecked(False)
self.check_box.setChecked(True)
if not self.display_time:
self.check_box_time_trace.setChecked(True)
self.roi_list.current_selected_roi = self.roi_num
try:
self.roi_tab.update_current_roi_selected()
except AttributeError:
pass
else:
self.check_box.setChecked(False)
if not self.display_time:
self.check_box_time_trace.setChecked(False)
self.roi_list.current_selected_roi = None
try:
self.roi_tab.update_current_roi_selected()
except AttributeError:
pass
self.roi_list.update_select_number()
def selected(self):
return self.check_box.checkState()
def select_time_check_box(self):
self.check_box_time_trace.setChecked(not self.check_box_time_trace.checkState())
def check_box_toggled(self):
if self.check_box.checkState():
if not self.roi_list.select_multiple:
for x in self.roi_list.roi_item_list:
if x != self:
x.check_box.setChecked(False)
self.roi_list.current_selected_roi = self.roi_num
try:
self.roi_tab.update_current_roi_selected()
except AttributeError:
pass
self.check_box_time_trace.setChecked(True)
if not self.display_time:
self.roi_tab.image_view.selectRoi(self.roi_num)
else:
self.roi_list.current_selected_roi = None
try:
self.roi_tab.update_current_roi_selected()
except AttributeError:
pass
if not self.display_time:
self.check_box_time_trace.setChecked(False)
self.roi_tab.image_view.deselectRoi(self.roi_num, other_selected=self.roi_list.currently_selected_rois_list)
self.roi_list.update_select_number()
def time_check_box_toggled(self):
self.roi_list.roi_time_check_list[
self.roi_num] = self.check_box_time_trace.checkState()
try:
if self.check_box_time_trace.checkState():
self.roi_tab.selectRoiTime(self.roi_num)
else:
self.roi_tab.deselectRoiTime()
except AttributeError:
pass | 0.498047 | 0.106598 |
import pexpect
import unittest
import PexpectTestCase
import time
import os
class TestCtrlChars(PexpectTestCase.PexpectTestCase):
    """Tests for sending control characters through pexpect children."""

    def test_control_chars(self):
        '''FIXME: Python unicode was too hard to figure out, so
        this tests only the true ASCII characters. This is lame
        and should be fixed. I'm leaving this script here as a
        placeholder so that it will remind me to fix this one day.
        This is what it used to do:
        This tests that we can send all 256 8-bit ASCII characters
        to a child process.'''
        # FIXME: Getting this to support Python's Unicode was
        # too hard, so I disabled this. I should fix this one day.
        return 0
        child = pexpect.spawn('python getch.py')
        try:
            for i in range(256):
                # child.send(unicode('%d'%i, encoding='utf-8'))
                child.send(chr(i))
                child.expect('%d\r\n' % i)
        # Was `except Exception, e` — Python-2-only syntax that is a
        # SyntaxError on Python 3; `as` works on Python 2.6+ and 3.
        except Exception as e:
            msg = "Did not echo character value: " + str(i) + "\n"
            msg = msg + str(e)
            self.fail(msg)

    def test_sendintr(self):
        # Ctrl-C (SIGINT) should reach the child as character code 3.
        try:
            child = pexpect.spawn('python getch.py')
            child.sendintr()
            child.expect('3\r\n')
        except Exception as e:
            msg = "Did not echo character value: 3\n"
            msg = msg + str(e)
            self.fail(msg)

    def test_bad_sendcontrol_chars(self):
        '''This tests that sendcontrol will return 0 for an unknown char. '''
        child = pexpect.spawn('python getch.py')
        retval = child.sendcontrol('1')
        assert retval == 0, "sendcontrol() should have returned 0 because there is no such thing as ctrl-1."

    def test_sendcontrol(self):
        '''This tests that we can send all special control codes by name.
        '''
        child = pexpect.spawn('python getch.py')
        #child.delaybeforesend = 0.1
        for i in 'abcdefghijklmnopqrstuvwxyz':
            child.sendcontrol(i)
            child.expect('[0-9]+\r\n')
            #print child.after
        # The non-letter control names and the codes they map to:
        child.sendcontrol('@')
        child.expect('0\r\n')
        child.sendcontrol('[')
        child.expect('27\r\n')
        child.sendcontrol('\\')
        child.expect('28\r\n')
        child.sendcontrol(']')
        child.expect('29\r\n')
        child.sendcontrol('^')
        child.expect('30\r\n')
        child.sendcontrol('_')
        child.expect('31\r\n')
        child.sendcontrol('?')
        child.expect('127\r\n')
if __name__ == '__main__':
    # unittest.main() calls sys.exit(), so nothing after it at module
    # scope ever runs.
    unittest.main()
suite = unittest.makeSuite(TestCtrlChars,'test') | tests/test_ctrl_chars.py | import pexpect
import unittest
import PexpectTestCase
import time
import os
class TestCtrlChars(PexpectTestCase.PexpectTestCase):
def test_control_chars (self):
'''FIXME: Python unicode was too hard to figure out, so
this tests only the true ASCII characters. This is lame
and should be fixed. I'm leaving this script here as a
placeholder so that it will remind me to fix this one day.
This is what it used to do:
This tests that we can send all 256 8-bit ASCII characters
to a child process.'''
# FIXME: Getting this to support Python's Unicode was
# too hard, so I disabled this. I should fix this one day.
return 0
child = pexpect.spawn('python getch.py')
try:
for i in range(256):
# child.send(unicode('%d'%i, encoding='utf-8'))
child.send(chr(i))
child.expect ('%d\r\n' % i)
except Exception, e:
msg = "Did not echo character value: " + str(i) + "\n"
msg = msg + str(e)
self.fail(msg)
def test_sendintr (self):
try:
child = pexpect.spawn('python getch.py')
child.sendintr()
child.expect ('3\r\n')
except Exception, e:
msg = "Did not echo character value: 3\n"
msg = msg + str(e)
self.fail(msg)
def test_bad_sendcontrol_chars (self):
'''This tests that sendcontrol will return 0 for an unknown char. '''
child = pexpect.spawn('python getch.py')
retval = child.sendcontrol('1')
assert retval == 0, "sendcontrol() should have returned 0 because there is no such thing as ctrl-1."
def test_sendcontrol(self):
'''This tests that we can send all special control codes by name.
'''
child = pexpect.spawn('python getch.py')
#child.delaybeforesend = 0.1
for i in 'abcdefghijklmnopqrstuvwxyz':
child.sendcontrol(i)
child.expect ('[0-9]+\r\n')
#print child.after
child.sendcontrol('@')
child.expect ('0\r\n')
#print child.after
child.sendcontrol('[')
child.expect ('27\r\n')
#print child.after
child.sendcontrol('\\')
child.expect ('28\r\n')
#print child.after
child.sendcontrol(']')
child.expect ('29\r\n')
#print child.after
child.sendcontrol('^')
child.expect ('30\r\n')
#print child.after
child.sendcontrol('_')
child.expect ('31\r\n')
#print child.after
child.sendcontrol('?')
child.expect ('127\r\n')
#print child.after
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(TestCtrlChars,'test') | 0.168344 | 0.372962 |
import argparse
import json
import os
import time

import numpy as np
import paho.mqtt.client as mqtt
from PIL import Image
SAVED_IMAGE_DIR = 'images'  # root folder; frames land in images/<door_id>/
IMAGE_DATA_TOPIC = "image/data"  # MQTT topic carrying JSON-encoded frames
# Maps MQTT client ids to the door each camera watches; messages from
# devices not listed here are rejected in on_message.
device_door_map = {
    "web_61f3442604cb": "door1",  # Samsung Galaxy
    "web_4342e44ea8da": "door2",  # Dorcas' iPhone
    "web_40cf1dd6a603": "door2",  # Laptop
}
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: subscribe to the image topic on success."""
    if rc != 0:
        # Non-zero rc means the broker refused the connection.
        print("Connection failed with code: %d." % rc)
        return
    print("Successfully connected to broker.")
    client.subscribe(IMAGE_DATA_TOPIC)
def on_message(client, userdata, msg):
    """Decode an image payload and persist it under the sender's door folder.

    Expects a JSON payload with keys ``filename``, ``device`` and ``data``
    (nested lists of pixel values). Messages from unknown devices are
    logged and dropped.
    """
    recv_dict = json.loads(msg.payload)
    filename = recv_dict["filename"]
    device = recv_dict["device"]
    data = recv_dict["data"]
    print("Received '%s' from %s. Size: %s." %
          (filename, device, np.shape(data)))
    if device not in device_door_map:
        print("Error: unrecognised device")
        return
    door_id = device_door_map[device]
    # Create the target directory if needed — Image.save raises if the
    # per-door folder does not already exist on a fresh deployment.
    target_dir = os.path.join(SAVED_IMAGE_DIR, door_id)
    os.makedirs(target_dir, exist_ok=True)
    img_data = np.array(data).astype(np.uint8)
    img = Image.fromarray(img_data)
    img.save(os.path.join(target_dir, filename))
def setup(hostname, username, password, tls=False):
    """Connect to the MQTT broker and start its network loop in a thread.

    Uses port 8883 with TLS when *tls* is True, otherwise plain 1883.
    Returns the connected client.
    """
    mqtt_client = mqtt.Client()
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message
    mqtt_client.username_pw_set(username, password)
    if tls:
        mqtt_client.tls_set()
        port = 8883
    else:
        port = 1883
    mqtt_client.connect(hostname, port=port)
    mqtt_client.loop_start()
    return mqtt_client
def main():
    """Parse broker credentials and run the image storage service forever."""
    parser = argparse.ArgumentParser(
        description='Run image classifying service.')
    parser.add_argument('-u', '-username', dest='username', required=True,
                        help='username for connecting to MQTT broker')
    parser.add_argument('-p', '-password', dest='password', required=True,
                        help='password for connecting to MQTT broker')
    args = parser.parse_args()
    setup("locksense.dorcastan.com", args.username, args.password, tls=True)
    # The MQTT client runs in a background thread (loop_start); keep the
    # main thread alive without the busy-wait that pegged a CPU core.
    while True:
        time.sleep(1)
if __name__ == '__main__':
main() | storage/image_storage.py | import argparse
import json
import numpy as np
import paho.mqtt.client as mqtt
from PIL import Image
SAVED_IMAGE_DIR = 'images'
IMAGE_DATA_TOPIC = "image/data"
device_door_map = {
"web_61f3442604cb": "door1", # Samsung Galaxy
"web_4342e44ea8da": "door2", # Dorcas' iPhone
"web_40cf1dd6a603": "door2", # Laptop
}
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Successfully connected to broker.")
client.subscribe(IMAGE_DATA_TOPIC)
else:
print("Connection failed with code: %d." % rc)
def on_message(client, userdata, msg):
recv_dict = json.loads(msg.payload)
filename = recv_dict["filename"]
device = recv_dict["device"]
data = recv_dict["data"]
print("Received '%s' from %s. Size: %s." %
(filename, device, np.shape(data)))
if device not in device_door_map:
print("Error: unrecognised device")
return
door_id = device_door_map[device]
img_data = np.array(data).astype(np.uint8)
img = Image.fromarray(img_data)
img.save('%s/%s/%s' % (SAVED_IMAGE_DIR, door_id, filename))
def setup(hostname, username, password, tls=False):
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(username, password)
port = 1883
if tls:
client.tls_set()
port = 8883
client.connect(hostname, port=port)
client.loop_start()
return client
def main():
parser = argparse.ArgumentParser(
description='Run image classifying service.')
parser.add_argument('-u', '-username', dest='username', required=True,
help='username for connecting to MQTT broker')
parser.add_argument('-p', '-password', dest='password', required=True,
help='password for connecting to MQTT broker')
args = parser.parse_args()
setup("locksense.dorcastan.com", args.username, args.password, tls=True)
while True:
pass
if __name__ == '__main__':
main() | 0.254972 | 0.07989 |
try :
    from . import cudaext
except :
    # On Python 2 a failed relative import can leave the name bound in the
    # namespace; remove it before re-raising so callers see a clean error.
    import sys
    if sys.version_info[0] == 2 :
        del cudaext
    raise
import numpy as np
import weakref
from .native_qubit_processor import NativeQubitProcessor
from .native_qubit_states import NativeQubitStates
from .native_qubits_states_getter import NativeQubitsStatesGetter
from .native_sampling_pool import NativeSamplingPool
from . import glue
import sys
# Handle to this module itself, so mutable state can be stored as module
# attributes (the "this" idiom).
this = sys.modules[__name__]
# initialization flag.
this.initialized = False
# dictionary that holds native instances.
this.native_instances = weakref.WeakValueDictionary()
# NOTE(review): this definition is immediately shadowed by the second
# set_preference below, so it is dead code — kept byte-for-byte. Confirm
# whether it should be deleted upstream.
def set_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
    # Preferences may only change before devices_initialize() has run.
    if this.initialized :
        raise RuntimeError('already initialized.')
    this.max_po2idx_per_chunk = max_po2idx_per_chunk
    this.device_ids = device_ids
    this.memory_store_size = memory_store_size
def set_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
    """Override device/memory preferences; only non-default arguments take effect.

    Must be called before the first simulator use — raises RuntimeError once
    the CUDA devices have been initialized.
    """
    if this.initialized :
        raise RuntimeError('already initialized.')
    if device_ids :
        this.device_ids = device_ids
    if max_po2idx_per_chunk != -1 :
        this.max_po2idx_per_chunk = max_po2idx_per_chunk
    if memory_store_size != -1 :
        this.memory_store_size = memory_store_size
def reset_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
    # NOTE(review): the parameters are accepted but ignored — this always
    # restores the defaults. Presumably the signature mirrors set_preference
    # for call-site symmetry; confirm.
    this.device_ids = []
    this.max_po2idx_per_chunk = -1
    this.memory_store_size = -1
def create_qubit_states(dtype) :
    # Lazily initialize the CUDA devices on first use.
    if not this.initialized :
        module_init()
    # create qubit_processor
    qproc = NativeQubitProcessor(dtype, cudaext.qubit_processor_new(dtype))
    # Track native objects weakly so module_finalize() can delete whatever
    # is still alive at interpreter exit.
    this.native_instances[id(qproc)] = qproc
    # create qubit states
    ptr = cudaext.qubit_states_new(dtype)
    qstates = NativeQubitStates(ptr, qproc)
    this.native_instances[id(qstates)] = qstates
    return qstates
def create_qubits_states_getter(dtype) :
    # Wrap the native getter pointer in the CUDA-specific Python facade.
    ptr = cudaext.qubits_states_getter_new(dtype)
    return CUDAQubitsStatesGetter(dtype, ptr)
class CUDAQubitsStatesGetter(NativeQubitsStatesGetter) :
    # CUDA-backed getter for qubit state vectors and sampling pools.
    def __init__(self, dtype, ptr) :
        NativeQubitsStatesGetter.__init__(self, dtype, ptr)
    def create_sampling_pool(self, qreg_ordering,
                             n_lanes, n_hidden_lanes, lane_trans, empty_lanes,
                             sampling_pool_factory = None) :
        # NOTE(review): the hard-coded True flag is interpreted by the base
        # class's _create_sampling_pool — presumably it selects the CUDA
        # path; confirm against NativeQubitsStatesGetter.
        return self._create_sampling_pool(qreg_ordering, n_lanes, n_hidden_lanes, lane_trans,
                                          empty_lanes, True, sampling_pool_factory)
def module_init() :
    # Bring up the CUDA devices using the currently stored preferences.
    cudaext.devices_initialize(this.device_ids, this.max_po2idx_per_chunk, this.memory_store_size)
    this.initialized = True
def module_finalize() :
    # Delete native objects that are still alive before tearing down the
    # devices; WeakValueDictionary.values() only yields uncollected ones.
    instances = this.native_instances.values()
    for ptr in instances :
        ptr.delete()
    if this.initialized :
        cudaext.devices_clear()
    this.initialized = False
import atexit
atexit.register(module_finalize)
# set default preference
this.reset_preference() | qgate/simulator/cudaruntime.py | try :
from . import cudaext
except :
import sys
if sys.version_info[0] == 2 :
del cudaext
raise
import numpy as np
import weakref
from .native_qubit_processor import NativeQubitProcessor
from .native_qubit_states import NativeQubitStates
from .native_qubits_states_getter import NativeQubitsStatesGetter
from .native_sampling_pool import NativeSamplingPool
from . import glue
import sys
this = sys.modules[__name__]
# initialization flag.
this.initialized = False
# dictionary that holds native instances.
this.native_instances = weakref.WeakValueDictionary()
def set_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
if this.initialized :
raise RuntimeError('already initialized.')
this.max_po2idx_per_chunk = max_po2idx_per_chunk
this.device_ids = device_ids
this.memory_store_size = memory_store_size
def set_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
if this.initialized :
raise RuntimeError('already initialized.')
if len(device_ids) != 0 :
this.device_ids = device_ids
if max_po2idx_per_chunk != -1 :
this.max_po2idx_per_chunk = max_po2idx_per_chunk
if memory_store_size != -1 :
this.memory_store_size = memory_store_size
def reset_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
this.device_ids = []
this.max_po2idx_per_chunk = -1
this.memory_store_size = -1
def create_qubit_states(dtype) :
if not this.initialized :
module_init()
# create qubit_processor
qproc = NativeQubitProcessor(dtype, cudaext.qubit_processor_new(dtype))
this.native_instances[id(qproc)] = qproc
# create qubit states
ptr = cudaext.qubit_states_new(dtype)
qstates = NativeQubitStates(ptr, qproc)
this.native_instances[id(qstates)] = qstates
return qstates
def create_qubits_states_getter(dtype) :
ptr = cudaext.qubits_states_getter_new(dtype)
return CUDAQubitsStatesGetter(dtype, ptr)
class CUDAQubitsStatesGetter(NativeQubitsStatesGetter) :
def __init__(self, dtype, ptr) :
NativeQubitsStatesGetter.__init__(self, dtype, ptr)
def create_sampling_pool(self, qreg_ordering,
n_lanes, n_hidden_lanes, lane_trans, empty_lanes,
sampling_pool_factory = None) :
return self._create_sampling_pool(qreg_ordering, n_lanes, n_hidden_lanes, lane_trans,
empty_lanes, True, sampling_pool_factory)
def module_init() :
cudaext.devices_initialize(this.device_ids, this.max_po2idx_per_chunk, this.memory_store_size)
this.initialized = True
def module_finalize() :
instances = this.native_instances.values()
for ptr in instances :
ptr.delete()
if this.initialized :
cudaext.devices_clear()
this.initialized = False
import atexit
atexit.register(module_finalize)
# set default preference
this.reset_preference() | 0.347316 | 0.220542 |
from __future__ import print_function
from pandas import option_context
from ..externals.colored import stylize, fg, attr
# Dictionary of term colors used for printing to terminal
# Dictionary of term colors used for printing to terminal
# Keys prefixed 'official_' color the headline score column; the bare
# train/valid/test keys color the remaining columns of the same row.
fg_colors = {
    'official_train': 'light_green',
    'official_valid': 'light_blue',
    'official_test': 'red',
    'train': 'dark_sea_green_3b',
    'valid': 'light_slate_blue',
    'test': 'pink_1',
    'title': 'gold_3b',
    'warning': 'grey_46',
}
def print_title(text):
    """Print *text* to the terminal in the bold title color."""
    # Parameter renamed from `str`, which shadowed the builtin.
    print(stylize(text, fg(fg_colors['title']) + attr('bold')))
def print_warning(text):
    """Print *text* to the terminal in the muted warning color."""
    # Parameter renamed from `str`, which shadowed the builtin.
    print(stylize(text, fg(fg_colors['warning'])))
def print_df_scores(df_scores, indent=''):
    """Pretty print the scores dataframe.
    Parameters
    ----------
    df_scores : pd.DataFrame
        the score dataframe
    indent : str, default=''
        indentation if needed
    """
    with option_context("display.width", 160):
        df_repr = repr(df_scores)
        df_repr_out = []
        # Pair each rendered line with its row label; the two leading Nones
        # account for the header lines of the DataFrame repr.
        for line, color_key in zip(df_repr.splitlines(),
                                   [None, None] +
                                   list(df_scores.index.values)):
            if line.strip() == 'step':
                # Skip the index-name line pandas emits.
                continue
            if color_key is None:
                # table header
                line = stylize(line, fg(fg_colors['title']) + attr('bold'))
            if color_key is not None:
                tokens = line.split()
                tokens_bak = tokens[:]
                if 'official_' + color_key in fg_colors:
                    # line label and official score bold & bright
                    label_color = fg(fg_colors['official_' + color_key])
                    tokens[0] = stylize(tokens[0], label_color + attr('bold'))
                    tokens[1] = stylize(tokens[1], label_color + attr('bold'))
                if color_key in fg_colors:
                    # other scores pale
                    tokens[2:] = [stylize(token, fg(fg_colors[color_key]))
                                  for token in tokens[2:]]
                # NOTE(review): str.replace rewrites the first occurrence of
                # each token, so duplicate values on one line could be colored
                # at the wrong position — confirm acceptable.
                for token_from, token_to in zip(tokens_bak, tokens):
                    line = line.replace(token_from, token_to)
                line = indent + line
            df_repr_out.append(line)
        print('\n'.join(df_repr_out))
from pandas import option_context
from ..externals.colored import stylize, fg, attr
# Dictionary of term colors used for printing to terminal
fg_colors = {
'official_train': 'light_green',
'official_valid': 'light_blue',
'official_test': 'red',
'train': 'dark_sea_green_3b',
'valid': 'light_slate_blue',
'test': 'pink_1',
'title': 'gold_3b',
'warning': 'grey_46',
}
def print_title(str):
print(stylize(str, fg(fg_colors['title']) + attr('bold')))
def print_warning(str):
print(stylize(str, fg(fg_colors['warning'])))
def print_df_scores(df_scores, indent=''):
"""Pretty print the scores dataframe.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
indent : str, default=''
indentation if needed
"""
with option_context("display.width", 160):
df_repr = repr(df_scores)
df_repr_out = []
for line, color_key in zip(df_repr.splitlines(),
[None, None] +
list(df_scores.index.values)):
if line.strip() == 'step':
continue
if color_key is None:
# table header
line = stylize(line, fg(fg_colors['title']) + attr('bold'))
if color_key is not None:
tokens = line.split()
tokens_bak = tokens[:]
if 'official_' + color_key in fg_colors:
# line label and official score bold & bright
label_color = fg(fg_colors['official_' + color_key])
tokens[0] = stylize(tokens[0], label_color + attr('bold'))
tokens[1] = stylize(tokens[1], label_color + attr('bold'))
if color_key in fg_colors:
# other scores pale
tokens[2:] = [stylize(token, fg(fg_colors[color_key]))
for token in tokens[2:]]
for token_from, token_to in zip(tokens_bak, tokens):
line = line.replace(token_from, token_to)
line = indent + line
df_repr_out.append(line)
print('\n'.join(df_repr_out)) | 0.658857 | 0.149252 |
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.learning_curve import validation_curve
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.tree import export_graphviz
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import SelectFdr
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.ensemble import ExtraTreesClassifier
def check_qm(df):
    """Report, per column, how many values contain the '?' missing marker.

    Only string (object-dtype) columns are scanned: the `.str` accessor
    raises on numeric columns, which previously crashed this function on
    mixed-dtype frames.
    """
    for n in list(df.columns):
        if df[n].dtype != object:
            # Numeric columns cannot hold the '?' marker; skip them.
            continue
        qestmark_qty = sum([1 for i in list(df[n].str.find("?")) if i != -1])
        if qestmark_qty == 0: continue
        print ('column name is {name}'.format(name = n))
        print ('question mark qty is {qty}'.format(qty = qestmark_qty))
def make_class_map(df, df_col_ls):
    """Build a label -> integer-code mapping for every non-int64 column.

    Codes follow the sorted order produced by np.unique; the literal
    'N/A' label is always coded as -1. int64 columns are skipped.
    """
    cls_map_dict = {}
    for col in df_col_ls:
        if df[col].dtype == 'int64':
            continue
        labels = np.unique(df[col])
        cls_map_dict[col] = {
            label: (-1 if label == 'N/A' else code)
            for code, label in enumerate(labels)
        }
    return cls_map_dict
def do_class_map(df, df_col_ls, cls_map_dict):
    """Replace string labels with their integer codes in place; return df.

    int64 columns are left untouched, mirroring make_class_map's skip rule.
    """
    for col in df_col_ls:
        if df[col].dtype == 'int64':
            continue
        df[col] = df[col].map(cls_map_dict[col])
    return df
class mushroom_ana:
def __init__(self, df):
self.raw_data = df
self.col_names = self.raw_data.columns
def _dp_remove_missing(self):
'''
input: dataframe/ rawdata
output:
1. deal with missing data
2. make the data split
'''
check_qm(self.raw_data)
def _dp_data_2split(self):
# turn the data into int/float
# split the data
cls_map_dict = make_class_map(self.raw_data, self.col_names)
self.raw_data = do_class_map(self.raw_data, self.col_names, cls_map_dict)
# print (cls_map_dict)
self.y = self.raw_data[self.col_names[0]].values
self.X = self.raw_data[self.col_names[1:]].values
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size = .3, random_state = 0)
def _feature_selection(self):
# --------- removing feature with low variance --------------
# var_thr = VarianceThreshold(threshold=(.8 * (1 - .8)))
# X_var_thr = var_thr.fit_transform(self.X)
# # print (self.X.shape)
# # print (X_var_thr.shape)
# # print ([self.col_names[1:][i] for i in var_thr.get_support(indices = True)])
# --------- univariate feature selection --------------
# # check different alg will impact the feature selection or not.
# sel_best = SelectKBest(chi2, k=8)
# sel_best_0 = SelectKBest(f_classif, k=8)
# sel_best_00 = SelectKBest(mutual_info_classif, k=8)
# X_sel_best = sel_best.fit_transform(self.X, self.y)
# X_sel_best_0 = sel_best_0.fit_transform(self.X, self.y)
# X_sel_best_00 = sel_best_00.fit_transform(self.X, self.y)
# print (X_sel_best.shape)
# print ([self.col_names[1:][i] for i in sel_best.get_support(indices = True)])
# print ([self.col_names[1:][i] for i in sel_best_0.get_support(indices = True)])
# print ([self.col_names[1:][i] for i in sel_best_00.get_support(indices = True)])
# sel_best_1 = SelectPercentile(chi2, percentile = 19)
# X_sel_best_1 = sel_best_1.fit_transform(self.X, self.y)
# print (X_sel_best_1.shape)
# print ([self.col_names[1:][i] for i in sel_best_1.get_support(indices = True)])
# --------- select From Model --------------
clf = ExtraTreesClassifier()
clf = clf.fit(self.X, self.y)
col_imp = {j:i for i in clf.feature_importances_ for j in self.col_names[1:]}
print (sorted(col_imp, key = col_imp.get)[:7])
model = SelectFromModel(clf, prefit = True)
X_new = model.transform(self.X)
def _paratune(self, alg, param_grid, score_name):
gs = GridSearchCV(estimator = alg, param_grid = param_grid, scoring = score_name, cv = 5)
gs = gs.fit(self.X, self.y)
# print (gs.best_score_)
# print (gs.best_params_)
def _learning_cur_plot(self, alg, param_name, param_range, score_name):
train_scores, test_scores = validation_curve(estimator = alg, X = self.X_train, y = self.y_train, param_name = param_name, param_range = param_range, cv = 5, scoring = score_name)
train_mean = np.mean(train_scores, axis = 1)
train_std = np.std(train_scores, axis = 1)
test_mean = np.mean(test_scores, axis = 1)
test_std = np.std(test_scores, axis = 1)
# print ('train_mean: ', train_mean)
# print ('train_std: ', train_std)
# print ('test_mean: ', test_mean)
# print ('test_std: ', test_std)
plt.plot(param_range, train_mean, color = 'blue', marker = 'o', markersize = 5, label = 'training {score_name}'.format(score_name = score_name))
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha = .15, color = 'blue')
plt.plot(param_range, test_mean, color = 'green', marker = 's', markersize = 5, linestyle = '--', label = 'validation {score_name}'.format(score_name = score_name))
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha = .15, color = 'green')
plt.grid()
plt.xscale('log')
plt.legend(loc = 'lower right')
plt.xlabel(param_name)
plt.ylabel('Accuracy')
# plt.ylim([.5, .7])
plt.show()
def decision_tree(self):
tree = DecisionTreeClassifier(random_state = 0)
param_range = ['entropy', 'gini' ]
depth_range = [3, 5, 7, 8, 9, 10, 11]
param_grid = {'criterion': param_range, 'max_depth': depth_range}
'''tune paramter'''
self._paratune(tree, param_grid, 'accuracy')
'''plot the learning curve'''
tree = DecisionTreeClassifier(random_state = 0, criterion = 'entropy')
self._learning_cur_plot(tree, 'max_depth', depth_range, "roc_auc")
tree = DecisionTreeClassifier(criterion = "entropy", random_state = 0, max_depth = 7)
tree.fit(self.X_train, self.y_train)
self.y_pred = tree.predict(self.X_test)
self.y_prob = tree.predict_proba(self.X_test)[:, 1]
# export_graphviz(tree, out_file = 'tree.dot', feature_names = self.col_names[1:])
def preci_scores(self):
return (precision_score(y_true = self.y_test, y_pred = self.y_pred))
def accuracy_scores(self):
return (accuracy_score(y_true = self.y_test, y_pred = self.y_pred))
def roc_auc_scores(self):
return (roc_auc_score(y_true = self.y_test, y_score = self.y_prob))
def test(self):
    """Smoke helper: print the shape of the wrapped dataframe."""
    print (self.raw_data.shape)
if __name__ == '__main__':
    # UCI mushroom data ships without a header row; names are attached below.
    df_mushroom = pd.read_csv('agaricus-lepiota.data', header = None)
    name_col = ['class', 'cap-shape', 'cap-surface', 'cap-color', 'bruises?', 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color', 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population', 'habitat']
    df_mushroom.columns = name_col
    m_ana = mushroom_ana(df_mushroom)
    # encode categories and build the train/test split, then rank features
    m_ana._dp_data_2split()
    m_ana._feature_selection()
    # m_ana.decision_tree()
    # print ('precision score is: {precision_score: .3f}'.format(precision_score = m_ana.preci_scores()))
    # print ('accuracy score is: {accuracy_score: .3f}'.format(accuracy_score = m_ana.accuracy_scores()))
    # print ('score is:{roc_auc_score: .3f}'.format(roc_auc_score = m_ana.roc_auc_scores()))
'''
compare version:
- try the data with decision tree with/ without dealing with the missing data
- compare the accuracy
step 1: make clean data.
- deal with the missing data
- turn the string into number if needed / only "stalk-root" has the question mark
- make the data split [done]
step 2: check all the algorithm
- use the algorithm
- tune the parameter [decision tree done/ ]
- check the learning curve [decision tree done/ ]
- ! check the accuracy [decision tree done/ ]
- ! try to find the important parameter
* lr
* svm
* decision tree [done]
* naive bayes (any other naive bayes could be used except the Gaussian NB?)
a = [j for i in ['cat','dog','rabbit'] for j in i]
print (a)
''' | mushroom_wen.py | import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.learning_curve import validation_curve
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.tree import export_graphviz
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import SelectFdr
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.ensemble import ExtraTreesClassifier
def check_qm(df):
    """Report which string columns of *df* contain the missing-value marker '?'.

    For every string (object-dtype) column, count the cells whose value
    contains a question mark and print the column name with that count.
    Columns without any '?' produce no output.  Purely diagnostic: prints
    to stdout and returns None.
    """
    for n in list(df.columns):
        # The .str accessor only works on string data; the original crashed
        # with AttributeError on numeric columns, so guard the dtype (makes
        # the helper safe to call after categories are mapped to ints too).
        if df[n].dtype != object: continue
        # regex=False: match a literal '?', not the regex quantifier
        qestmark_qty = int(df[n].str.contains("?", regex = False).sum())
        if qestmark_qty == 0: continue
        print ('column name is {name}'.format(name = n))
        print ('question mark qty is {qty}'.format(qty = qestmark_qty))
def make_class_map(df, df_col_ls):
    '''
    Build a label -> integer code mapping for every string column.
    =====================================
    df: dataframe whose categorical columns are inspected
    df_col_ls: iterable of column names to consider
    returns: dict mapping column name -> {label: code}.  Each label gets its
    index within the sorted unique labels of that column, except the literal
    'N/A' which is always coded as -1.  Integer (int64) columns are skipped.
    '''
    cls_map_dict = dict()
    for col in df_col_ls:
        if df[col].dtype == 'int64': continue
        unique_labels = np.unique(df[col])
        cls_map_dict[col] = {
            label: (-1 if label == 'N/A' else pos)
            for pos, label in enumerate(unique_labels)
        }
    return cls_map_dict
def do_class_map(df, df_col_ls, cls_map_dict):
    '''
    Replace category labels with their integer codes, in place.

    Integer (int64) columns are left untouched; every other listed column is
    mapped through cls_map_dict[column].  Returns the (mutated) frame.
    '''
    for col in df_col_ls:
        if df[col].dtype != 'int64':
            df[col] = df[col].map(cls_map_dict[col])
    return df
class mushroom_ana:
    """Analysis pipeline for the UCI mushroom (agaricus-lepiota) dataset.

    Wraps a raw pandas DataFrame and provides the individual workflow steps:
    missing-value diagnostics, categorical encoding + train/test split,
    feature selection, hyper-parameter tuning and evaluation of a
    decision-tree classifier.
    """

    def __init__(self, df):
        self.raw_data = df
        self.col_names = self.raw_data.columns

    def _dp_remove_missing(self):
        """Diagnostic step: report columns containing the '?' missing marker."""
        check_qm(self.raw_data)

    def _dp_data_2split(self):
        """Encode categorical columns as ints and split 70/30 into train/test.

        Stores X, y, X_train, X_test, y_train, y_test on the instance.
        The first column ('class') is the label, the rest are features.
        """
        cls_map_dict = make_class_map(self.raw_data, self.col_names)
        self.raw_data = do_class_map(self.raw_data, self.col_names, cls_map_dict)
        self.y = self.raw_data[self.col_names[0]].values
        self.X = self.raw_data[self.col_names[1:]].values
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=.3, random_state=0)

    def _feature_selection(self):
        """Rank features with an ExtraTrees model and print part of the ranking.

        Earlier experiments (VarianceThreshold, SelectKBest / SelectPercentile
        with chi2 / f_classif / mutual_info) were removed as dead commented code.
        """
        clf = ExtraTreesClassifier()
        clf = clf.fit(self.X, self.y)
        # BUG FIX: the original nested comprehension
        #   {j: i for i in clf.feature_importances_ for j in self.col_names[1:]}
        # mapped *every* column to the last importance value.  Pair each
        # column with its own importance positionally instead.
        col_imp = dict(zip(self.col_names[1:], clf.feature_importances_))
        # ascending sort -> these are the 7 *least* important features.
        # NOTE(review): if the 7 most important were intended, reverse the sort.
        print(sorted(col_imp, key=col_imp.get)[:7])
        model = SelectFromModel(clf, prefit=True)
        X_new = model.transform(self.X)  # reduced feature matrix; currently unused

    def _paratune(self, alg, param_grid, score_name):
        """Grid-search *alg* over *param_grid* with 5-fold CV.

        NOTE(review): fits on the full X/y (not the training split) and the
        result prints are commented out, so this currently only spends CPU —
        confirm whether best_params_ should be reported/used.
        """
        gs = GridSearchCV(estimator=alg, param_grid=param_grid,
                          scoring=score_name, cv=5)
        gs = gs.fit(self.X, self.y)
        # print(gs.best_score_)
        # print(gs.best_params_)

    def _learning_cur_plot(self, alg, param_name, param_range, score_name):
        """Plot a validation curve for *alg* over *param_range*.

        Uses 5-fold CV on the training split and shows mean +/- std of the
        train and validation scores on a log-scaled x axis.
        """
        train_scores, test_scores = validation_curve(
            estimator=alg, X=self.X_train, y=self.y_train,
            param_name=param_name, param_range=param_range,
            cv=5, scoring=score_name)
        train_mean = np.mean(train_scores, axis=1)
        train_std = np.std(train_scores, axis=1)
        test_mean = np.mean(test_scores, axis=1)
        test_std = np.std(test_scores, axis=1)
        plt.plot(param_range, train_mean, color='blue', marker='o', markersize=5,
                 label='training {score_name}'.format(score_name=score_name))
        plt.fill_between(param_range, train_mean + train_std, train_mean - train_std,
                         alpha=.15, color='blue')
        plt.plot(param_range, test_mean, color='green', marker='s', markersize=5,
                 linestyle='--',
                 label='validation {score_name}'.format(score_name=score_name))
        plt.fill_between(param_range, test_mean + test_std, test_mean - test_std,
                         alpha=.15, color='green')
        plt.grid()
        plt.xscale('log')
        plt.legend(loc='lower right')
        plt.xlabel(param_name)
        # NOTE(review): axis is labelled 'Accuracy' even when another scorer
        # (e.g. roc_auc) is plotted — consider labelling with score_name.
        plt.ylabel('Accuracy')
        plt.show()

    def decision_tree(self):
        """Tune, train and evaluate a decision-tree classifier.

        Stores test predictions in self.y_pred and positive-class
        probabilities in self.y_prob for the *_scores helpers.
        """
        tree = DecisionTreeClassifier(random_state=0)
        param_range = ['entropy', 'gini']
        depth_range = [3, 5, 7, 8, 9, 10, 11]
        param_grid = {'criterion': param_range, 'max_depth': depth_range}
        # tune the hyper-parameters
        self._paratune(tree, param_grid, 'accuracy')
        # plot the validation curve over tree depth
        tree = DecisionTreeClassifier(random_state=0, criterion='entropy')
        self._learning_cur_plot(tree, 'max_depth', depth_range, "roc_auc")
        # max_depth=7 presumably picked from the curves above — TODO confirm
        tree = DecisionTreeClassifier(criterion="entropy", random_state=0, max_depth=7)
        tree.fit(self.X_train, self.y_train)
        self.y_pred = tree.predict(self.X_test)
        # column 1 = probability of the positive class (used for ROC-AUC)
        self.y_prob = tree.predict_proba(self.X_test)[:, 1]
        # export_graphviz(tree, out_file='tree.dot', feature_names=self.col_names[1:])

    def preci_scores(self):
        """Return the test-set precision of the last trained model."""
        return precision_score(y_true=self.y_test, y_pred=self.y_pred)

    def accuracy_scores(self):
        """Return the test-set accuracy of the last trained model."""
        return accuracy_score(y_true=self.y_test, y_pred=self.y_pred)

    def roc_auc_scores(self):
        """Return the test-set ROC-AUC computed from predicted probabilities."""
        return roc_auc_score(y_true=self.y_test, y_score=self.y_prob)

    def test(self):
        """Smoke helper: print the shape of the wrapped dataframe."""
        print(self.raw_data.shape)
if __name__ == '__main__':
    # UCI mushroom data ships without a header row; names are attached below.
    df_mushroom = pd.read_csv('agaricus-lepiota.data', header = None)
    name_col = ['class', 'cap-shape', 'cap-surface', 'cap-color', 'bruises?', 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color', 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population', 'habitat']
    df_mushroom.columns = name_col
    m_ana = mushroom_ana(df_mushroom)
    # encode categories and build the train/test split, then rank features
    m_ana._dp_data_2split()
    m_ana._feature_selection()
    # m_ana.decision_tree()
    # print ('precision score is: {precision_score: .3f}'.format(precision_score = m_ana.preci_scores()))
    # print ('accuracy score is: {accuracy_score: .3f}'.format(accuracy_score = m_ana.accuracy_scores()))
    # print ('score is:{roc_auc_score: .3f}'.format(roc_auc_score = m_ana.roc_auc_scores()))
'''
compare version:
- try the data with decision tree with/ without dealing with the missing data
- compare the accuracy
step 1: make clean data.
- deal with the missing data
- turn the string into number if needed / only "stalk-root" has the question mark
- make the data split [done]
step 2: check all the algorithm
- use the algorithm
- tune the parameter [decision tree done/ ]
- check the learning curve [decision tree done/ ]
- ! check the accuracy [decision tree done/ ]
- ! try to find the important parameter
* lr
* svm
* decision tree [done]
* naive bayes (any other naive bayes could be used except the Gaussian NB?)
a = [j for i in ['cat','dog','rabbit'] for j in i]
print (a)
''' | 0.377541 | 0.329904 |
# --- Element ids / locators of the central services list view -------------
SERVICES_TABLE_ID = 'central_services'
SERVICES_TABLE_ROWS_XPATH = '//table[@id="central_services"]//tbody/tr'
SERVICES_TABLE_ROW_CSS = '#central_services tbody tr'
SERVICE_ADD_BUTTON_ID = 'central_service_add'
SERVICE_EDIT_BUTTON_ID = 'central_service_details'
SERVICE_DELETE_BUTTON_ID = 'central_service_delete'
SERVICE_EDIT_DIALOG_CLEAR_BUTTON_ID = 'central_service_details_clear_search'

# --- Parameterized test rows for the "add central service" dialog ---------
# Judging by the error parameter names embedded in the rows, columns appear
# to be:
#   [service_code, target_service_code, target_service_version,
#    provider_name, provider_code, provider_class, provider_subsystem,
#    expect_error, error_message_template, error_parameter_name,
#    whitespace_trim_case]
# NOTE(review): column meanings inferred from the data itself — confirm
# against the test cases that consume these rows.
NEW_CENTRAL_SERVICE_DATA = [['CS_CODE', 'Test member 2', 'VERSION', 'Test member 2', '00000002', 'COM', 'Central monitoring client', False,
                             None, None, False],
                            [' CS_CODE ', ' TS1OWNER ', ' VERSION ', ' TS1 ', ' TS1OWNER ', 'GOV',
                             ' Management Services ', False, None, None, True],
                            # over-long inputs: each field rejected at > 255 characters
                            [256 * 'C', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             "Parameter '{0}' input exceeds 255 characters", 'serviceCode', False],
                            ['CS_CODE', 256 * 'C', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             "Parameter '{0}' input exceeds 255 characters", 'targetServiceCode', False],
                            ['CS_CODE', 'CODE', 256 * 'V', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             "Parameter '{0}' input exceeds 255 characters", 'targetServiceVersion', False],
                            ['CS_CODE', 'CODE', 'VERSION', 256 * 'P', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             "Parameter '{0}' input exceeds 255 characters", 'targetProviderName', False],
                            ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 256 * 'P', 'GOV', 'SUBSYSTEM', True,
                             "Parameter '{0}' input exceeds 255 characters", 'targetProviderCode', False],
                            ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 256 * 'S', True,
                             "Parameter '{0}' input exceeds 255 characters", 'targetProviderSubsystem', False],
                            # missing mandatory fields
                            ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', '', 'SUBSYSTEM', True,
                             'Missing parameter: {0}', 'targetProviderClass', False],
                            ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', '', 'GOV', 'SUBSYSTEM', True,
                             'Missing parameter: {0}', 'targetProviderCode', False],
                            ['CS_CODE', 'CODE', 'VERSION', '', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             'Missing parameter: {0}', 'targetProviderName', False],
                            ['CS_CODE', '', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                             'Missing parameter: {0}', 'targetServiceCode', False],
                            ]
# Same row layout as NEW_CENTRAL_SERVICE_DATA, used for the edit dialog.
EDIT_CENTRAL_SERVICE_DATA = [['CS_CODE', 'TS1OWNER', 'VERSION', 'TS1', 'TS1OWNER', 'GOV', 'Management Services', False,
                              None, None, False],
                             [' CS_CODE ', ' TS1OWNER ', ' VERSION ', ' TS1 ', ' TS1OWNER ', 'GOV',
                              ' Management Services ', False, None, None, True],
                             ['CS_CODE', 256 * 'C', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                              "Parameter '{0}' input exceeds 255 characters", 'targetServiceCode', False],
                             ['CS_CODE', 'CODE', 256 * 'V', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                              "Parameter '{0}' input exceeds 255 characters", 'targetServiceVersion', False],
                             ['CS_CODE', 'CODE', 'VERSION', 256 * 'P', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                              "Parameter '{0}' input exceeds 255 characters", 'targetProviderName', False],
                             ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 256 * 'P', 'GOV', 'SUBSYSTEM', True,
                              "Parameter '{0}' input exceeds 255 characters", 'targetProviderCode', False],
                             ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 256 * 'S', True,
                              "Parameter '{0}' input exceeds 255 characters", 'targetProviderSubsystem', False],
                             ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', '', 'SUBSYSTEM', True,
                              'Missing parameter: {0}', 'targetProviderClass', False],
                             ['CS_CODE', 'CODE', 'VERSION', 'P_NAME', '', 'GOV', 'SUBSYSTEM', True,
                              'Missing parameter: {0}', 'targetProviderCode', False],
                             ['CS_CODE', 'CODE', 'VERSION', '', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                              'Missing parameter: {0}', 'targetProviderName', False],
                             ['CS_CODE', '', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
                              'Missing parameter: {0}', 'targetServiceCode', False],
                             ]
# A known-valid central service record used by the tests.
CENTRAL_SERVICE = ['CS_CODE', 'TS1OWNER', 'VERSION', 'TS1', 'TS1OWNER', 'GOV', 'Management Services']
def get_central_service_text(text):
    """Return an XPath locator for the central services table cell whose text is exactly *text*."""
    xpath_template = "//table[@id='central_services']//td[text()='{0}']"
    return xpath_template.format(text)
SERVICES_TABLE_ROWS_XPATH = '//table[@id="central_services"]//tbody/tr'
SERVICES_TABLE_ROW_CSS = '#central_services tbody tr'
SERVICE_ADD_BUTTON_ID = 'central_service_add'
SERVICE_EDIT_BUTTON_ID = 'central_service_details'
SERVICE_DELETE_BUTTON_ID = 'central_service_delete'
SERVICE_EDIT_DIALOG_CLEAR_BUTTON_ID = 'central_service_details_clear_search'
NEW_CENTRAL_SERVICE_DATA = [['CS_CODE', 'Test member 2', 'VERSION', 'Test member 2', '00000002', 'COM', 'Central monitoring client', False,
None, None, False],
[' CS_CODE ', ' TS1OWNER ', ' VERSION ', ' TS1 ', ' TS1OWNER ', 'GOV',
' Management Services ', False, None, None, True],
[256 * 'C', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'serviceCode', False],
['CS_CODE', 256 * 'C', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetServiceCode', False],
['CS_CODE', 'CODE', 256 * 'V', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetServiceVersion', False],
['CS_CODE', 'CODE', 'VERSION', 256 * 'P', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderName', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 256 * 'P', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderCode', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 256 * 'S', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderSubsystem', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', '', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderClass', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', '', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderCode', False],
['CS_CODE', 'CODE', 'VERSION', '', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderName', False],
['CS_CODE', '', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetServiceCode', False],
]
EDIT_CENTRAL_SERVICE_DATA = [['CS_CODE', 'TS1OWNER', 'VERSION', 'TS1', 'TS1OWNER', 'GOV', 'Management Services', False,
None, None, False],
[' CS_CODE ', ' TS1OWNER ', ' VERSION ', ' TS1 ', ' TS1OWNER ', 'GOV',
' Management Services ', False, None, None, True],
['CS_CODE', 256 * 'C', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetServiceCode', False],
['CS_CODE', 'CODE', 256 * 'V', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetServiceVersion', False],
['CS_CODE', 'CODE', 'VERSION', 256 * 'P', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderName', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 256 * 'P', 'GOV', 'SUBSYSTEM', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderCode', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 256 * 'S', True,
"Parameter '{0}' input exceeds 255 characters", 'targetProviderSubsystem', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', 'P_CODE', '', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderClass', False],
['CS_CODE', 'CODE', 'VERSION', 'P_NAME', '', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderCode', False],
['CS_CODE', 'CODE', 'VERSION', '', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetProviderName', False],
['CS_CODE', '', 'VERSION', 'P_NAME', 'P_CODE', 'GOV', 'SUBSYSTEM', True,
'Missing parameter: {0}', 'targetServiceCode', False],
]
CENTRAL_SERVICE = ['CS_CODE', 'TS1OWNER', 'VERSION', 'TS1', 'TS1OWNER', 'GOV', 'Management Services']
def get_central_service_text(text):
    """Return an XPath locator matching the central services table cell with exact text *text*."""
    locator = "//table[@id='central_services']//td[text()='{0}']"
    return locator.format(text)
from random import random
from PyQt5.QtCore import QSize, Qt, QPoint
from PyQt5.QtGui import QColor, QBrush, QPen
from PyQt5.QtWidgets import (QLabel, QVBoxLayout, QTableWidget, QWidget, QHBoxLayout, QHeaderView,
QCheckBox, QTableWidgetItem, QComboBox, QStyledItemDelegate, QStyle)
from sdbcore.logger import Logger
from sdbgui.globalconfig import GlobalConfig
from sdbgui.icon import Icon
from sdbgui.movie import Movie
from sdbgui.resulttablecellwidget import ResultTableCellWidget
from sdbgui.tabstate import TabState
def make_unqiue_and_preserve_order(seq):
    """Return the elements of *seq* without duplicates, keeping first-occurrence order."""
    ordered = []
    known = set()
    for item in seq:
        if item not in known:
            known.add(item)
            ordered.append(item)
    return ordered
def make_shared_name(input_field, reference_field):
    """Combine an input field name with its reference counterpart as 'input (reference)'."""
    return "{0} ({1})".format(input_field, reference_field)
def make_stage_name(result):
    """Build the column label 'stage [intent]' from a comparison result mapping."""
    # TODO: Handle the case when input_stage != reference_stage
    return "%s [%s]" % (result["input_stage"], result["intent"])
class BackgroundDelegate(QStyledItemDelegate):
    """ Draw transparent background """
    # Item delegate that paints the cell's BackgroundRole brush itself and,
    # for selected cells, draws a black 2px border instead of the default
    # opaque selection highlight.

    def __init__(self, parent):
        super().__init__(parent)

    def paint(self, painter, option, index):
        # Fill with the model-supplied background brush first (if any) ...
        background = index.data(Qt.BackgroundRole)
        if isinstance(background, QBrush):
            painter.fillRect(option.rect, background)
        # ... then let the base class draw the regular item content on top.
        super().paint(painter, option, index)
        if option.state & QStyle.State_Selected:
            painter.save()
            pen = QPen(Qt.black, 2, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin)
            # inset by half the pen width so the border stays inside the cell
            # NOTE(review): w is a float (2 / 2 == 1.0) but QRect.adjusted
            # expects ints — confirm this does not raise under PyQt5.
            w = pen.width() / 2
            painter.setPen(pen)
            painter.drawRect(option.rect.adjusted(w, w, -w, -w))
            painter.restore()
class ResultTableWidget(QWidget):
    """Table view summarizing per-stage / per-field comparison results.

    Rows are fields and columns are stages; each cell holds a
    success/failure icon widget.  Selecting a failing cell jumps to the
    Error tab with the corresponding result data.
    """

    def __init__(self, resultwindow, stencil_field_mapper):
        super().__init__(resultwindow)

        # Data
        self.__stencil_field_mapper = stencil_field_mapper
        self.__draw_success_icons = True
        self.__draw_failure_icons = True
        self.__table_data = None
        self.__current_cell_row = None
        self.__current_cell_col = None
        self.__last_cell_row = None
        # BUG FIX: the original assigned __last_cell_row twice and never
        # initialized __last_cell_col, which try_switch_to_error_tab reads.
        self.__last_cell_col = None
        self.__current_invocation_count = 0

        # Widgets
        self.__widget_resultwindow = resultwindow
        self.__widget_table = QTableWidget(self)
        self.__currently_processing_custom_context_menu_request = False

        self.__widget_table.setEditTriggers(QTableWidget.NoEditTriggers)
        self.__widget_table.setContextMenuPolicy(Qt.CustomContextMenu)
        # BUG FIX: connect the table signals once here.  The original
        # connected them inside update_table, adding a duplicate connection
        # on every refresh so the click handlers fired multiple times.
        self.__widget_table.cellClicked[int, int].connect(self.cell_left_clicked)
        self.__widget_table.customContextMenuRequested[QPoint].connect(self.cell_right_clicked)

        self.__widget_label_title = QLabel("", parent=self)

        self.__widget_label_invocation_count = QLabel("Invocation count: ", parent=self)
        self.__widget_label_invocation_count.setStatusTip("Select the invocation of the stencil")

        self.__widget_combobox_invocation_count = QComboBox(self)
        self.__widget_combobox_invocation_count.currentIndexChanged.connect(
            self.set_invocation_count)

        self.__widget_checkbox_draw_success = QCheckBox(self)
        self.__widget_checkbox_draw_success.setIcon(Icon("success.png"))
        self.__widget_checkbox_draw_success.setChecked(True)
        self.__widget_checkbox_draw_success.stateChanged[int].connect(self.set_draw_success)
        self.__widget_checkbox_draw_success.setStatusTip("Show success icons")

        self.__widget_checkbox_draw_failure = QCheckBox(self)
        self.__widget_checkbox_draw_failure.setIcon(Icon("failure-small.png"))
        self.__widget_checkbox_draw_failure.setChecked(True)
        self.__widget_checkbox_draw_failure.stateChanged[int].connect(self.set_draw_failure)
        self.__widget_checkbox_draw_failure.setStatusTip("Show failure icons")

        self.__widget_label_result = QLabel("", parent=self)
        self.__widget_label_result_icon = QLabel("", parent=self)
        self.__widget_label_loading = QLabel("", parent=self)

        vbox = QVBoxLayout()

        hbox_top = QHBoxLayout()
        hbox_top.addWidget(self.__widget_label_title)
        hbox_top.addStretch(1)
        hbox_top.addWidget(self.__widget_checkbox_draw_success)
        hbox_top.addWidget(self.__widget_checkbox_draw_failure)
        vbox.addLayout(hbox_top)

        hbox_middle = QHBoxLayout()
        hbox_middle.addWidget(self.__widget_label_invocation_count)
        hbox_middle.addWidget(self.__widget_combobox_invocation_count)
        hbox_middle.addStretch(1)
        vbox.addLayout(hbox_middle)

        vbox.addWidget(self.__widget_table)

        hbox_bottom = QHBoxLayout()
        hbox_bottom.addWidget(self.__widget_label_result)
        hbox_bottom.addWidget(self.__widget_label_result_icon)
        hbox_bottom.addStretch(1)
        hbox_bottom.addWidget(self.__widget_label_loading)
        vbox.addLayout(hbox_bottom)

        self.setLayout(vbox)

    def make_update(self):
        """Refresh the whole view from the mapper's current comparison results."""
        Logger.info("Updating ResultTableWidget")

        self.__comparison_result_list = self.__stencil_field_mapper.comparison_result_list
        self.__widget_label_title.setText(
            "<b>%s</b>" % self.__comparison_result_list.shared_stencil_name())

        # Set current invocation count
        if self.__current_invocation_count >= self.__comparison_result_list.invocation_count():
            self.__current_invocation_count = 0

        num_errors = 0

        # Set invocation count widgets and compute errors
        if self.__comparison_result_list.invocation_count() >= 1:
            self.__widget_combobox_invocation_count.clear()
            for i in range(self.__comparison_result_list.invocation_count()):
                self.__widget_combobox_invocation_count.addItem("%i" % i)
                for result in self.__comparison_result_list.results(i):
                    num_errors += not result["match"]
        else:
            # No comparison found, roll back
            self.__widget_resultwindow.widget_mainwindow.popup_error_box(
                "<b>No valid comparisons</b><br/>No valid comparison were "
                "computed for the selected stencil pair.")
            self.__widget_resultwindow.make_back()
            # BUG FIX: the original fell through and kept updating a table
            # that has no data after rolling back.
            return

        self.__widget_label_invocation_count.setEnabled(
            self.__comparison_result_list.invocation_count() > 1)
        self.__widget_combobox_invocation_count.setEnabled(
            self.__comparison_result_list.invocation_count() > 1)

        # Update the table
        self.update_table()

        # Set bottom message and display a funny gif ;)
        if num_errors != 0:
            self.__widget_label_result_icon.clear()
            self.__widget_label_result.setText(
                "<b>%s error%s detected</b>" % (num_errors, "s" if num_errors > 1 else ""))
            self.__widget_label_result.setStyleSheet("QLabel {color: #B72424}")
        else:
            if random() < 0.2:
                rnd = random()
                if rnd < 0.33:
                    movie = Movie("dance_1.gif")
                    movie.setScaledSize(QSize(21, 25))
                elif rnd < 0.66:
                    movie = Movie("dance_2.gif")
                    movie.setScaledSize(QSize(42, 25))
                else:
                    movie = Movie("dance_3.gif")
                    movie.setScaledSize(QSize(20, 25))
                self.__widget_label_result_icon.setMovie(movie)
                movie.start()
            else:
                self.__widget_label_result_icon.clear()

            self.__widget_label_result.setText("<b>No errors detected! Hurray!</b>")
            self.__widget_label_result.setStyleSheet("QLabel {color: #478E40}")

    def set_invocation_count(self, idx):
        """Combobox slot: select which stencil invocation is displayed."""
        if idx < 0:
            self.__current_invocation_count = 0
        else:
            self.__current_invocation_count = int(
                self.__widget_combobox_invocation_count.itemText(idx))
        self.update_table()

    def update_table(self):
        """Rebuild the stage x field grid for the current invocation."""
        # Compute stages and fields
        stages = []
        fields = []
        fields_tooltip = []
        num_errors = 0
        first_error_cell = None

        for result in self.__comparison_result_list.results(self.__current_invocation_count):
            stages += [make_stage_name(result)]

            input_field = result["input_field_name"]
            reference_field = result["reference_field_name"]

            if input_field == reference_field:
                fields += [input_field]
                fields_tooltip += ["Field: \"%s\"" % input_field]
            else:
                # differing names are shown as "input (reference)"
                fields += [make_shared_name(input_field, reference_field)]
                fields_tooltip += [
                    "Input field: \"%s\", Reference field: \"%s\"" % (input_field, reference_field)]

            num_errors += not result["match"]

        stages = make_unqiue_and_preserve_order(stages)
        fields = make_unqiue_and_preserve_order(fields)
        fields_tooltip = make_unqiue_and_preserve_order(fields_tooltip)

        # Setup headers of table
        rows = len(fields)
        cols = len(stages)

        self.__widget_table.setRowCount(rows)
        self.__widget_table.setColumnCount(cols)
        self.__table_data = [([None] * cols) for row in range(rows)]

        self.__widget_table.setHorizontalHeaderLabels(stages)
        self.__widget_table.setVerticalHeaderLabels(fields)
        self.__widget_table.setStyleSheet(
            '''
            QTableWidget::item:selected:active {
                background: #FFFFFF;
                border-style: solid;
                border-color: #D4D8DD;
                border-width: 2px;
            }
            ''')

        for i in range(self.__widget_table.rowCount()):
            item = self.__widget_table.verticalHeaderItem(i)
            item.setToolTip(fields_tooltip[i])

        self.__widget_table.horizontalHeader().resizeSections(QHeaderView.Stretch)

        # Populate table
        for result in self.__comparison_result_list.results(self.__current_invocation_count):
            stage_idx = stages.index(make_stage_name(result))

            input_field_name = result["input_field_name"]
            if input_field_name in fields:
                field_idx = fields.index(input_field_name)
            else:
                field_idx = fields.index(
                    make_shared_name(input_field_name, result["reference_field_name"]))

            # Widget
            cell = ResultTableCellWidget(result["match"])
            cell.set_icon(self.__draw_success_icons, self.__draw_failure_icons)
            self.__widget_table.setCellWidget(field_idx, stage_idx, cell)

            # Save the first error for selection
            if not result["match"] and not first_error_cell:
                first_error_cell = [field_idx, stage_idx]

            # Item
            cell_item = QTableWidgetItem("")
            self.__widget_table.setItem(field_idx, stage_idx, cell_item)

            # Data
            self.__table_data[field_idx][stage_idx] = result

        # Emulate "left" click on first error
        if num_errors != 0:
            self.__widget_table.setCurrentCell(first_error_cell[0], first_error_cell[1])
            self.cell_left_clicked(first_error_cell[0], first_error_cell[1])

    def set_draw_success(self, state):
        """Checkbox slot: toggle visibility of the success icons."""
        self.__draw_success_icons = True if state == Qt.Checked else False
        self.update_icons()

    def set_draw_failure(self, state):
        """Checkbox slot: toggle visibility of the failure icons."""
        self.__draw_failure_icons = True if state == Qt.Checked else False
        self.update_icons()

    def update_icons(self):
        """Re-apply the icon visibility flags to every populated cell."""
        for i in range(self.__widget_table.rowCount()):
            for j in range(self.__widget_table.columnCount()):
                if self.__widget_table.cellWidget(i, j):
                    self.__widget_table.cellWidget(i, j).set_icon(self.__draw_success_icons,
                                                                  self.__draw_failure_icons)

    def set_current_cell(self, item):
        """Remember the row/column of *item* (or clear when item is None)."""
        if item:
            self.__current_cell_row = item.row()
            self.__current_cell_col = item.column()
        else:
            self.__current_cell_row = None
            self.__current_cell_col = None

    def cell_right_clicked(self, point):
        """Context-menu slot: treat a right click like a selection + jump."""
        self.set_current_cell(self.__widget_table.itemAt(point))
        self.try_switch_to_error_tab()

    def cell_left_clicked(self, row, column):
        """Click slot: remember the clicked cell."""
        self.set_current_cell(self.__widget_table.item(row, column))

    def try_switch_to_error_tab(self):
        """Open the Error tab for the currently selected failing cell.

        Returns True when the switch happened; False when there is no
        selection, the cell matched, or the shapes mismatch (in which case
        an error popup is shown at most once per cell).
        """
        Logger.info("Attempting to switch to Error tab")

        cur_row = self.__current_cell_row
        cur_col = self.__current_cell_col

        if cur_row is not None and cur_col is not None:
            result_data = self.__table_data[cur_row][cur_col]
            if not result_data["match"]:
                mainwindow = self.__widget_resultwindow.widget_mainwindow

                # Check if dimensions match, if not display an error message and abort
                if not result_data.shapes_match():

                    # We only display the error message that the dimensions mismatch once. If we
                    # don't do this it will popup two error message.. don't ask me why :(
                    if self.__last_cell_row == cur_row and self.__last_cell_col == cur_col:
                        return False

                    self.__last_cell_row = cur_row
                    self.__last_cell_col = cur_col

                    errmsg = "<b>Dimension mismatch</b><br/>"
                    errmsg += "Input '%s': %s<br/>" % (
                        result_data["input_field_name"], result_data.input_shape)
                    errmsg += "Reference '%s': %s" % (
                        result_data["reference_field_name"], result_data.reference_shape)
                    mainwindow.popup_error_box(errmsg)
                    return False
                else:
                    mainwindow.error_window_set_result_data(result_data)
                    mainwindow.switch_to_tab(TabState.Error)
                    self.__last_cell_row = self.__last_cell_col = None
                    return True
        return False
from random import random
from PyQt5.QtCore import QSize, Qt, QPoint
from PyQt5.QtGui import QColor, QBrush, QPen
from PyQt5.QtWidgets import (QLabel, QVBoxLayout, QTableWidget, QWidget, QHBoxLayout, QHeaderView,
QCheckBox, QTableWidgetItem, QComboBox, QStyledItemDelegate, QStyle)
from sdbcore.logger import Logger
from sdbgui.globalconfig import GlobalConfig
from sdbgui.icon import Icon
from sdbgui.movie import Movie
from sdbgui.resulttablecellwidget import ResultTableCellWidget
from sdbgui.tabstate import TabState
def make_unqiue_and_preserve_order(seq):
    """Return *seq*'s elements with duplicates removed, keeping first-occurrence order."""
    seen = set()
    # bind the bound method once so the comprehension avoids repeated lookups
    seen_add = seen.add
    # seen_add(x) returns None (falsy), so the element is kept exactly once
    return [x for x in seq if not (x in seen or seen_add(x))]
def make_shared_name(input_field, reference_field):
    """Combine an input field name with its reference counterpart as 'input (reference)'."""
    return "%s (%s)" % (input_field, reference_field)
def make_stage_name(result):
    """Build the column label 'stage [intent]' from a comparison result mapping."""
    # TODO: Handle the case when input_stage != reference_stage
    return result["input_stage"] + " [" + result["intent"] + "]"
class BackgroundDelegate(QStyledItemDelegate):
    """ Draw transparent background """
    # Delegate that paints the cell's BackgroundRole brush itself and draws a
    # black 2px border for selected cells instead of the opaque highlight.

    def __init__(self, parent):
        super().__init__(parent)

    def paint(self, painter, option, index):
        # Fill with the model-supplied background brush first (if any) ...
        background = index.data(Qt.BackgroundRole)
        if isinstance(background, QBrush):
            painter.fillRect(option.rect, background)
        # ... then let the base class draw the regular item content on top.
        super().paint(painter, option, index)
        if option.state & QStyle.State_Selected:
            painter.save()
            pen = QPen(Qt.black, 2, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin)
            # inset by half the pen width so the border stays inside the cell
            # NOTE(review): w is a float but QRect.adjusted expects ints — verify.
            w = pen.width() / 2
            painter.setPen(pen)
            painter.drawRect(option.rect.adjusted(w, w, -w, -w))
            painter.restore()
class ResultTableWidget(QWidget):
    """Table view of the comparison results of a stencil pair.

    Rows are fields, columns are stages. Each cell shows whether the
    input/reference comparison matched; clicking a mismatching cell
    switches to the Error tab for detailed inspection.
    """

    def __init__(self, resultwindow, stencil_field_mapper):
        super().__init__(resultwindow)

        # Data
        self.__stencil_field_mapper = stencil_field_mapper
        self.__draw_success_icons = True
        self.__draw_failure_icons = True
        self.__table_data = None
        self.__current_cell_row = None
        self.__current_cell_col = None
        self.__last_cell_row = None
        # Bug fix: this line previously assigned __last_cell_row a second
        # time, leaving __last_cell_col undefined until first use and
        # raising AttributeError in try_switch_to_error_tab.
        self.__last_cell_col = None
        self.__current_invocation_count = 0

        # Widgets
        self.__widget_resultwindow = resultwindow
        self.__widget_table = QTableWidget(self)
        self.__currently_processing_custom_context_menu_request = False

        self.__widget_label_title = QLabel("", parent=self)

        self.__widget_label_invocation_count = QLabel("Invocation count: ", parent=self)
        self.__widget_label_invocation_count.setStatusTip("Select the invocation of the stencil")

        self.__widget_combobox_invocation_count = QComboBox(self)
        self.__widget_combobox_invocation_count.currentIndexChanged.connect(
            self.set_invocation_count)

        self.__widget_checkbox_draw_success = QCheckBox(self)
        self.__widget_checkbox_draw_success.setIcon(Icon("success.png"))
        self.__widget_checkbox_draw_success.setChecked(True)
        self.__widget_checkbox_draw_success.stateChanged[int].connect(self.set_draw_success)
        self.__widget_checkbox_draw_success.setStatusTip("Show success icons")

        self.__widget_checkbox_draw_failure = QCheckBox(self)
        self.__widget_checkbox_draw_failure.setIcon(Icon("failure-small.png"))
        self.__widget_checkbox_draw_failure.setChecked(True)
        self.__widget_checkbox_draw_failure.stateChanged[int].connect(self.set_draw_failure)
        self.__widget_checkbox_draw_failure.setStatusTip("Show failure icons")

        self.__widget_label_result = QLabel("", parent=self)
        self.__widget_label_result_icon = QLabel("", parent=self)
        self.__widget_label_loading = QLabel("", parent=self)

        # Layout: title + icon toggles / invocation selector / table / status bar
        vbox = QVBoxLayout()

        hbox_top = QHBoxLayout()
        hbox_top.addWidget(self.__widget_label_title)
        hbox_top.addStretch(1)
        hbox_top.addWidget(self.__widget_checkbox_draw_success)
        hbox_top.addWidget(self.__widget_checkbox_draw_failure)
        vbox.addLayout(hbox_top)

        hbox_middle = QHBoxLayout()
        hbox_middle.addWidget(self.__widget_label_invocation_count)
        hbox_middle.addWidget(self.__widget_combobox_invocation_count)
        hbox_middle.addStretch(1)
        vbox.addLayout(hbox_middle)

        vbox.addWidget(self.__widget_table)

        hbox_bottom = QHBoxLayout()
        hbox_bottom.addWidget(self.__widget_label_result)
        hbox_bottom.addWidget(self.__widget_label_result_icon)
        hbox_bottom.addStretch(1)
        hbox_bottom.addWidget(self.__widget_label_loading)
        vbox.addLayout(hbox_bottom)

        self.setLayout(vbox)

    def make_update(self):
        """Refresh the whole widget from the mapper's current comparison results."""
        Logger.info("Updating ResultTableWidget")
        self.__comparison_result_list = self.__stencil_field_mapper.comparison_result_list

        self.__widget_label_title.setText(
            "<b>%s</b>" % self.__comparison_result_list.shared_stencil_name())

        # Set current invocation count
        if self.__current_invocation_count >= self.__comparison_result_list.invocation_count():
            self.__current_invocation_count = 0

        num_errors = 0

        # Set invocation count widgets and compute errors
        if self.__comparison_result_list.invocation_count() >= 1:
            self.__widget_combobox_invocation_count.clear()
            for i in range(self.__comparison_result_list.invocation_count()):
                self.__widget_combobox_invocation_count.addItem("%i" % i)
                for result in self.__comparison_result_list.results(i):
                    num_errors += not result["match"]
        else:
            # No comparison found, roll back
            self.__widget_resultwindow.widget_mainwindow.popup_error_box(
                "<b>No valid comparisons</b><br/>No valid comparison were "
                "computed for the selected stencil pair.")
            self.__widget_resultwindow.make_back()

        # The selector is only useful when there is more than one invocation.
        self.__widget_label_invocation_count.setEnabled(
            self.__comparison_result_list.invocation_count() > 1)
        self.__widget_combobox_invocation_count.setEnabled(
            self.__comparison_result_list.invocation_count() > 1)

        # Update the table
        self.update_table()

        # Set bottom message and display a funny gif ;)
        if num_errors != 0:
            self.__widget_label_result_icon.clear()
            self.__widget_label_result.setText(
                "<b>%s error%s detected</b>" % (num_errors, "s" if num_errors > 1 else ""))
            self.__widget_label_result.setStyleSheet("QLabel {color: #B72424}")
        else:
            if random() < 0.2:
                rnd = random()
                if rnd < 0.33:
                    movie = Movie("dance_1.gif")
                    movie.setScaledSize(QSize(21, 25))
                elif rnd < 0.66:
                    movie = Movie("dance_2.gif")
                    movie.setScaledSize(QSize(42, 25))
                else:
                    movie = Movie("dance_3.gif")
                    movie.setScaledSize(QSize(20, 25))
                self.__widget_label_result_icon.setMovie(movie)
                movie.start()
            else:
                self.__widget_label_result_icon.clear()
            self.__widget_label_result.setText("<b>No errors detected! Hurray!</b>")
            self.__widget_label_result.setStyleSheet("QLabel {color: #478E40}")

    def set_invocation_count(self, idx):
        """Slot for the invocation combo box; re-populates the table."""
        if idx < 0:
            self.__current_invocation_count = 0
        else:
            self.__current_invocation_count = int(
                self.__widget_combobox_invocation_count.itemText(idx))
        self.update_table()

    def update_table(self):
        """Rebuild the table for the currently selected invocation."""
        # Compute stages and fields
        stages = []
        fields = []
        fields_tooltip = []
        num_errors = 0
        first_error_cell = None

        for result in self.__comparison_result_list.results(self.__current_invocation_count):
            stages += [make_stage_name(result)]
            input_field = result["input_field_name"]
            reference_field = result["reference_field_name"]
            if input_field == reference_field:
                fields += [input_field]
                fields_tooltip += ["Field: \"%s\"" % input_field]
            else:
                fields += [make_shared_name(input_field, reference_field)]
                fields_tooltip += [
                    "Input field: \"%s\", Reference field: \"%s\"" % (input_field, reference_field)]
            num_errors += not result["match"]

        stages = make_unqiue_and_preserve_order(stages)
        fields = make_unqiue_and_preserve_order(fields)
        fields_tooltip = make_unqiue_and_preserve_order(fields_tooltip)

        # Setup headers of table
        rows = len(fields)
        cols = len(stages)
        self.__widget_table.setRowCount(rows)
        self.__widget_table.setColumnCount(cols)
        self.__table_data = [([None] * cols) for row in range(rows)]
        self.__widget_table.setHorizontalHeaderLabels(stages)
        self.__widget_table.setVerticalHeaderLabels(fields)
        self.__widget_table.setStyleSheet(
            '''
            QTableWidget::item:selected:active {
                background: #FFFFFF;
                border-style: solid;
                border-color: #D4D8DD;
                border-width: 2px;
            }
            ''')
        for i in range(self.__widget_table.rowCount()):
            item = self.__widget_table.verticalHeaderItem(i)
            item.setToolTip(fields_tooltip[i])
        self.__widget_table.horizontalHeader().resizeSections(QHeaderView.Stretch)
        self.__widget_table.setEditTriggers(QTableWidget.NoEditTriggers)
        self.__widget_table.setContextMenuPolicy(Qt.CustomContextMenu)
        self.__widget_table.cellClicked[int, int].connect(self.cell_left_clicked)
        self.__widget_table.customContextMenuRequested[QPoint].connect(self.cell_right_clicked)

        # Populate table
        for result in self.__comparison_result_list.results(self.__current_invocation_count):
            stage_idx = stages.index(make_stage_name(result))
            input_field_name = result["input_field_name"]
            if input_field_name in fields:
                field_idx = fields.index(input_field_name)
            else:
                field_idx = fields.index(
                    make_shared_name(input_field_name, result["reference_field_name"]))

            # Widget
            cell = ResultTableCellWidget(result["match"])
            cell.set_icon(self.__draw_success_icons, self.__draw_failure_icons)
            self.__widget_table.setCellWidget(field_idx, stage_idx, cell)

            # Save the first error for selection
            if not result["match"] and not first_error_cell:
                first_error_cell = [field_idx, stage_idx]

            # Item
            cell_item = QTableWidgetItem("")
            self.__widget_table.setItem(field_idx, stage_idx, cell_item)

            # Data
            self.__table_data[field_idx][stage_idx] = result

        # Emulate "left" click on first error
        if num_errors != 0:
            self.__widget_table.setCurrentCell(first_error_cell[0], first_error_cell[1])
            self.cell_left_clicked(first_error_cell[0], first_error_cell[1])

    def set_draw_success(self, state):
        """Slot: toggle drawing of success icons."""
        self.__draw_success_icons = True if state == Qt.Checked else False
        self.update_icons()

    def set_draw_failure(self, state):
        """Slot: toggle drawing of failure icons."""
        self.__draw_failure_icons = True if state == Qt.Checked else False
        self.update_icons()

    def update_icons(self):
        """Re-apply the icon visibility flags to every populated cell."""
        for i in range(self.__widget_table.rowCount()):
            for j in range(self.__widget_table.columnCount()):
                if self.__widget_table.cellWidget(i, j):
                    self.__widget_table.cellWidget(i, j).set_icon(self.__draw_success_icons,
                                                                  self.__draw_failure_icons)

    def set_current_cell(self, item):
        """Remember the row/column of *item* (or clear when item is None)."""
        if item:
            self.__current_cell_row = item.row()
            self.__current_cell_col = item.column()
        else:
            self.__current_cell_row = None
            self.__current_cell_col = None

    def cell_right_clicked(self, point):
        """Context-menu request: select the cell under *point* and try the Error tab."""
        self.set_current_cell(self.__widget_table.itemAt(point))
        self.try_switch_to_error_tab()

    def cell_left_clicked(self, row, column):
        """Left click: remember the clicked cell."""
        self.set_current_cell(self.__widget_table.item(row, column))

    def try_switch_to_error_tab(self):
        """Switch to the Error tab for the current cell.

        Returns True on success; False when the cell matches, no cell is
        selected, or the input/reference shapes differ (an error box is
        shown instead in that case).
        """
        # Typo fix in the log message ("swtich" -> "switch").
        Logger.info("Attempting to switch to Error tab")

        cur_row = self.__current_cell_row
        cur_col = self.__current_cell_col

        if cur_row is not None and cur_col is not None:
            result_data = self.__table_data[cur_row][cur_col]
            if not result_data["match"]:
                mainwindow = self.__widget_resultwindow.widget_mainwindow

                # Check if dimensions match, if not display an error message and abort
                if not result_data.shapes_match():
                    # We only display the error message that the dimensions mismatch once. If we
                    # don't do this it will popup two error message.. don't ask me why :(
                    if self.__last_cell_row == cur_row and self.__last_cell_col == cur_col:
                        return False
                    self.__last_cell_row = cur_row
                    self.__last_cell_col = cur_col

                    errmsg = "<b>Dimension mismatch</b><br/>"
                    errmsg += "Input '%s': %s<br/>" % (
                        result_data["input_field_name"], result_data.input_shape)
                    errmsg += "Reference '%s': %s" % (
                        result_data["reference_field_name"], result_data.reference_shape)
                    mainwindow.popup_error_box(errmsg)
                    return False
                else:
                    mainwindow.error_window_set_result_data(result_data)
                    mainwindow.switch_to_tab(TabState.Error)
                    self.__last_cell_row = self.__last_cell_col = None
                    return True
        return False
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils.expressions import explain, explain_analyze
from tests import TestCase
class ExpressionTestCase(TestCase):
    """Shared fixture for the EXPLAIN expression tests."""

    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'

    def create_models(self):
        class Article(self.Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            content = sa.Column(sa.UnicodeText)

        self.Article = Article

    def assert_startswith(self, query, query_part):
        """Assert the compiled SQL starts with *query_part* and executes cleanly."""
        compiled = str(query.compile(dialect=postgresql.dialect()))
        assert compiled.startswith(query_part)
        # Check that query executes properly
        self.session.execute(query)
class TestExplain(ExpressionTestCase):
    """EXPLAIN rendering for each supported option flag."""

    def test_render_explain(self):
        query = explain(self.session.query(self.Article))
        self.assert_startswith(query, 'EXPLAIN SELECT')

    def test_render_explain_with_analyze(self):
        query = explain(self.session.query(self.Article), analyze=True)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true) SELECT')

    def test_with_string_as_stmt_param(self):
        query = explain('SELECT 1 FROM article')
        self.assert_startswith(query, 'EXPLAIN SELECT')

    def test_format(self):
        query = explain('SELECT 1 FROM article', format='json')
        self.assert_startswith(query, 'EXPLAIN (FORMAT json) SELECT')

    def test_timing(self):
        query = explain('SELECT 1 FROM article', analyze=True, timing=False)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true, TIMING false) SELECT')

    def test_verbose(self):
        query = explain('SELECT 1 FROM article', verbose=True)
        self.assert_startswith(query, 'EXPLAIN (VERBOSE true) SELECT')

    def test_buffers(self):
        query = explain('SELECT 1 FROM article', analyze=True, buffers=True)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true, BUFFERS true) SELECT')

    def test_costs(self):
        query = explain('SELECT 1 FROM article', costs=False)
        self.assert_startswith(query, 'EXPLAIN (COSTS false) SELECT')
class TestExplainAnalyze(ExpressionTestCase):
    """EXPLAIN ANALYZE rendering via the explain_analyze shortcut."""

    def test_render_explain_analyze(self):
        query = explain_analyze(self.session.query(self.Article))
        compiled = str(query.compile(dialect=postgresql.dialect()))
        assert compiled.startswith('EXPLAIN (ANALYZE true) SELECT')
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils.expressions import explain, explain_analyze
from tests import TestCase
class ExpressionTestCase(TestCase):
    """Shared fixture for the EXPLAIN expression tests."""

    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'

    def create_models(self):
        class Article(self.Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            content = sa.Column(sa.UnicodeText)

        self.Article = Article

    def assert_startswith(self, query, query_part):
        """Assert the compiled SQL starts with *query_part* and executes cleanly."""
        compiled = str(query.compile(dialect=postgresql.dialect()))
        assert compiled.startswith(query_part)
        # Check that query executes properly
        self.session.execute(query)
class TestExplain(ExpressionTestCase):
    """EXPLAIN rendering for each supported option flag."""

    def test_render_explain(self):
        query = explain(self.session.query(self.Article))
        self.assert_startswith(query, 'EXPLAIN SELECT')

    def test_render_explain_with_analyze(self):
        query = explain(self.session.query(self.Article), analyze=True)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true) SELECT')

    def test_with_string_as_stmt_param(self):
        query = explain('SELECT 1 FROM article')
        self.assert_startswith(query, 'EXPLAIN SELECT')

    def test_format(self):
        query = explain('SELECT 1 FROM article', format='json')
        self.assert_startswith(query, 'EXPLAIN (FORMAT json) SELECT')

    def test_timing(self):
        query = explain('SELECT 1 FROM article', analyze=True, timing=False)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true, TIMING false) SELECT')

    def test_verbose(self):
        query = explain('SELECT 1 FROM article', verbose=True)
        self.assert_startswith(query, 'EXPLAIN (VERBOSE true) SELECT')

    def test_buffers(self):
        query = explain('SELECT 1 FROM article', analyze=True, buffers=True)
        self.assert_startswith(query, 'EXPLAIN (ANALYZE true, BUFFERS true) SELECT')

    def test_costs(self):
        query = explain('SELECT 1 FROM article', costs=False)
        self.assert_startswith(query, 'EXPLAIN (COSTS false) SELECT')
class TestExplainAnalyze(ExpressionTestCase):
    """EXPLAIN ANALYZE rendering via the explain_analyze shortcut."""

    def test_render_explain_analyze(self):
        query = explain_analyze(self.session.query(self.Article))
        compiled = str(query.compile(dialect=postgresql.dialect()))
        assert compiled.startswith('EXPLAIN (ANALYZE true) SELECT')
from __future__ import division
from collections import namedtuple
def _mixin_alpha(colors, alpha):
ratio = alpha / 255
return [int(round(color * ratio)) for color in colors]
class Color(object):
    """An RGBA color with 8-bit channels; alpha defaults to fully opaque."""

    __slots__ = 'red', 'green', 'blue', 'alpha'

    def __init__(self, red, green, blue, alpha=255):
        self.red = red
        self.green = green
        self.blue = blue
        self.alpha = alpha

    def __str__(self):
        return 'Color: r:%s, g:%s, b:%s, a:%s' % (self.red, self.green, self.blue, self.alpha)

    def __repr__(self):
        return '<%s>' % self

    def __hash__(self):
        return hash((self.red, self.green, self.blue, self.alpha))

    def __eq__(self, other):
        return (
            self.red == other.red and
            self.green == other.green and
            self.blue == other.blue and
            self.alpha == other.alpha
        )

    @classmethod
    def from_pixel(cls, pixel):
        """
        Convert a pixel (list of 3-4 values) to a Color instance.
        """
        assert len(pixel) in (3, 4), "Color.from_pixel only supports 3 and 4 value pixels"
        return cls(*map(int, list(pixel)))

    @classmethod
    def from_hexcode(cls, hexcode):
        """
        Convert hexcode to RGB/RGBA.
        """
        hexcode = hexcode.strip('#')
        assert len(hexcode) in (3, 4, 6, 8), "Hex codes must be 3, 4, 6 or 8 characters long"
        if len(hexcode) in (3, 4):
            # Short form: each digit stands for a doubled digit ('f' -> 'ff').
            hexcode = ''.join(x * 2 for x in hexcode)
        return cls(*[int(''.join(x), 16) for x in zip(hexcode[::2], hexcode[1::2])])

    def get_for_brightness(self, brightness):
        """
        Brightness is a float between 0 and 1; only the alpha channel is scaled.
        """
        return Color(self.red, self.green, self.blue, int(round((self.alpha + 1) * brightness)) - 1)

    def cover_with(self, cover_color):
        """
        Mix the two colors respecting their alpha value.
        Puts cover_color over itself compositing the colors using the alpha
        values.
        """
        # fastpath for solid colors
        if cover_color.alpha == 255:
            return Color(cover_color.red, cover_color.green, cover_color.blue, cover_color.alpha)
        # Standard "over" alpha compositing with normalized alpha values.
        srca = cover_color.alpha / 255
        dsta = self.alpha / 255
        outa = srca + dsta * (1 - srca)
        srcr, srcg, srcb = cover_color.red, cover_color.green, cover_color.blue
        dstr, dstg, dstb = self.red, self.green, self.blue
        outr = (srcr * srca + dstr * dsta * (1 - srca)) / outa
        outg = (srcg * srca + dstg * dsta * (1 - srca)) / outa
        outb = (srcb * srca + dstb * dsta * (1 - srca)) / outa
        red = int(round(outr))
        green = int(round(outg))
        blue = int(round(outb))
        alpha = int(round(outa * 255))
        return Color(red, green, blue, alpha)

    def to_pixel(self, pixelsize):
        """
        Convert to pixel (list of 3-4 values)
        """
        assert pixelsize in (3, 4), "Color.to_pixel only supports 3 and 4 value pixels"
        if pixelsize == 3:
            # No alpha channel available: premultiply alpha into the channels.
            return _mixin_alpha([self.red, self.green, self.blue], self.alpha)
        else:
            return [self.red, self.green, self.blue, self.alpha]

    def to_hexcode(self):
        """
        Convert to an 8-character RGBA hexcode.
        """
        # Bug fix: hex(x)[2:] produced one digit for channels < 16, yielding
        # wrong-length hexcodes that from_hexcode could not round-trip;
        # '%02x' zero-pads each channel to two digits.
        return ''.join('%02x' % x for x in (self.red, self.green, self.blue, self.alpha))
# Pixel layout descriptors: channel count and whether an alpha channel exists.
# (Fix: dataset-separator residue fused onto the RGBA line was removed.)
ColorType = namedtuple('ColorType', 'length alpha')
RGB = ColorType(3, False)
RGBA = ColorType(4, True)
from collections import namedtuple
def _mixin_alpha(colors, alpha):
ratio = alpha / 255
return [int(round(color * ratio)) for color in colors]
class Color(object):
    """An RGBA color with 8-bit channels; alpha defaults to fully opaque."""

    __slots__ = 'red', 'green', 'blue', 'alpha'

    def __init__(self, red, green, blue, alpha=255):
        self.red = red
        self.green = green
        self.blue = blue
        self.alpha = alpha

    def __str__(self):
        return 'Color: r:%s, g:%s, b:%s, a:%s' % (self.red, self.green, self.blue, self.alpha)

    def __repr__(self):
        return '<%s>' % self

    def __hash__(self):
        return hash((self.red, self.green, self.blue, self.alpha))

    def __eq__(self, other):
        return (
            self.red == other.red and
            self.green == other.green and
            self.blue == other.blue and
            self.alpha == other.alpha
        )

    @classmethod
    def from_pixel(cls, pixel):
        """
        Convert a pixel (list of 3-4 values) to a Color instance.
        """
        assert len(pixel) in (3, 4), "Color.from_pixel only supports 3 and 4 value pixels"
        return cls(*map(int, list(pixel)))

    @classmethod
    def from_hexcode(cls, hexcode):
        """
        Convert hexcode to RGB/RGBA.
        """
        hexcode = hexcode.strip('#')
        assert len(hexcode) in (3, 4, 6, 8), "Hex codes must be 3, 4, 6 or 8 characters long"
        if len(hexcode) in (3, 4):
            # Short form: each digit stands for a doubled digit ('f' -> 'ff').
            hexcode = ''.join(x * 2 for x in hexcode)
        return cls(*[int(''.join(x), 16) for x in zip(hexcode[::2], hexcode[1::2])])

    def get_for_brightness(self, brightness):
        """
        Brightness is a float between 0 and 1; only the alpha channel is scaled.
        """
        return Color(self.red, self.green, self.blue, int(round((self.alpha + 1) * brightness)) - 1)

    def cover_with(self, cover_color):
        """
        Mix the two colors respecting their alpha value.
        Puts cover_color over itself compositing the colors using the alpha
        values.
        """
        # fastpath for solid colors
        if cover_color.alpha == 255:
            return Color(cover_color.red, cover_color.green, cover_color.blue, cover_color.alpha)
        # Standard "over" alpha compositing with normalized alpha values.
        srca = cover_color.alpha / 255
        dsta = self.alpha / 255
        outa = srca + dsta * (1 - srca)
        srcr, srcg, srcb = cover_color.red, cover_color.green, cover_color.blue
        dstr, dstg, dstb = self.red, self.green, self.blue
        outr = (srcr * srca + dstr * dsta * (1 - srca)) / outa
        outg = (srcg * srca + dstg * dsta * (1 - srca)) / outa
        outb = (srcb * srca + dstb * dsta * (1 - srca)) / outa
        red = int(round(outr))
        green = int(round(outg))
        blue = int(round(outb))
        alpha = int(round(outa * 255))
        return Color(red, green, blue, alpha)

    def to_pixel(self, pixelsize):
        """
        Convert to pixel (list of 3-4 values)
        """
        assert pixelsize in (3, 4), "Color.to_pixel only supports 3 and 4 value pixels"
        if pixelsize == 3:
            # No alpha channel available: premultiply alpha into the channels.
            return _mixin_alpha([self.red, self.green, self.blue], self.alpha)
        else:
            return [self.red, self.green, self.blue, self.alpha]

    def to_hexcode(self):
        """
        Convert to an 8-character RGBA hexcode.
        """
        # Bug fix: hex(x)[2:] produced one digit for channels < 16, yielding
        # wrong-length hexcodes that from_hexcode could not round-trip;
        # '%02x' zero-pads each channel to two digits.
        return ''.join('%02x' % x for x in (self.red, self.green, self.blue, self.alpha))
# Pixel layout descriptors: channel count and whether an alpha channel exists.
# (Fix: dataset-separator residue fused onto the RGBA line was removed.)
ColorType = namedtuple('ColorType', 'length alpha')
RGB = ColorType(3, False)
RGBA = ColorType(4, True)
import lx, modo, replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
    """Saves the current Macro() object to the destination stored in its
    `file_path` property. If `file_path` is `None`, prompt for a destination. Unlike
    `replay.fileExport`, this command only supports saving to the LXM format."""

    # Last directory used in the save dialog, remembered across invocations.
    _path = lx.eval('query platformservice alias ? {scripts:untitled}')

    def commander_arguments(self):
        """Declare the single optional 'path' string argument."""
        return [
            {
                'name': 'path',
                'datatype': 'string',
                'flags': ['optional']
            }
        ]

    def commander_execute(self, msg, flags):
        """Save the macro as LXM, then re-open the saved file."""
        # Stop recording so the macro is in a stable state before saving.
        lx.eval('replay.record stop')

        macro = replay.Macro()
        # Fix: `file_path` was initialized to None and immediately tested with
        # "if file_path is None", a tautology — the branch always ran and the
        # earlier `file_format = macro.file_format` was always overwritten.
        # This command always saves as LXM.
        file_format = "lxm"

        # Try to get the path from the command line:
        file_path = self.commander_arg_value(0)

        # Prompt the user
        if not file_path:
            file_path = modo.dialogs.customFile(
                dtype='fileSave',
                title=message("MECCO_REPLAY", "SAVE_DIALOG_TITLE"),
                names=('LXM',),
                unames=('LXM file',),
                ext=('LXM',),
                path=self._path
            )
            # User cancelled the dialog.
            if file_path is None:
                return
            self.__class__._path = file_path

        # And save it for the next time
        macro.file_path = file_path
        macro.render(file_format, file_path)

        lx.eval('!!replay.fileClose')
        lx.eval('replay.fileOpen {%s}' % file_path)

        # Add to recently-opened
        lx.eval('replay.fileOpenAddRecent {%s}' % file_path)

    def basic_Enable(self, msg):
        """Disable 'Save As' when there is nothing to save."""
        return not replay.Macro().is_empty
# Register the command with MODO. (Fix: dataset-separator residue that was
# fused onto this line was removed.)
lx.bless(CommandClass, 'replay.fileSaveAs')
import lx, modo, replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
    """Saves the current Macro() object to the destination stored in its
    `file_path` property. If `file_path` is `None`, prompt for a destination. Unlike
    `replay.fileExport`, this command only supports saving to the LXM format."""

    # Last directory used in the save dialog, remembered across invocations.
    _path = lx.eval('query platformservice alias ? {scripts:untitled}')

    def commander_arguments(self):
        """Declare the single optional 'path' string argument."""
        return [
            {
                'name': 'path',
                'datatype': 'string',
                'flags': ['optional']
            }
        ]

    def commander_execute(self, msg, flags):
        """Save the macro as LXM, then re-open the saved file."""
        # Stop recording so the macro is in a stable state before saving.
        lx.eval('replay.record stop')

        macro = replay.Macro()
        # Fix: `file_path` was initialized to None and immediately tested with
        # "if file_path is None", a tautology — the branch always ran and the
        # earlier `file_format = macro.file_format` was always overwritten.
        # This command always saves as LXM.
        file_format = "lxm"

        # Try to get the path from the command line:
        file_path = self.commander_arg_value(0)

        # Prompt the user
        if not file_path:
            file_path = modo.dialogs.customFile(
                dtype='fileSave',
                title=message("MECCO_REPLAY", "SAVE_DIALOG_TITLE"),
                names=('LXM',),
                unames=('LXM file',),
                ext=('LXM',),
                path=self._path
            )
            # User cancelled the dialog.
            if file_path is None:
                return
            self.__class__._path = file_path

        # And save it for the next time
        macro.file_path = file_path
        macro.render(file_format, file_path)

        lx.eval('!!replay.fileClose')
        lx.eval('replay.fileOpen {%s}' % file_path)

        # Add to recently-opened
        lx.eval('replay.fileOpenAddRecent {%s}' % file_path)

    def basic_Enable(self, msg):
        """Disable 'Save As' when there is nothing to save."""
        return not replay.Macro().is_empty
# Register the command with MODO. (Fix: dataset-separator residue that was
# fused onto this line was removed.)
lx.bless(CommandClass, 'replay.fileSaveAs')
from pyspark import SparkContext, keyword_only
from pyspark.ml.common import _java2py
from pyspark.ml.param import Param
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol, HasCheckpointInterval
from pyspark.ml.util import JavaMLWritable, JavaPredictionModel
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from sparkxgb.util import XGBoostReadable
class JavaParamsOverrides(object):
    """
    Mixin for overriding methods derived from JavaParams.
    """

    # Define a fix similar to SPARK-10931 (For Spark <2.3)
    def _create_params_from_java(self):
        """
        Create params that are defined in the Java obj but not here
        """
        # Fix: removed the redundant function-local
        # `from pyspark.ml.param import Param`; Param is already imported
        # at module level.
        java_params = list(self._java_obj.params())
        for java_param in java_params:
            java_param_name = java_param.name()
            if not hasattr(self, java_param_name):
                param = Param(self, java_param_name, java_param.doc())
                setattr(param, "created_from_java_param", True)
                setattr(self, java_param_name, param)
        self._params = None  # need to reset so self.params will discover new params

    # Backport SPARK-10931 (For Spark <2.3)
    def _transfer_params_from_java(self):
        """
        Transforms the embedded params from the companion Java object.
        """
        sc = SparkContext._active_spark_context
        for param in self.params:
            if self._java_obj.hasParam(param.name):
                java_param = self._java_obj.getParam(param.name)
                # SPARK-14931: Only check set params back to avoid default params mismatch.
                if self._java_obj.isSet(java_param):
                    value = _java2py(sc, self._java_obj.getOrDefault(java_param))
                    self._set(**{param.name: value})
                # SPARK-10931: Temporary fix for params that have a default in Java
                if self._java_obj.hasDefault(java_param) and not self.isDefined(param):
                    value = _java2py(sc, self._java_obj.getDefault(java_param)).get()
                    self._setDefault(**{param.name: value})

    # Override the "_from_java" method, so we can read our objects.
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java object, create and return a Python wrapper of it.
        """
        # Create a new instance of this stage.
        py_stage = cls()
        # Load information from java_stage to the instance.
        py_stage._java_obj = java_stage
        py_stage._create_params_from_java()
        py_stage._resetUid(java_stage.uid())
        py_stage._transfer_params_from_java()
        return py_stage
class XGBoostEstimator(JavaParamsOverrides, JavaEstimator, HasCheckpointInterval, HasFeaturesCol, HasLabelCol,
                       HasPredictionCol, HasWeightCol, JavaMLWritable, XGBoostReadable):
    """
    A PySpark implementation of ml.dmlc.xgboost4j.scala.spark.XGBoostEstimator.
    """

    @keyword_only
    def __init__(self,
                 # General Params
                 checkpoint_path="", checkpointInterval=-1, missing=None, nthread=1, nworkers=1, silent=0,
                 use_external_memory=False,
                 # Column Params
                 baseMarginCol="baseMargin", featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol="weight",
                 # Booster Params
                 base_score=0.5, booster="gbtree", eval_metric="error", num_class=2, num_round=2,
                 objective="binary:logistic", seed=None,
                 # Tree Booster Params
                 alpha=0.0, colsample_bytree=1.0, colsample_bylevel=1.0, eta=0.3, gamma=0.0, grow_policy='depthwise',
                 max_bin=256, max_delta_step=0.0, max_depth=6, min_child_weight=1.0, reg_lambda=0.0,
                 scale_pos_weight=1.0, sketch_eps=0.03, subsample=1.0, tree_method="auto",
                 # Dart Booster Params
                 normalize_type="tree", rate_drop=0.0, sample_type="uniform", skip_drop=0.0,
                 # Linear Booster Params
                 lambda_bias=0.0):
        super(XGBoostEstimator, self).__init__()
        # Create the companion Java estimator and mirror its params locally.
        self._java_obj = self._new_java_obj("ml.dmlc.xgboost4j.scala.spark.XGBoostEstimator", self.uid)
        self._create_params_from_java()
        self._setDefault(
            # Column Params
            featuresCol="features", labelCol="label", predictionCol="prediction",
            weightCol="weight", baseMarginCol="baseMargin",
            # Booster Params
            objective="binary:logistic", eval_metric="error", num_round=2)

        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self,
                  # General Params
                  checkpoint_path="", checkpointInterval=-1, missing=None, nthread=1, nworkers=1, silent=0,
                  use_external_memory=False,
                  # Column Params
                  baseMarginCol="baseMargin", featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol="weight",
                  # Booster Params
                  base_score=0.5, booster="gbtree", eval_metric="error", num_class=2, num_round=2,
                  objective="binary:logistic", seed=None,
                  # Tree Booster Params
                  alpha=0.0, colsample_bytree=1.0, colsample_bylevel=1.0, eta=0.3, gamma=0.0, grow_policy='depthwise',
                  max_bin=256, max_delta_step=0.0, max_depth=6, min_child_weight=1.0, reg_lambda=0.0,
                  scale_pos_weight=1.0, sketch_eps=0.03, subsample=1.0, tree_method="auto",
                  # Dart Booster Params
                  normalize_type="tree", rate_drop=0.0, sample_type="uniform", skip_drop=0.0,
                  # Linear Booster Params
                  lambda_bias=0.0):
        """Set params for this estimator; see ``__init__`` for the full list."""
        kwargs = self._input_kwargs_processed()
        return self._set(**kwargs)

    def _input_kwargs_processed(self):
        """
        Until consensus on parameter names can be achieved, we must rename kwargs which would break python.
        """
        kwargs = self._input_kwargs
        # 'lambda' is a Python keyword, so the public kwarg is 'reg_lambda'.
        if "reg_lambda" in kwargs:
            kwargs["lambda"] = kwargs.pop("reg_lambda")
        return kwargs

    def _create_model(self, java_model):
        """
        Create the correct python object for the model type.
        """
        java_package = java_model.getClass().getName()
        java_class = java_package.split('.')[-1]

        if java_class == 'XGBoostClassificationModel':
            return XGBoostClassificationModel(java_model)
        elif java_class == 'XGBoostRegressionModel':
            return XGBoostRegressionModel(java_model)
        else:
            # Grammar fix in the error message ("cannot loaded" -> "cannot be loaded").
            raise NotImplementedError("This XGBoost model type cannot be loaded into Python currently: %r"
                                      % java_class)
class XGBoostClassificationModel(JavaParamsOverrides, JavaModel, JavaPredictionModel, JavaMLWritable, XGBoostReadable):
    """
    Python wrapper around ml.dmlc.xgboost4j.scala.spark.XGBoostClassificationModel.
    """

    def __init__(self, java_model=None):
        """
        Override JavaModel.__init__ so this wrapper mirrors the Java object's params.
        """
        super(XGBoostClassificationModel, self).__init__(java_model)
        if java_model is None:
            return
        # Pull params, uid and current values over from the Java side.
        self._create_params_from_java()
        self._resetUid(java_model.uid())
        self._transfer_params_from_java()

    @property
    def numClasses(self):
        """
        Number of classes (values which the label can take).
        """
        return self._call_java("numClasses")

    def setThresholds(self, value):
        """
        Sets the value of :py:attr:`thresholds`.
        """
        return self._set(thresholds=value)

    def getThresholds(self):
        """
        Gets the value of thresholds or its default value.
        """
        return self.getOrDefault(self.thresholds)

    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)

    def getRawPredictionCol(self):
        """
        Gets the value of rawPredictionCol or its default value.
        """
        return self.getOrDefault(self.rawPredictionCol)
class XGBoostRegressionModel(JavaParamsOverrides, JavaModel, JavaPredictionModel, JavaMLWritable, XGBoostReadable):
    """
    Python wrapper around ml.dmlc.xgboost4j.scala.spark.XGBoostRegressionModel.
    """

    def __init__(self, java_model=None):
        """
        Override JavaModel.__init__ so this wrapper mirrors the Java object's params.
        """
        super(XGBoostRegressionModel, self).__init__(java_model)
        if java_model is None:
            return
        # Pull params, uid and current values over from the Java side.
        self._create_params_from_java()
        self._resetUid(java_model.uid())
        self._transfer_params_from_java()
from pyspark import SparkContext, keyword_only
from pyspark.ml.common import _java2py
from pyspark.ml.param import Param
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol, HasCheckpointInterval
from pyspark.ml.util import JavaMLWritable, JavaPredictionModel
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from sparkxgb.util import XGBoostReadable
class JavaParamsOverrides(object):
    """
    Mixin for overriding methods derived from JavaParams.
    """

    # Define a fix similar to SPARK-10931 (For Spark <2.3)
    def _create_params_from_java(self):
        """
        Create params that are defined in the Java obj but not here
        """
        # Fix: removed the redundant function-local
        # `from pyspark.ml.param import Param`; Param is already imported
        # at module level.
        java_params = list(self._java_obj.params())
        for java_param in java_params:
            java_param_name = java_param.name()
            if not hasattr(self, java_param_name):
                param = Param(self, java_param_name, java_param.doc())
                setattr(param, "created_from_java_param", True)
                setattr(self, java_param_name, param)
        self._params = None  # need to reset so self.params will discover new params

    # Backport SPARK-10931 (For Spark <2.3)
    def _transfer_params_from_java(self):
        """
        Transforms the embedded params from the companion Java object.
        """
        sc = SparkContext._active_spark_context
        for param in self.params:
            if self._java_obj.hasParam(param.name):
                java_param = self._java_obj.getParam(param.name)
                # SPARK-14931: Only check set params back to avoid default params mismatch.
                if self._java_obj.isSet(java_param):
                    value = _java2py(sc, self._java_obj.getOrDefault(java_param))
                    self._set(**{param.name: value})
                # SPARK-10931: Temporary fix for params that have a default in Java
                if self._java_obj.hasDefault(java_param) and not self.isDefined(param):
                    value = _java2py(sc, self._java_obj.getDefault(java_param)).get()
                    self._setDefault(**{param.name: value})

    # Override the "_from_java" method, so we can read our objects.
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java object, create and return a Python wrapper of it.
        """
        # Create a new instance of this stage.
        py_stage = cls()
        # Load information from java_stage to the instance.
        py_stage._java_obj = java_stage
        py_stage._create_params_from_java()
        py_stage._resetUid(java_stage.uid())
        py_stage._transfer_params_from_java()
        return py_stage
class XGBoostEstimator(JavaParamsOverrides, JavaEstimator, HasCheckpointInterval, HasFeaturesCol, HasLabelCol,
                       HasPredictionCol, HasWeightCol, JavaMLWritable, XGBoostReadable):
    """
    A PySpark implementation of ml.dmlc.xgboost4j.scala.spark.XGBoostEstimator.
    """

    @keyword_only
    def __init__(self,
                 # General Params
                 checkpoint_path="", checkpointInterval=-1, missing=None, nthread=1, nworkers=1, silent=0,
                 use_external_memory=False,
                 # Column Params
                 baseMarginCol="baseMargin", featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol="weight",
                 # Booster Params
                 base_score=0.5, booster="gbtree", eval_metric="error", num_class=2, num_round=2,
                 objective="binary:logistic", seed=None,
                 # Tree Booster Params
                 alpha=0.0, colsample_bytree=1.0, colsample_bylevel=1.0, eta=0.3, gamma=0.0, grow_policy='depthwise',
                 max_bin=256, max_delta_step=0.0, max_depth=6, min_child_weight=1.0, reg_lambda=0.0,
                 scale_pos_weight=1.0, sketch_eps=0.03, subsample=1.0, tree_method="auto",
                 # Dart Booster Params
                 normalize_type="tree", rate_drop=0.0, sample_type="uniform", skip_drop=0.0,
                 # Linear Booster Params
                 lambda_bias=0.0):
        """Create the companion Scala estimator and mirror its params."""
        super(XGBoostEstimator, self).__init__()
        self._java_obj = self._new_java_obj("ml.dmlc.xgboost4j.scala.spark.XGBoostEstimator", self.uid)
        self._create_params_from_java()
        self._setDefault(
            # Column Params
            featuresCol="features", labelCol="label", predictionCol="prediction",
            weightCol="weight", baseMarginCol="baseMargin",
            # Booster Params
            objective="binary:logistic", eval_metric="error", num_round=2)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self,
                  # General Params
                  checkpoint_path="", checkpointInterval=-1, missing=None, nthread=1, nworkers=1, silent=0,
                  use_external_memory=False,
                  # Column Params
                  baseMarginCol="baseMargin", featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol="weight",
                  # Booster Params
                  base_score=0.5, booster="gbtree", eval_metric="error", num_class=2, num_round=2,
                  objective="binary:logistic", seed=None,
                  # Tree Booster Params
                  alpha=0.0, colsample_bytree=1.0, colsample_bylevel=1.0, eta=0.3, gamma=0.0, grow_policy='depthwise',
                  max_bin=256, max_delta_step=0.0, max_depth=6, min_child_weight=1.0, reg_lambda=0.0,
                  scale_pos_weight=1.0, sketch_eps=0.03, subsample=1.0, tree_method="auto",
                  # Dart Booster Params
                  normalize_type="tree", rate_drop=0.0, sample_type="uniform", skip_drop=0.0,
                  # Linear Booster Params
                  lambda_bias=0.0):
        """Set params for this estimator, renaming python-breaking kwargs."""
        return self._set(**self._input_kwargs_processed())

    def _input_kwargs_processed(self):
        """
        Until consensus on parameter names can be achieved, we must rename kwargs which would break python.
        """
        kwargs = self._input_kwargs
        # 'lambda' is a Python keyword, so the Python API exposes reg_lambda.
        if "reg_lambda" in kwargs:
            kwargs["lambda"] = kwargs.pop("reg_lambda")
        return kwargs

    def _create_model(self, java_model):
        """Wrap *java_model* in the matching Python model class."""
        java_class = java_model.getClass().getName().split('.')[-1]
        if java_class == 'XGBoostClassificationModel':
            return XGBoostClassificationModel(java_model)
        if java_class == 'XGBoostRegressionModel':
            return XGBoostRegressionModel(java_model)
        raise NotImplementedError("This XGBoost model type cannot loaded into Python currently: %r"
                                  % java_class)
class XGBoostClassificationModel(JavaParamsOverrides, JavaModel, JavaPredictionModel, JavaMLWritable, XGBoostReadable):
    """
    A PySpark implementation of ml.dmlc.xgboost4j.scala.spark.XGBoostClassificationModel.
    """

    def __init__(self, java_model=None):
        """Override JavaModel.__init__ to pull params off the Java model."""
        super(XGBoostClassificationModel, self).__init__(java_model)
        if java_model is None:
            return
        # Mirror params only present on the model object, then copy values.
        self._create_params_from_java()
        self._resetUid(java_model.uid())
        self._transfer_params_from_java()

    @property
    def numClasses(self):
        """Number of classes (values which the label can take)."""
        return self._call_java("numClasses")

    def setThresholds(self, value):
        """Sets the value of :py:attr:`thresholds`."""
        return self._set(thresholds=value)

    def getThresholds(self):
        """Gets the value of thresholds or its default value."""
        return self.getOrDefault(self.thresholds)

    def setRawPredictionCol(self, value):
        """Sets the value of :py:attr:`rawPredictionCol`."""
        return self._set(rawPredictionCol=value)

    def getRawPredictionCol(self):
        """Gets the value of rawPredictionCol or its default value."""
        return self.getOrDefault(self.rawPredictionCol)
class XGBoostRegressionModel(JavaParamsOverrides, JavaModel, JavaPredictionModel, JavaMLWritable, XGBoostReadable):
    """
    A PySpark implementation of ml.dmlc.xgboost4j.scala.spark.XGBoostRegressionModel.
    """

    def __init__(self, java_model=None):
        """Override JavaModel.__init__ to pull params off the Java model."""
        super(XGBoostRegressionModel, self).__init__(java_model)
        if java_model is None:
            return
        # Mirror params only present on the model object, then copy values.
        self._create_params_from_java()
        self._resetUid(java_model.uid())
        self._transfer_params_from_java()
from django.core.management.base import BaseCommand, CommandError
from gwasdb.models import Phenotype
import requests
class Command(BaseCommand):
    """Index AraPheno phenotypes in the AraGWASCatalog database."""

    help = 'Index AraPheno phenotypes in AraGWASCatalog'

    def add_arguments(self, parser):
        parser.add_argument('--id',
                            dest='phenotype_id',
                            type=int,
                            default=None,
                            help='Specify a primary key to index a specific phenotype. If empty will check entire phenotype list.')
        # NOTE(review): argparse's type=bool turns any non-empty string
        # (including "False") into True; action='store_true' would be the
        # usual fix but changes the CLI, so only flagging it here.
        parser.add_argument('--update',
                            dest='update',
                            type=bool,
                            default=False,
                            help='Update existing phenotypes.')

    def handle(self, *args, **options):
        """Fetch phenotypes from the AraPheno REST API and store new ones.

        Raises CommandError if fetching or saving fails.
        """
        phenotype_id = options.get('phenotype_id', None)
        update = options.get('update', False)
        try:
            if phenotype_id:
                # Fix: fetch only the requested phenotype. Previously this
                # branch hit the list endpoint and wrapped the whole list in
                # another list, so the loop below iterated over a list
                # instead of a dict and crashed on pheno['phenotype_id'].
                r = requests.get('https://arapheno.1001genomes.org/rest/phenotype/%s.json' % phenotype_id)
                phenos_arapheno = [r.json()]
            else:
                # Retrieve list of all phenotypes from AraPheno.
                r = requests.get('https://arapheno.1001genomes.org/rest/phenotype/list.json')
                phenos_arapheno = r.json()
            # Primary keys already present; materialize once as a set so the
            # membership test below is O(1) and does not re-query the DB.
            ids_aragwas = set(Phenotype.objects.all().values_list('id', flat=True))
            counter = 0
            for pheno in phenos_arapheno:
                if pheno['phenotype_id'] not in ids_aragwas or update:
                    p = Phenotype(
                        pk=pheno['phenotype_id'],
                        name=pheno['name'],
                        study_name=pheno['study'],
                        description=pheno['scoring'],
                        date=pheno['integration_date'],
                        arapheno_link="https://arapheno.1001genomes.org/phenotype/" + str(pheno['phenotype_id']),
                        trait_ontology_id=pheno['to_term'] if pheno['to_term'] is not None else "",
                        trait_ontology_name=pheno['to_name'] if pheno['to_name'] is not None else "",
                        trait_ontology_description=pheno['to_definition'])
                    p.save()
                    counter += 1
            print(str(counter) + ' new phenotype(s) added to the database.')
        except Exception as err:
            raise CommandError(
                'Error saving phenotypes. Reason: %s' % str(err))
from gwasdb.models import Phenotype
import requests
class Command(BaseCommand):
    """Index AraPheno phenotypes in the AraGWASCatalog database."""

    help = 'Index AraPheno phenotypes in AraGWASCatalog'

    def add_arguments(self, parser):
        parser.add_argument('--id',
                            dest='phenotype_id',
                            type=int,
                            default=None,
                            help='Specify a primary key to index a specific phenotype. If empty will check entire phenotype list.')
        # NOTE(review): type=bool treats every non-empty argument string as
        # True ("--update False" still enables updating) — consider
        # action='store_true'; left unchanged to keep the CLI stable.
        parser.add_argument('--update',
                            dest='update',
                            type=bool,
                            default=False,
                            help='Update existing phenotypes.')

    def handle(self, *args, **options):
        """Sync AraPheno phenotypes into the local Phenotype table."""
        phenotype_id = options.get('phenotype_id', None)
        update = options.get('update', False)
        try:
            if phenotype_id:
                # Fix: request the single-phenotype endpoint. The old code
                # fetched list.json here and wrapped the list in a list,
                # which broke the per-phenotype dict access below.
                r = requests.get('https://arapheno.1001genomes.org/rest/phenotype/%s.json' % phenotype_id)
                phenos_arapheno = [r.json()]
            else:
                # Retrieve the full phenotype list from AraPheno.
                r = requests.get('https://arapheno.1001genomes.org/rest/phenotype/list.json')
                phenos_arapheno = r.json()
            # Existing primary keys, hoisted into a set for O(1) lookups.
            ids_aragwas = set(Phenotype.objects.all().values_list('id', flat=True))
            counter = 0
            for pheno in phenos_arapheno:
                if pheno['phenotype_id'] not in ids_aragwas or update:
                    p = Phenotype(
                        pk=pheno['phenotype_id'],
                        name=pheno['name'],
                        study_name=pheno['study'],
                        description=pheno['scoring'],
                        date=pheno['integration_date'],
                        arapheno_link="https://arapheno.1001genomes.org/phenotype/" + str(pheno['phenotype_id']),
                        trait_ontology_id=pheno['to_term'] if pheno['to_term'] is not None else "",
                        trait_ontology_name=pheno['to_name'] if pheno['to_name'] is not None else "",
                        trait_ontology_description=pheno['to_definition'])
                    p.save()
                    counter += 1
            print(str(counter) + ' new phenotype(s) added to the database.')
        except Exception as err:
            raise CommandError(
                'Error saving phenotypes. Reason: %s' % str(err))
from pycket import config
from pycket import values, values_string
from pycket.base import SingletonMeta, UnhashableType
from pycket.hash.base import W_HashTable, get_dict_item, next_valid_index, w_missing
from pycket.error import SchemeException
from pycket.cont import continuation, loop_label
from rpython.rlib import rerased, jit
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.rlib.objectmodel import compute_hash, import_from_mixin, r_dict, specialize
import sys
def elidable_iff(pred):
    """Decorator factory: call a JIT-elidable copy of the function when
    running jitted and ``pred(*args)`` holds, else the plain version."""
    def wrapper(func):
        @jit.elidable
        def elidable_version(*args):
            return func(*args)

        def dispatch(*args):
            if jit.we_are_jitted() and pred(*args):
                return elidable_version(*args)
            return func(*args)
        return dispatch
    return wrapper
@loop_label
def equal_hash_ref_loop(data, idx, key, env, cont):
    """CPS loop: scan bucket `data` from `idx` for an entry equal? to `key`.

    Returns w_missing through `cont` when the bucket is exhausted; the
    per-entry comparison is delegated to the (possibly continuation-based)
    equal? implementation, with the result handled by the continuation below.
    """
    from pycket.interpreter import return_value
    from pycket.prims.equal import equal_func_unroll_n, EqualInfo
    if idx >= len(data):
        return return_value(w_missing, env, cont)  # not found
    k, v = data[idx]
    info = EqualInfo.BASIC_SINGLETON
    cont = catch_ref_is_equal_cont(data, idx, key, v, env, cont)
    # Unroll up to 5 levels of the equal? comparison before falling back.
    return equal_func_unroll_n(k, key, info, env, cont, 5)


@continuation
def catch_ref_is_equal_cont(data, idx, key, v, env, cont, _vals):
    """Continuation of the ref loop: deliver `v` on a match, else continue
    scanning at the next bucket index."""
    from pycket.interpreter import check_one_val, return_value
    val = check_one_val(_vals)
    if val is not values.w_false:
        return return_value(v, env, cont)
    return equal_hash_ref_loop(data, idx + 1, key, env, cont)
def equal_hash_set_loop(data, idx, key, val, env, cont):
    """CPS loop: overwrite the bucket entry equal? to `key`, or append a
    fresh (key, val) pair when no entry matches."""
    from pycket.interpreter import check_one_val, return_value
    from pycket.prims.equal import equal_func, EqualInfo
    if idx >= len(data):
        data.append((key, val))  # no match anywhere: new entry
        return return_value(values.w_void, env, cont)
    k, _ = data[idx]
    info = EqualInfo.BASIC_SINGLETON
    return equal_func(k, key, info, env,
                      catch_set_is_equal_cont(data, idx, key, val, env, cont))


@continuation
def catch_set_is_equal_cont(data, idx, key, val, env, cont, _vals):
    """Continuation of the set loop: replace the entry on a match, else
    continue scanning at the next bucket index."""
    from pycket.interpreter import check_one_val, return_value
    cmp = check_one_val(_vals)
    if cmp is not values.w_false:
        data[idx] = (key, val)
        return return_value(values.w_void, env, cont)
    return equal_hash_set_loop(data, idx + 1, key, val, env, cont)
class HashmapStrategy(object):
    """Abstract strategy describing how a W_EqualHashTable stores data.

    Concrete strategies implement the storage-specific operations; the
    iteration defaults below assume dense integer positions 0..length-1.
    """
    __metaclass__ = SingletonMeta

    def get(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def set(self, w_dict, w_key, w_val, env, cont):
        raise NotImplementedError("abstract base class")

    def rem(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def rem_inplace(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def items(self, w_dict):
        raise NotImplementedError("abstract base class")

    def get_item(self, w_dict, i):
        raise NotImplementedError("abstract base class")

    def hash_iterate_next(self, w_dict, i):
        # Default: a position is just an index; w_false signals the end.
        pos = i.value
        if pos >= self.length(w_dict) - 1:
            return values.w_false
        return values.wrap(pos + 1)

    def hash_iterate_first(self, w_dict):
        return 0

    def length(self, w_dict):
        raise NotImplementedError("abstract base class")

    def create_storage(self, keys, vals):
        raise NotImplementedError("abstract base class")
@jit.look_inside_iff(lambda keys:
    jit.loop_unrolling_heuristic(
        keys, len(keys), values.UNROLLING_CUTOFF))
def _find_strategy_class(keys):
    """Pick the storage strategy matching the keys' (homogeneous) type.

    Any mixed-type key list falls back to the generic object strategy.
    """
    if not config.strategies:
        return ObjectHashmapStrategy.singleton
    if len(keys) == 0:
        # An empty vector stays empty forever. Don't implement special EmptyVectorStrategy.
        return EmptyHashmapStrategy.singleton
    key_class = type(keys[0])
    for w_key in keys:
        if not isinstance(w_key, key_class):
            return ObjectHashmapStrategy.singleton
    if key_class is values.W_Fixnum:
        return FixnumHashmapStrategy.singleton
    if key_class is values.W_Symbol:
        return SymbolHashmapStrategy.singleton
    if key_class is values_string.W_String:
        return StringHashmapStrategy.singleton
    if key_class is values.W_ImmutableBytes:
        return ImmutableByteHashmapStrategy.singleton
    if key_class is values.W_MutableBytes:
        return MutableByteHashmapStrategy.singleton
    return ObjectHashmapStrategy.singleton
class UnwrappedHashmapStrategyMixin(object):
    """Shared implementation for strategies that store unwrapped keys.

    The concrete class needs to implement: erase, unerase, is_correct_type,
    wrap, unwrap. create_storage needs to be overwritten if an r_dict is
    needed (done here indirectly via _create_empty_dict).
    """

    @staticmethod
    @elidable_iff(
        lambda w_dict: jit.isconstant(w_dict) and w_dict.is_immutable)
    def get_hstorage(w_dict):
        # Elidable for constant immutable tables: storage never changes.
        return w_dict.hstorage

    def get_storage(self, w_dict):
        return self.unerase(self.get_hstorage(w_dict))

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        if not self.is_correct_type(w_key):
            # Key of a foreign type: fall back to the generic strategy.
            # XXX should not dehomogenize always
            self.switch_to_object_strategy(w_dict)
            return w_dict.hash_ref(w_key, env, cont)
        w_res = self.get_storage(w_dict).get(self.unwrap(w_key), w_missing)
        return return_value(w_res, env, cont)

    def set(self, w_dict, w_key, w_val, env, cont):
        from pycket.interpreter import return_value
        if not self.is_correct_type(w_key):
            self.switch_to_object_strategy(w_dict)
            return w_dict.hash_set(w_key, w_val, env, cont)
        self.get_storage(w_dict)[self.unwrap(w_key)] = w_val
        return return_value(values.w_void, env, cont)

    def _set(self, w_dict, w_key, w_val):
        # Unsafe variant: only valid for keys of the strategy's own type.
        if not self.is_correct_type(w_key):
            raise KeyError
        self.unerase(w_dict.hstorage)[self.unwrap(w_key)] = w_val

    def rem_inplace(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        if not self.is_correct_type(w_key):
            raise KeyError
        storage = self.unerase(w_dict.hstorage)
        unwrapped = self.unwrap(w_key)
        if unwrapped in storage:
            del storage[unwrapped]
        return return_value(values.w_void, env, cont)

    def items(self, w_dict):
        storage = self.unerase(w_dict.hstorage)
        return [(self.wrap(k), w_v) for k, w_v in storage.iteritems()]

    def get_item(self, w_dict, i):
        k, w_v = get_dict_item(self.unerase(w_dict.hstorage), i)
        return self.wrap(k), w_v

    def length(self, w_dict):
        return len(self.unerase(w_dict.hstorage))

    def create_storage(self, keys, vals):
        storage = self._create_empty_dict()
        if keys:
            for i, w_key in enumerate(keys):
                storage[self.unwrap(w_key)] = vals[i]
        return self.erase(storage)

    def _create_empty_dict(self):
        return {}

    def switch_to_object_strategy(self, w_dict):
        # Rewrap all keys and hand the table over to the generic strategy.
        storage = self.unerase(w_dict.hstorage)
        w_keys = [self.wrap(k) for k in storage.keys()]
        w_vals = storage.values()
        new_strategy = ObjectHashmapStrategy.singleton
        new_storage = new_strategy.create_storage(w_keys, w_vals)
        w_dict.strategy = new_strategy
        w_dict.hstorage = new_storage
class EmptyHashmapStrategy(HashmapStrategy):
    """Strategy for tables that hold no entries yet.

    Lookups and removals trivially miss; the first insertion picks a
    concrete strategy from the key's type and retries the operation.
    """
    # Fix: this erasing pair was tagged "object-hashmap-strategy",
    # copy-pasted from ObjectHashmapStrategy. The tag is a debug name, but
    # it should be unique per pair.
    erase, unerase = rerased.new_static_erasing_pair("empty-hashmap-strategy")

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(w_missing, env, cont)  # contains nothing

    def set(self, w_dict, w_key, w_val, env, cont):
        self.switch_to_correct_strategy(w_dict, w_key)
        return w_dict.hash_set(w_key, w_val, env, cont)

    def rem(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(w_dict, env, cont)  # there's nothing to remove

    def _set(self, w_dict, w_key, w_val):
        self.switch_to_correct_strategy(w_dict, w_key)
        return w_dict._set(w_key, w_val)

    def rem_inplace(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(values.w_void, env, cont)  # there's nothing to remove

    def items(self, w_dict):
        return []

    def get_item(self, w_dict, i):
        raise IndexError

    def length(self, w_dict):
        return 0

    def create_storage(self, keys, vals):
        assert not keys
        assert not vals
        return self.erase(None)

    def switch_to_correct_strategy(self, w_dict, w_key):
        """Install the unwrapped strategy matching the first key's type."""
        if type(w_key) is values.W_Fixnum:
            strategy = FixnumHashmapStrategy.singleton
        elif type(w_key) is values.W_Symbol:
            strategy = SymbolHashmapStrategy.singleton
        elif isinstance(w_key, values_string.W_String):
            strategy = StringHashmapStrategy.singleton
        elif isinstance(w_key, values.W_ImmutableBytes):
            strategy = ImmutableByteHashmapStrategy.singleton
        elif isinstance(w_key, values.W_MutableBytes):
            strategy = MutableByteHashmapStrategy.singleton
        else:
            strategy = ObjectHashmapStrategy.singleton
        storage = strategy.create_storage([], [])
        w_dict.strategy = strategy
        w_dict.hstorage = storage
# Tag for keys whose equal-hash cannot be computed. Real hashes are
# left-shifted one bit (always even), so they can never collide with
# this odd tag.
UNHASHABLE_TAG = 0b0001

def tagged_hash(w_object):
    """Return w_object's equal-hash shifted left by one bit, or
    UNHASHABLE_TAG for unhashable values."""
    try:
        h = w_object.hash_equal()
    except UnhashableType:
        return UNHASHABLE_TAG
    return h << 1
class ObjectHashmapStrategy(HashmapStrategy):
    """Generic fallback strategy.

    Storage maps the tagged equal-hash of a key to a bucket (python list)
    of (w_key, w_value) pairs; lookups walk the bucket with Racket's
    ``equal?`` via the CPS loops defined above.
    """
    erase, unerase = rerased.new_static_erasing_pair("object-hashmap-strategy")
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    def get_bucket(self, w_dict, w_key, nonull=False):
        # Bucket holding w_key's hash; with nonull=True a missing bucket is
        # created (used by set), otherwise None is returned.
        hash = tagged_hash(w_key)
        storage = self.get_storage(w_dict)
        bucket = storage.get(hash, None)
        if nonull and bucket is None:
            storage[hash] = bucket = []
        return bucket

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        bucket = self.get_bucket(w_dict, w_key)
        if not bucket:
            return return_value(w_missing, env, cont)
        return equal_hash_ref_loop(bucket, 0, w_key, env, cont)

    def set(self, w_dict, w_key, w_val, env, cont):
        bucket = self.get_bucket(w_dict, w_key, nonull=True)
        return equal_hash_set_loop(bucket, 0, w_key, w_val, env, cont)

    def rem_inplace(self, w_dict, w_key, env, cont):
        raise NotImplementedError("hash-remove! not supported for ObjectHashmapStrategy")

    def rem(self, w_dict, w_key, env, cont):
        # Functional removal: rebuild the table without w_key. Note the
        # identity (`is`) comparison here, not equal?.
        from pycket.interpreter import return_value
        if not w_dict.immutable():
            raise SchemeException("Expected an immutable hash table")
        new_keys = []
        new_vals = []
        for (k, v) in w_dict.hash_items():
            if k is w_key:
                continue
            new_keys.append(k)
            new_vals.append(v)
        assert isinstance(w_dict, W_EqualHashTable)
        new_table = W_EqualHashTable(new_keys, new_vals, True)
        return return_value(new_table, env, cont)

    def _set(self, w_dict, w_key, w_val):
        raise NotImplementedError("Unsafe set not supported for ObjectHashmapStrategy")

    def items(self, w_dict):
        # Flatten all buckets into one list of (key, value) pairs.
        items = []
        storage = self.unerase(w_dict.hstorage)
        for bucket in storage.itervalues():
            for item in bucket:
                items.append(item)
        return items

    # On 32-bit builds an iteration position is a plain flattened index;
    # on 64-bit builds it packs (bucket-subindex << 32) | dict-index into
    # a single fixnum-sized word.
    if sys.maxint == 2147483647:
        def get_item(self, w_dict, i):
            storage = self.unerase(w_dict.hstorage)
            for bucket in storage.itervalues():
                size = len(bucket)
                if size > i:
                    return bucket[i]
                i -= size
            raise IndexError
    else:
        @staticmethod
        def _valid_bucket(v):
            # A (hash, bucket) dict item is valid iff its bucket is non-empty.
            return bool(v[1])

        def get_item(self, w_dict, i):
            from pycket.hash.persistent_hash_map import MASK_32
            storage = self.unerase(w_dict.hstorage)
            assert i >= 0
            i = r_uint(i)
            index = i & MASK_32  # low word: dict item index
            subindex = (i >> 32) & MASK_32  # high word: index within bucket
            bucket = get_dict_item(storage, index)[1]
            if bucket is None:
                raise IndexError
            return bucket[subindex]

        def hash_iterate_next(self, w_dict, pos):
            from pycket.hash.persistent_hash_map import MASK_32
            storage = self.unerase(w_dict.hstorage)
            i = r_uint(pos.value)
            assert i >= 0
            index = r_uint(i & MASK_32)
            subindex = r_uint((i >> 32) & MASK_32)
            bucket = get_dict_item(storage, index)[1]
            subindex += 1
            if subindex == r_uint(len(bucket)):
                # Current bucket exhausted: move to the next non-empty one.
                subindex = r_uint(0)
                try:
                    next = next_valid_index(storage, intmask(index),
                                            valid=self._valid_bucket)
                except IndexError:
                    return values.w_false
                index = r_uint(next)
            next = intmask((subindex << r_uint(32)) | index)
            return values.wrap(next)

        def hash_iterate_first(self, w_dict):
            # NOTE(review): passes w_dict where hash_iterate_next passes the
            # unerased storage dict -- confirm next_valid_index accepts the
            # table object here.
            return next_valid_index(w_dict, 0, valid=self._valid_bucket)

    def length(self, w_dict):
        # Total entry count = sum of all bucket sizes.
        storage = self.unerase(w_dict.hstorage)
        size = 0
        for bucket in storage.itervalues():
            size += len(bucket)
        return size

    def create_storage(self, keys, vals):
        storage = {}
        for i, key in enumerate(keys):
            val = vals[i]
            hash = tagged_hash(key)
            bucket = storage.get(hash, None)
            if bucket is None:
                storage[hash] = bucket = []
            bucket.append((key, val))
        return self.erase(storage)
class FixnumHashmapStrategy(HashmapStrategy):
    """Unwrapped storage for tables keyed exclusively by fixnums."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    erase, unerase = rerased.new_static_erasing_pair("fixnum-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_Fixnum)

    def wrap(self, val):
        assert isinstance(val, int)
        return values.W_Fixnum(val)

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_Fixnum)
        return w_val.value
class SymbolHashmapStrategy(HashmapStrategy):
    """Unwrapped storage for tables keyed exclusively by symbols."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    erase, unerase = rerased.new_static_erasing_pair("symbol-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_Symbol)

    def wrap(self, val):
        assert isinstance(val, values.W_Symbol)
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_Symbol)
        return w_val

    def rem(self, w_dict, w_key, env, cont):
        """Functional removal: rebuild the table without w_key.

        Only valid on immutable tables. Keys are compared by identity,
        which is sound for interned symbols.
        """
        from pycket.interpreter import return_value
        if not w_dict.immutable():
            # Fix: raise SchemeException (as ObjectHashmapStrategy.rem does
            # for the identical condition) instead of a bare Exception, so
            # the error flows through the interpreter's error machinery.
            raise SchemeException("Expected an immutable hash table")
        new_keys = []
        new_vals = []
        for (k, v) in w_dict.hash_items():
            if k is w_key:
                continue
            new_keys.append(k)
            new_vals.append(v)
        assert isinstance(w_dict, W_EqualHashTable)
        new_table = W_EqualHashTable(new_keys, new_vals, True)
        return return_value(new_table, env, cont)
def hash_strings(w_b):
    """r_dict hash function for W_String keys."""
    assert isinstance(w_b, values_string.W_String)
    return w_b.hash_equal()


def cmp_strings(w_a, w_b):
    """r_dict equality function for W_String keys."""
    assert isinstance(w_a, values_string.W_String)
    assert isinstance(w_b, values_string.W_String)
    return w_a.equal(w_b)
class StringHashmapStrategy(HashmapStrategy):
    """Unwrapped storage for tables keyed exclusively by strings."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    erase, unerase = rerased.new_static_erasing_pair("string-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values_string.W_String)

    def wrap(self, w_val):
        # Strings are stored wrapped; wrap/unwrap are identities.
        return w_val

    def unwrap(self, w_val):
        return w_val

    def _create_empty_dict(self):
        # Strings hash and compare by value, so an r_dict is required.
        return r_dict(cmp_strings, hash_strings)
def hash_mutable_bytes(w_b):
    """r_dict hash function for W_MutableBytes keys."""
    assert isinstance(w_b, values.W_MutableBytes)
    return w_b.hash_equal()


def hash_immutable_bytes(w_b):
    """r_dict hash function for W_ImmutableBytes keys."""
    assert isinstance(w_b, values.W_ImmutableBytes)
    return w_b.hash_equal()


def cmp_mutable_bytes(w_a, w_b):
    """r_dict equality function for W_MutableBytes keys."""
    assert isinstance(w_a, values.W_MutableBytes)
    assert isinstance(w_b, values.W_MutableBytes)
    return w_a.value == w_b.value


def cmp_immutable_bytes(w_a, w_b):
    """r_dict equality function for W_ImmutableBytes keys."""
    assert isinstance(w_a, values.W_ImmutableBytes)
    assert isinstance(w_b, values.W_ImmutableBytes)
    return w_a.value == w_b.value
class MutableByteHashmapStrategy(HashmapStrategy):
    """Unwrapped storage for tables keyed by mutable byte strings."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    # Fix: this pair shared the debug tag "byte-hashmap-strategy" with
    # ImmutableByteHashmapStrategy; give each pair a unique name.
    erase, unerase = rerased.new_static_erasing_pair("mutable-byte-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_MutableBytes)

    def wrap(self, val):
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_MutableBytes)
        return w_val

    def _create_empty_dict(self):
        # Bytes hash and compare by value, so an r_dict is required.
        return r_dict(cmp_mutable_bytes, hash_mutable_bytes)
class ImmutableByteHashmapStrategy(HashmapStrategy):
    """Unwrapped storage for tables keyed by immutable byte strings."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    # Fix: this pair shared the debug tag "byte-hashmap-strategy" with
    # MutableByteHashmapStrategy; give each pair a unique name.
    erase, unerase = rerased.new_static_erasing_pair("immutable-byte-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_ImmutableBytes)

    def wrap(self, val):
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_ImmutableBytes)
        return w_val

    def _create_empty_dict(self):
        # Bytes hash and compare by value, so an r_dict is required.
        return r_dict(cmp_immutable_bytes, hash_immutable_bytes)
class W_EqualHashTable(W_HashTable):
    """equal?-based hash table; all storage is delegated to a strategy
    object chosen from the key types at construction time."""
    _attrs_ = ['strategy', 'hstorage', 'is_immutable']
    _immutable_fields_ = ['is_immutable']

    def __init__(self, keys, vals, immutable=False):
        self.is_immutable = immutable
        self.strategy = _find_strategy_class(keys)
        self.hstorage = self.strategy.create_storage(keys, vals)

    def immutable(self):
        return self.is_immutable

    def hash_items(self):
        return self.strategy.items(self)

    def _set(self, key, val):
        return self.strategy._set(self, key, val)

    def hash_set(self, key, val, env, cont):
        return self.strategy.set(self, key, val, env, cont)

    def hash_equal(self, info=None):
        # NOTE(review): hashes only by table size, so equal-length tables
        # always collide -- cheap, but verify this is intentional.
        return self.length()

    def hash_ref(self, key, env, cont):
        return self.strategy.get(self, key, env, cont)

    def hash_remove(self, key, env, cont):
        return self.strategy.rem(self, key, env, cont)

    def hash_remove_inplace(self, key, env, cont):
        return self.strategy.rem_inplace(self, key, env, cont)

    def get_item(self, i):
        return self.strategy.get_item(self, i)

    def hash_iterate_next(self, pos):
        return self.strategy.hash_iterate_next(self, pos)

    def hash_iterate_first(self):
        return self.strategy.hash_iterate_first(self)

    def length(self):
        return self.strategy.length(self)

    def make_empty(self):
        return W_EqualHashTable([], [], immutable=self.is_immutable)

    def tostring(self):
        entries = [values.W_Cons.make(k, v).tostring() for k, v in self.hash_items()]
        return "#hash(%s)" % " ".join(entries)
from pycket import values, values_string
from pycket.base import SingletonMeta, UnhashableType
from pycket.hash.base import W_HashTable, get_dict_item, next_valid_index, w_missing
from pycket.error import SchemeException
from pycket.cont import continuation, loop_label
from rpython.rlib import rerased, jit
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.rlib.objectmodel import compute_hash, import_from_mixin, r_dict, specialize
import sys
# NOTE: the following definitions duplicate the ones earlier in this file.

def elidable_iff(pred):
    """Decorator factory: the wrapped function runs a JIT-elidable copy
    only when jitted code is executing and ``pred(*args)`` holds."""
    def wrapper(func):
        @jit.elidable
        def elidable(*args):
            return func(*args)
        def inner(*args):
            if jit.we_are_jitted() and pred(*args):
                return elidable(*args)
            return func(*args)
        return inner
    return wrapper


@loop_label
def equal_hash_ref_loop(data, idx, key, env, cont):
    """CPS loop: scan bucket `data` from `idx` for an entry equal? to `key`."""
    from pycket.interpreter import return_value
    from pycket.prims.equal import equal_func_unroll_n, EqualInfo
    if idx >= len(data):
        return return_value(w_missing, env, cont)  # not found
    k, v = data[idx]
    info = EqualInfo.BASIC_SINGLETON
    cont = catch_ref_is_equal_cont(data, idx, key, v, env, cont)
    return equal_func_unroll_n(k, key, info, env, cont, 5)


@continuation
def catch_ref_is_equal_cont(data, idx, key, v, env, cont, _vals):
    """Continuation of the ref loop: deliver v on a match, else scan on."""
    from pycket.interpreter import check_one_val, return_value
    val = check_one_val(_vals)
    if val is not values.w_false:
        return return_value(v, env, cont)
    return equal_hash_ref_loop(data, idx + 1, key, env, cont)


def equal_hash_set_loop(data, idx, key, val, env, cont):
    """CPS loop: overwrite the entry equal? to `key`, or append a new one."""
    from pycket.interpreter import check_one_val, return_value
    from pycket.prims.equal import equal_func, EqualInfo
    if idx >= len(data):
        data.append((key, val))
        return return_value(values.w_void, env, cont)
    k, _ = data[idx]
    info = EqualInfo.BASIC_SINGLETON
    return equal_func(k, key, info, env,
                      catch_set_is_equal_cont(data, idx, key, val, env, cont))


@continuation
def catch_set_is_equal_cont(data, idx, key, val, env, cont, _vals):
    """Continuation of the set loop: replace on a match, else scan on."""
    from pycket.interpreter import check_one_val, return_value
    cmp = check_one_val(_vals)
    if cmp is not values.w_false:
        data[idx] = (key, val)
        return return_value(values.w_void, env, cont)
    return equal_hash_set_loop(data, idx + 1, key, val, env, cont)
class HashmapStrategy(object):
    """Abstract storage-strategy interface (duplicate of the earlier
    definition in this file)."""
    __metaclass__ = SingletonMeta

    def get(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def set(self, w_dict, w_key, w_val, env, cont):
        raise NotImplementedError("abstract base class")

    def rem(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def rem_inplace(self, w_dict, w_key, env, cont):
        raise NotImplementedError("abstract base class")

    def items(self, w_dict):
        raise NotImplementedError("abstract base class")

    def get_item(self, w_dict, i):
        raise NotImplementedError("abstract base class")

    def hash_iterate_next(self, w_dict, i):
        # Default iteration: positions are dense indices 0..length-1;
        # w_false signals the end of iteration.
        index = i.value
        if index >= self.length(w_dict) - 1:
            return values.w_false
        return values.wrap(index + 1)

    def hash_iterate_first(self, w_dict):
        return 0

    def length(self, w_dict):
        raise NotImplementedError("abstract base class")

    def create_storage(self, keys, vals):
        raise NotImplementedError("abstract base class")
@jit.look_inside_iff(lambda keys:
    jit.loop_unrolling_heuristic(
        keys, len(keys), values.UNROLLING_CUTOFF))
def _find_strategy_class(keys):
    """Pick the storage strategy for the given keys (duplicate of the
    earlier definition): homogeneous key types get a specialized
    unwrapped strategy, anything else the generic object strategy."""
    if not config.strategies:
        return ObjectHashmapStrategy.singleton
    if len(keys) == 0:
        # An empty vector stays empty forever. Don't implement special EmptyVectorStrategy.
        return EmptyHashmapStrategy.singleton
    single_class = type(keys[0])
    for elem in keys:
        if not isinstance(elem, single_class):
            return ObjectHashmapStrategy.singleton
    if single_class is values.W_Fixnum:
        return FixnumHashmapStrategy.singleton
    if single_class is values.W_Symbol:
        return SymbolHashmapStrategy.singleton
    if single_class is values_string.W_String:
        return StringHashmapStrategy.singleton
    if single_class is values.W_ImmutableBytes:
        return ImmutableByteHashmapStrategy.singleton
    if single_class is values.W_MutableBytes:
        return MutableByteHashmapStrategy.singleton
    return ObjectHashmapStrategy.singleton
class UnwrappedHashmapStrategyMixin(object):
    """Shared implementation for unwrapped-key strategies (duplicate of
    the earlier definition).

    The concrete class needs to implement: erase, unerase, is_correct_type,
    wrap, unwrap. create_storage needs to be overwritten if an r_dict is
    needed (done here via _create_empty_dict).
    """

    @staticmethod
    @elidable_iff(
        lambda w_dict: jit.isconstant(w_dict) and w_dict.is_immutable)
    def get_hstorage(w_dict):
        # Elidable for constant immutable tables: storage never changes.
        return w_dict.hstorage

    def get_storage(self, w_dict):
        return self.unerase(self.get_hstorage(w_dict))

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        if self.is_correct_type(w_key):
            storage = self.get_storage(w_dict)
            w_res = storage.get(self.unwrap(w_key), w_missing)
            return return_value(w_res, env, cont)
        # Foreign key type: fall back to the generic object strategy.
        # XXX should not dehomogenize always
        self.switch_to_object_strategy(w_dict)
        return w_dict.hash_ref(w_key, env, cont)

    def set(self, w_dict, w_key, w_val, env, cont):
        from pycket.interpreter import return_value
        if self.is_correct_type(w_key):
            storage = self.get_storage(w_dict)
            storage[self.unwrap(w_key)] = w_val
            return return_value(values.w_void, env, cont)
        self.switch_to_object_strategy(w_dict)
        return w_dict.hash_set(w_key, w_val, env, cont)

    def _set(self, w_dict, w_key, w_val):
        # Unsafe variant: only valid for keys of the strategy's own type.
        if not self.is_correct_type(w_key):
            raise KeyError
        storage = self.unerase(w_dict.hstorage)
        key = self.unwrap(w_key)
        storage[key] = w_val

    def rem_inplace(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        if not self.is_correct_type(w_key):
            raise KeyError
        storage = self.unerase(w_dict.hstorage)
        key = self.unwrap(w_key)
        if key in storage:
            del storage[key]
        return return_value(values.w_void, env, cont)

    def items(self, w_dict):
        return [(self.wrap(key), w_val) for key, w_val in self.unerase(w_dict.hstorage).iteritems()]

    def get_item(self, w_dict, i):
        key, w_val = get_dict_item(self.unerase(w_dict.hstorage), i)
        return self.wrap(key), w_val

    def length(self, w_dict):
        return len(self.unerase(w_dict.hstorage))

    def create_storage(self, keys, vals):
        d = self._create_empty_dict()
        if not keys:
            return self.erase(d)
        for i, w_key in enumerate(keys):
            d[self.unwrap(w_key)] = vals[i]
        return self.erase(d)

    def _create_empty_dict(self):
        return {}

    def switch_to_object_strategy(self, w_dict):
        # Rewrap all keys and hand the table over to the generic strategy.
        # (The local `values` shadows the module import for the rest of
        # this method.)
        d = self.unerase(w_dict.hstorage)
        keys = [self.wrap(key) for key in d.keys()]
        values = d.values()
        strategy = ObjectHashmapStrategy.singleton
        storage = strategy.create_storage(keys, values)
        w_dict.strategy = strategy
        w_dict.hstorage = storage
class EmptyHashmapStrategy(HashmapStrategy):
    """Strategy for hash tables that hold no entries yet.

    The first insertion specialises the table on the key's type via
    switch_to_correct_strategy.
    """
    # NOTE(review): label appears copy-pasted from ObjectHashmapStrategy;
    # the erasing pair itself is still a distinct static pair.
    erase, unerase = rerased.new_static_erasing_pair("object-hashmap-strategy")

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(w_missing, env, cont) # contains nothing

    def set(self, w_dict, w_key, w_val, env, cont):
        # First write: pick a specialised strategy, then retry the set.
        self.switch_to_correct_strategy(w_dict, w_key)
        return w_dict.hash_set(w_key, w_val, env, cont)

    def rem(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(w_dict, env, cont) # there's nothing to remove

    def _set(self, w_dict, w_key, w_val):
        self.switch_to_correct_strategy(w_dict, w_key)
        return w_dict._set(w_key, w_val)

    def rem_inplace(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        return return_value(values.w_void, env, cont) # there's nothing to remove

    def items(self, w_dict):
        return []

    def get_item(self, w_dict, i):
        # No entries: every index is out of range.
        raise IndexError

    def length(self, w_dict):
        return 0

    def create_storage(self, keys, vals):
        assert not keys
        assert not vals
        return self.erase(None)

    def switch_to_correct_strategy(self, w_dict, w_key):
        # Specialise the table on the first key's type; exact-type checks
        # for fixnums/symbols, isinstance for the string/bytes hierarchies.
        if type(w_key) is values.W_Fixnum:
            strategy = FixnumHashmapStrategy.singleton
        elif type(w_key) is values.W_Symbol:
            strategy = SymbolHashmapStrategy.singleton
        elif isinstance(w_key, values_string.W_String):
            strategy = StringHashmapStrategy.singleton
        elif isinstance(w_key, values.W_ImmutableBytes):
            strategy = ImmutableByteHashmapStrategy.singleton
        elif isinstance(w_key, values.W_MutableBytes):
            strategy = MutableByteHashmapStrategy.singleton
        else:
            strategy = ObjectHashmapStrategy.singleton
        storage = strategy.create_storage([], [])
        w_dict.strategy = strategy
        w_dict.hstorage = storage
# Tag marking objects whose equal-hash cannot be computed.
UNHASHABLE_TAG = 0b0001

def tagged_hash(w_object):
    """Return w_object's equal-hash shifted left one bit.

    Hashable values thus always have a clear low bit, so UNHASHABLE_TAG
    (low bit set) can never collide with a real hash.
    """
    try:
        return w_object.hash_equal() << 1
    except UnhashableType:
        return UNHASHABLE_TAG
class ObjectHashmapStrategy(HashmapStrategy):
    """Generic fallback strategy for keys of arbitrary type.

    Storage maps each key's tagged equal-hash to a bucket (list) of
    (key, value) pairs; lookups then run full `equal?` comparison inside
    the bucket via equal_hash_ref_loop / equal_hash_set_loop.
    """
    erase, unerase = rerased.new_static_erasing_pair("object-hashmap-strategy")
    import_from_mixin(UnwrappedHashmapStrategyMixin)

    def get_bucket(self, w_dict, w_key, nonull=False):
        # Bucket holding all entries whose keys share w_key's tagged hash;
        # with nonull=True an empty bucket is created and installed.
        hash = tagged_hash(w_key)
        storage = self.get_storage(w_dict)
        bucket = storage.get(hash, None)
        if nonull and bucket is None:
            storage[hash] = bucket = []
        return bucket

    def get(self, w_dict, w_key, env, cont):
        from pycket.interpreter import return_value
        bucket = self.get_bucket(w_dict, w_key)
        if not bucket:
            return return_value(w_missing, env, cont)
        # Scan the bucket with full equal? comparison (CPS loop).
        return equal_hash_ref_loop(bucket, 0, w_key, env, cont)

    def set(self, w_dict, w_key, w_val, env, cont):
        bucket = self.get_bucket(w_dict, w_key, nonull=True)
        return equal_hash_set_loop(bucket, 0, w_key, w_val, env, cont)

    def rem_inplace(self, w_dict, w_key, env, cont):
        raise NotImplementedError("hash-remove! not supported for ObjectHashmapStrategy")

    def rem(self, w_dict, w_key, env, cont):
        # Functional removal: copy every entry except w_key (identity
        # comparison) into a fresh immutable table.
        from pycket.interpreter import return_value
        if not w_dict.immutable():
            raise SchemeException("Expected an immutable hash table")
        new_keys = []
        new_vals = []
        for (k, v) in w_dict.hash_items():
            if k is w_key:
                continue
            new_keys.append(k)
            new_vals.append(v)
        assert isinstance(w_dict, W_EqualHashTable)
        new_table = W_EqualHashTable(new_keys, new_vals, True)
        return return_value(new_table, env, cont)

    def _set(self, w_dict, w_key, w_val):
        raise NotImplementedError("Unsafe set not supported for ObjectHashmapStrategy")

    def items(self, w_dict):
        # Flatten all buckets into one (key, value) list.
        items = []
        storage = self.unerase(w_dict.hstorage)
        for bucket in storage.itervalues():
            for item in bucket:
                items.append(item)
        return items

    # On 32-bit builds (sys.maxint fits 31 bits) iteration indices are
    # resolved by a linear scan; on 64-bit builds the index packs
    # (dict-slot index, position-in-bucket) into the low/high 32 bits of
    # a single fixnum.
    if sys.maxint == 2147483647:
        def get_item(self, w_dict, i):
            storage = self.unerase(w_dict.hstorage)
            for bucket in storage.itervalues():
                size = len(bucket)
                if size > i:
                    return bucket[i]
                i -= size
            raise IndexError
    else:
        @staticmethod
        def _valid_bucket(v):
            # A dict slot is a valid iteration stop only if its bucket is
            # non-empty.
            return bool(v[1])

        def get_item(self, w_dict, i):
            from pycket.hash.persistent_hash_map import MASK_32
            storage = self.unerase(w_dict.hstorage)
            assert i >= 0
            i = r_uint(i)
            index = i & MASK_32              # dict-slot index (low 32 bits)
            subindex = (i >> 32) & MASK_32   # position in bucket (high 32 bits)
            bucket = get_dict_item(storage, index)[1]
            if bucket is None:
                raise IndexError
            return bucket[subindex]

        def hash_iterate_next(self, w_dict, pos):
            from pycket.hash.persistent_hash_map import MASK_32
            storage = self.unerase(w_dict.hstorage)
            i = r_uint(pos.value)
            assert i >= 0
            index = r_uint(i & MASK_32)
            subindex = r_uint((i >> 32) & MASK_32)
            bucket = get_dict_item(storage, index)[1]
            subindex += 1
            if subindex == r_uint(len(bucket)):
                # Bucket exhausted: advance to the next non-empty slot, or
                # report end of iteration with #f.
                subindex = r_uint(0)
                try:
                    next = next_valid_index(storage, intmask(index),
                                            valid=self._valid_bucket)
                except IndexError:
                    return values.w_false
                index = r_uint(next)
            next = intmask((subindex << r_uint(32)) | index)
            return values.wrap(next)

        def hash_iterate_first(self, w_dict):
            # NOTE(review): passes w_dict where hash_iterate_next passes the
            # unerased storage -- confirm next_valid_index accepts both.
            return next_valid_index(w_dict, 0, valid=self._valid_bucket)

    def length(self, w_dict):
        # Total number of entries across all buckets.
        storage = self.unerase(w_dict.hstorage)
        size = 0
        for bucket in storage.itervalues():
            size += len(bucket)
        return size

    def create_storage(self, keys, vals):
        storage = {}
        for i, key in enumerate(keys):
            val = vals[i]
            hash = tagged_hash(key)
            bucket = storage.get(hash, None)
            if bucket is None:
                storage[hash] = bucket = []
            bucket.append((key, val))
        return self.erase(storage)
class FixnumHashmapStrategy(HashmapStrategy):
    """Strategy specialised for fixnum keys, stored unwrapped as ints."""
    import_from_mixin(UnwrappedHashmapStrategyMixin)
    erase, unerase = rerased.new_static_erasing_pair("fixnum-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_Fixnum)

    def wrap(self, val):
        # Storage keys are plain ints; re-box on the way out.
        assert isinstance(val, int)
        return values.W_Fixnum(val)

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_Fixnum)
        return w_val.value
class SymbolHashmapStrategy(HashmapStrategy):
    """Strategy specialised for symbol keys.

    Symbols are kept wrapped in the storage dict; wrap/unwrap are (asserted)
    identities.
    """
    import_from_mixin(UnwrappedHashmapStrategyMixin)
    erase, unerase = rerased.new_static_erasing_pair("symbol-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_Symbol)

    def wrap(self, val):
        assert isinstance(val, values.W_Symbol)
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_Symbol)
        return w_val

    def rem(self, w_dict, w_key, env, cont):
        """Functional removal: build a new immutable table without w_key.

        Raises SchemeException on mutable tables.  (Previously raised a bare
        Exception; SchemeException matches ObjectHashmapStrategy.rem so the
        interpreter's error handling sees a consistent type.)
        """
        from pycket.interpreter import return_value
        if not w_dict.immutable():
            raise SchemeException("Expected an immutable hash table")
        new_keys = []
        new_vals = []
        # Keep every entry whose key is not (identity-wise) w_key; symbols
        # are interned, so identity comparison is sufficient here.
        for (k, v) in w_dict.hash_items():
            if k is w_key:
                continue
            new_keys.append(k)
            new_vals.append(v)
        assert isinstance(w_dict, W_EqualHashTable)
        new_table = W_EqualHashTable(new_keys, new_vals, True)
        return return_value(new_table, env, cont)
def hash_strings(w_b):
    """r_dict hash callback for string keys (equal-hash of the string)."""
    assert isinstance(w_b, values_string.W_String)
    return w_b.hash_equal()

def cmp_strings(w_a, w_b):
    """r_dict equality callback for string keys (value comparison)."""
    assert isinstance(w_a, values_string.W_String)
    assert isinstance(w_b, values_string.W_String)
    return w_a.equal(w_b)
class StringHashmapStrategy(HashmapStrategy):
    """Strategy specialised for string keys.

    Keys stay wrapped; an r_dict with equal?/equal-hash callbacks provides
    value-based key comparison.
    """
    import_from_mixin(UnwrappedHashmapStrategyMixin)
    erase, unerase = rerased.new_static_erasing_pair("string-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values_string.W_String)

    def wrap(self, w_val):
        # Keys are stored wrapped, so wrap/unwrap are identities.
        return w_val

    def unwrap(self, w_val):
        return w_val

    def _create_empty_dict(self):
        # Compare string keys by value, not identity.
        return r_dict(cmp_strings, hash_strings)
def hash_mutable_bytes(w_b):
    """r_dict hash callback for mutable byte-string keys."""
    assert isinstance(w_b, values.W_MutableBytes)
    return w_b.hash_equal()

def hash_immutable_bytes(w_b):
    """r_dict hash callback for immutable byte-string keys."""
    assert isinstance(w_b, values.W_ImmutableBytes)
    return w_b.hash_equal()

def cmp_mutable_bytes(w_a, w_b):
    """r_dict equality callback for mutable byte strings (content compare)."""
    assert isinstance(w_a, values.W_MutableBytes)
    assert isinstance(w_b, values.W_MutableBytes)
    return w_a.value == w_b.value

def cmp_immutable_bytes(w_a, w_b):
    """r_dict equality callback for immutable byte strings (content compare)."""
    assert isinstance(w_a, values.W_ImmutableBytes)
    assert isinstance(w_b, values.W_ImmutableBytes)
    return w_a.value == w_b.value
class MutableByteHashmapStrategy(HashmapStrategy):
    """Strategy specialised for mutable byte-string keys.

    Keys stay wrapped; an r_dict compares them by content.
    """
    import_from_mixin(UnwrappedHashmapStrategyMixin)
    erase, unerase = rerased.new_static_erasing_pair("byte-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_MutableBytes)

    def wrap(self, val):
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_MutableBytes)
        return w_val

    def _create_empty_dict(self):
        return r_dict(cmp_mutable_bytes, hash_mutable_bytes)
class ImmutableByteHashmapStrategy(HashmapStrategy):
    """Strategy specialised for immutable byte-string keys.

    Keys stay wrapped; an r_dict compares them by content.
    """
    import_from_mixin(UnwrappedHashmapStrategyMixin)
    # NOTE(review): label identical to MutableByteHashmapStrategy's pair;
    # harmless for behaviour (pairs are distinct objects) but confusing.
    erase, unerase = rerased.new_static_erasing_pair("byte-hashmap-strategy")

    def is_correct_type(self, w_obj):
        return isinstance(w_obj, values.W_ImmutableBytes)

    def wrap(self, val):
        return val

    def unwrap(self, w_val):
        assert isinstance(w_val, values.W_ImmutableBytes)
        return w_val

    def _create_empty_dict(self):
        return r_dict(cmp_immutable_bytes, hash_immutable_bytes)
class W_EqualHashTable(W_HashTable):
    """Hash table with `equal?` key semantics.

    Every operation is delegated to a pluggable strategy; the strategy and
    its erased storage may be swapped at runtime when keys of a new type
    appear.
    """
    _attrs_ = ['strategy', 'hstorage', 'is_immutable']
    _immutable_fields_ = ['is_immutable']

    def __init__(self, keys, vals, immutable=False):
        self.is_immutable = immutable
        # Pick a key-type-specialised strategy for the initial contents.
        self.strategy = _find_strategy_class(keys)
        self.hstorage = self.strategy.create_storage(keys, vals)

    def immutable(self):
        return self.is_immutable

    def hash_items(self):
        return self.strategy.items(self)

    def _set(self, key, val):
        # Unsafe direct set; strategies may raise KeyError for foreign keys.
        return self.strategy._set(self, key, val)

    def hash_set(self, key, val, env, cont):
        return self.strategy.set(self, key, val, env, cont)

    def hash_equal(self, info=None):
        # NOTE(review): hashes the table by its size only -- cheap but very
        # collision-prone; confirm this is intentional.
        return self.length()

    def hash_ref(self, key, env, cont):
        return self.strategy.get(self, key, env, cont)

    def hash_remove(self, key, env, cont):
        # Functional removal (returns a new table).
        return self.strategy.rem(self, key, env, cont)

    def hash_remove_inplace(self, key, env, cont):
        return self.strategy.rem_inplace(self, key, env, cont)

    def get_item(self, i):
        return self.strategy.get_item(self, i)

    def hash_iterate_next(self, pos):
        return self.strategy.hash_iterate_next(self, pos)

    def hash_iterate_first(self):
        return self.strategy.hash_iterate_first(self)

    def length(self):
        return self.strategy.length(self)

    def make_empty(self):
        # Fresh table with the same mutability flag.
        return W_EqualHashTable([], [], immutable=self.is_immutable)

    def tostring(self):
        lst = [values.W_Cons.make(k, v).tostring() for k, v in self.hash_items()]
        return "#hash(%s)" % " ".join(lst)
import unittest
import ActionUserCounter as action
import copy
import os
import json
class TestSomething(unittest.TestCase) :
def test_splitActionOwnerName(self) :
cases = [
"user/action",
"user/action-name",
"user/longer-action-name",
"action",
"action-name",
"longer-action-name"
]
expected = [
("user", "action"),
("user", "action-name"),
("user", "longer-action-name"),
("", "action"),
("", "action-name"),
("", "longer-action-name")
]
for i, c in enumerate(cases) :
self.assertEqual(expected[i], action.splitActionOwnerName(c))
def test_formatCount(self) :
cases = [
(0, "0"),
(1, "1"),
(9, "9"),
(10, "10"),
(99, "99"),
(100, "100"),
(999, "999"),
(1000, "1000"),
(9999, "9999"),
(10000, "10.0K"),
(10099, "10.0K"),
(10100, "10.1K"),
(99900, "99.9K"),
(99999, "99.9K"),
(100000, "100.0K"),
(100099, "100.0K"),
(100100, "100.1K"),
(999900, "999.9K"),
(999999, "999.9K"),
(1000000, "1.00M"),
(1009999, "1.00M"),
(1010000, "1.01M"),
(1019999, "1.01M"),
(1020000, "1.02M"),
(1099999, "1.09M"),
(1100000, "1.10M"),
(1109999, "1.10M"),
(9999999, "9.99M"),
(10000000, "10.00M")
]
for caseInput, expected in cases :
self.assertEqual(expected, action.formatCount(caseInput))
def test_toDictWithShieldsKeys(self) :
cases = [
("100", "green", None, None),
("100", "green", "githubactions", None),
("100", "green", "github", None),
("100", "green", None, "flat"),
("100", "green", "githubactions", "flat"),
("100", "green", "github", "flat")
]
expected = [
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "githubactions", "logoColor" : "#fff"},
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github"},
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "style" : "flat"},
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "githubactions", "style" : "flat", "logoColor" : "#fff"},
{"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"}
]
for i, (count, color, logo, style) in enumerate(cases) :
self.assertEqual(expected[i], action.toDictWithShieldsKeys(count, color, logo, style))
def test_toJsonEndpoints(self) :
case = {
"action-1" : "100",
"action-2" : "120",
"action-3" : "303",
"action-4" : "104",
"action-5" : "155",
"action-6" : "600"
}
expected1 = {
"action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
"action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green"},
"action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green"},
"action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green"},
"action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green"},
"action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green"}
}
expected2 = {
"action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green", "namedLogo" : "github", "style" : "flat"}
}
self.assertEqual(expected1, action.toJsonEndpoints(case, "green", None, None))
self.assertEqual(expected2, action.toJsonEndpoints(case, "green", "github", "flat"))
def test_writeToFiles(self) :
case1 = {
"action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
"action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green"},
"action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green"},
"action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green"},
"action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green"},
"action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green"}
}
case2 = {
"action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green", "namedLogo" : "github", "style" : "flat"},
"action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green", "namedLogo" : "github", "style" : "flat"}
}
os.chdir("tests")
action.writeToFiles(copy.deepcopy(case1), False)
for actionName, expected in case1.items() :
filename = actionName + ".json"
self.assertTrue(os.path.exists(filename))
with open(filename, "r") as f :
self.assertEqual(expected, json.load(f))
os.remove(filename)
action.writeToFiles(copy.deepcopy(case2), False)
for actionName, expected in case2.items() :
filename = actionName + ".json"
self.assertTrue(os.path.exists(filename))
with open(filename, "r") as f :
self.assertEqual(expected, json.load(f))
os.remove(filename)
os.chdir("..") | tests/tests.py |
import unittest
import ActionUserCounter as action
import copy
import os
import json
class TestSomething(unittest.TestCase) :
    """Unit tests for the ActionUserCounter helper functions."""

    def test_splitActionOwnerName(self) :
        # "owner/name" splits into (owner, name); bare names yield ("", name).
        cases = [
            "user/action",
            "user/action-name",
            "user/longer-action-name",
            "action",
            "action-name",
            "longer-action-name"
        ]
        expected = [
            ("user", "action"),
            ("user", "action-name"),
            ("user", "longer-action-name"),
            ("", "action"),
            ("", "action-name"),
            ("", "longer-action-name")
        ]
        for i, c in enumerate(cases) :
            self.assertEqual(expected[i], action.splitActionOwnerName(c))

    def test_formatCount(self) :
        # Boundary values around the K (10000) and M (1000000) thresholds.
        cases = [
            (0, "0"),
            (1, "1"),
            (9, "9"),
            (10, "10"),
            (99, "99"),
            (100, "100"),
            (999, "999"),
            (1000, "1000"),
            (9999, "9999"),
            (10000, "10.0K"),
            (10099, "10.0K"),
            (10100, "10.1K"),
            (99900, "99.9K"),
            (99999, "99.9K"),
            (100000, "100.0K"),
            (100099, "100.0K"),
            (100100, "100.1K"),
            (999900, "999.9K"),
            (999999, "999.9K"),
            (1000000, "1.00M"),
            (1009999, "1.00M"),
            (1010000, "1.01M"),
            (1019999, "1.01M"),
            (1020000, "1.02M"),
            (1099999, "1.09M"),
            (1100000, "1.10M"),
            (1109999, "1.10M"),
            (9999999, "9.99M"),
            (10000000, "10.00M")
        ]
        for caseInput, expected in cases :
            self.assertEqual(expected, action.formatCount(caseInput))

    def test_toDictWithShieldsKeys(self) :
        # Optional logo/style arguments appear in the dict only when given;
        # the "githubactions" logo additionally forces logoColor #fff.
        cases = [
            ("100", "green", None, None),
            ("100", "green", "githubactions", None),
            ("100", "green", "github", None),
            ("100", "green", None, "flat"),
            ("100", "green", "githubactions", "flat"),
            ("100", "green", "github", "flat")
        ]
        expected = [
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "githubactions", "logoColor" : "#fff"},
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github"},
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "style" : "flat"},
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "githubactions", "style" : "flat", "logoColor" : "#fff"},
            {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"}
        ]
        for i, (count, color, logo, style) in enumerate(cases) :
            self.assertEqual(expected[i], action.toDictWithShieldsKeys(count, color, logo, style))

    def test_toJsonEndpoints(self) :
        case = {
            "action-1" : "100",
            "action-2" : "120",
            "action-3" : "303",
            "action-4" : "104",
            "action-5" : "155",
            "action-6" : "600"
        }
        expected1 = {
            "action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
            "action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green"},
            "action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green"},
            "action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green"},
            "action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green"},
            "action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green"}
        }
        expected2 = {
            "action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green", "namedLogo" : "github", "style" : "flat"}
        }
        self.assertEqual(expected1, action.toJsonEndpoints(case, "green", None, None))
        self.assertEqual(expected2, action.toJsonEndpoints(case, "green", "github", "flat"))

    def test_writeToFiles(self) :
        # NOTE(review): os.chdir is not wrapped in try/finally, so a failing
        # assertion leaves the process chdir'd into tests/.
        case1 = {
            "action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green"},
            "action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green"},
            "action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green"},
            "action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green"},
            "action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green"},
            "action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green"}
        }
        case2 = {
            "action-1" : {"schemaVersion" : 1, "label" : "used by", "message" : "100", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-2" : {"schemaVersion" : 1, "label" : "used by", "message" : "120", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-3" : {"schemaVersion" : 1, "label" : "used by", "message" : "303", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-4" : {"schemaVersion" : 1, "label" : "used by", "message" : "104", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-5" : {"schemaVersion" : 1, "label" : "used by", "message" : "155", "color" : "green", "namedLogo" : "github", "style" : "flat"},
            "action-6" : {"schemaVersion" : 1, "label" : "used by", "message" : "600", "color" : "green", "namedLogo" : "github", "style" : "flat"}
        }
        os.chdir("tests")
        action.writeToFiles(copy.deepcopy(case1), False)
        for actionName, expected in case1.items() :
            filename = actionName + ".json"
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f :
                self.assertEqual(expected, json.load(f))
            os.remove(filename)
        action.writeToFiles(copy.deepcopy(case2), False)
        for actionName, expected in case2.items() :
            filename = actionName + ".json"
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f :
                self.assertEqual(expected, json.load(f))
            os.remove(filename)
        os.chdir("..")
import glob
import pathlib
import re
import shutil
from collections import Counter
import ase
import ase.symbols
import numpy as np
from bandapi.dispatcher.dpdispatcher import Task
from bandapi.flow.abacus import default_settings
from bandapi.flow.state import FlowStateControl
from bandapi.flow.task_content import NamedAtomsContentDict
from bandapi.io.abacus.out import read_stru
from bandapi.io.abacus.potential import AbacusPotential
"""
Abacus has calculation state as following:
- scf(default)
- relax: ionic relaxations
- cell-relax: cell relaxation
- nscf: charge density file is needed.
- istate: Not Supported Now.
- ienvelope: Not Supported Now.
- md: Not Supported Now.
"""
from bandapi.flow.abacus import AbacusState
class AbacusScfState(AbacusState):
    """Flow state driving a plain ABACUS `scf` calculation.

    Writes STRU/KPT/INPUT files for every structure in the task content,
    builds dispatcher tasks, and decides whether the flow may continue into
    the next state.
    """
    _state = "scf"

    def _is_done(self, subname):
        # A subtask counts as finished when a `flow_begin_test` hook (defined
        # by subclasses) recorded it in `check_exist_status`.  States without
        # such a hook treat every subtask as pending.
        return bool(getattr(self, "check_exist_status", {}).get(subname, None))

    def bakeup(self, task_content: NamedAtomsContentDict):
        """Write STRU, KPT and INPUT files for every unfinished subtask.

        :param NamedAtomsContentDict task_content: mapping subdir -> ase.Atoms.
        """
        # BUGFIX: previously nothing was written at all when the optional
        # `check_exist_status` attribute was missing (hasattr guard).
        for subdir, atoms in task_content.items():
            if not self._is_done(subdir):
                self._write_stur(subname=subdir, atoms=atoms, potential_name=self.get_state_settings("potential_name"))
                self._write_kpt(subname=subdir, atoms=atoms)
                self._write_input(subname=subdir, atoms=atoms)

    def prepare(self, task_content: NamedAtomsContentDict, task_settings):
        """Build one dispatcher Task per unfinished subtask.

        :param NamedAtomsContentDict task_content: mapping subdir -> atoms.
        :param task_settings: settings dict providing `remote_command`.
        :return: list of dpdispatcher Task objects.
        """
        task_list = []
        # BUGFIX: the original iterated task_content twice (nested loops with
        # a shadowed loop variable), producing len(task_content) duplicates
        # of every task.
        for item in task_content:
            if not self._is_done(item):
                task_list.append(
                    Task(command=task_settings["remote_command"],
                         task_work_path=f"{self._state}/{item}/",
                         forward_files=[*self.bake_upload_files(task_content[item])],
                         backward_files=["OUT.ABACUS"]
                         ))
        return task_list

    def run_end(self, next_state: str):
        """Check whether `next_state` may follow an scf run.

        :param next_state: name of the following state, or None.
        :raises ValueError: when a band-structure nscf is requested, which
            needs the charge-dumping `AbacusScfStateWithCharge` instead.
        """
        if next_state is None:
            pass
        elif next_state == "nscf-band":
            raise ValueError("Please use `AbacusScfStateWithCharge` for band-structure scf.")
        else:
            pass

    def get_input_args(self, atoms):
        """INPUT-file keyword arguments for the `scf` state.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = [ase.symbols.chemical_symbols[item] for item in atoms_counter.keys()]
        return {
            "pseudo_dir": "./",
            "calculation": "scf",
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
        }

    def get_kpt_args(self, atoms):
        """KPT-file arguments (Gamma-centred grid).

        The grid is scaled inversely to the cell lengths; if the resulting
        grid would exceed 1000 k-points, the scope is reduced by 2 once.

        :param ase.Atoms atoms:
        """
        if not self.get_state_settings("kpointfix", default_settings["kpointfix"]):
            scope = self.get_state_settings("kpointscope", default_settings["kpointscope"])
            odd_flag = scope % 2
            abc = atoms.cell.lengths()
            result = np.around(1 / abc / min(1 / abc) * scope)
            if result[0] * result[1] * result[2] > 1000:
                scope -= 2
                odd_flag = scope % 2
                abc = atoms.cell.lengths()
                result = np.around(1 / abc / min(1 / abc) * scope)
            # Force every grid dimension to the same parity as `scope`.
            mask = result % 2 != odd_flag
            shift = np.zeros_like(result)
            content = np.concatenate([result + mask, shift], axis=-1)
        else:
            scope = self.get_state_settings("kpointscope", default_settings["kpointscope"])
            content = np.array([scope, scope, scope, 0, 0, 0])
        return {
            "number_of_kpt": 0,
            "mode": "Gamma",
            "content": content
        }

    def bake_upload_files(self, atoms):
        """Names of the files to upload for one subtask: INPUT, STRU, KPT
        plus one pseudopotential file per element.

        :param ase.Atoms atoms:
        """
        pseudo_file_list = []
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = [ase.symbols.chemical_symbols[item] for item in atoms_counter.keys()]
        for num in atom_type_list:
            potfile: pathlib.Path = AbacusPotential(pot_name=self.get_state_settings("potential_name"))[num]
            pseudo_file_list.append(potfile.name)
        return ["INPUT", "STRU", "KPT", *pseudo_file_list]
class AbacusRelaxState(AbacusScfState, AbacusState):
    """Flow state driving an ABACUS ionic `relax` calculation."""
    _state = "relax"

    def get_input_args(self, atoms):
        """INPUT-file keyword arguments for the `relax` state.

        (Docstring previously said "scf"; this state runs calculation=relax
        and adds the `nstep` ionic-step limit.)

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = [ase.symbols.chemical_symbols[item] for item in atoms_counter.keys()]
        return {
            "pseudo_dir": "./",
            "calculation": "relax",
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "nstep": self.get_state_settings("nstep", default_settings["nstep"]),
        }

    def run_end(self, next_state: str):
        """Feed the relaxed structures into the next state.

        Replaces every entry of `task_content` with the relaxed structure
        read from OUT.ABACUS/STRU_ION_D and allows one more submit loop.
        (The original had an unreachable `else: raise NotImplementedError`
        after exhaustive None / not-None branches.)

        :param next_state: name of the following state, or None.
        :return: True when the flow may continue, None otherwise.
        """
        if next_state is None:
            return None
        # Any concrete next state may consume the relaxed structures.
        self._submit_loop_condition = 1
        for subdir in self.task_content:
            self.task_content[subdir] = read_stru(self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "STRU_ION_D")
        return True
class AbacusCellRelaxState(AbacusScfState, AbacusState):
    """Flow state driving an ABACUS `cell-relax` (cell + ion) calculation."""
    _state = "cell-relax"

    def get_input_args(self, atoms):
        """INPUT-file keyword arguments for the `cell-relax` state.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = [ase.symbols.chemical_symbols[item] for item in atoms_counter.keys()]
        return {
            "pseudo_dir": "./",
            "calculation": "cell-relax",
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "nstep": self.get_state_settings("nstep", default_settings["nstep"]),
        }

    def run_end(self, next_state: str):
        """Feed the relaxed structures into a following scf-like state.

        :param next_state: name of the following state, or None.
        :return: True when the flow may continue, None when there is no
            next state.
        :raises NotImplementedError: for any unsupported next state.
        """
        if next_state is None:
            return None
        # BUGFIX: was `next_state == "scf" or "relax" or "scf-charge"`,
        # which is always true because non-empty string literals are truthy,
        # so the NotImplementedError branch could never be reached.
        if next_state in ("scf", "relax", "scf-charge"):
            self._submit_loop_condition = 1
            for subdir in self.task_content:
                self.task_content[subdir] = read_stru(self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "STRU_ION_D")
            return True
        raise NotImplementedError
class AbacusScfStateWithCharge(AbacusScfState, AbacusState):
    """scf state that additionally dumps the charge density (out_charge=1),
    as required before a band-structure nscf run."""
    _state = "scf-charge"

    def flow_begin_test(self):
        """Mark every subtask whose SPIN*_CHG output already exists as done."""
        finished = {}
        for subdir in self.task_content:
            pattern = self.flow_work_root / "scf-charge" / subdir / "OUT.ABACUS" / "SPIN*_CHG"
            if glob.glob(pattern.as_posix()):
                finished[subdir] = True
        self.check_exist_status = finished

    def get_input_args(self, atoms):
        """INPUT keywords: a plain scf run plus charge-density output.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        species = Counter(atoms.get_atomic_numbers())
        symbols = [ase.symbols.chemical_symbols[z] for z in species.keys()]
        return {
            "pseudo_dir": "./",
            "calculation": "scf",
            "ntype": len(symbols),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "out_charge": 1
        }

    def run_end(self, next_state: str):
        """Allow a following nscf-band run and reload the STRU files.

        :param next_state: name of the following state, or None.
        :return: True when an nscf-band run may follow.
        :raises NotImplementedError: for any other next state.
        """
        if next_state is None:
            pass
        elif next_state == "nscf-band":
            self._submit_loop_condition = 1
            for subdir in self.task_content:
                self.task_content[subdir] = read_stru(self.flow_work_root / self._state / subdir / "STRU")
            return True
        else:
            raise NotImplementedError
class AbacusBandState(AbacusState):
_state = "nscf-band"
def flow_begin_test(self):
check_status = {}
for subdir, atoms in self.task_content.items():
atoms: ase.Atoms
CHGfile = glob.glob((self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "running_nscf*").as_posix())
for item in CHGfile:
if item:
check_status[subdir] = True
self.check_exist_status = check_status
for subdir, atoms in self.task_content.items():
if not self.check_exist_status.get(subdir,None):
atoms: ase.Atoms
CHGfile = glob.glob((self.flow_work_root / "scf-charge" / subdir / "OUT.ABACUS" / "SPIN*_CHG").as_posix())
(self.flow_work_root / self._state / subdir / "OUT.ABACUS").mkdir(parents=True, exist_ok=True)
for item in CHGfile:
shutil.copy(item, self.flow_work_root / self._state / subdir / "OUT.ABACUS/")
def bakeup(self, task_content: NamedAtomsContentDict):
"""
:param NamedAtomsContentDict task_content:
:return:
"""
for subdir, atoms in task_content.items():
if hasattr(self, "check_exist_status"):
self.check_exist_status: dict
if not self.check_exist_status.get(subdir, None):
for subdir, atoms in task_content.items():
self._write_stur(subname=subdir, atoms=atoms, potential_name=self.get_state_settings("potential_name"))
self._write_kpt(subname=subdir, atoms=atoms)
self._write_input(subname=subdir, atoms=atoms)
def prepare(self, task_content: NamedAtomsContentDict, task_settings):
task_list = []
for idx, item in enumerate(task_content.keys()):
for idx, item in enumerate(task_content):
if hasattr(self, "check_exist_status"):
self.check_exist_status: dict
if not self.check_exist_status.get(item, None):
task_list.append(
Task(command=task_settings["remote_command"],
task_work_path=f"{self._state}/{item}/",
forward_files=[*self.bake_upload_files(task_content[item])],
backward_files=["OUT.ABACUS"]
)
)
return task_list
def run_end(self, next_state: str):
"""
Define if the next_state is able to run, and do necessary work.
:param next_state:
:return:
"""
if next_state is None:
return False
elif next_state == "band-data":
return True
else:
return NotImplementedError
def get_input_args(self, atoms):
    """Assemble the INPUT-file keyword dict for the ``nscf`` band run.

    :param ase.Atoms atoms: structure whose distinct species count sets
        ``ntype``.
    :return: dict of ABACUS INPUT keywords.
    """
    species = Counter(atoms.get_atomic_numbers())
    # Number of distinct chemical species present in the structure.
    n_types = len(species)
    return {
        "pseudo_dir": "./",
        "calculation": "nscf",
        "nbands": self.get_state_settings("nbands", default_settings["nbands"]),
        "ntype": n_types,
        "basis_type": "pw",
        "symmetry": 0,
        "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
        "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
        "out_band": 1,
        "start_charge": "file"
    }
def get_kpt_args(self, atoms):
    """Build line-mode KPT arguments along the cell's special k-point path.

    Fix: the findall pattern is now a raw string (the original passed a
    non-raw string containing backslash escapes, which emits a
    DeprecationWarning and is a future SyntaxError).

    :param ase.Atoms atoms: structure whose cell defines the band path.
    :return: dict with the point count, ``"Line"`` mode, and an array of
        ``[kx, ky, kz, n_points]`` rows.
    """
    scope = self.get_state_settings("kpathscope", default_settings["kpathscope"])
    bandpath = atoms.cell.bandpath()
    sp = bandpath.special_points
    path = bandpath.path.split(",")
    path_lines = []
    num_lines = []
    for item in path:
        for point in re.findall(r"\w\d*", item):
            path_lines.append(sp[point])
            num_lines.append(scope)
        num_lines[-1] = 1  # each segment ends on a single terminal point
    path_lines = np.array(path_lines)
    num_lines = np.array(num_lines)
    # Append the per-point sampling count as a trailing column.
    kpathlines = np.concatenate([path_lines, num_lines[:, None]], axis=-1)
    return {
        "number_of_kpt": kpathlines.shape[0],
        "mode": "Line",
        "content": kpathlines
    }
def bake_upload_files(self, atoms):
    """List the files to upload for one sub-task.

    Returns INPUT/STRU/KPT, the pseudopotential file for every species
    present in *atoms*, and the pre-staged ``OUT.ABACUS/`` directory.

    :param ase.Atoms atoms:
    :return: list of file names relative to the task directory.
    """
    pseudo_files = []
    species = Counter(atoms.get_atomic_numbers())
    for number in species:
        symbol = ase.symbols.chemical_symbols[number]
        potfile: pathlib.Path = AbacusPotential(pot_name=self.get_state_settings("potential_name"))[symbol]
        pseudo_files.append(potfile.name)
    return ["INPUT", "STRU", "KPT", *pseudo_files, "OUT.ABACUS/"]
class AbacusStateControl(FlowStateControl):
_flow_state_class = AbacusState
def __init__(self, flow_list, task_content, **kwargs):
super(AbacusStateControl, self).__init__(flow_list=flow_list, task_content=task_content, **kwargs) | src/bandapi/flow/abacus/calculation_state.py | import glob
import pathlib
import re
import shutil
from collections import Counter
import ase
import ase.symbols
import numpy as np
from bandapi.dispatcher.dpdispatcher import Task
from bandapi.flow.abacus import default_settings
from bandapi.flow.state import FlowStateControl
from bandapi.flow.task_content import NamedAtomsContentDict
from bandapi.io.abacus.out import read_stru
from bandapi.io.abacus.potential import AbacusPotential
"""
Abacus has calculation state as following:
- scf(default)
- relax: ionic relaxations
- cell-relax: cell relaxation
- nscf: charge density file is needed.
- istate: Not Supported Now.
- ienvelope: Not Supported Now.
- md: Not Supported Now.
"""
from bandapi.flow.abacus import AbacusState
class AbacusScfState(AbacusState):
    """Plain self-consistent-field (scf) calculation state."""

    _state = "scf"

    def bakeup(self, task_content: NamedAtomsContentDict):
        """Write STRU/KPT/INPUT files for every unfinished sub-task.

        :param NamedAtomsContentDict task_content: mapping subdir -> atoms.
        :return:
        """
        for subdir, atoms in task_content.items():
            if hasattr(self, "check_exist_status"):
                if not self.check_exist_status.get(subdir, None):
                    atoms: ase.Atoms
                    self._write_stur(subname=subdir, atoms=atoms, potential_name=self.get_state_settings("potential_name"))
                    self._write_kpt(subname=subdir, atoms=atoms)
                    self._write_input(subname=subdir, atoms=atoms)

    def prepare(self, task_content: NamedAtomsContentDict, task_settings):
        """Build the dispatcher Task list for unfinished sub-tasks.

        Bug fix: the original nested two identical ``enumerate`` loops over
        ``task_content``, appending every Task once per sub-directory
        (duplicating each len(task_content) times).  A single pass is used.

        :param NamedAtomsContentDict task_content: mapping subdir -> atoms.
        :param task_settings: dict with at least ``remote_command``.
        :return: list of dispatcher ``Task`` objects.
        """
        task_list = []
        for item in task_content:
            if hasattr(self, "check_exist_status"):
                if not self.check_exist_status.get(item, None):
                    task_list.append(
                        Task(command=task_settings["remote_command"],
                             task_work_path=f"{self._state}/{item}/",
                             forward_files=[*self.bake_upload_files(task_content[item])],
                             backward_files=["OUT.ABACUS"]
                             ))
        return task_list

    def run_end(self, next_state: str):
        """Define if the next_state is able to run, and do necessary work.

        :param next_state:
        :return:
        :raises ValueError: when a band-structure run is requested next;
            use ``AbacusScfStateWithCharge`` for that, since plain scf does
            not write the charge density nscf needs.
        """
        if next_state is None:
            pass
        elif next_state == "nscf-band":
            raise ValueError("Please use `AbacusScfStateWithCharge` for band-structure scf.")
        else:
            pass

    def get_input_args(self, atoms):
        """INPUT keywords for the scf run.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = list(ase.symbols.chemical_symbols[item] for item in list(atoms_counter.keys()))
        return {
            "pseudo_dir": "./",
            "calculation": "scf",
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
        }

    def get_kpt_args(self, atoms):
        """Gamma-centred k-point grid arguments for the scf run.

        When ``kpointfix`` is unset, the per-axis grid is scaled inversely
        with the cell lengths (capped near 1000 total points, parity kept
        uniform across axes); otherwise a fixed scope^3 grid is used.

        :param ase.Atoms atoms:
        :return: dict with grid content ``[nx, ny, nz, sx, sy, sz]``.
        """
        if not self.get_state_settings("kpointfix", default_settings["kpointfix"]):
            scope = self.get_state_settings("kpointscope", default_settings["kpointscope"])
            odd_flag = scope % 2
            abc = atoms.cell.lengths()
            result = np.around(1 / abc / min(1 / abc) * scope)
            if result[0] * result[1] * result[2] > 1000:
                # Grid too dense: shrink the base scope once and rebuild.
                scope -= 2
                odd_flag = scope % 2
                abc = atoms.cell.lengths()
                result = np.around(1 / abc / min(1 / abc) * scope)
            # Force every axis to share the base scope's parity.
            mask = result % 2 != odd_flag
            shift = np.zeros_like(result)
            content = np.concatenate([result + mask, shift], axis=-1)
        else:
            scope = self.get_state_settings("kpointscope", default_settings["kpointscope"])
            content = np.array([scope, scope, scope, 0, 0, 0])
        return {
            "number_of_kpt": 0,
            "mode": "Gamma",
            "content": content
        }

    def bake_upload_files(self, atoms):
        """Parse which files of atoms should be uploaded (INPUT, STRU, KPT
        and one pseudopotential file per species).

        :param ase.Atoms atoms:
        :return: list of file names relative to the task directory.
        """
        pseudo_file_list = []
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = list(ase.symbols.chemical_symbols[item] for item in list(atoms_counter.keys()))
        for num in atom_type_list:
            potfile: pathlib.Path = AbacusPotential(pot_name=self.get_state_settings("potential_name"))[num]
            pseudo_file_list.append(potfile.name)
        return ["INPUT", "STRU", "KPT", *pseudo_file_list]
class AbacusRelaxState(AbacusScfState, AbacusState):
    """Ionic relaxation state; reuses the scf machinery."""

    _state = "relax"

    def get_input_args(self, atoms):
        """INPUT keywords for the ionic-relaxation run.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        species = Counter(atoms.get_atomic_numbers())
        return {
            "pseudo_dir": "./",
            "calculation": "relax",
            "ntype": len(species),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "nstep": self.get_state_settings("nstep", default_settings["nstep"]),
        }

    def run_end(self, next_state: str):
        """After relaxation, reload the relaxed structures and allow any
        non-None next state.

        :param next_state:
        :return:
        """
        if next_state is None:
            pass
        elif next_state is not None:
            self._submit_loop_condition = 1
            for subdir in self.task_content:
                self.task_content[subdir] = read_stru(
                    self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "STRU_ION_D")
            return True
        else:
            # Unreachable (every str is either None or not None); kept to
            # mirror the sibling states' structure.
            raise NotImplementedError
class AbacusCellRelaxState(AbacusScfState, AbacusState):
    """Cell (lattice + ionic) relaxation state."""

    _state = "cell-relax"

    def get_input_args(self, atoms):
        """INPUT keywords for the cell-relaxation run.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = list(ase.symbols.chemical_symbols[item] for item in list(atoms_counter.keys()))
        return {
            "pseudo_dir": "./",
            "calculation": "cell-relax",
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "nstep": self.get_state_settings("nstep", default_settings["nstep"]),
        }

    def run_end(self, next_state: str):
        """Allow transitions to scf/relax/scf-charge, reloading the relaxed
        structures first.

        Bug fix: the original condition ``next_state == "scf" or "relax" or
        "scf-charge"`` is always truthy (the bare string literals evaluate
        on their own), so every non-None state was accepted.  A membership
        test is used instead; unsupported states now raise.

        :param next_state:
        :return:
        :raises NotImplementedError: for unsupported transitions.
        """
        if next_state is None:
            pass
        elif next_state in ("scf", "relax", "scf-charge"):
            self._submit_loop_condition = 1
            for subdir in self.task_content:
                self.task_content[subdir] = read_stru(
                    self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "STRU_ION_D")
            return True
        else:
            raise NotImplementedError
class AbacusScfStateWithCharge(AbacusScfState, AbacusState):
    """scf state that also writes the charge density to disk, so a later
    nscf band run can restart from it."""

    _state = "scf-charge"

    def flow_begin_test(self):
        """Mark sub-tasks whose ``SPIN*_CHG`` charge files already exist."""
        finished = {}
        for subdir in self.task_content:
            pattern = self.flow_work_root / "scf-charge" / subdir / "OUT.ABACUS" / "SPIN*_CHG"
            for found in glob.glob(pattern.as_posix()):
                if found:
                    finished[subdir] = True
        self.check_exist_status = finished

    def get_input_args(self, atoms):
        """INPUT keywords: a plain scf run plus ``out_charge`` so the
        converged charge density is written out.

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        species = Counter(atoms.get_atomic_numbers())
        return {
            "pseudo_dir": "./",
            "calculation": "scf",
            "ntype": len(species),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "out_charge": 1
        }

    def run_end(self, next_state: str):
        """Allow the ``nscf-band`` transition, reloading each STRU first.

        :param next_state:
        :return:
        :raises NotImplementedError: for unsupported transitions.
        """
        if next_state is None:
            pass
        elif next_state == "nscf-band":
            self._submit_loop_condition = 1
            for subdir in self.task_content:
                self.task_content[subdir] = read_stru(
                    self.flow_work_root / self._state / subdir / "STRU")
            return True
        else:
            raise NotImplementedError
class AbacusBandState(AbacusState):
    """Non-self-consistent (nscf) band-structure calculation state.

    Restarts from the charge density produced by ``scf-charge`` and
    computes eigenvalues along the cell's special k-point path.
    """

    _state = "nscf-band"

    def flow_begin_test(self):
        """Mark finished sub-tasks, then stage charge files for the rest.

        A sub-task is finished when a ``running_nscf*`` log exists in its
        ``OUT.ABACUS`` directory; unfinished sub-tasks get the ``SPIN*_CHG``
        files from ``scf-charge`` copied into their output directory.
        """
        check_status = {}
        for subdir in self.task_content:
            logs = glob.glob(
                (self.flow_work_root / self._state / subdir / "OUT.ABACUS" / "running_nscf*").as_posix())
            for item in logs:
                if item:
                    check_status[subdir] = True
        self.check_exist_status = check_status
        for subdir in self.task_content:
            if not self.check_exist_status.get(subdir, None):
                charges = glob.glob(
                    (self.flow_work_root / "scf-charge" / subdir / "OUT.ABACUS" / "SPIN*_CHG").as_posix())
                (self.flow_work_root / self._state / subdir / "OUT.ABACUS").mkdir(parents=True, exist_ok=True)
                for item in charges:
                    shutil.copy(item, self.flow_work_root / self._state / subdir / "OUT.ABACUS/")

    def bakeup(self, task_content: NamedAtomsContentDict):
        """Write STRU/KPT/INPUT files for every sub-task not yet finished.

        Bug fix: the original re-looped over *all* sub-tasks inside the
        per-subdir check, rewriting every sub-task's files as soon as one
        unfinished sub-task was found.

        :param NamedAtomsContentDict task_content: mapping subdir -> atoms.
        :return:
        """
        for subdir, atoms in task_content.items():
            if hasattr(self, "check_exist_status"):
                if not self.check_exist_status.get(subdir, None):
                    self._write_stur(subname=subdir, atoms=atoms, potential_name=self.get_state_settings("potential_name"))
                    self._write_kpt(subname=subdir, atoms=atoms)
                    self._write_input(subname=subdir, atoms=atoms)

    def prepare(self, task_content: NamedAtomsContentDict, task_settings):
        """Build the dispatcher Task list for unfinished sub-tasks.

        Bug fix: the original nested two identical ``enumerate`` loops,
        appending every Task once per sub-directory.

        :param NamedAtomsContentDict task_content: mapping subdir -> atoms.
        :param task_settings: dict with at least ``remote_command``.
        :return: list of dispatcher ``Task`` objects.
        """
        task_list = []
        for item in task_content:
            if hasattr(self, "check_exist_status"):
                if not self.check_exist_status.get(item, None):
                    task_list.append(
                        Task(command=task_settings["remote_command"],
                             task_work_path=f"{self._state}/{item}/",
                             forward_files=[*self.bake_upload_files(task_content[item])],
                             backward_files=["OUT.ABACUS"]
                             )
                    )
        return task_list

    def run_end(self, next_state: str):
        """Decide whether the flow may advance to *next_state*.

        :param next_state:
        :return: False for no next state, True for "band-data".
        :raises NotImplementedError: otherwise (bug fix: the original
            *returned* the exception class, which is truthy).
        """
        if next_state is None:
            return False
        if next_state == "band-data":
            return True
        raise NotImplementedError(f"Transition to {next_state!r} is not supported.")

    def get_input_args(self, atoms):
        """INPUT keywords for the nscf band run (restarts from file charge).

        :param ase.Atoms atoms:
        :return: dict of ABACUS INPUT keywords.
        """
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = list(ase.symbols.chemical_symbols[item] for item in list(atoms_counter.keys()))
        return {
            "pseudo_dir": "./",
            "calculation": "nscf",
            "nbands": self.get_state_settings("nbands", default_settings["nbands"]),
            "ntype": len(atom_type_list),
            "basis_type": "pw",
            "symmetry": 0,
            "ecutwfc": self.get_state_settings("ecutwfc", default_settings["ecutwfc"]),
            "dr2": self.get_state_settings("dr2", default_settings["dr2"]),
            "out_band": 1,
            "start_charge": "file"
        }

    def get_kpt_args(self, atoms):
        """Line-mode KPT arguments along the cell's band path.

        Fix: the findall pattern is now a raw string (the original passed a
        non-raw string containing backslash escapes, which is deprecated).

        :param ase.Atoms atoms:
        :return: dict with the point count, ``"Line"`` mode and path rows.
        """
        scope = self.get_state_settings("kpathscope", default_settings["kpathscope"])
        bandpath = atoms.cell.bandpath()
        sp = bandpath.special_points
        path = bandpath.path.split(",")
        path_lines = []
        num_lines = []
        for item in path:
            for point in re.findall(r"\w\d*", item):
                path_lines.append(sp[point])
                num_lines.append(scope)
            num_lines[-1] = 1  # each segment ends on a single terminal point
        path_lines = np.array(path_lines)
        num_lines = np.array(num_lines)
        kpathlines = np.concatenate([path_lines, num_lines[:, None]], axis=-1)
        return {
            "number_of_kpt": kpathlines.shape[0],
            "mode": "Line",
            "content": kpathlines
        }

    def bake_upload_files(self, atoms):
        """Files to upload for one sub-task, including the staged charge dir.

        :param ase.Atoms atoms:
        :return: list of file names relative to the task directory.
        """
        pseudo_file_list = []
        atoms_counter = Counter(atoms.get_atomic_numbers())
        atom_type_list = list(ase.symbols.chemical_symbols[item] for item in list(atoms_counter.keys()))
        for num in atom_type_list:
            potfile: pathlib.Path = AbacusPotential(pot_name=self.get_state_settings("potential_name"))[num]
            pseudo_file_list.append(potfile.name)
        return ["INPUT", "STRU", "KPT", *pseudo_file_list, "OUT.ABACUS/"]
class AbacusStateControl(FlowStateControl):
_flow_state_class = AbacusState
def __init__(self, flow_list, task_content, **kwargs):
super(AbacusStateControl, self).__init__(flow_list=flow_list, task_content=task_content, **kwargs) | 0.58676 | 0.182717 |
from uuid import uuid4
import pytest
from django.contrib.auth import get_user_model
from django.test import RequestFactory
from zapier.auth import authenticate_request, authorize_request
from zapier.exceptions import (
MissingTokenHeader,
TokenAuthError,
TokenScopeError,
TokenUserError,
UnknownToken,
)
from zapier.models import ZapierToken, ZapierUser
@pytest.mark.django_db
class TestAuthenticateRequest:
    """Behaviour of ``zapier.auth.authenticate_request``."""

    def test_authenticate_request(
        self, rf: RequestFactory, zapier_token: ZapierToken
    ) -> None:
        """A valid token header authenticates and attaches the token."""
        req = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
        authenticate_request(req)
        assert req.auth == zapier_token
        assert req.user.is_anonymous

    def test_authenticate_missing_token_header(self, rf: RequestFactory) -> None:
        """An absent or empty X-Api-Token header raises MissingTokenHeader."""
        for header_kwargs in ({}, {"HTTP_X_API_TOKEN": ""}):
            req = rf.get("/", **header_kwargs)
            with pytest.raises(MissingTokenHeader):
                authenticate_request(req)

    def test_authenticate_unknown_token(self, rf: RequestFactory) -> None:
        """A token that matches no stored ZapierToken raises UnknownToken."""
        req = rf.get("/", HTTP_X_API_TOKEN=str(uuid4()))
        with pytest.raises(UnknownToken):
            authenticate_request(req)

    def test_authenticate_inactive_user_error(
        self, rf: RequestFactory, zapier_token: ZapierToken
    ) -> None:
        """Tokens belonging to inactive users are rejected."""
        req = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
        zapier_token.user.is_active = False
        zapier_token.user.save()
        with pytest.raises(TokenUserError):
            authenticate_request(req)

    def test_authenticate_token_user_error(
        self, rf: RequestFactory, zapier_token: ZapierToken
    ) -> None:
        """A request already bound to a different user is rejected."""
        req = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
        req.user = get_user_model().objects.create(username=str(uuid4()))
        with pytest.raises(TokenUserError):
            authenticate_request(req)
@pytest.mark.django_db
class TestAuthorizeRequest:
@pytest.mark.parametrize(
"scopes,scope",
[
(["foo"], "foo"),
(["foo", "bar"], "bar"),
],
)
def test_authorize_request(
self,
rf: RequestFactory,
zapier_token: ZapierToken,
scopes: list[str],
scope: str,
) -> None:
zapier_token.set_scopes(scopes)
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = zapier_token
authorize_request(request, scope)
@pytest.mark.parametrize(
"scopes,scope,error",
[
(["foo"], "", ValueError),
(["foo"], "*", ValueError),
(["foo"], "bar", TokenScopeError),
],
)
def test_authorize_request__error(
self,
rf: RequestFactory,
zapier_token: ZapierToken,
scopes: list[str],
scope: str,
error: type[Exception] | None,
) -> None:
zapier_token.set_scopes(scopes)
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = zapier_token
with pytest.raises(error):
authorize_request(request, scope)
def test_authorize_request__no_token(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
with pytest.raises(TokenAuthError):
authorize_request(request, scope="foo")
def test_authorize_request__invalid_auth(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = ZapierUser()
with pytest.raises(TokenAuthError):
authorize_request(request, scope="foo") | tests/test_auth.py | from uuid import uuid4
import pytest
from django.contrib.auth import get_user_model
from django.test import RequestFactory
from zapier.auth import authenticate_request, authorize_request
from zapier.exceptions import (
MissingTokenHeader,
TokenAuthError,
TokenScopeError,
TokenUserError,
UnknownToken,
)
from zapier.models import ZapierToken, ZapierUser
@pytest.mark.django_db
class TestAuthenticateRequest:
def test_authenticate_request(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
authenticate_request(request)
assert request.auth == zapier_token
assert request.user.is_anonymous
def test_authenticate_missing_token_header(self, rf: RequestFactory) -> None:
request = rf.get("/")
with pytest.raises(MissingTokenHeader):
authenticate_request(request)
request = rf.get("/", HTTP_X_API_TOKEN="")
with pytest.raises(MissingTokenHeader):
authenticate_request(request)
def test_authenticate_unknown_token(self, rf: RequestFactory) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(uuid4()))
with pytest.raises(UnknownToken):
authenticate_request(request)
def test_authenticate_inactive_user_error(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
zapier_token.user.is_active = False
zapier_token.user.save()
with pytest.raises(TokenUserError):
authenticate_request(request)
def test_authenticate_token_user_error(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.user = get_user_model().objects.create(username=str(uuid4()))
with pytest.raises(TokenUserError):
authenticate_request(request)
@pytest.mark.django_db
class TestAuthorizeRequest:
@pytest.mark.parametrize(
"scopes,scope",
[
(["foo"], "foo"),
(["foo", "bar"], "bar"),
],
)
def test_authorize_request(
self,
rf: RequestFactory,
zapier_token: ZapierToken,
scopes: list[str],
scope: str,
) -> None:
zapier_token.set_scopes(scopes)
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = zapier_token
authorize_request(request, scope)
@pytest.mark.parametrize(
"scopes,scope,error",
[
(["foo"], "", ValueError),
(["foo"], "*", ValueError),
(["foo"], "bar", TokenScopeError),
],
)
def test_authorize_request__error(
self,
rf: RequestFactory,
zapier_token: ZapierToken,
scopes: list[str],
scope: str,
error: type[Exception] | None,
) -> None:
zapier_token.set_scopes(scopes)
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = zapier_token
with pytest.raises(error):
authorize_request(request, scope)
def test_authorize_request__no_token(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
with pytest.raises(TokenAuthError):
authorize_request(request, scope="foo")
def test_authorize_request__invalid_auth(
self, rf: RequestFactory, zapier_token: ZapierToken
) -> None:
request = rf.get("/", HTTP_X_API_TOKEN=str(zapier_token.api_token))
request.auth = ZapierUser()
with pytest.raises(TokenAuthError):
authorize_request(request, scope="foo") | 0.485356 | 0.307787 |
import numpy as np
import sys
# Optional CLI argument: path to the training corpus (default input.txt).
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    filename = 'input.txt'
with open(filename, 'r') as f:
    data = f.read()
# All unique characters / entities in the data set.
chars = list(set(data))
chars.sort()
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
# Each character in the vocabulary gets a unique integer index assigned, in the
# half-open interval [0:N). These indices are useful to create one-hot encoded
# vectors that represent characters in numerical computations.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print('char_to_ix', char_to_ix)
print('ix_to_char', ix_to_char)
# Hyperparameters
hidden_size = 50  # size of hidden layer of neurons
seq_length = 16  # number of steps to unroll the RNN for
learning_rate = 1e-1
# Endpoints of the intended weight-initialisation range.
ub, lb = 0.1, -0.1
# LSTM gate weights/biases: one (hidden, hidden+vocab) matrix PER TIMESTEP
# (axis 0 has length seq_length), i.e. weights are NOT shared across time.
# NOTE(review): `randn * (ub - lb) + lb` is a Gaussian draw with mean lb,
# not the uniform draw in [lb, ub] the names suggest — `rand` was probably
# intended.  Left unchanged; confirm before fixing (it changes training).
Wgs = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wis = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wfs = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wos = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
bgs = np.zeros((seq_length, hidden_size, 1))
bis = np.zeros((seq_length, hidden_size, 1))
bfs = np.zeros((seq_length, hidden_size, 1))
bos = np.zeros((seq_length, hidden_size, 1))
# Fully-connected output layer: hidden state -> vocabulary logits.
Why = np.random.randn(vocab_size, hidden_size) * (ub - lb) + lb
by = np.zeros((vocab_size, 1))
def lossFun(inputs, targets, hprev, sprev):
    """Run one forward/backward pass of the LSTM over a full sequence.

    ``inputs``/``targets`` are lists of ``seq_length`` character indices;
    ``hprev``/``sprev`` are the incoming hidden and cell states, each of
    shape (hidden_size, 1).  Returns the total cross-entropy loss, the
    gradient of every parameter tensor, and the final hidden/cell states to
    carry into the next training chunk.

    NOTE(review): this model keeps a *separate* weight matrix per timestep
    (``Wgs[t]`` etc.), unlike a standard LSTM which shares weights across
    time; the gradients below are therefore per-timestep assignments, not
    accumulations.
    """
    assert len(inputs) == seq_length
    assert len(targets) == seq_length
    xs, hs, ss, ps, ys = {}, {}, {}, {}, {}
    gs, iis, fs, os = {}, {}, {}, {}  # the `iis` here should be `is`, unfortunately `is` is a keyword in python
    # Initial incoming state.
    hs[-1] = np.copy(hprev)
    ss[-1] = np.copy(sprev)
    loss = 0
    # Forward pass
    for t in range(seq_length):
        xs[t] = np.zeros((vocab_size, 1))  # one-hot encoding of the input char
        xs[t][inputs[t]] = 1
        xc = np.vstack((xs[t], hs[t - 1]))  # concatenated [input; prev hidden]
        gs[t] = np.tanh(np.dot(Wgs[t], xc) + bgs[t])  # candidate cell update
        iis[t] = sigmoid(np.dot(Wis[t], xc) + bis[t])  # input gate
        fs[t] = sigmoid(np.dot(Wfs[t], xc) + bfs[t])  # forget gate
        os[t] = sigmoid(np.dot(Wos[t], xc) + bos[t])  # output gate
        ss[t] = gs[t] * iis[t] + ss[t - 1] * fs[t]  # new cell state
        # NOTE(review): a textbook LSTM uses tanh(ss[t]) * os[t] here; this
        # variant omits the tanh — the backward pass below matches it.
        hs[t] = ss[t] * os[t]
        ys[t] = np.dot(Why, hs[t]) + by  # vocabulary logits
        ps[t] = softmax(ys[t])
        loss += -np.log(ps[t][targets[t], 0])  # cross-entropy of target char
    # Backward pass
    dWgs, dWis, dWfs, dWos = np.zeros_like(Wgs), np.zeros_like(Wis), np.zeros_like(Wfs), np.zeros_like(Wos)
    dbgs, dbis, dbfs, dbos = np.zeros_like(bgs), np.zeros_like(bis), np.zeros_like(bfs), np.zeros_like(bos)
    dWhy, dby = np.zeros_like(Why), np.zeros_like(by)
    dh_next = np.zeros_like(hprev)
    ds_next = np.zeros_like(sprev)
    for t in reversed(range(seq_length)):
        # Backprop through the gradients of loss and softmax
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1  # d(loss)/d(logits) for softmax + cross-entropy
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        # Hidden-state gradient: from the output layer plus the next step.
        dh = np.dot(Why.T, dy) + dh_next
        # Cell-state gradient: through h = s*o plus the carry from t+1.
        ds = os[t] * dh + ds_next
        do = ss[t] * dh
        di = gs[t] * ds
        dg = iis[t] * ds
        df = ss[t - 1] * ds
        # Push through the gate nonlinearities (derivatives via outputs).
        di_input = sigmoid_derivative(iis[t]) * di
        df_input = sigmoid_derivative(fs[t]) * df
        do_input = sigmoid_derivative(os[t]) * do
        dg_input = tanh_derivative(gs[t]) * dg
        xc = np.vstack((xs[t], hs[t - 1]))
        # Per-timestep weights, so assignment (not +=) is correct here.
        dWis[t] = np.outer(di_input, xc)
        dWfs[t] = np.outer(df_input, xc)
        dWos[t] = np.outer(do_input, xc)
        dWgs[t] = np.outer(dg_input, xc)
        dbis[t] = di_input
        dbfs[t] = df_input
        dbos[t] = do_input
        dbgs[t] = dg_input
        # Gradient w.r.t. the concatenated [x; h_prev] input.
        dxc = np.zeros_like(xc)
        dxc += np.dot(Wis[t].T, di_input)
        dxc += np.dot(Wfs[t].T, df_input)
        dxc += np.dot(Wos[t].T, do_input)
        dxc += np.dot(Wgs[t].T, dg_input)
        # Carry cell/hidden gradients to the previous timestep.
        ds_next = ds * fs[t]
        dh_next = dxc[vocab_size:]
    # Clip to mitigate exploding gradients.
    for dparam in [dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby]:
        np.clip(dparam, -5, 5, out=dparam)
    return loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, hs[seq_length - 1], ss[seq_length - 1]
def sample(h, s, seed_ix, n):
    """Sample a sequence of ``n`` character indices from the model.

    Starts from hidden state ``h``, cell state ``s`` and the seed character
    ``seed_ix``; the per-timestep weight banks are cycled modulo
    ``seq_length``.
    """
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1
    sampled = []
    for step in range(n):
        xc = np.vstack((x, h))
        w = step % seq_length  # which per-timestep weight bank to use
        g = np.tanh(np.dot(Wgs[w], xc) + bgs[w])
        i_gate = sigmoid(np.dot(Wis[w], xc) + bis[w])
        f_gate = sigmoid(np.dot(Wfs[w], xc) + bfs[w])
        o_gate = sigmoid(np.dot(Wos[w], xc) + bos[w])
        s = g * i_gate + s * f_gate
        h = s * o_gate
        y = np.dot(Why, h) + by
        p = softmax(y)
        ix = np.random.choice(range(vocab_size), p=p.ravel())
        x = np.zeros((vocab_size, 1))
        x[ix] = 1
        sampled.append(ix)
    return sampled
def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    neg_exp = np.exp(-x)
    return 1. / (1. + neg_exp)
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed via its output x: x * (1 - x)."""
    complement = 1. - x
    return x * complement
def tanh_derivative(x):
    """Derivative of tanh, expressed via its output x: 1 - x^2."""
    return 1. - (x * x)
def softmax(x):
    """Numerically stable softmax over all elements of x.

    Subtracting max(x) before exponentiating leaves the result unchanged
    mathematically but prevents overflow to inf/nan for large logits
    (np.exp overflows for arguments above ~709).
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def gradCheck(inputs, targets, hprev, sprev):
    """Compare analytic gradients from lossFun against numerical estimates.

    For each parameter tensor, perturbs ``num_checks`` random elements by
    +/- ``delta``, recomputes the loss both ways, and prints the numerical
    gradient, the analytic gradient and their relative error (should be
    tiny, ~1e-7, when the backward pass is correct).  Parameters are
    modified in place and restored after each probe.
    """
    from random import uniform
    global Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by
    num_checks, delta = 10, 1e-4
    loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, _, _ = lossFun(inputs, targets, hprev, sprev)
    for param, dparam, name in zip([Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by],
                                   [dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby],
                                   ['Wgs', 'Wis', 'Wfs', 'Wos', 'bgs', 'bis', 'bfs', 'bos', 'Why', 'by']):
        s0 = dparam.shape
        s1 = param.shape
        assert s0 == s1, f"Error dims don't match {s0} and {s1}."
        print(name)
        for i in range(num_checks):
            # Pick a random flat index into the parameter tensor.
            ri = int(uniform(0, param.size))
            # evaluate cost at [x + delta] and [x - delta]
            old_val = param.flat[ri]
            param.flat[ri] = old_val + delta
            cg0, _, _, _, _, _, _, _, _, _, _, _, _ = lossFun(inputs, targets, hprev, sprev)
            param.flat[ri] = old_val - delta
            cg1, _, _, _, _, _, _, _, _, _, _, _, _ = lossFun(inputs, targets, hprev, sprev)
            param.flat[ri] = old_val  # reset old value for this parameter
            # fetch both numerical and analytic gradient
            grad_analytic = dparam.flat[ri]
            # Central difference estimate of the derivative.
            grad_numerical = (cg0 - cg1) / (2 * delta)
            # Relative error normalised by the combined gradient magnitude.
            rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
            print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
def basicGradCheck():
    """Run gradCheck on the first training window of the corpus."""
    inputs = [char_to_ix[c] for c in data[:seq_length]]
    targets = [char_to_ix[c] for c in data[1:seq_length + 1]]
    zero_h = np.zeros((hidden_size, 1))  # fresh RNN memory
    zero_s = np.zeros((hidden_size, 1))
    gradCheck(inputs, targets, zero_h, zero_s)
# Uncomment this to run a basic gradient check.
# basicGradCheck()
n, p = 0, 0
mWgs, mWis, mWfs, mWos = np.zeros_like(Wgs), np.zeros_like(Wis), np.zeros_like(Wfs), np.zeros_like(Wos)
mbgs, mbis, mbfs, mbos = np.zeros_like(bgs), np.zeros_like(bis), np.zeros_like(bfs), np.zeros_like(bos)
mWhy, mby = np.zeros_like(Why), np.zeros_like(by)
smooth_loss = -np.log(1.0 / vocab_size) * seq_length
MAX_DATA = 1000000
while p < MAX_DATA:
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size, 1)) # reset RNN memory
sprev = np.zeros((hidden_size, 1))
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
if n % 1000 == 0:
sample_ix = sample(hprev, sprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt,))
loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, hprev, sprev = lossFun(inputs, targets, hprev, sprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 200 == 0: print('iter %d (p=%d), loss: %f' % (n, p, smooth_loss))
for param, dparam, mem in zip([Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by],
[dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby],
[mWgs, mWis, mWfs, mWos, mbgs, mbis, mbfs, mbos, mWhy, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8)
p += seq_length
n += 1 | min-char-rnn/min_char_rnn_lstm.py | import numpy as np
import sys
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = 'input.txt'
with open(filename, 'r') as f:
data = f.read()
# All unique characters / entities in the data set.
chars = list(set(data))
chars.sort()
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
# Each character in the vocabulary gets a unique integer index assigned, in the
# half-open interval [0:N). These indices are useful to create one-hot encoded
# vectors that represent characters in numerical computations.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print('char_to_ix', char_to_ix)
print('ix_to_char', ix_to_char)
# Hyperparameters
hidden_size = 50 # size of hidden layer of neurons
seq_length = 16 # number of steps to unroll the RNN for
learning_rate = 1e-1
ub, lb = 0.1, -0.1
# LSTM
Wgs = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wis = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wfs = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wos = np.random.randn(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
bgs = np.zeros((seq_length, hidden_size, 1))
bis = np.zeros((seq_length, hidden_size, 1))
bfs = np.zeros((seq_length, hidden_size, 1))
bos = np.zeros((seq_length, hidden_size, 1))
# Fully-connected
Why = np.random.randn(vocab_size, hidden_size) * (ub - lb) + lb
by = np.zeros((vocab_size, 1))
def lossFun(inputs, targets, hprev, sprev):
assert len(inputs) == seq_length
assert len(targets) == seq_length
xs, hs, ss, ps, ys = {}, {}, {}, {}, {}
gs, iis, fs, os = {}, {}, {}, {} # the `iis` here should be `is`, unfortunately `is` is a keyword in python
# Initial incoming state.
hs[-1] = np.copy(hprev)
ss[-1] = np.copy(sprev)
loss = 0
# Forward pass
for t in range(seq_length):
xs[t] = np.zeros((vocab_size, 1))
xs[t][inputs[t]] = 1
xc = np.vstack((xs[t], hs[t - 1]))
gs[t] = np.tanh(np.dot(Wgs[t], xc) + bgs[t])
iis[t] = sigmoid(np.dot(Wis[t], xc) + bis[t])
fs[t] = sigmoid(np.dot(Wfs[t], xc) + bfs[t])
os[t] = sigmoid(np.dot(Wos[t], xc) + bos[t])
ss[t] = gs[t] * iis[t] + ss[t - 1] * fs[t]
hs[t] = ss[t] * os[t]
ys[t] = np.dot(Why, hs[t]) + by
ps[t] = softmax(ys[t])
loss += -np.log(ps[t][targets[t], 0])
# Backward pass
dWgs, dWis, dWfs, dWos = np.zeros_like(Wgs), np.zeros_like(Wis), np.zeros_like(Wfs), np.zeros_like(Wos)
dbgs, dbis, dbfs, dbos = np.zeros_like(bgs), np.zeros_like(bis), np.zeros_like(bfs), np.zeros_like(bos)
dWhy, dby = np.zeros_like(Why), np.zeros_like(by)
dh_next = np.zeros_like(hprev)
ds_next = np.zeros_like(sprev)
for t in reversed(range(seq_length)):
# Backprop through the gradients of loss and softmax
dy = np.copy(ps[t])
dy[targets[t]] -= 1
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dh_next
ds = os[t] * dh + ds_next
do = ss[t] * dh
di = gs[t] * ds
dg = iis[t] * ds
df = ss[t - 1] * ds
di_input = sigmoid_derivative(iis[t]) * di
df_input = sigmoid_derivative(fs[t]) * df
do_input = sigmoid_derivative(os[t]) * do
dg_input = tanh_derivative(gs[t]) * dg
xc = np.vstack((xs[t], hs[t - 1]))
dWis[t] = np.outer(di_input, xc)
dWfs[t] = np.outer(df_input, xc)
dWos[t] = np.outer(do_input, xc)
dWgs[t] = np.outer(dg_input, xc)
dbis[t] = di_input
dbfs[t] = df_input
dbos[t] = do_input
dbgs[t] = dg_input
dxc = np.zeros_like(xc)
dxc += np.dot(Wis[t].T, di_input)
dxc += np.dot(Wfs[t].T, df_input)
dxc += np.dot(Wos[t].T, do_input)
dxc += np.dot(Wgs[t].T, dg_input)
ds_next = ds * fs[t]
dh_next = dxc[vocab_size:]
for dparam in [dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby]:
np.clip(dparam, -5, 5, out=dparam)
return loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, hs[seq_length - 1], ss[seq_length - 1]
def sample(h, s, seed_ix, n):
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
xc = np.vstack((x, h))
tt = t % seq_length
g = np.tanh(np.dot(Wgs[tt], xc) + bgs[tt])
i = sigmoid(np.dot(Wis[tt], xc) + bis[tt])
f = sigmoid(np.dot(Wfs[tt], xc) + bfs[tt])
o = sigmoid(np.dot(Wos[tt], xc) + bos[tt])
s = g * i + s * f
h = s * o
y = np.dot(Why, h) + by
p = softmax(y)
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^-x)."""
    denom = 1. + np.exp(-x)
    return 1. / denom
def sigmoid_derivative(x):
    """Derivative of the sigmoid, given x = sigmoid(z) (the activation, not z)."""
    return (1. - x) * x
def tanh_derivative(x):
    """Derivative of tanh, given x = tanh(z) (the activation, not z)."""
    return 1. - x ** 2
def softmax(x):
    """Numerically stable softmax over all entries of x.

    BUG FIX: the original exponentiated x directly, which overflows to
    inf/nan for large logits.  Subtracting the maximum first leaves the
    result mathematically unchanged (the e^-max factor cancels in the
    ratio) but keeps every exponent <= 0.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def gradCheck(inputs, targets, hprev, sprev):
    """Compare analytic gradients from lossFun against numerical estimates.

    For a handful of randomly chosen entries in every parameter tensor,
    perturbs the entry by +/-delta, re-evaluates the loss, and prints the
    relative error between the centered-difference gradient and the one
    produced by backpropagation.
    """
    from random import uniform
    global Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by
    num_checks, delta = 10, 1e-4
    loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, _, _ = lossFun(inputs, targets, hprev, sprev)
    params = [Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by]
    grads = [dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby]
    names = ['Wgs', 'Wis', 'Wfs', 'Wos', 'bgs', 'bis', 'bfs', 'bos', 'Why', 'by']
    for param, dparam, name in zip(params, grads, names):
        s0 = dparam.shape
        s1 = param.shape
        assert s0 == s1, f"Error dims don't match {s0} and {s1}."
        print(name)
        for _check in range(num_checks):
            ri = int(uniform(0, param.size))
            saved = param.flat[ri]
            # Centered difference: evaluate the loss at saved +/- delta.
            param.flat[ri] = saved + delta
            cg0 = lossFun(inputs, targets, hprev, sprev)[0]
            param.flat[ri] = saved - delta
            cg1 = lossFun(inputs, targets, hprev, sprev)[0]
            param.flat[ri] = saved  # restore the perturbed entry
            grad_analytic = dparam.flat[ri]
            grad_numerical = (cg0 - cg1) / (2 * delta)
            rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
            print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
def basicGradCheck():
    """Run gradCheck on the first training window of the corpus."""
    seed_inputs = [char_to_ix[ch] for ch in data[:seq_length]]
    seed_targets = [char_to_ix[ch] for ch in data[1:seq_length + 1]]
    # Fresh (zeroed) hidden and cell state, as at the start of training.
    h0 = np.zeros((hidden_size, 1))
    s0 = np.zeros((hidden_size, 1))
    gradCheck(seed_inputs, seed_targets, h0, s0)
# Uncomment this to run a basic gradient check.
# basicGradCheck()
# Training driver: Adagrad over the per-timestep LSTM parameters.
n, p = 0, 0  # n: iteration counter; p: position (in chars) into the corpus
# Adagrad accumulators, one per parameter tensor.
mWgs, mWis, mWfs, mWos = np.zeros_like(Wgs), np.zeros_like(Wis), np.zeros_like(Wfs), np.zeros_like(Wos)
mbgs, mbis, mbfs, mbos = np.zeros_like(bgs), np.zeros_like(bis), np.zeros_like(bfs), np.zeros_like(bos)
mWhy, mby = np.zeros_like(Why), np.zeros_like(by)
# Expected loss at iteration 0 if the model predicted uniformly at random.
smooth_loss = -np.log(1.0 / vocab_size) * seq_length
MAX_DATA = 1000000
# NOTE(review): p is reset to 0 whenever the corpus is exhausted, so if
# len(data) <= MAX_DATA this loop never terminates — confirm this is the
# intended stopping rule (an n-based bound may have been meant).
while p < MAX_DATA:
    if p + seq_length + 1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size, 1)) # reset RNN memory
        sprev = np.zeros((hidden_size, 1))
        p = 0 # go from start of data
    inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
    targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
    # Periodically sample text from the model to eyeball training progress.
    if n % 1000 == 0:
        sample_ix = sample(hprev, sprev, inputs[0], 200)
        txt = ''.join(ix_to_char[ix] for ix in sample_ix)
        print('----\n %s \n----' % (txt,))
    loss, dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby, hprev, sprev = lossFun(inputs, targets, hprev, sprev)
    # Exponential moving average of the loss for smoother reporting.
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    if n % 200 == 0: print('iter %d (p=%d), loss: %f' % (n, p, smooth_loss))
    # Adagrad update: scale each step by the accumulated squared gradient.
    for param, dparam, mem in zip([Wgs, Wis, Wfs, Wos, bgs, bis, bfs, bos, Why, by],
                                  [dWgs, dWis, dWfs, dWos, dbgs, dbis, dbfs, dbos, dWhy, dby],
                                  [mWgs, mWis, mWfs, mWos, mbgs, mbis, mbfs, mbos, mWhy, mby]):
        mem += dparam * dparam
        param += -learning_rate * dparam / np.sqrt(mem + 1e-8)
    p += seq_length
    n += 1
import pytest
from bitvector import BitVector, BitField, ReadOnlyBitField
from itertools import combinations
def test_bitfield_create_no_args():
    """BitField requires an offset: constructing without one raises TypeError."""
    with pytest.raises(TypeError):
        BitField()
@pytest.mark.parametrize("offset", list(range(0, 128)))
def test_bitfield_create_with_offset(offset: int):
test = BitField(offset)
assert isinstance(test, BitField)
assert isinstance(test.field, slice)
assert (offset, offset + 1, 1) == test.field.indices(128)
@pytest.mark.parametrize("offset,width", list(combinations(range(1, 16), 2)))
def test_bitfield_create_with_offset_and_width(offset: int, width: int):
test = BitField(offset, width)
assert (offset, min(16, offset + width), 1) == test.field.indices(16)
def test_bitfield_in_bitvector_subclass_get_values(SixteenBitClass: type):
    """Reading byte and bit fields of a 16-bit vector initialized to 0xABCD.

    NOTE(review): pytest collects tests by name; a later definition with the
    same name in this module silently replaces an earlier one, so any other
    test here (e.g. the set-values test) must keep a distinct name.
    """
    test = SixteenBitClass(0xABCD)
    assert test == 0xABCD
    assert test.byte0 == 0xCD
    assert test.byte1 == 0xAB
    # 0xD
    assert test.bit0 == 1
    assert test.bit1 == 0
    assert test.bit2 == 1
    assert test.bit3 == 1
    # 0xC
    assert test.bit4 == 0
    assert test.bit5 == 0
    assert test.bit6 == 1
    assert test.bit7 == 1
    # 0xB
    assert test.bit8 == 1
    assert test.bit9 == 1
    assert test.bitA == 0
    assert test.bitB == 1
    # 0xA
    assert test.bitC == 0
    assert test.bitD == 1
    assert test.bitE == 0
    assert test.bitF == 1
def test_bitfield_in_bitvector_subclass_set_values(SixteenBitClass: type):
    """Writing byte fields of a zeroed 16-bit vector updates the underlying bits.

    BUG FIX: this function previously reused the name of the get-values test
    above; Python rebinds the name, so pytest only collected this definition
    and the read test was silently never run.  Renamed to reflect that this
    one exercises field *writes*.
    """
    test = SixteenBitClass(0x0000)
    assert test == 0
    test.byte0 = 0x55
    test.byte1 = 0xAA
    assert test.byte0 == 0x55
    assert test.byte1 == 0xAA
    # 0x5
    assert test.bit0 == 1
    assert test.bit1 == 0
    assert test.bit2 == 1
    assert test.bit3 == 0
    # 0x5
    assert test.bit4 == 1
    assert test.bit5 == 0
    assert test.bit6 == 1
    assert test.bit7 == 0
    # 0xA
    assert test.bit8 == 0
    assert test.bit9 == 1
    assert test.bitA == 0
    assert test.bitB == 1
    # 0xA
    assert test.bitC == 0
    assert test.bitD == 1
    assert test.bitE == 0
    assert test.bitF == 1
def test_readonly_bitfield_in_bitvector_subclass():
    """ReadOnlyBitField can be read like BitField but raises TypeError on writes."""
    class TestClass(BitVector):
        def __init__(self):
            super().__init__(value=0xDEADBEEF, size=32)
        # High half writable, low half read-only.
        dead = BitField(16, 16)
        beef = ReadOnlyBitField(0, 16)
    test = TestClass()
    assert test.dead == 0xDEAD
    assert test.beef == 0xBEEF
    # The writable field accepts a new value...
    test.dead = 0xcafe
    assert test.dead == 0xcafe
    # ...while the read-only field rejects assignment.
    with pytest.raises(TypeError):
        test.beef = 0x0bad
    assert test.beef == 0xbeef
from bitvector import BitVector, BitField, ReadOnlyBitField
from itertools import combinations
def test_bitfield_create_no_args():
with pytest.raises(TypeError):
BitField()
@pytest.mark.parametrize("offset", list(range(0, 128)))
def test_bitfield_create_with_offset(offset: int):
test = BitField(offset)
assert isinstance(test, BitField)
assert isinstance(test.field, slice)
assert (offset, offset + 1, 1) == test.field.indices(128)
@pytest.mark.parametrize("offset,width", list(combinations(range(1, 16), 2)))
def test_bitfield_create_with_offset_and_width(offset: int, width: int):
test = BitField(offset, width)
assert (offset, min(16, offset + width), 1) == test.field.indices(16)
def test_bitfield_in_bitvector_subclass_get_values(SixteenBitClass: type):
test = SixteenBitClass(0xABCD)
assert test == 0xABCD
assert test.byte0 == 0xCD
assert test.byte1 == 0xAB
# 0xD
assert test.bit0 == 1
assert test.bit1 == 0
assert test.bit2 == 1
assert test.bit3 == 1
# 0xC
assert test.bit4 == 0
assert test.bit5 == 0
assert test.bit6 == 1
assert test.bit7 == 1
# 0xB
assert test.bit8 == 1
assert test.bit9 == 1
assert test.bitA == 0
assert test.bitB == 1
# 0xA
assert test.bitC == 0
assert test.bitD == 1
assert test.bitE == 0
assert test.bitF == 1
def test_bitfield_in_bitvector_subclass_set_values(SixteenBitClass: type):
    """Writing byte fields of a zeroed 16-bit vector updates the underlying bits.

    BUG FIX: this function previously reused the name of the get-values test
    above; Python rebinds the name, so pytest only collected this definition
    and the read test was silently never run.  Renamed to reflect that this
    one exercises field *writes*.
    """
    test = SixteenBitClass(0x0000)
    assert test == 0
    test.byte0 = 0x55
    test.byte1 = 0xAA
    assert test.byte0 == 0x55
    assert test.byte1 == 0xAA
    # 0x5
    assert test.bit0 == 1
    assert test.bit1 == 0
    assert test.bit2 == 1
    assert test.bit3 == 0
    # 0x5
    assert test.bit4 == 1
    assert test.bit5 == 0
    assert test.bit6 == 1
    assert test.bit7 == 0
    # 0xA
    assert test.bit8 == 0
    assert test.bit9 == 1
    assert test.bitA == 0
    assert test.bitB == 1
    # 0xA
    assert test.bitC == 0
    assert test.bitD == 1
    assert test.bitE == 0
    assert test.bitF == 1
def test_readonly_bitfield_in_bitvector_subclass():
class TestClass(BitVector):
def __init__(self):
super().__init__(value=0xDEADBEEF, size=32)
dead = BitField(16, 16)
beef = ReadOnlyBitField(0, 16)
test = TestClass()
assert test.dead == 0xDEAD
assert test.beef == 0xBEEF
test.dead = 0xcafe
assert test.dead == 0xcafe
with pytest.raises(TypeError):
test.beef = 0x0bad
assert test.beef == 0xbeef | 0.752195 | 0.885086 |
import pygame
import constants as cons
from dungeon import Dungeon
class StatView():
    """Panel showing player statistics; currently just fills with a flat color."""
    def __init__(self, surface, pos_rect):
        self.surface = surface    # off-screen surface this view renders onto
        self.topleft = pos_rect   # screen position where the surface is blitted
        self.dirty = True         # NOTE(review): never cleared, so the fill runs every frame
    def draw(self, screen):
        """Repaint the panel (when dirty) and blit it onto the screen."""
        if self.dirty:
            self.surface.fill(pygame.color.Color("moccasin"))
        screen.blit(self.surface, self.topleft)
class Log():
    """Message-log panel; currently just fills with a flat color."""
    def __init__(self, surface, pos_rect):
        self.surface = surface    # off-screen surface this view renders onto
        self.topleft = pos_rect   # screen position where the surface is blitted
        self.dirty = True         # NOTE(review): never cleared, so the fill runs every frame
    def draw(self, screen):
        """Repaint the panel (when dirty) and blit it onto the screen."""
        if self.dirty:
            self.surface.fill(pygame.color.Color("navajowhite"))
        screen.blit(self.surface, self.topleft)
class Game():
    """Top-level application: owns the window, the views, and the main loop."""
    def __init__(self):
        pygame.init()
        # Window size in pixels, derived from tile size and screen tile counts.
        self.window_w = cons.TILE_D*cons.SCREEN_TW
        self.window_h = cons.TILE_D*cons.SCREEN_TH
        self.screensize = (self.window_w, self.window_h)
        self.screen = pygame.display.set_mode(self.screensize)
        self.running = True
        self.setup()
    def setup(self):
        """Create the dungeon, stat and log views, plus a test player position."""
        dungeonsurface = pygame.Surface(cons.MAP_DIM)
        self.dungeon = Dungeon(dungeonsurface, cons.MAP_POS, 50, 50)
        statsurface = pygame.Surface(cons.STAT_DIM)
        self.statview = StatView(statsurface, cons.STAT_POS)
        logsurface = pygame.Surface(cons.LOG_DIM)
        self.logview = Log(logsurface, cons.LOG_POS)
        # Test player
        self.px = 25
        self.py = 25
    def handle_events(self):
        """Drain the pygame event queue: quit, fullscreen toggle, movement keys."""
        events = pygame.event.get()
        for event in events:
            # Quit the game.
            if event.type == pygame.QUIT:
                self.running = False
                break
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                self.running = False
                break
            # Toggle fullscreen.
            # NOTE(review): the surface returned by set_mode is discarded here;
            # self.screen may reference a stale surface afterwards — verify.
            if event.type == pygame.KEYDOWN and event.key == pygame.K_f:
                if self.screen.get_flags() & pygame.FULLSCREEN:
                    pygame.display.set_mode(self.screensize)
                else:
                    pygame.display.set_mode(self.screensize, pygame.FULLSCREEN)
            # Move the player (no bounds or collision checks yet).
            if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                self.py -= 1
            if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                self.py += 1
            if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
                self.px -= 1
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
                self.px += 1
    def draw(self):
        """Render every view onto the display surface."""
        self.dungeon.draw(self.screen, self.px, self.py)
        self.statview.draw(self.screen)
        self.logview.draw(self.screen)
    def loop(self):
        """Main loop: handle input and redraw until self.running goes False.

        NOTE(review): there is no frame limiter — this busy-loops at full CPU;
        consider pygame.time.Clock().tick(fps).
        """
        while self.running:
            self.handle_events()
            self.draw()
            pygame.display.update()
        pygame.quit()
if __name__ == '__main__':
    # Entry point: build the game and run the event/draw loop until quit.
    game = Game()
    game.loop()
import constants as cons
from dungeon import Dungeon
class StatView():
def __init__(self, surface, pos_rect):
self.surface = surface
self.topleft = pos_rect
self.dirty = True
def draw(self, screen):
if self.dirty:
self.surface.fill(pygame.color.Color("moccasin"))
screen.blit(self.surface, self.topleft)
class Log():
def __init__(self, surface, pos_rect):
self.surface = surface
self.topleft = pos_rect
self.dirty = True
def draw(self, screen):
if self.dirty:
self.surface.fill(pygame.color.Color("navajowhite"))
screen.blit(self.surface, self.topleft)
class Game():
def __init__(self):
pygame.init()
self.window_w = cons.TILE_D*cons.SCREEN_TW
self.window_h = cons.TILE_D*cons.SCREEN_TH
self.screensize = (self.window_w, self.window_h)
self.screen = pygame.display.set_mode(self.screensize)
self.running = True
self.setup()
def setup(self):
dungeonsurface = pygame.Surface(cons.MAP_DIM)
self.dungeon = Dungeon(dungeonsurface, cons.MAP_POS, 50, 50)
statsurface = pygame.Surface(cons.STAT_DIM)
self.statview = StatView(statsurface, cons.STAT_POS)
logsurface = pygame.Surface(cons.LOG_DIM)
self.logview = Log(logsurface, cons.LOG_POS)
# Test player
self.px = 25
self.py = 25
def handle_events(self):
events = pygame.event.get()
for event in events:
# Quit the game.
if event.type == pygame.QUIT:
self.running = False
break
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
break
# Toggle fullscreen.
if event.type == pygame.KEYDOWN and event.key == pygame.K_f:
if self.screen.get_flags() & pygame.FULLSCREEN:
pygame.display.set_mode(self.screensize)
else:
pygame.display.set_mode(self.screensize, pygame.FULLSCREEN)
# Move the player.
if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
self.py -= 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
self.py += 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
self.px -= 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
self.px += 1
def draw(self):
self.dungeon.draw(self.screen, self.px, self.py)
self.statview.draw(self.screen)
self.logview.draw(self.screen)
def loop(self):
while self.running:
self.handle_events()
self.draw()
pygame.display.update()
pygame.quit()
if __name__ == '__main__':
game = Game()
game.loop() | 0.397237 | 0.194119 |
from packageManager import *
# this command enables us to download torch models
ssl._create_default_https_context = ssl._create_unverified_context
class ImageFolderWithPaths(datasets.ImageFolder):
    """Custom dataset that includes image file paths. Extends torchvision.datasets.ImageFolder
    """
    # override the __getitem__ method
    # __getitem__ method is the method that dataloaders calls
    def __getitem__(self, index):
        """Return (image, label, path) instead of ImageFolder's (image, label)."""
        # this is what ImageFolder normally returns
        original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
        # Image file path, taken from the (path, class_index) pairs in self.imgs.
        path = self.imgs[index][0]
        # Make a tuple that includes original and the path
        tuple_with_path = (original_tuple + (path,))
        return tuple_with_path
# function to extract features
def pooling_output(x):
    """Run x through the global model's layers up to and including 'avgpool'.

    Relies on the registration order of model._modules; stopping after the
    'avgpool' layer skips the final fully-connected classifier, so the output
    is the pooled feature map (2048 channels for ResNet-50).
    """
    global model
    for layer_name, layer in model._modules.items():
        x = layer(x)
        if layer_name == 'avgpool':
            break
    return x
# Transforms are made using the torchvision.transforms library.
# transforms.Compose allows to compose multiple transforms together so we can use more than one transformation.
# resizes the images to 224 x 224 (input size required by the ResNet)
# transforms.ToTensor() converts image into numbers.
# transforms.Normalize() subtracts the mean from each value and then divides by the standard deviation
# Preprocessing: resize to the ResNet input size, convert to a tensor, and
# normalize with the standard ImageNet channel statistics.
transforms_ = transforms.Compose([
    transforms.Resize(size=[224, 224], interpolation=2),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Root directory of the image dataset (ImageFolder layout: one subfolder per class).
data_dir = "/Users/peisch/code/WebScraper/ImageSearch/images"
dataset = ImageFolderWithPaths(data_dir, transforms_)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)
# Use the GPU when available (faiss itself runs on the CPU here).
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# ImageNet-pretrained ResNet-50 used as a frozen feature extractor.
model = models.resnet50(pretrained=True)
# NOTE(review): image_paths is collected but never written out, so faiss index
# positions cannot be mapped back to files later — confirm whether it should
# be saved alongside the index.
image_paths = []
# One pooled descriptor per image, flattened to a (1, 2048) row.
descriptors = []
model.to(DEVICE)
# Inference only: disable gradient tracking for speed and memory.
with torch.no_grad():
    model.eval()
    for inputs, labels, paths in dataloader:
        result = pooling_output(inputs.to(DEVICE))
        descriptors.append(result.cpu().view(1, -1).numpy())
        image_paths.append(paths)
        # No-op on CPU; frees cached GPU memory between batches.
        torch.cuda.empty_cache()
# Exact L2 index over the 2048-d ResNet-50 pooled features.
index = faiss.IndexFlatL2(2048)
# stack arrays in sequence vertically (row wise).
descriptors = np.vstack(descriptors)
index.add(descriptors)
# save the index object to output file
faiss.write_index(index, f"{data_dir}/faiss_index") | WebScraper/buildIndex.py | from packageManager import *
# this command enables us to download torch models
ssl._create_default_https_context = ssl._create_unverified_context
class ImageFolderWithPaths(datasets.ImageFolder):
"""Custom dataset that includes image file paths. Extends torchvision.datasets.ImageFolder
"""
# override the __getitem__ method
# __getitem__ method is the method that dataloaders calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
# Image file path
path = self.imgs[index][0]
# Make a tuple that includes original and the path
tuple_with_path = (original_tuple + (path,))
return tuple_with_path
# function to extract features
def pooling_output(x):
global model
for layer_name, layer in model._modules.items():
x = layer(x)
if layer_name == 'avgpool':
break
return x
# Transforms are made using the torchvision.transforms library.
# transforms.Compose allows to compose multiple transforms together so we can use more than one transformation.
# resizes the images to 224 x 224 (input size required by the ResNet)
# transforms.ToTensor() converts image into numbers.
# transforms.Normalize() subtracts the mean from each value and then divides by the standard deviation
transforms_ = transforms.Compose([
transforms.Resize(size=[224, 224], interpolation=2),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Load in each dataset and apply transformations using the torchvision.datasets as datasets library
# data_dir main directory containing our image dataset
data_dir = "/Users/peisch/code/WebScraper/ImageSearch/images"
dataset = ImageFolderWithPaths(data_dir, transforms_)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)
# use GPU if possible => here we use faiss-cpu
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# Get pretrained model using torchvision.models
model = models.resnet50(pretrained=True)
# iterate over data
# image_paths is to be saved since it contains information on index
image_paths = []
# descriptors: list of output vectors
descriptors = []
model.to(DEVICE)
# Tell torch not to calculate gradients
with torch.no_grad():
model.eval()
for inputs, labels, paths in dataloader:
result = pooling_output(inputs.to(DEVICE))
descriptors.append(result.cpu().view(1, -1).numpy())
image_paths.append(paths)
torch.cuda.empty_cache()
# build faiss index with fixed size
index = faiss.IndexFlatL2(2048)
# stack arrays in sequence vertically (row wise).
descriptors = np.vstack(descriptors)
index.add(descriptors)
# save the index object to output file
faiss.write_index(index, f"{data_dir}/faiss_index") | 0.897156 | 0.597549 |
import random
from contextlib import contextmanager
import six
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import (
rcParams,
colors
)
from mpl_toolkits.mplot3d import Axes3D
__all__ = [
"zoom_plot",
"plot",
"plot_predictions_3d",
'Palette',
'plot_clusters',
]
def sorted_color_maps():
    '''List of (name, hex) matplotlib colors sorted by HSV, without adjacent
    duplicates.

    This code is taken from:
    http://matplotlib.org/examples/color/named_colors.html
    '''
    colors_ = list(six.iteritems(colors.cnames))
    # Add the single letter colors.
    for name, rgb in six.iteritems(colors.ColorConverter.colors):
        hex_ = colors.rgb2hex(rgb)
        colors_.append((name, hex_))
    # Transform to hex color values.
    hex_ = [color[1] for color in colors_]
    # Get the rgb equivalent.
    rgb = [colors.hex2color(color) for color in hex_]
    # Get the hsv equivalent.
    hsv = [colors.rgb_to_hsv(color) for color in rgb]
    # Split the hsv values to sort.
    hue = [color[0] for color in hsv]
    sat = [color[1] for color in hsv]
    val = [color[2] for color in hsv]
    # Sort by hue, saturation and value.
    ind = np.lexsort((val, sat, hue))
    sorted_colors = [colors_[i] for i in ind]
    # Drop a color whose hex equals its successor's: after sorting, aliases of
    # the same color (e.g. single-letter names) end up adjacent.
    sorted_colors = [
        c_1
        for (c_1, c_2) in zip(sorted_colors[:-1], sorted_colors[1:])
        if c_1[1] != c_2[1]]
    return sorted_colors
class Palette(object):
    """Groups named matplotlib colors (sorted by HSV) into coarse families.

    GROUPS maps a *boundary* color name to a family label: walking the
    HSV-sorted color list, every color from a boundary name onward is
    appended to that family's list attribute until the next boundary.
    """
    SORTED_COLORS = sorted_color_maps()
    GROUPS = (
        # (boundary color_name in SORTED_COLORS, group_name)
        ('k', 'GRAY'),
        ('whitesmoke', 'WHITE'),
        ('rosybrown', 'BROWN'),
        ('firebrick', 'RED'),
        ('sienna', 'SIENNA'),
        ('antiquewhite', 'WHITE'),
        ('orange', 'ORANGE'),
        ('y', 'GREEN'),
        ('mediumaquamarine', 'BLUE'),
        ('mediumpurple', 'PURPLE')
    )
    def __init__(self):
        self.make_palette()
    def make_palette(self):
        """Populate one list attribute per group with its member color names."""
        group_names = dict(self.GROUPS)
        # IDIOM FIX: a plain loop instead of a side-effect list comprehension.
        for _cname, grp in self.GROUPS:
            setattr(self, grp, [])
        current_group = None
        for cname, ccode in self.SORTED_COLORS:
            group_name = group_names.get(cname)
            if group_name is not None:
                current_group = getattr(self, group_name, current_group)
            if current_group is None:
                # Colors before the first boundary belong to no group.
                continue
            current_group.append(cname)
# Rebind the class name to a singleton instance; consumers use e.g. Palette.GREEN.
Palette = Palette()
@contextmanager
def zoom_plot(w, h):
    '''Temporarily change the matplotlib figure size to (w, h) inches.

    BUG FIX: restore the previous size in a ``finally`` block so the
    rcParams figure size is put back even when the with-body raises.
    '''
    shape = rcParams['figure.figsize']
    rcParams['figure.figsize'] = w, h
    try:
        yield
    finally:
        rcParams['figure.figsize'] = shape
@contextmanager
def d3():
    """Enable mpld3 notebook rendering for the duration of the with-block.

    BUG FIX: disable notebook mode in a ``finally`` block so an exception
    in the body cannot leave mpld3 enabled globally.
    """
    import mpld3
    mpld3.enable_notebook()
    try:
        yield
    finally:
        mpld3.disable_notebook()
def plot(X, Y, label=None, style='r-', grid=True, title=None, loc=None,
         label_xy=('x', 'y'), show=True):
    """Draw a single 2-D line plot with optional label, title and legend.

    loc, when given, both positions and enables the legend; show=False lets
    callers compose this into subplots before displaying.
    """
    if label:
        plt.plot(X, Y, style, label=label)
    else:
        plt.plot(X, Y, style)
    x_name, y_name = label_xy
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    if title:
        plt.title(title)
    if loc:
        plt.legend(loc=loc)
    plt.grid(grid)
    if show:
        plt.show()
def subplots(h, v=1, order='v', sharex=True, sharey=True,
             plots=()):
    """Lay out ``plots`` on an h-by-v grid of shared axes and show the figure.

    Each element of ``plots`` is a (plotter, args, kwargs) triple; every
    plotter is invoked with show=False on successive axes, filling the grid
    column-first ('v'/'vertical') or row-first ('h'/'horizontal').

    NOTE(review): the axes indexing assumes a 2-D axes array; with h == 1 or
    v == 1, plt.subplots returns a 1-D array (or a single Axes) and
    ``axes[i][j]`` will fail — confirm the intended usage.
    """
    assert (order in ('v', 'vertical', 'h', 'horizontal')), (
        'order must be either vertical or horizontal')
    f, axes = plt.subplots(h, v, sharex=sharex, sharey=sharey)
    def _axes():
        # Yield axes in the requested fill order.
        I, J = (h, v) if order == 'v' else (v, h)
        for i in range(I):
            for j in range(J):
                axs = axes[i][j] if order == 'v' else axes[j][i]
                yield axs
    for (axs, (plotter, args, kwargs)) in zip(_axes(), plots):
        plt.axes(axs) # set axs as current active axes
        kwargs['show'] = False
        plotter(*args, **kwargs)
    f.tight_layout(pad=1.3)
    plt.show()
def plot_predictions_3d(X, Y, predictions, labels,
                        mirror=False,
                        title=""):
    '''
    Plot the [predictions] against the output [Y] projected by
    two X.

    Uses the first two columns of X as the horizontal axes; ``labels`` gives
    their two names, and ``mirror`` swaps which feature is on which axis.
    '''
    assert len(labels) == 2, "we are only plotting a 3D projection with 2 features"
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # plot the reality
    f1, f2 = labels
    x1, x2 = X[:, 0], X[:, 1]
    if mirror:
        f1, f2 = f2, f1
        x1, x2 = x2, x1
    ax.scatter(x1, x2, Y, c='r', marker='o', label='actual univ GPA')
    # plot the predition
    ax.scatter(x1, x2, predictions, c='g', label='predicted univ GPA')
    ax.set_xlabel(f1)
    ax.set_ylabel(f2)
    ax.set_zlabel('prediction VS. example')
    plt.title(title)
    plt.legend()
    plt.show()
def plot_clusters(x, y, k, palette=Palette.GREEN):
    """Scatter-plot k clusters, one randomly sampled palette color per cluster.

    x: (n, 2) array of points; y: per-point cluster labels in range(k).
    random.sample raises ValueError if k exceeds len(palette).
    """
    colors = random.sample(palette, k)
    for i in range(k):
        # Select the points assigned to cluster i.
        x_i = x[np.nonzero(y==i)]
        plt.scatter(
            x_i[:, 0], x_i[:, 1],
            marker='o', facecolors='none', edgecolors=colors[i])
    plt.grid(True)
    plt.show()
from contextlib import contextmanager
import six
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import (
rcParams,
colors
)
from mpl_toolkits.mplot3d import Axes3D
__all__ = [
"zoom_plot",
"plot",
"plot_predictions_3d",
'Palette',
'plot_clusters',
]
def sorted_color_maps():
'''List of color name and their hex values sorted by HSV.
This code is taken from:
http://matplotlib.org/examples/color/named_colors.html
'''
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
# Transform to hex color values.
hex_ = [color[1] for color in colors_]
# Get the rgb equivalent.
rgb = [colors.hex2color(color) for color in hex_]
# Get the hsv equivalent.
hsv = [colors.rgb_to_hsv(color) for color in rgb]
# Split the hsv values to sort.
hue = [color[0] for color in hsv]
sat = [color[1] for color in hsv]
val = [color[2] for color in hsv]
# Sort by hue, saturation and value.
ind = np.lexsort((val, sat, hue))
sorted_colors = [colors_[i] for i in ind]
sorted_colors = [
c_1
for (c_1, c_2) in zip(sorted_colors[:-1], sorted_colors[1:])
if c_1[1] != c_2[1]]
return sorted_colors
class Palette(object):
SORTED_COLORS = sorted_color_maps()
GROUPS = (
#(color_name in SORTED_COLORS, group_name)
('k', 'GRAY'),
('whitesmoke', 'WHITE'),
('rosybrown', 'BROWN'),
('firebrick', 'RED'),
('sienna', 'SIENNA'),
('antiquewhite', 'WHITE'),
('orange', 'ORANGE'),
('y', 'GREEN'),
('mediumaquamarine', 'BLUE'),
('mediumpurple', 'PURPLE')
)
def __init__(self):
self.make_palette()
def make_palette(self):
group_names = dict(self.GROUPS)
[setattr(self, grp, []) for (cname, grp) in self.GROUPS]
current_group = None
for (cname, ccode) in self.SORTED_COLORS:
group_name = group_names.get(cname)
if not (group_name is None):
current_group = getattr(self, group_name, current_group)
if current_group is None:
continue
current_group.append(cname)
Palette = Palette()
@contextmanager
def zoom_plot(w, h):
'''Temprarily change the plot size.
'''
shape = rcParams['figure.figsize']
rcParams['figure.figsize'] = w, h
yield
rcParams['figure.figsize'] = shape
@contextmanager
def d3():
import mpld3
mpld3.enable_notebook()
yield
mpld3.disable_notebook()
def plot(X, Y, label=None, style='r-', grid=True, title=None, loc=None,
label_xy=('x', 'y'), show=True):
if label:
plt.plot(X, Y, style, label=label)
else:
plt.plot(X, Y, style)
plt.xlabel(label_xy[0])
plt.ylabel(label_xy[1])
if title: plt.title(title)
if loc: plt.legend(loc=loc)
plt.grid(grid)
if show: plt.show()
def subplots(h, v=1, order='v', sharex=True, sharey=True,
plots=()):
assert (order in ('v', 'vertical', 'h', 'horizontal')), (
'order must be either vertical or horizontal')
f, axes = plt.subplots(h, v, sharex=sharex, sharey=sharey)
def _axes():
I, J = (h, v) if order == 'v' else (v, h)
for i in range(I):
for j in range(J):
axs = axes[i][j] if order == 'v' else axes[j][i]
yield axs
for (axs, (plotter, args, kwargs)) in zip(_axes(), plots):
plt.axes(axs) # set axs as current active axes
kwargs['show'] = False
plotter(*args, **kwargs)
f.tight_layout(pad=1.3)
plt.show()
def plot_predictions_3d(X, Y, predictions, labels,
mirror=False,
title=""):
'''
Plot the [predictions] against the output [Y] projected by
two X.
'''
assert len(labels) == 2, "we are only plotting a 3D projection with 2 features"
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# plot the reality
f1, f2 = labels
x1, x2 = X[:, 0], X[:, 1]
if mirror:
f1, f2 = f2, f1
x1, x2 = x2, x1
ax.scatter(x1, x2, Y, c='r', marker='o', label='actual univ GPA')
# plot the predition
ax.scatter(x1, x2, predictions, c='g', label='predicted univ GPA')
ax.set_xlabel(f1)
ax.set_ylabel(f2)
ax.set_zlabel('prediction VS. example')
plt.title(title)
plt.legend()
plt.show()
def plot_clusters(x, y, k, palette=Palette.GREEN):
colors = random.sample(palette, k)
for i in range(k):
x_i = x[np.nonzero(y==i)]
plt.scatter(
x_i[:, 0], x_i[:, 1],
marker='o', facecolors='none', edgecolors=colors[i])
plt.grid(True)
plt.show() | 0.781372 | 0.541894 |
import time
import curses
import sys
from math import sqrt
try:
from rpi.burnin.ADCPi import ADCPi
except Exception:
sys.path.insert(0, "..")
from rpi.burnin.ADCPi import ADCPi
def _open_adcs():
    """Open the four ADCPi expanders; return them as a list, or None on failure."""
    addresses = [(0x6E, 0x6F, "ADC1"), (0x6C, 0x6D, "ADC2"),
                 (0x6A, 0x6B, "ADC3"), (0x68, 0x69, "ADC4")]
    adcs = []
    for addr_a, addr_b, label in addresses:
        try:
            adcs.append(ADCPi(addr_a, addr_b, 12))
        except Exception:
            print("Failed to open i2c to %s!" % label)
            return None
    return adcs

def _channel_names():
    """Return the 32 signal names in ADC channel order (3 trailing BLANKs)."""
    names = ["i_SENSE_MON" + str(n + 1) for n in range(8)]
    names += ["V_SENSE_MON" + str(n + 1) for n in range(8)]
    names += ["V_REGUL_OUT" + str(n + 1) for n in range(8)]
    names += ["Vin_FPGA_3V3", "Vin_FPGA_1V5", "V_OPAMP_RAIL",
              "PLAT_THERM_A", "PLAT_THERM_B", "BLANK", "BLANK", "BLANK"]
    return names

def _pt1000_temperature(reading, vin):
    """Convert a PT1000 divider reading (volts) to degrees C.

    The thermometer forms a divider with a 1 kOhm resistor fed from ``vin``;
    the 10k:6.8k path on the ADC input is another route to ground and is
    compensated for.  Returns -99 when the divider difference is zero and
    -98 when the computed resistance is non-positive (sentinel values shown
    on the display instead of a temperature).
    """
    if vin - reading == 0:
        return -99
    RT = reading * 1000 / (vin - reading)
    if RT <= 0:
        # BUG FIX: the original assigned lowercase ``temp`` here, leaving
        # ``Temp`` stale from a previous reading (or undefined on the first
        # one, crashing out of the monitor loop).
        return -98
    # The voltage divider (10k:6.8k) on the ADC is another path to ground
    # and changes R2 -- fix it.
    RT = 1 / (1 / RT - 1 / 16800)
    R0 = 1000.0
    c = R0 - RT
    b = 3.9083e-3 * R0
    a = -5.775e-7 * R0
    disc = b * b - 4 * a * c
    if disc < 0:
        disc = 0
    return (-b + sqrt(disc)) / (2 * a)

def main():
    """Continuously read 32 ADC channels and display them with curses.

    Opens four ADCPi expanders, maps each of the 32 channels to a signal
    name, then loops forever: read every channel, convert current-sense
    channels to amps and platinum-thermometer channels to degrees C, and
    refresh the curses screen.  Exits on Ctrl-C.
    """
    stdscr = curses.initscr()
    try:
        the_adcs = _open_adcs()
        if the_adcs is None:
            return
        try:
            # Arm channel 1 on every expander so the first reads are valid.
            for adc in the_adcs:
                adc.arm_channel(1)
            ch_assignments = _channel_names()
            # Channel label -> [signal name, formatted reading, raw reading (V)].
            collect = {
                "ADC Channel " + str(i): [ch_assignments[i - 1], "blank", 0]
                for i in range(1, 33)
            }
            while True:
                this_time = time.time()
                # Read every channel across all four expanders.
                for chNum in range(1, 9):
                    for nADC in range(4):
                        entry = collect["ADC Channel " + str(8 * nADC + chNum)]
                        # BLANK channels are placeholders; skip the hardware read.
                        if "BLANK" in entry[0]:
                            reading = 0
                        else:
                            reading = the_adcs[nADC].read_curr_voltage()
                        # Arm the next channel so its conversion overlaps processing.
                        the_adcs[nADC].arm_channel((chNum % 8) + 1)
                        name = ch_assignments[8 * nADC + chNum - 1]
                        if "i_SENSE" in name:
                            # Current sense: offset by the 1.5 V reference rail.
                            V_ref = collect["ADC Channel "
                                            + str(ch_assignments.index("Vin_FPGA_1V5") + 1)][2]
                            i_val = (reading * (17310 / 16800) - V_ref) / 0.16667
                            entry[1] = str.format("{0:0.2f}", i_val) + "A "
                        elif "PLAT_THERM" in name:
                            Vin = collect["ADC Channel "
                                          + str(ch_assignments.index("Vin_FPGA_3V3") + 1)][2]
                            Temp = _pt1000_temperature(reading, Vin)
                            entry[1] = str.format("{0:0.1f}", Temp) + "C "
                        else:
                            entry[1] = str.format("{0:0.3f}", reading) + "V "
                        entry[2] = reading
                # Header row: timestamp plus the scan duration in milliseconds.
                stdscr.addstr(
                    0,
                    0,
                    time.asctime()
                    + " ("
                    + str.format("{0:0.1f}", 1000 * (time.time() - this_time))
                    + ")",
                )
                counter = 0
                offset = 1
                for key in collect:
                    # Leave a blank row after every group of eight channels.
                    if (counter % 9) == 0:
                        counter += 2
                    else:
                        counter += 1
                    if "BLANK" in collect[key][0]:
                        continue
                    stdscr.addstr(
                        counter + offset,
                        0,
                        key + "\t" + collect[key][0] + "\t" + collect[key][1],
                    )
                # NOTE(review): this debug write overwrites the start of the
                # timestamp line above; kept to preserve the original display.
                stdscr.addstr(0, 0, "TEST")
                stdscr.refresh()
        except KeyboardInterrupt:
            # CTRL + C ends the program cleanly.
            pass
        except Exception:
            print("exception ", sys.exc_info())
    finally:
        # BUG FIX: restore the terminal; initscr() without endwin() left the
        # terminal stuck in curses mode after the program exited.
        curses.endwin()
if __name__ == "__main__":
curses.wrapper(main()) | bin/LvrMon.py |
import time
import curses
import sys
from math import sqrt
try:
from rpi.burnin.ADCPi import ADCPi
except Exception:
sys.path.insert(0, "..")
from rpi.burnin.ADCPi import ADCPi
def main():
    """Poll 32 ADC channels on four ADC Pi boards and display them via curses.

    Each reading is converted to a current (i_SENSE_* channels), a
    temperature (PLAT_THERM_* channels, PT1000 quadratic inversion) or a
    raw voltage, and redrawn on the terminal in an endless loop until
    Ctrl+C.  (The original placed this docstring after the first
    statement, where it was a dead string literal.)
    """
    stdscr = curses.initscr()

    # Open the four ADC Pi expanders; abort early if any i2c device fails.
    i2c_pairs = [(0x6E, 0x6F), (0x6C, 0x6D), (0x6A, 0x6B), (0x68, 0x69)]
    the_adcs = []
    for board, (addr_a, addr_b) in enumerate(i2c_pairs, start=1):
        try:
            the_adcs.append(ADCPi(addr_a, addr_b, 12))
        except Exception:
            print("Failed to open i2c to ADC{}!".format(board))
            return
    try:
        # Arm channel 1 on every board so the first read returns real data.
        for adc in the_adcs:
            adc.arm_channel(1)

        # Human-readable name for each of the 32 channels, in wiring order.
        ch_assignments = []
        for nums in range(8):
            ch_assignments.append("i_SENSE_MON" + str(nums + 1))
        for nums in range(8):
            ch_assignments.append("V_SENSE_MON" + str(nums + 1))
        for nums in range(8):
            ch_assignments.append("V_REGUL_OUT" + str(nums + 1))
        ch_assignments.extend(
            [
                "Vin_FPGA_3V3",
                "Vin_FPGA_1V5",
                "V_OPAMP_RAIL",
                "PLAT_THERM_A",
                "PLAT_THERM_B",
                "BLANK",
                "BLANK",
                "BLANK",
            ]
        )

        # Channel key -> [assigned name, formatted reading, raw voltage].
        collect = {
            "ADC Channel " + str(i): [ch_assignments[i - 1], "blank", 0]
            for i in range(1, 33)
        }

        while True:
            this_time = time.time()
            # Read every channel of every board.  Arming the next channel
            # immediately after each read hides the conversion latency.
            # (Unused timing locals from the original were removed.)
            for chNum in range(1, 9):
                for nADC in range(4):
                    key = "ADC Channel " + str(8 * nADC + chNum)
                    if "BLANK" in collect[key][0]:
                        reading = 0
                    else:
                        reading = the_adcs[nADC].read_curr_voltage()
                    the_adcs[nADC].arm_channel((chNum % 8) + 1)

                    name = ch_assignments[8 * nADC + chNum - 1]
                    if "i_SENSE" in name:
                        # Current monitor: correct for the input divider and
                        # subtract the 1V5 reference before scaling to amps.
                        V_ref = collect[
                            "ADC Channel "
                            + str(ch_assignments.index("Vin_FPGA_1V5") + 1)
                        ][2]
                        i_val = (reading * (17310 / 16800) - V_ref) / 0.16667
                        collect[key][1] = str.format("{0:0.2f}", i_val) + "A "
                    elif "PLAT_THERM" in name:
                        # PT1000 thermometer read through a resistor divider
                        # fed from the 3V3 rail.
                        Vin = collect[
                            "ADC Channel "
                            + str(ch_assignments.index("Vin_FPGA_3V3") + 1)
                        ][2]
                        RT = 0
                        if Vin - reading != 0:
                            RT = reading * 1000 / (Vin - reading)
                            if RT > 0:
                                # The voltage divider (10k:6.8k) on the ADC is
                                # another path to ground and changes R2 --
                                # undo its effect.
                                RT = 1 / (1 / RT - 1 / 16800)
                                # Invert R(T) = R0*(1 + b*T + a*T^2).
                                R0 = 1000.0
                                c = R0 - RT
                                b = 3.9083e-3 * R0
                                a = -5.775e-7 * R0
                                disc = b * b - 4 * a * c
                                if disc < 0:
                                    disc = 0
                                Temp = (-b + sqrt(disc)) / (2 * a)
                            else:
                                # BUG FIX: original assigned lowercase
                                # "temp", leaving Temp unbound on this path
                                # (NameError swallowed by the broad except).
                                Temp = -98
                        else:
                            Temp = -99
                        collect[key][1] = str.format("{0:0.1f}", Temp) + "C "
                    else:
                        collect[key][1] = str.format("{0:0.3f}", reading) + "V "
                    collect[key][2] = reading

            # Redraw: header line with timestamp and loop time (ms), then one
            # row per non-blank channel, with an extra gap every 8 rows.
            counter = 0
            stdscr.addstr(
                0,
                0,
                time.asctime()
                + " ("
                + str.format("{0:0.1f}", 1000 * (time.time() - this_time))
                + ")",
            )
            offset = 1
            for i in collect:
                if (counter % 9) == 0:
                    counter += 2
                else:
                    counter += 1
                if "BLANK" in collect[i][0]:
                    continue
                stdscr.addstr(
                    counter + offset,
                    0,
                    i + "\t" + collect[i][0] + "\t" + collect[i][1],
                )
            # Kept from the original: overwrites the start of the header row.
            stdscr.addstr(0, 0, "TEST")
            stdscr.refresh()
    except KeyboardInterrupt:
        pass
    except Exception:
        # Curses is still active here, so this may not be visible on screen;
        # behaviour kept from the original.
        print("exception ", sys.exc_info())
# NOTE(review): the next line reads curses.wrapper(main()) -- main is CALLED
# eagerly, runs to completion, and its None return value is then handed to
# curses.wrapper, which fails with a TypeError afterwards.  Intended form is
# curses.wrapper(main) with main accepting the stdscr argument -- confirm
# before changing.
if __name__ == "__main__":
curses.wrapper(main()) | 0.208662 | 0.333598 |
import subprocess
import pathlib
import os
import shutil
# When you move on versions you can change the values here
MAJOR_VERSION = 0
MINOR_VERSION = 1
# What the name of your app should be
APP_NAME = "simple_folder"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def get_build_version():
    """Return the next build number, persisting it in BUILD_VERSION.

    Reads the previous number from a BUILD_VERSION file in the current
    working directory (0 when the file does not exist yet), increments
    it, writes the new value back, and returns it.
    """
    counter_file = pathlib.Path("BUILD_VERSION")
    previous = int(counter_file.read_text()) if counter_file.exists() else 0
    current = previous + 1
    counter_file.write_text(str(current))
    return current
def get_version(build_version):
    """Compose the full version string MAJOR.MINOR.BBB.

    The build number is zero-padded to at least three digits.
    """
    build = str(build_version).zfill(3)
    return f"{MAJOR_VERSION}.{MINOR_VERSION}.{build}"
def build_command_list(version):
    """Assemble the pyinstaller argument vector for a one-file build.

    More details about the flags can be found at
    https://www.pyinstaller.org/
    """
    return [
        "pyinstaller",
        "simple_folder.py",
        "--icon=icon.ico",
        "--onefile",
        "--name={}_{}".format(APP_NAME, version),
    ]
def build(version):
    """Run pyinstaller for *version*, echoing each argument first."""
    cmds = build_command_list(version)
    for arg in cmds:
        print(arg)
    # check_output raises CalledProcessError on a non-zero exit status.
    subprocess.check_output(cmds)
def package(version):
    """Collect the built exe and the folder_structures tree into builds/.

    Moves dist/<APP_NAME>_<version>.exe into a fresh
    builds/<APP_NAME>_<version with dots replaced>/ directory (renamed to
    <APP_NAME>.exe) and copies the project's folder_structures next to it.
    """
    release = "{}_{}".format(APP_NAME, version)
    dest_dir = pathlib.Path(BASE_DIR, "builds", release.replace(".", "_"))
    dest_dir.mkdir(parents=True, exist_ok=True)
    built_exe = pathlib.Path(BASE_DIR, "dist", release + ".exe")
    shutil.move(str(built_exe), str(pathlib.Path(dest_dir, APP_NAME + ".exe")))
    shutil.copytree(
        str(pathlib.Path(BASE_DIR, "folder_structures")),
        str(pathlib.Path(dest_dir, "folder_structures")),
    )
def cleanup():
    """Remove pyinstaller build artifacts: *.spec files, dist/ and build/.

    Safe to call when some artifacts are absent: missing dist/ or build/
    directories are skipped.  (The original iterated rglob() over those
    directories unconditionally, which raised FileNotFoundError when
    rerun after a previous cleanup; it also unlinked every file before
    rmtree, which shutil.rmtree already does.)
    """
    # cleanup generated .spec files next to this script
    for spec in pathlib.Path(BASE_DIR).glob("*.spec"):
        spec.unlink()
    # cleanup dist and build folders; rmtree removes contained files too
    for folder in ("dist", "build"):
        target = pathlib.Path(BASE_DIR, folder)
        if target.exists():
            shutil.rmtree(str(target))
# Script entry point: bump the persistent build counter, run pyinstaller,
# collect the artifacts under builds/, then delete the intermediate outputs.
if __name__ == '__main__':
    build_version = get_build_version()
    version = get_version(build_version)
    build(version)
    package(version)
cleanup() | installer_build.py | import subprocess
import pathlib
import os
import shutil
# When you move on versions you can change the values here
MAJOR_VERSION = 0
MINOR_VERSION = 1
# What the name of your app should be
APP_NAME = "simple_folder"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def get_build_version():
'''
If there is a BUILD_VERSION file in the directory it
reads in the number and increments it by one so that each
new build is a new number
returns (int) - version number of build
'''
version = 0
if pathlib.Path("BUILD_VERSION").exists():
with open("BUILD_VERSION", "r") as f:
version = int(f.read())
version += 1
with open("BUILD_VERSION", "w") as f:
f.write(str(version))
return version
def get_version(build_version):
'''
Formats version string
'''
return "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, str(build_version).zfill(3))
def build_command_list(version):
'''
This is the arg list to use with subprocess.
More details about the commands can be found at
https://www.pyinstaller.org/
'''
cmds = ["pyinstaller"]
cmds.append("simple_folder.py")
cmds.append("--icon=icon.ico")
cmds.append("--onefile")
cmds.append("--name={}_{}".format(APP_NAME, version))
return cmds
def build(version):
'''
Executes the build command
'''
for x in build_command_list(version):
print(x)
proc = subprocess.check_output(build_command_list(version))
def package(version):
'''
Copies files and neccesary folders from the project directory and build location
into a builds folder
'''
name = "{}_{}".format(APP_NAME, version)
exe_name = pathlib.Path(BASE_DIR, "dist", name+".exe")
custom_dist_folder = pathlib.Path(BASE_DIR, "builds", name.replace(".", "_"))
custom_dist_folder.mkdir(parents=True, exist_ok=True)
exe_src = str(exe_name)
exe_dst = str(pathlib.Path(custom_dist_folder, APP_NAME + ".exe"))
folder_structure = pathlib.Path(BASE_DIR, "folder_structures")
folder_structure_dst = pathlib.Path(custom_dist_folder, "folder_structures")
shutil.move(exe_src, exe_dst)
shutil.copytree(str(folder_structure), str(folder_structure_dst))
def cleanup():
'''
Removes artifacts from the build process
'''
# cleanup .spec
for f in pathlib.Path(BASE_DIR).glob("*.spec"):
f.unlink()
# cleanup dist folder
for f in pathlib.Path(BASE_DIR, "dist").rglob("*"):
if f.is_file():
f.unlink()
shutil.rmtree(pathlib.Path(BASE_DIR, "dist"))
# cleanup build folder
for f in pathlib.Path(BASE_DIR, "build").rglob("*"):
if f.is_file():
f.unlink()
shutil.rmtree(pathlib.Path(BASE_DIR, "build"))
if __name__ == '__main__':
build_version = get_build_version()
version = get_version(build_version)
build(version)
package(version)
cleanup() | 0.311008 | 0.111265 |
import copy
import itertools
from axelrod.action import Action, str_to_actions
from axelrod.player import Player
C, D = Action.C, Action.D
class AntiCycler(Player):
    """
    A player that follows a sequence of plays that contains no cycles:
    CDD CD CCD CCCD CCCCD ...

    Names:

    - Anti Cycler: Original name by <NAME>
    """

    name = "AntiCycler"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self) -> None:
        super().__init__()
        self.cycle_length = 1
        self.cycle_counter = 0
        self.first_three = self._get_first_three()

    @staticmethod
    def _get_first_three():
        return [C, D, D]

    def strategy(self, opponent: Player) -> Action:
        # Emit the fixed C, D, D prefix before the growing C-runs begin.
        if self.first_three:
            return self.first_three.pop(0)
        # Cooperate cycle_length times in a row ...
        if self.cycle_counter < self.cycle_length:
            self.cycle_counter += 1
            return C
        # ... then defect once and lengthen the next run of cooperations,
        # so no finite cycle ever repeats.
        self.cycle_length += 1
        self.cycle_counter = 0
        return D
class Cycler(Player):
    """
    A player that repeats a given sequence indefinitely.

    Names:

    - Cycler: Original name by <NAME>
    """

    name = "Cycler"
    # Class-level default; the effective memory depth is overwritten per
    # instance in __init__ to match the configured cycle length.
    classifier = {
        "memory_depth": 2,
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, cycle: str = "CCD") -> None:
        """This strategy will repeat the parameter `cycle` endlessly,
        e.g. C C D C C D C C D ...

        Special Cases
        -------------
        Cooperator is equivalent to Cycler("C")
        Defector is equivalent to Cycler("D")
        Alternator is equivalent to Cycler("CD")
        """
        super().__init__()
        self.cycle_str = cycle
        self.cycle = self.get_new_itertools_cycle()
        # A k-long cycle is fully determined by the previous k-1 moves.
        # NOTE(review): this writes through self.classifier -- presumably the
        # Player base class gives each instance its own copy; confirm that
        # this does not mutate the shared class-level dict.
        self.classifier["memory_depth"] = len(cycle) - 1

    def get_new_itertools_cycle(self):
        # Fresh endless iterator over the configured action sequence.
        return itertools.cycle(str_to_actions(self.cycle_str))

    def strategy(self, opponent: Player) -> Action:
        # The opponent's play is ignored; emit the next action in the cycle.
        return next(self.cycle)
class CyclerDC(Cycler):
    """
    Cycles D, C

    Names:

    - Cycler DC: Original name by <NAME>
    """

    name = "Cycler DC"
    # Copy the base classifier so the override below does not touch Cycler's;
    # a 2-move cycle is determined by the single previous move.
    classifier = copy.copy(Cycler.classifier)
    classifier["memory_depth"] = 1

    def __init__(self) -> None:
        super().__init__(cycle="DC")
class CyclerCCD(Cycler):
    """
    Cycles C, C, D

    Names:

    - Cycler CCD: Original name by <NAME>
    - Periodic player CCD: [Mittal2009]_
    """

    name = "Cycler CCD"
    # Copy the base classifier so the override below does not touch Cycler's.
    classifier = copy.copy(Cycler.classifier)
    classifier["memory_depth"] = 2

    def __init__(self) -> None:
        super().__init__(cycle="CCD")
class CyclerDDC(Cycler):
    """
    Cycles D, D, C

    Names:

    - Cycler DDC: Original name by <NAME>
    - Periodic player DDC: [Mittal2009]_
    """

    name = "Cycler DDC"
    # Copy the base classifier so the override below does not touch Cycler's.
    classifier = copy.copy(Cycler.classifier)
    classifier["memory_depth"] = 2

    def __init__(self) -> None:
        super().__init__(cycle="DDC")
class CyclerCCCD(Cycler):
    """
    Cycles C, C, C, D

    Names:

    - Cycler CCCD: Original name by <NAME>
    """

    name = "Cycler CCCD"
    # Copy the base classifier so the override below does not touch Cycler's.
    classifier = copy.copy(Cycler.classifier)
    classifier["memory_depth"] = 3

    def __init__(self) -> None:
        super().__init__(cycle="CCCD")
class CyclerCCCCCD(Cycler):
    """
    Cycles C, C, C, C, C, D

    Names:

    - Cycler CCCCCD: Original name by <NAME>
    """

    name = "Cycler CCCCCD"
    # Copy the base classifier so the override below does not touch Cycler's.
    classifier = copy.copy(Cycler.classifier)
    classifier["memory_depth"] = 5

    def __init__(self) -> None:
        super().__init__(cycle="CCCCCD")
class CyclerCCCDCD(Cycler):
"""
Cycles C, C, C, D, C, D
Names:
- Cycler CCCDCD: Original name by <NAME>
"""
name = "Cycler CCCDCD"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 5
def __init__(self) -> None:
super().__init__(cycle="CCCDCD") | axelrod/strategies/cycler.py | import copy
import itertools
from axelrod.action import Action, str_to_actions
from axelrod.player import Player
C, D = Action.C, Action.D
class AntiCycler(Player):
"""
A player that follows a sequence of plays that contains no cycles:
CDD CD CCD CCCD CCCCD ...
Names:
- Anti Cycler: Original name by <NAME>
"""
name = "AntiCycler"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.cycle_length = 1
self.cycle_counter = 0
self.first_three = self._get_first_three()
@staticmethod
def _get_first_three():
return [C, D, D]
def strategy(self, opponent: Player) -> Action:
while self.first_three:
return self.first_three.pop(0)
if self.cycle_counter < self.cycle_length:
self.cycle_counter += 1
return C
else:
self.cycle_length += 1
self.cycle_counter = 0
return D
class Cycler(Player):
"""
A player that repeats a given sequence indefinitely.
Names:
- Cycler: Original name by <NAME>
"""
name = "Cycler"
classifier = {
"memory_depth": 2,
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, cycle: str = "CCD") -> None:
"""This strategy will repeat the parameter `cycle` endlessly,
e.g. C C D C C D C C D ...
Special Cases
-------------
Cooperator is equivalent to Cycler("C")
Defector is equivalent to Cycler("D")
Alternator is equivalent to Cycler("CD")
"""
super().__init__()
self.cycle_str = cycle
self.cycle = self.get_new_itertools_cycle()
self.classifier["memory_depth"] = len(cycle) - 1
def get_new_itertools_cycle(self):
return itertools.cycle(str_to_actions(self.cycle_str))
def strategy(self, opponent: Player) -> Action:
return next(self.cycle)
class CyclerDC(Cycler):
"""
Cycles D, C
Names:
- Cycler DC: Original name by <NAME>
"""
name = "Cycler DC"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 1
def __init__(self) -> None:
super().__init__(cycle="DC")
class CyclerCCD(Cycler):
"""
Cycles C, C, D
Names:
- Cycler CCD: Original name by <NAME>
- Periodic player CCD: [Mittal2009]_
"""
name = "Cycler CCD"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 2
def __init__(self) -> None:
super().__init__(cycle="CCD")
class CyclerDDC(Cycler):
"""
Cycles D, D, C
Names:
- Cycler DDC: Original name by <NAME>
- Periodic player DDC: [Mittal2009]_
"""
name = "Cycler DDC"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 2
def __init__(self) -> None:
super().__init__(cycle="DDC")
class CyclerCCCD(Cycler):
"""
Cycles C, C, C, D
Names:
- Cycler CCCD: Original name by <NAME>
"""
name = "Cycler CCCD"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 3
def __init__(self) -> None:
super().__init__(cycle="CCCD")
class CyclerCCCCCD(Cycler):
"""
Cycles C, C, C, C, C, D
Names:
- Cycler CCCD: Original name by <NAME>
"""
name = "Cycler CCCCCD"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 5
def __init__(self) -> None:
super().__init__(cycle="CCCCCD")
class CyclerCCCDCD(Cycler):
"""
Cycles C, C, C, D, C, D
Names:
- Cycler CCCDCD: Original name by <NAME>
"""
name = "Cycler CCCDCD"
classifier = copy.copy(Cycler.classifier)
classifier["memory_depth"] = 5
def __init__(self) -> None:
super().__init__(cycle="CCCDCD") | 0.802323 | 0.307722 |
import psycopg2
from entityservice.cache import progress as progress_cache
from entityservice.cache.active_runs import set_run_state_active, is_run_missing
from entityservice.database import DBConn, check_project_exists, get_run, get_run_state_for_update
from entityservice.database import update_run_set_started
from entityservice.errors import RunDeleted, ProjectDeleted
from entityservice.tasks.base_task import TracedTask, run_failed_handler
from entityservice.tasks.comparing import create_comparison_jobs
from entityservice.async_worker import celery, logger
@celery.task(base=TracedTask, ignore_result=True, args_as_tags=('project_id', 'run_id'))
def prerun_check(project_id, run_id, parent_span=None):
    """Verify a run still needs computing, mark it started, and schedule
    the CLK comparison job.

    Guards against duplicate execution three ways: the redis run-state
    cache, a row-level lock on the run state, and the persisted db state.
    Raises ProjectDeleted / RunDeleted when the underlying records are
    gone; returns silently when another worker already claimed the run.
    """
    log = logger.bind(pid=project_id, run_id=run_id)
    log.debug("Sanity check that we need to compute run")
    # being very defensive here checking if the run state is already in the redis cache
    if not is_run_missing(run_id):
        log.warning("unexpectedly the run state is present in redis before starting")
        return
    with DBConn() as conn:
        if not check_project_exists(conn, project_id):
            log.debug("Project not found. Skipping")
            raise ProjectDeleted(project_id)
        res = get_run(conn, run_id)
        if res is None:
            log.debug(f"Run not found. Skipping")
            raise RunDeleted(run_id)
        try:
            # Takes a row-level lock on the run state; presumably a
            # SELECT ... FOR UPDATE NOWAIT, so a lock held by a concurrent
            # worker surfaces as OperationalError -- confirm in
            # get_run_state_for_update.
            db_state = get_run_state_for_update(conn, run_id)
        except psycopg2.OperationalError:
            log.warning("Run started in another task. Skipping this race.")
            return
        if db_state in {'running', 'completed', 'error'}:
            # In-flight and terminal states are never restarted.
            log.warning("Run already started. Skipping")
            return
        log.debug("Setting run state in db as 'running'")
        update_run_set_started(conn, run_id)
    log.debug("Updating redis cache for run")
    set_run_state_active(run_id)
    # Propagate the tracing span so the comparison task joins this trace;
    # run_failed_handler marks the run as failed if the task errors.
    create_comparison_jobs.apply_async(
        kwargs={'project_id': project_id, 'run_id': run_id, 'parent_span': prerun_check.get_serialized_span()},
        link_error=run_failed_handler.s()
    )
log.info("CLK similarity computation scheduled") | anonlink-entity-service/backend/entityservice/tasks/run.py | import psycopg2
from entityservice.cache import progress as progress_cache
from entityservice.cache.active_runs import set_run_state_active, is_run_missing
from entityservice.database import DBConn, check_project_exists, get_run, get_run_state_for_update
from entityservice.database import update_run_set_started
from entityservice.errors import RunDeleted, ProjectDeleted
from entityservice.tasks.base_task import TracedTask, run_failed_handler
from entityservice.tasks.comparing import create_comparison_jobs
from entityservice.async_worker import celery, logger
@celery.task(base=TracedTask, ignore_result=True, args_as_tags=('project_id', 'run_id'))
def prerun_check(project_id, run_id, parent_span=None):
log = logger.bind(pid=project_id, run_id=run_id)
log.debug("Sanity check that we need to compute run")
# being very defensive here checking if the run state is already in the redis cache
if not is_run_missing(run_id):
log.warning("unexpectedly the run state is present in redis before starting")
return
with DBConn() as conn:
if not check_project_exists(conn, project_id):
log.debug("Project not found. Skipping")
raise ProjectDeleted(project_id)
res = get_run(conn, run_id)
if res is None:
log.debug(f"Run not found. Skipping")
raise RunDeleted(run_id)
try:
db_state = get_run_state_for_update(conn, run_id)
except psycopg2.OperationalError:
log.warning("Run started in another task. Skipping this race.")
return
if db_state in {'running', 'completed', 'error'}:
log.warning("Run already started. Skipping")
return
log.debug("Setting run state in db as 'running'")
update_run_set_started(conn, run_id)
log.debug("Updating redis cache for run")
set_run_state_active(run_id)
create_comparison_jobs.apply_async(
kwargs={'project_id': project_id, 'run_id': run_id, 'parent_span': prerun_check.get_serialized_span()},
link_error=run_failed_handler.s()
)
log.info("CLK similarity computation scheduled") | 0.316053 | 0.067886 |
import json
import os
import glob
import pickle
import re
import time
import wget
import tarfile
import numpy as np
import tensorflow as tf
import matplotlib.image as mpimg
from skimage.transform import resize
from sklearn.decomposition import PCA
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.utils import Sequence, to_categorical
from lib.plot_curves import learning_curves
from scripts.run_BUvsTD import setup
from models.models_imagenette import select_model
from lib.callbacks import ConfusionMatrixCB, scheduler_3_stage
'''
Script for training on Imagenette dataset.
'''
'''
Commandline inputs:
-d IMAGENETTE:
-m ResNet18 -l 0.1 -w 1e-3 -e 50 -r 1 -b 128 -s scheduler_3_stage -p True
-m ResNet18_TD -l 0.05 -w 1e-3 -e 50 -r 1 -b 64 -s scheduler_3_stage -p True
'''
'''
For the PCA augmentation code based on: https://github.com/koshian2/PCAColorAugmentation
MIT License
Copyright (c) 2018 こしあん
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def random_crop(img, random_crop_size):
    """Return a random spatial crop of *img*.

    img is an H x W x 3 array (channels-last); random_crop_size is a
    (height, width) pair.  The crop offset is sampled uniformly over all
    valid positions.
    """
    # Note: image_data_format is 'channel_last'
    assert img.shape[2] == 3
    crop_h, crop_w = random_crop_size
    # Draw the horizontal offset first, then the vertical one, matching the
    # original draw order so seeded runs reproduce identically.
    left = np.random.randint(0, img.shape[1] - crop_w + 1)
    top = np.random.randint(0, img.shape[0] - crop_h + 1)
    return img[top:top + crop_h, left:left + crop_w, :]
class ImagenetteGenerator_inmem(Sequence):
    """Keras Sequence serving Imagenette batches from in-memory arrays.

    Training batches receive per-image PCA colour augmentation, random
    horizontal flips and a random spatial crop; validation batches
    (val=True) are only cropped (a no-op when images already have the
    crop size).
    """

    def __init__(self, X, y, batch_size, shuffle=True, crop_size=128, val=False):
        # X: float image array, assumed (N, H, W, C) with H, W >= crop_size
        # -- TODO confirm against callers.  y: integer class labels.
        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.crop_size = crop_size
        self.val = val
        if not self.val:
            # PCA statistics are only needed to augment training data.
            self.statistics = self.extract_statistics(self.X)
        self.augmenter = ImageDataGenerator(horizontal_flip=True)
        self.indexes = np.arange(len(self.X), dtype=int)
        self.on_epoch_end()

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested.
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def extract_statistics(self, x):
        """Per-image mean/std and colour-covariance SVD used by pca_aug."""
        statistics = {}
        in_shape = x.shape
        # Flatten the spatial dimensions: (N, H*W, C).
        x = x.reshape(-1, in_shape[1] * in_shape[2], in_shape[-1])
        statistics['mean'] = np.mean(x, axis=1, keepdims=True)
        statistics['std'] = np.std(x, axis=1, keepdims=True)
        x = (x - statistics['mean']) / statistics['std']
        # Per-image C x C colour covariance, then its SVD.
        cov_n = max(x.shape[1] - 1, 1)
        cov = np.matmul(np.swapaxes(x, -1, -2), x) / cov_n
        statistics['U'], statistics['S'], statistics['V'] = np.linalg.svd(cov)
        return statistics

    def pca_aug(self, x, index):
        """Shift each image along its colour principal components
        (AlexNet-style PCA colour augmentation, noise scale 0.1)."""
        in_shape = x.shape
        res_shape = (in_shape[0], in_shape[1]*in_shape[2], in_shape[3])
        alphas = np.random.randn(*self.statistics['S'][index].shape) * 0.1
        delta = np.squeeze(np.matmul(self.statistics['U'][index], np.expand_dims(alphas * self.statistics['S'][index], axis=-1)))
        delta = np.expand_dims(delta, axis=1)
        # Undo the standardisation so the shift is in pixel units.
        delta = delta * self.statistics['std'][index]
        delta = np.broadcast_to(delta, res_shape)
        delta = delta.reshape(-1, *in_shape[1:])
        x_aug = x + delta
        return x_aug

    def __len__(self):
        # Number of batches per epoch (the last batch may be smaller).
        return int(np.ceil(len(self.X) / self.batch_size))

    def __getitem__(self, item):
        index = self.indexes[item * self.batch_size:(item + 1) * self.batch_size]
        x = self.X[index]
        y = self.y[index]
        if not self.val:
            # Training only: PCA colour shift, then random horizontal flips.
            x = self.pca_aug(x, index)
            x = self.augmenter.flow(x, batch_size=len(x), shuffle=False).next()
        # Random-crop every image down to (crop_size, crop_size).
        xc = []
        for img in x:
            xc.append(random_crop(img, (self.crop_size, self.crop_size)))
        x = np.array(xc, dtype=np.float32)
        return x, to_categorical(y, 10)
class ImagenetteGenerator(Sequence):
    """Keras Sequence that streams Imagenette/Imagewoof images from disk.

    Downloads and extracts the dataset on first use, builds the
    class-index -> label mapping from mapping_imagenet.json, and serves
    resized (and, for training splits, colour-augmented) batches.
    """

    def __init__(self, root_dir, dset_dir, image_format, batch_size, new_shape=128,
                 res_shape=156, channels=3, num_classes=10, shuffle=True, statistics=None):
        self.root_dir = root_dir
        if not os.path.exists(self.root_dir):
            self.download_files()
        self.dset_dir = dset_dir
        self.image_format = image_format
        self.batch_size = batch_size
        self.res_shape = res_shape  # size images are resized to on load
        self.new_shape = new_shape  # size of the random crop that is served
        self.channels = channels
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.augmenter = ImageDataGenerator(horizontal_flip=True)
        self.image_filenames = []
        self.class_mapping = {}
        self.labels = []
        self.get_image_filenames()
        # NOTE(review): callers pass statistics=[] (not None), which takes the
        # else-branch and leaves self.statistics empty.  The None-branch also
        # feeds the (X, y) tuple from retrieve_set() straight into
        # extract_statistics, which expects an array -- confirm this path is
        # never exercised.
        if statistics is None:
            X = self.retrieve_set()
            self.statistics = self.extract_statistics(X)
        else:
            self.statistics = statistics
        self.on_epoch_end()

    def download_files(self):
        """Download and extract the dataset tarball plus the
        wordnet-id -> label mapping into root_dir."""
        if 'woof' in self.root_dir:
            dataset = 'imagewoof2'
            print('Downloading Imagewoof')
            wget.download('https://s3.amazonaws.com/fast-ai-imageclas/imagewoof2.tgz', re.sub(dataset + '/', '', self.root_dir))
        else:
            dataset = 'imagenette2'
            print('Downloading Imagenette2')
            wget.download('https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz', re.sub(dataset + '/', '', self.root_dir))
        print('Downloading complete')
        print('Extracting files')
        # root_dir ends with '/', so [:-1] + '.tgz' names the archive.
        tar = tarfile.open(self.root_dir[:-1] + '.tgz', "r:gz")
        tar.extractall(path=re.sub(dataset + '/', '', self.root_dir))
        tar.close()
        print('Extracting complete')
        wget.download('https://raw.githubusercontent.com/ozendelait/wordnet-to-json/master/mapping_imagenet.json',
                      self.root_dir)

    def load_json(self, filepath):
        """Read and parse a JSON file."""
        with open(filepath, 'r') as f:
            return json.load(f)

    def load_img(self, filename):
        # Grayscale images are tiled up to the expected channel count.
        # NOTE(review): apparently unused -- retrieve_set and __getitem__
        # read images inline instead.
        img = mpimg.imread(filename)
        if len(img.shape) < 3:
            img = np.tile(img[..., np.newaxis], [1, 1, self.channels])
        return img

    def retrieve_set(self):
        """Load the whole split into memory (resized, float32 in [0, 1]),
        cache it as data.npz inside dset_dir, and return (X, y)."""
        X, y = [], []
        for filename, label in zip(self.image_filenames, self.labels):
            img = mpimg.imread(filename)
            img = img.astype(np.float32) / 255.0
            if len(img.shape) < 3:
                # Tile grayscale images to the expected channel count.
                img = np.tile(img[..., np.newaxis], [1, 1, self.channels])
            img = resize(img, [self.res_shape, self.res_shape, self.channels], anti_aliasing=True, mode='reflect')
            X.append(img)
            y.append(label)
        X = np.array(X, dtype=np.float32)
        y = np.array(y, dtype='uint8')
        np.savez(self.dset_dir + 'data.npz', X, y, self.class_mapping)
        return X, y

    def extract_statistics(self, X):
        """Dataset-level mean/std and top-3 PCA colour components for
        augmentation; also cached to statistics.npy in root_dir."""
        statistics = {}
        statistics['max'] = np.max(X)
        # Rescale in place if pixel values are not already in [0, 1].
        if statistics['max'] > 1:
            X /= statistics['max']
        statistics['mean'] = np.mean(X, axis=0)
        statistics['std'] = np.std(X, axis=0, ddof=1)
        pca = PCA(n_components=3)
        pca.fit(np.reshape(X - statistics['mean'], [len(X), np.prod(X.shape[1:])]))
        # Reshape the flat components back to image form, components last.
        statistics['eig_vec'] = np.transpose(np.reshape(pca.components_, [3, X.shape[1], X.shape[1], 3]),
                                             axes=(1, 2, 3, 0))
        statistics['eig_val'] = pca.explained_variance_
        np.save(self.root_dir + 'statistics.npy', statistics)
        return statistics

    def get_image_filenames(self):
        """Collect every image path with its numeric label, and build the
        class-index -> human-readable label mapping."""
        files = np.array(os.listdir(self.dset_dir))
        # Class directories are wordnet ids like n01440764; sort numerically
        # by the digits after the leading letter.
        sorted_ind = np.argsort([int(file[1:]) for file in files])
        files = files[sorted_ind]
        if not self.class_mapping:
            mapping = self.load_json(self.root_dir + 'mapping_imagenet.json')
            c = 0
            for file in files:
                # Linear scan of the mapping for the matching wordnet id;
                # keep only the first comma-separated label.
                for _, j in enumerate(mapping):
                    if j['v3p0'] == file:
                        self.class_mapping[c] = j['label'].split(',')[0]
                        c += 1
                        if c == len(files):
                            break
        c = 0
        for file in files:
            file = file.strip()
            image_paths = glob.glob(os.path.join(self.dset_dir, file, "*." + self.image_format))
            if image_paths:
                self.image_filenames.extend(image_paths)
                # All images in one class directory share the label c.
                self.labels.extend(c * np.ones(len(image_paths), dtype='uint8'))
                c += 1
        self.image_filenames = np.array(self.image_filenames)
        self.labels = np.array(self.labels)

    def __len__(self):
        # Number of batches per epoch (the last batch may be smaller).
        return int(np.ceil(len(self.labels)/self.batch_size))

    def __getitem__(self, index):
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_im_filenames = [self.image_filenames[k] for k in indexes]
        X = []
        for filename in list_im_filenames:
            img = mpimg.imread(filename)
            if len(img.shape) < 3:
                img = np.tile(img.astype(np.float32)[..., np.newaxis], [1, 1, self.channels])
            img = resize(img, [self.res_shape, self.res_shape, self.channels], anti_aliasing=True, mode='reflect')
            if np.max(img) > 1:
                img /= self.statistics['max']
            if 'val' not in self.dset_dir:
                # Training only: PCA colour shift, then renormalise into
                # [0, 1] in case the shift pushed pixels out of range.
                img += np.matmul(self.statistics['eig_vec'],
                                 np.random.normal(scale=0.1, size=3)*self.statistics['eig_val'])
                if np.min(img) < 0:
                    img -= np.min(img)
                img = np.clip(img, 0, 1)
            img = random_crop(img, (self.new_shape, self.new_shape))
            X.append(img)
        X = np.array(X, dtype='float32')
        if 'val' not in self.dset_dir:
            # Training only: random horizontal flips.
            X = self.augmenter.flow(X, batch_size=len(X), shuffle=False).next()
        y = np.array([self.labels[k] for k in indexes], dtype='uint8')
        return X, to_categorical(y, self.num_classes)

    def on_epoch_end(self):
        # Rebuild (and optionally reshuffle) the index order between epochs.
        self.indexes = np.arange(len(self.labels))
        if self.shuffle:
            np.random.shuffle(self.indexes)
def train(args, filepath, f_output, model_n, method=None):
    """Train *model_n* on Imagenette for args.repetitions runs and report
    aggregate statistics.

    Writes the last trained model, pickled histories and learning curves
    to the paths in *filepath*; the text summary (loss, accuracy,
    confusion matrix, timing) goes to the open file handle *f_output*.
    """
    out_path = './../../data/'
    if not os.path.exists(out_path):
        print(f"Generating folder {out_path}")
        os.makedirs(out_path)
    root_dir = out_path + 'imagenette2/'
    params = {'batch_size': args.batch_size, 'image_format': 'JPEG', 'new_shape': 128}
    print(model_n)
    base_model_name = args.model_name
    if args.extension is not None:
        # Strip the experiment-variant suffix to get the base architecture.
        base_model_name = re.sub('_' + args.extension, '', base_model_name)
    # Extracting statistics for every model-set combination and history for learning curves
    history = []
    test_acc = np.zeros(args.repetitions)
    test_loss = np.zeros_like(test_acc)
    training_time = []
    callbacks = []
    agg_cm = []
    # Load the cached numpy dataset when present; otherwise build it from the
    # raw images (train resized to 156 for later cropping, val to 128).
    if os.path.exists(root_dir + "train/data.npz"):
        npzfile = np.load(root_dir + "train/data.npz", allow_pickle=True)
        x_train = npzfile['arr_0']
        y_train = npzfile['arr_1']
        # class_mapping = npzfile['arr_2']
    else:
        training_generator = ImagenetteGenerator(root_dir=root_dir,
                                                 dset_dir=root_dir + 'train/',
                                                 statistics=[],
                                                 **params)
        x_train, y_train = training_generator.retrieve_set()
        # class_mapping = training_generator.class_mapping
    if os.path.exists(root_dir + "val/data.npz"):
        npzfile = np.load(root_dir + "val/data.npz", allow_pickle=True)
        x_val = npzfile['arr_0']
        y_val = npzfile['arr_1']
    else:
        validation_generator = ImagenetteGenerator(root_dir=root_dir,
                                                   dset_dir=root_dir + 'val/',
                                                   statistics=[],
                                                   res_shape=128,
                                                   **params)
        x_val, y_val = validation_generator.retrieve_set()
    if args.pixel_mean:
        # Per-pixel mean subtraction; each split is centred on its own mean.
        x_train -= np.mean(x_train, axis=0)
        x_val -= np.mean(x_val, axis=0)
    training_generator = ImagenetteGenerator_inmem(x_train, y_train, batch_size=args.batch_size)
    validation_generator = ImagenetteGenerator_inmem(x_val, y_val, batch_size=args.batch_size, val=True)
    for i in range(args.repetitions):
        # Look up the LR schedule by name; '*stage*' schedules are factories
        # parameterised by the base learning rate and the epoch count.
        sched = globals()[args.scheduler]
        if 'stage' in args.scheduler:
            print(args.scheduler)
            cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched(args.learning_rate, args.num_epochs),
                                                                  verbose=0)
        else:
            cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched, verbose=0)
        # Slot 0 always holds the LR scheduler for the current repetition.
        if not callbacks:
            callbacks.append(cb_decayLR)
        else:
            callbacks[0] = cb_decayLR
        confusion_m_cb = ConfusionMatrixCB(validation_generator)
        callbacks.append(confusion_m_cb)
        # Resetting the model for the next iteration
        input_shape = [params['new_shape'], params['new_shape'], 3]
        print('Loading model: ', base_model_name)
        optimizer = tf.keras.optimizers.SGD(args.learning_rate, momentum=0.9, nesterov=True)
        if method is not None:
            model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, method, gpus=args.gpus)
        else:
            model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, gpus=args.gpus)
        start_train = time.time()
        hist = model.fit_generator(generator=training_generator,
                                   validation_data=validation_generator,
                                   epochs=args.num_epochs,
                                   verbose=2, callbacks=callbacks)
        training_time.append(time.time() - start_train)
        test_loss[i], test_acc[i] = model.evaluate_generator(validation_generator, verbose=0)
        history.append(hist.history)
        agg_cm.append(confusion_m_cb.get_cm())
        # Drop the confusion-matrix callback; the scheduler in slot 0 is
        # replaced at the top of the next iteration.
        callbacks = callbacks[:-1]
        if i == args.repetitions - 1:
            model.save(filepath['models'] + filepath['dataset'] + model_n + '.h5')
    # Store history
    with open(filepath['history'] + filepath['dataset'] + 'history_' + model_n + '.txt', 'wb') as f_history:
        pickle.dump(history, f_history)
    # Aggregate confusion matrices and test metrics over all repetitions.
    mean_agg_cm = np.mean(agg_cm, axis=0)
    std_agg_cm = np.std(agg_cm, axis=0, ddof=1)
    mean_agg_cm = np.round(mean_agg_cm / np.sum(mean_agg_cm, axis=1), 3)
    mean_test_loss = np.mean(test_loss)
    std_test_loss = np.std(test_loss, ddof=1)
    mean_test_acc = np.mean(test_acc)
    std_test_acc = np.std(test_acc, ddof=1)
    # Writing statistics to file
    print("****************************************", file=f_output)
    print("Model: ", model_n, file=f_output)
    print(f"Mean test loss: {mean_test_loss} +- {std_test_loss} ", file=f_output)
    print(f"Mean test accuracy: {mean_test_acc} +- {std_test_acc}\n", file=f_output)
    print("Aggregated confusion matrix: mean +- std", file=f_output)
    print(f"{mean_agg_cm}\n", file=f_output)
    print(f"{std_agg_cm}\n", file=f_output)
    print(f"Mean training time: {np.mean(training_time)} +- {np.std(training_time, ddof=1)}", file=f_output)
    print("****************************************\n\n\n", file=f_output)
    learning_curves(history, model_n=model_n, filepath=filepath['graphs'] + filepath['dataset'])
def main():
args, filepath, f_output, orig_size = setup()
train(args, filepath, f_output, model_n=args.model_name)
if __name__ == '__main__':
main() | src/scripts/run_imagenette.py | import json
import os
import glob
import pickle
import re
import time
import wget
import tarfile
import numpy as np
import tensorflow as tf
import matplotlib.image as mpimg
from skimage.transform import resize
from sklearn.decomposition import PCA
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.utils import Sequence, to_categorical
from lib.plot_curves import learning_curves
from scripts.run_BUvsTD import setup
from models.models_imagenette import select_model
from lib.callbacks import ConfusionMatrixCB, scheduler_3_stage
'''
Script for training on Imagenette dataset.
'''
'''
Commandline inputs:
-d IMAGENETTE:
-m ResNet18 -l 0.1 -w 1e-3 -e 50 -r 1 -b 128 -s scheduler_3_stage -p True
-m ResNet18_TD -l 0.05 -w 1e-3 -e 50 -r 1 -b 64 -s scheduler_3_stage -p True
'''
'''
For the PCA augmentation code based on: https://github.com/koshian2/PCAColorAugmentation
MIT License
Copyright (c) 2018 こしあん
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def random_crop(img, random_crop_size):
# Note: image_data_format is 'channel_last'
assert img.shape[2] == 3
height, width = img.shape[0], img.shape[1]
dy, dx = random_crop_size
x = np.random.randint(0, width - dx + 1)
y = np.random.randint(0, height - dy + 1)
return img[y:(y+dy), x:(x+dx), :]
class ImagenetteGenerator_inmem(Sequence):
def __init__(self, X, y, batch_size, shuffle=True, crop_size=128, val=False):
self.X = X
self.y = y
self.batch_size = batch_size
self.shuffle = shuffle
self.crop_size = crop_size
self.val = val
if not self.val:
self.statistics = self.extract_statistics(self.X)
self.augmenter = ImageDataGenerator(horizontal_flip=True)
self.indexes = np.arange(len(self.X), dtype=int)
self.on_epoch_end()
def on_epoch_end(self):
if self.shuffle:
np.random.shuffle(self.indexes)
def extract_statistics(self, x):
statistics = {}
in_shape = x.shape
x = x.reshape(-1, in_shape[1] * in_shape[2], in_shape[-1])
statistics['mean'] = np.mean(x, axis=1, keepdims=True)
statistics['std'] = np.std(x, axis=1, keepdims=True)
x = (x - statistics['mean']) / statistics['std']
cov_n = max(x.shape[1] - 1, 1)
cov = np.matmul(np.swapaxes(x, -1, -2), x) / cov_n
statistics['U'], statistics['S'], statistics['V'] = np.linalg.svd(cov)
return statistics
def pca_aug(self, x, index):
in_shape = x.shape
res_shape = (in_shape[0], in_shape[1]*in_shape[2], in_shape[3])
alphas = np.random.randn(*self.statistics['S'][index].shape) * 0.1
delta = np.squeeze(np.matmul(self.statistics['U'][index], np.expand_dims(alphas * self.statistics['S'][index], axis=-1)))
delta = np.expand_dims(delta, axis=1)
delta = delta * self.statistics['std'][index]
delta = np.broadcast_to(delta, res_shape)
delta = delta.reshape(-1, *in_shape[1:])
x_aug = x + delta
return x_aug
def __len__(self):
return int(np.ceil(len(self.X) / self.batch_size))
def __getitem__(self, item):
index = self.indexes[item * self.batch_size:(item + 1) * self.batch_size]
x = self.X[index]
y = self.y[index]
if not self.val:
x = self.pca_aug(x, index)
x = self.augmenter.flow(x, batch_size=len(x), shuffle=False).next()
xc = []
for img in x:
xc.append(random_crop(img, (self.crop_size, self.crop_size)))
x = np.array(xc, dtype=np.float32)
return x, to_categorical(y, 10)
class ImagenetteGenerator(Sequence):
def __init__(self, root_dir, dset_dir, image_format, batch_size, new_shape=128,
res_shape=156, channels=3, num_classes=10, shuffle=True, statistics=None):
self.root_dir = root_dir
if not os.path.exists(self.root_dir):
self.download_files()
self.dset_dir = dset_dir
self.image_format = image_format
self.batch_size = batch_size
self.res_shape = res_shape
self.new_shape = new_shape
self.channels = channels
self.num_classes = num_classes
self.shuffle = shuffle
self.augmenter = ImageDataGenerator(horizontal_flip=True)
self.image_filenames = []
self.class_mapping = {}
self.labels = []
self.get_image_filenames()
if statistics is None:
X = self.retrieve_set()
self.statistics = self.extract_statistics(X)
else:
self.statistics = statistics
self.on_epoch_end()
def download_files(self):
if 'woof' in self.root_dir:
dataset = 'imagewoof2'
print('Downloading Imagewoof')
wget.download('https://s3.amazonaws.com/fast-ai-imageclas/imagewoof2.tgz', re.sub(dataset + '/', '', self.root_dir))
else:
dataset = 'imagenette2'
print('Downloading Imagenette2')
wget.download('https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz', re.sub(dataset + '/', '', self.root_dir))
print('Downloading complete')
print('Extracting files')
tar = tarfile.open(self.root_dir[:-1] + '.tgz', "r:gz")
tar.extractall(path=re.sub(dataset + '/', '', self.root_dir))
tar.close()
print('Extracting complete')
wget.download('https://raw.githubusercontent.com/ozendelait/wordnet-to-json/master/mapping_imagenet.json',
self.root_dir)
def load_json(self, filepath):
with open(filepath, 'r') as f:
return json.load(f)
def load_img(self, filename):
img = mpimg.imread(filename)
if len(img.shape) < 3:
img = np.tile(img[..., np.newaxis], [1, 1, self.channels])
return img
def retrieve_set(self):
X, y = [], []
for filename, label in zip(self.image_filenames, self.labels):
img = mpimg.imread(filename)
img = img.astype(np.float32) / 255.0
if len(img.shape) < 3:
img = np.tile(img[..., np.newaxis], [1, 1, self.channels])
img = resize(img, [self.res_shape, self.res_shape, self.channels], anti_aliasing=True, mode='reflect')
X.append(img)
y.append(label)
X = np.array(X, dtype=np.float32)
y = np.array(y, dtype='uint8')
np.savez(self.dset_dir + 'data.npz', X, y, self.class_mapping)
return X, y
def extract_statistics(self, X):
statistics = {}
statistics['max'] = np.max(X)
if statistics['max'] > 1:
X /= statistics['max']
statistics['mean'] = np.mean(X, axis=0)
statistics['std'] = np.std(X, axis=0, ddof=1)
pca = PCA(n_components=3)
pca.fit(np.reshape(X - statistics['mean'], [len(X), np.prod(X.shape[1:])]))
statistics['eig_vec'] = np.transpose(np.reshape(pca.components_, [3, X.shape[1], X.shape[1], 3]),
axes=(1, 2, 3, 0))
statistics['eig_val'] = pca.explained_variance_
np.save(self.root_dir + 'statistics.npy', statistics)
return statistics
def get_image_filenames(self):
files = np.array(os.listdir(self.dset_dir))
sorted_ind = np.argsort([int(file[1:]) for file in files])
files = files[sorted_ind]
if not self.class_mapping:
mapping = self.load_json(self.root_dir + 'mapping_imagenet.json')
c = 0
for file in files:
for _, j in enumerate(mapping):
if j['v3p0'] == file:
self.class_mapping[c] = j['label'].split(',')[0]
c += 1
if c == len(files):
break
c = 0
for file in files:
file = file.strip()
image_paths = glob.glob(os.path.join(self.dset_dir, file, "*." + self.image_format))
if image_paths:
self.image_filenames.extend(image_paths)
self.labels.extend(c * np.ones(len(image_paths), dtype='uint8'))
c += 1
self.image_filenames = np.array(self.image_filenames)
self.labels = np.array(self.labels)
def __len__(self):
return int(np.ceil(len(self.labels)/self.batch_size))
def __getitem__(self, index):
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_im_filenames = [self.image_filenames[k] for k in indexes]
X = []
for filename in list_im_filenames:
img = mpimg.imread(filename)
if len(img.shape) < 3:
img = np.tile(img.astype(np.float32)[..., np.newaxis], [1, 1, self.channels])
img = resize(img, [self.res_shape, self.res_shape, self.channels], anti_aliasing=True, mode='reflect')
if np.max(img) > 1:
img /= self.statistics['max']
if 'val' not in self.dset_dir:
img += np.matmul(self.statistics['eig_vec'],
np.random.normal(scale=0.1, size=3)*self.statistics['eig_val'])
if np.min(img) < 0:
img -= np.min(img)
img = np.clip(img, 0, 1)
img = random_crop(img, (self.new_shape, self.new_shape))
X.append(img)
X = np.array(X, dtype='float32')
if 'val' not in self.dset_dir:
X = self.augmenter.flow(X, batch_size=len(X), shuffle=False).next()
y = np.array([self.labels[k] for k in indexes], dtype='uint8')
return X, to_categorical(y, self.num_classes)
def on_epoch_end(self):
self.indexes = np.arange(len(self.labels))
if self.shuffle:
np.random.shuffle(self.indexes)
def train(args, filepath, f_output, model_n, method=None):
out_path = './../../data/'
if not os.path.exists(out_path):
print(f"Generating folder {out_path}")
os.makedirs(out_path)
root_dir = out_path + 'imagenette2/'
params = {'batch_size': args.batch_size, 'image_format': 'JPEG', 'new_shape': 128}
print(model_n)
base_model_name = args.model_name
if args.extension is not None:
base_model_name = re.sub('_' + args.extension, '', base_model_name)
# Extracting statistics for every model-set combination and history for learning curves
history = []
test_acc = np.zeros(args.repetitions)
test_loss = np.zeros_like(test_acc)
training_time = []
callbacks = []
agg_cm = []
if os.path.exists(root_dir + "train/data.npz"):
npzfile = np.load(root_dir + "train/data.npz", allow_pickle=True)
x_train = npzfile['arr_0']
y_train = npzfile['arr_1']
# class_mapping = npzfile['arr_2']
else:
training_generator = ImagenetteGenerator(root_dir=root_dir,
dset_dir=root_dir + 'train/',
statistics=[],
**params)
x_train, y_train = training_generator.retrieve_set()
# class_mapping = training_generator.class_mapping
if os.path.exists(root_dir + "val/data.npz"):
npzfile = np.load(root_dir + "val/data.npz", allow_pickle=True)
x_val = npzfile['arr_0']
y_val = npzfile['arr_1']
else:
validation_generator = ImagenetteGenerator(root_dir=root_dir,
dset_dir=root_dir + 'val/',
statistics=[],
res_shape=128,
**params)
x_val, y_val = validation_generator.retrieve_set()
if args.pixel_mean:
x_train -= np.mean(x_train, axis=0)
x_val -= np.mean(x_val, axis=0)
training_generator = ImagenetteGenerator_inmem(x_train, y_train, batch_size=args.batch_size)
validation_generator = ImagenetteGenerator_inmem(x_val, y_val, batch_size=args.batch_size, val=True)
for i in range(args.repetitions):
sched = globals()[args.scheduler]
if 'stage' in args.scheduler:
print(args.scheduler)
cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched(args.learning_rate, args.num_epochs),
verbose=0)
else:
cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched, verbose=0)
if not callbacks:
callbacks.append(cb_decayLR)
else:
callbacks[0] = cb_decayLR
confusion_m_cb = ConfusionMatrixCB(validation_generator)
callbacks.append(confusion_m_cb)
# Resetting the model for the next iteration
input_shape = [params['new_shape'], params['new_shape'], 3]
print('Loading model: ', base_model_name)
optimizer = tf.keras.optimizers.SGD(args.learning_rate, momentum=0.9, nesterov=True)
if method is not None:
model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, method, gpus=args.gpus)
else:
model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, gpus=args.gpus)
start_train = time.time()
hist = model.fit_generator(generator=training_generator,
validation_data=validation_generator,
epochs=args.num_epochs,
verbose=2, callbacks=callbacks)
training_time.append(time.time() - start_train)
test_loss[i], test_acc[i] = model.evaluate_generator(validation_generator, verbose=0)
history.append(hist.history)
agg_cm.append(confusion_m_cb.get_cm())
callbacks = callbacks[:-1]
if i == args.repetitions - 1:
model.save(filepath['models'] + filepath['dataset'] + model_n + '.h5')
# Store history
with open(filepath['history'] + filepath['dataset'] + 'history_' + model_n + '.txt', 'wb') as f_history:
pickle.dump(history, f_history)
mean_agg_cm = np.mean(agg_cm, axis=0)
std_agg_cm = np.std(agg_cm, axis=0, ddof=1)
mean_agg_cm = np.round(mean_agg_cm / np.sum(mean_agg_cm, axis=1), 3)
mean_test_loss = np.mean(test_loss)
std_test_loss = np.std(test_loss, ddof=1)
mean_test_acc = np.mean(test_acc)
std_test_acc = np.std(test_acc, ddof=1)
# Writing statistics to file
print("****************************************", file=f_output)
print("Model: ", model_n, file=f_output)
print(f"Mean test loss: {mean_test_loss} +- {std_test_loss} ", file=f_output)
print(f"Mean test accuracy: {mean_test_acc} +- {std_test_acc}\n", file=f_output)
print("Aggregated confusion matrix: mean +- std", file=f_output)
print(f"{mean_agg_cm}\n", file=f_output)
print(f"{std_agg_cm}\n", file=f_output)
print(f"Mean training time: {np.mean(training_time)} +- {np.std(training_time, ddof=1)}", file=f_output)
print("****************************************\n\n\n", file=f_output)
learning_curves(history, model_n=model_n, filepath=filepath['graphs'] + filepath['dataset'])
def main():
args, filepath, f_output, orig_size = setup()
train(args, filepath, f_output, model_n=args.model_name)
if __name__ == '__main__':
main() | 0.68595 | 0.334399 |
from user import User
from login import Login
import random
def create_user(fname,lname,phone,email,username,password):
'''
function to create new user
'''
new_user = User(fname,lname,phone,email,username,password)
return new_user
def create_login(social, firstname, lastname, username,password):
'''
function to create new login
'''
new_login = Login(social, firstname, lastname, username,password)
return new_login
def save_user(user):
'''
functon to save user
'''
user.save_user()
def save_login(login):
'''
functon to save login
'''
login.save_login()
def del_user(user):
'''
function to delete a user
'''
user.delete_user()
def find_user(number):
'''
function that finds a user by number and returns the user
'''
return User.find_by_number(number)
def check_existing_user(username):
'''
function that checks if a user exists with that number and return a boolean
'''
return User.user_exist(username)
def check_existing_login(password):
'''
function that checks if a user exists with that number and return a boolean
'''
return Login.login_exist(password)
def display_login():
'''
Function that returns all the saved users
'''
return Login.display_login()
def main():
print("Welcome to password locker. What is your name?")
user_name = input()
print(f"Hello{user_name}.what would you like to do?")
print('\n')
while True:
print("use these short codes : ca - create a new account,cc - create credentials li - login, dc - display login, fu - find a user, ex - exit the user")
short_code = input().lower()
if short_code == 'ca':
# print("New User")
# print("-"*10)
while True:
print("first name....")
f_name = input()
print("Last name....")
l_name = input()
print("phone number....")
p_number = input()
print("email address....")
e_address = input()
print("username....")
username = input()
print("password....")
password = input()
if f_name == "" or l_name == "" or p_number == "" or e_address == "" or username == "" or password == "":
print('Failed. One input field was blank')
else:
save_user(create_user(f_name,l_name,p_number,e_address,username,password))
print ('\n')
print(f"New User {f_name} {l_name} created successfully")
print ('\n')
print("Please Login to create credentials")
break
elif short_code == 'cc':
print("-"*10)
print("Enter username....")
username = input()
print("Enter password")
password = input()
if check_existing_user(username):
print("Welcome Back")
print(f"New Login {username} {password} login successful")
print("Enter social media you want to create")
social = input()
print("Enter your firstname")
firstname = input()
print("Enter your lastname")
lastname = input()
print("Enter your username")
username = input()
print("You can press gp - to generate a password or cp - to create your own password")
print ('\n')
password_choice = input()
if password_choice == 'gp':
symbols = "abcdefghijklmonpqrstuvwxyz0123456789"
password = "".join(random.choice(symbols) for _ in range(9))
print(f"Here is your password {password}")
print('\n')
elif password_choice == 'cp':
print("Enter Password")
password = input()
save_login(create_login(social, firstname, lastname, username,password))
print('\n')
print(f" {social} account has been created successfully")
else:
print("You entered wrong account details")
print('\n')
print("-"*10)
username = input()
print("Re-enter username")
print('\n')
print("-"*10)
password = input()
print("Re-enter password")
print('\n')
print("-"*10)
if check_existing_login(password):
print(f"Login successfully for{username}")
else:
print(f"you dont have an account")
elif short_code == 'dc':
if display_login():
print("Your Current user accounts are:")
print("*"*10)
for info in display_login():
print(f" Social Media {info.social} \n First Name: {info.firstname} \n Second Name: {info.lastname} \n Username: {info.username} \nPassword {info.password}")
else:
print('\n')
print("You dont have any credentials")
elif short_code == "ex":
print("Bye .......")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main() | run.py | from user import User
from login import Login
import random
def create_user(fname,lname,phone,email,username,password):
'''
function to create new user
'''
new_user = User(fname,lname,phone,email,username,password)
return new_user
def create_login(social, firstname, lastname, username,password):
'''
function to create new login
'''
new_login = Login(social, firstname, lastname, username,password)
return new_login
def save_user(user):
'''
functon to save user
'''
user.save_user()
def save_login(login):
'''
functon to save login
'''
login.save_login()
def del_user(user):
'''
function to delete a user
'''
user.delete_user()
def find_user(number):
'''
function that finds a user by number and returns the user
'''
return User.find_by_number(number)
def check_existing_user(username):
'''
function that checks if a user exists with that number and return a boolean
'''
return User.user_exist(username)
def check_existing_login(password):
'''
function that checks if a user exists with that number and return a boolean
'''
return Login.login_exist(password)
def display_login():
'''
Function that returns all the saved users
'''
return Login.display_login()
def main():
print("Welcome to password locker. What is your name?")
user_name = input()
print(f"Hello{user_name}.what would you like to do?")
print('\n')
while True:
print("use these short codes : ca - create a new account,cc - create credentials li - login, dc - display login, fu - find a user, ex - exit the user")
short_code = input().lower()
if short_code == 'ca':
# print("New User")
# print("-"*10)
while True:
print("first name....")
f_name = input()
print("Last name....")
l_name = input()
print("phone number....")
p_number = input()
print("email address....")
e_address = input()
print("username....")
username = input()
print("password....")
password = input()
if f_name == "" or l_name == "" or p_number == "" or e_address == "" or username == "" or password == "":
print('Failed. One input field was blank')
else:
save_user(create_user(f_name,l_name,p_number,e_address,username,password))
print ('\n')
print(f"New User {f_name} {l_name} created successfully")
print ('\n')
print("Please Login to create credentials")
break
elif short_code == 'cc':
print("-"*10)
print("Enter username....")
username = input()
print("Enter password")
password = input()
if check_existing_user(username):
print("Welcome Back")
print(f"New Login {username} {password} login successful")
print("Enter social media you want to create")
social = input()
print("Enter your firstname")
firstname = input()
print("Enter your lastname")
lastname = input()
print("Enter your username")
username = input()
print("You can press gp - to generate a password or cp - to create your own password")
print ('\n')
password_choice = input()
if password_choice == 'gp':
symbols = "abcdefghijklmonpqrstuvwxyz0123456789"
password = "".join(random.choice(symbols) for _ in range(9))
print(f"Here is your password {password}")
print('\n')
elif password_choice == 'cp':
print("Enter Password")
password = input()
save_login(create_login(social, firstname, lastname, username,password))
print('\n')
print(f" {social} account has been created successfully")
else:
print("You entered wrong account details")
print('\n')
print("-"*10)
username = input()
print("Re-enter username")
print('\n')
print("-"*10)
password = input()
print("Re-enter password")
print('\n')
print("-"*10)
if check_existing_login(password):
print(f"Login successfully for{username}")
else:
print(f"you dont have an account")
elif short_code == 'dc':
if display_login():
print("Your Current user accounts are:")
print("*"*10)
for info in display_login():
print(f" Social Media {info.social} \n First Name: {info.firstname} \n Second Name: {info.lastname} \n Username: {info.username} \nPassword {info.password}")
else:
print('\n')
print("You dont have any credentials")
elif short_code == "ex":
print("Bye .......")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main() | 0.124639 | 0.081996 |
import torch.nn.functional
import typing as _typing
import torch_geometric
from torch_geometric.nn.conv import GraphConv
from torch_geometric.nn.pool import TopKPooling
from torch_geometric.nn.glob import (
global_add_pool, global_max_pool, global_mean_pool
)
from ...encoders import base_encoder
from .. import base_decoder, decoder_registry
from ... import _utils
class _LogSoftmaxDecoder(torch.nn.Module):
def forward(self, features: _typing.Sequence[torch.Tensor], *__args, **__kwargs) -> torch.Tensor:
return torch.nn.functional.log_softmax(features[-1], dim=1)
@decoder_registry.DecoderUniversalRegistry.register_decoder('log_softmax')
@decoder_registry.DecoderUniversalRegistry.register_decoder('log_softmax_decoder')
@decoder_registry.DecoderUniversalRegistry.register_decoder('LogSoftmax'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('LogSoftmax_decoder'.lower())
class LogSoftmaxDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
def _initialize(self, *args, **kwargs) -> _typing.Optional[bool]:
self._decoder = _LogSoftmaxDecoder().to(self.device)
return True
class _SumPoolMLPDecoder(torch.nn.Module):
def __init__(
self, _final_dimension: int, hidden_dimension: int, output_dimension: int,
_act: _typing.Optional[str], _dropout: _typing.Optional[float],
num_graph_features: _typing.Optional[int]
):
super(_SumPoolMLPDecoder, self).__init__()
if (
isinstance(num_graph_features, int)
and num_graph_features > 0
):
_final_dimension += num_graph_features
self.__num_graph_features: _typing.Optional[int] = num_graph_features
else:
self.__num_graph_features: _typing.Optional[int] = None
self._fc1: torch.nn.Linear = torch.nn.Linear(
_final_dimension, hidden_dimension
)
self._fc2: torch.nn.Linear = torch.nn.Linear(
hidden_dimension, output_dimension
)
self._act: _typing.Optional[str] = _act
self._dropout: _typing.Optional[float] = _dropout
def forward(
self, features: _typing.Sequence[torch.Tensor],
data: torch_geometric.data.Data, *__args, **__kwargs
):
feature = features[-1]
feature = global_add_pool(feature, data.batch)
if (
isinstance(self.__num_graph_features, int)
and self.__num_graph_features > 0
):
if (
hasattr(data, 'gf') and
isinstance(data.gf, torch.Tensor) and data.gf.dim() == 2 and
data.gf.size() == (feature.size(0), self.__num_graph_features)
):
graph_features: torch.Tensor = data.gf
else:
raise ValueError(
f"The provided data is expected to contain property 'gf' "
f"with {self.__num_graph_features} dimensions as graph feature"
)
feature: torch.Tensor = torch.cat([feature, graph_features], dim=-1)
feature: torch.Tensor = self._fc1(feature)
feature: torch.Tensor = _utils.activation.activation_func(feature, self._act)
if isinstance(self._dropout, float) and 0 <= self._dropout <= 1:
feature: torch.Tensor = torch.nn.functional.dropout(
feature, self._dropout, self.training
)
feature: torch.Tensor = self._fc2(feature)
return torch.nn.functional.log_softmax(feature, dim=-1)
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLP'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLPDecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLP_Decoder'.lower())
class SumPoolMLPDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
def _initialize(self, encoder: base_encoder.AutoHomogeneousEncoderMaintainer, *args, **kwargs) -> _typing.Optional[bool]:
if (
isinstance(getattr(self, "num_graph_features"), int) and
getattr(self, "num_graph_features") > 0
):
num_graph_features: _typing.Optional[int] = getattr(self, "num_graph_features")
else:
num_graph_features: _typing.Optional[int] = None
self._decoder = _SumPoolMLPDecoder(
tuple(encoder.get_output_dimensions())[-1],
self.hyper_parameters['hidden'], self.output_dimension,
self.hyper_parameters['act'], self.hyper_parameters['dropout'],
num_graph_features
).to(self.device)
return True
def __init__(
self, output_dimension: _typing.Optional[int] = ...,
device: _typing.Union[torch.device, str, int, None] = ...,
*args, **kwargs
):
super(SumPoolMLPDecoderMaintainer, self).__init__(
output_dimension, device, *args, **kwargs
)
self.num_graph_features = kwargs.get("num_graph_features", 0)
self.hyper_parameter_space = [
{
"parameterName": "hidden",
"type": "INTEGER",
"maxValue": 64,
"minValue": 8,
"scalingType": "LINEAR",
},
{
"parameterName": "act",
"type": "CATEGORICAL",
"feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
},
{
"parameterName": "dropout",
"type": "DOUBLE",
"maxValue": 0.9,
"minValue": 0.1,
"scalingType": "LINEAR",
}
]
self.hyper_parameters = {
"hidden": 32,
"act": "relu",
"dropout": 0.5
}
class _DiffPoolDecoder(torch.nn.Module):
def __init__(
self, input_dimension: int, output_dimension: int,
_ratio: _typing.Union[float, int], _dropout: _typing.Optional[float],
_act: _typing.Optional[str], num_graph_features: _typing.Optional[int]
):
super(_DiffPoolDecoder, self).__init__()
self.input_dimension = input_dimension
self.output_dimension = output_dimension
self.ratio: _typing.Union[float, int] = _ratio
self._act: _typing.Optional[str] = _act
self.dropout: _typing.Optional[float] = _dropout
self.num_graph_features: _typing.Optional[int] = num_graph_features
self.conv1 = GraphConv(self.input_dimension, 128)
self.pool1 = TopKPooling(128, ratio=self.ratio)
self.conv2 = GraphConv(128, 128)
self.pool2 = TopKPooling(128, ratio=self.ratio)
self.conv3 = GraphConv(128, 128)
self.pool3 = TopKPooling(128, ratio=self.ratio)
if (
isinstance(self.num_graph_features, int)
and self.num_graph_features > 0
):
self.lin1 = torch.nn.Linear(256 + self.num_graph_features, 128)
else:
self.lin1 = torch.nn.Linear(256, 128)
self.lin2 = torch.nn.Linear(128, 64)
self.lin3 = torch.nn.Linear(64, self.output_dimension)
def forward(
self, features: _typing.Sequence[torch.Tensor],
data: torch_geometric.data.Data, *__args, **__kwargs
):
x: torch.Tensor = features[-1]
edge_index: torch.LongTensor = data.edge_index
batch = data.batch
if (
self.num_graph_features is not None and
isinstance(self.num_graph_features, int) and
self.num_graph_features > 0
):
if not (
hasattr(data, 'gf') and
isinstance(data.gf, torch.Tensor) and
data.gf.size() == (x.size(0), self.num_graph_features)
):
raise ValueError(
f"The provided data is expected to contain property 'gf' "
f"with {self.num_graph_features} dimensions as graph feature"
)
x = torch.nn.functional.relu(self.conv1(x, edge_index))
x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
x1 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
x = torch.nn.functional.relu(self.conv2(x, edge_index))
x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
x2 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
x = torch.nn.functional.relu(self.conv3(x, edge_index))
x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
x3 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
x = x1 + x2 + x3
if (
isinstance(self.num_graph_features, int)
and self.num_graph_features > 0
):
x = torch.cat([x, data.gf], dim=-1)
x = self.lin1(x)
x = _utils.activation.activation_func(x, self._act)
x = torch.nn.functional.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
x = _utils.activation.activation_func(x, self._act)
x = torch.nn.functional.log_softmax(self.lin3(x), dim=-1)
return x
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPool'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPoolDecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPool_decoder'.lower())
class DiffPoolDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
def _initialize(
self, encoder: base_encoder.AutoHomogeneousEncoderMaintainer, *args, **kwargs
) -> _typing.Optional[bool]:
if (
isinstance(getattr(self, "num_graph_features"), int) and
getattr(self, "num_graph_features") > 0
):
num_graph_features: _typing.Optional[int] = getattr(self, "num_graph_features")
else:
num_graph_features: _typing.Optional[int] = None
self._decoder = _DiffPoolDecoder(
list(encoder.get_output_dimensions())[-1],
self.output_dimension,
self.hyper_parameters['ratio'], self.hyper_parameters['dropout'],
self.hyper_parameters['act'], num_graph_features
).to(self.device)
return True
def __init__(
self, output_dimension: _typing.Optional[int] = ...,
device: _typing.Union[torch.device, str, int, None] = ...,
*args, **kwargs
):
super(DiffPoolDecoderMaintainer, self).__init__(
output_dimension, device, *args, **kwargs
)
self.num_graph_features = kwargs.get("num_graph_features", 0)
self.hyper_parameter_space = [
{
"parameterName": "ratio",
"type": "DOUBLE",
"maxValue": 0.9,
"minValue": 0.1,
"scalingType": "LINEAR",
},
{
"parameterName": "dropout",
"type": "DOUBLE",
"maxValue": 0.9,
"minValue": 0.1,
"scalingType": "LINEAR",
},
{
"parameterName": "act",
"type": "CATEGORICAL",
"feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
},
]
self.hyper_parameters = {
"ratio": 0.8,
"dropout": 0.5,
"act": "relu"
}
class _DotProductLinkPredictonDecoder(torch.nn.Module):
def forward(self,
features: _typing.Sequence[torch.Tensor],
graph: torch_geometric.data.Data,
pos_edge: torch.Tensor,
neg_edge: torch.Tensor,
**__kwargs
):
z = features[-1]
edge_index = torch.cat([pos_edge, neg_edge], dim=-1)
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
return logits
@decoder_registry.DecoderUniversalRegistry.register_decoder('lpdecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('dotproduct'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('lp-decoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('dot-product'.lower())
class DotProductLinkPredictionDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
def _initialize(self, *args, **kwargs):
self._decoder = _DotProductLinkPredictonDecoder() | autogl/module/model/decoders/_pyg/_pyg_decoders.py | import torch.nn.functional
import typing as _typing
import torch_geometric
from torch_geometric.nn.conv import GraphConv
from torch_geometric.nn.pool import TopKPooling
from torch_geometric.nn.glob import (
global_add_pool, global_max_pool, global_mean_pool
)
from ...encoders import base_encoder
from .. import base_decoder, decoder_registry
from ... import _utils
class _LogSoftmaxDecoder(torch.nn.Module):
def forward(self, features: _typing.Sequence[torch.Tensor], *__args, **__kwargs) -> torch.Tensor:
return torch.nn.functional.log_softmax(features[-1], dim=1)
@decoder_registry.DecoderUniversalRegistry.register_decoder('log_softmax')
@decoder_registry.DecoderUniversalRegistry.register_decoder('log_softmax_decoder')
@decoder_registry.DecoderUniversalRegistry.register_decoder('LogSoftmax'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('LogSoftmax_decoder'.lower())
class LogSoftmaxDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
    """Maintainer wrapping :class:`_LogSoftmaxDecoder` for node classification."""

    def _initialize(self, *args, **kwargs) -> _typing.Optional[bool]:
        """Instantiate the decoder on the configured device; True on success."""
        decoder = _LogSoftmaxDecoder()
        self._decoder = decoder.to(self.device)
        return True
class _SumPoolMLPDecoder(torch.nn.Module):
    """Graph classification decoder: global sum-pool followed by a 2-layer MLP.

    Optionally concatenates per-graph features (``data.gf``) to the pooled
    representation before the MLP.
    """

    def __init__(
        self, _final_dimension: int, hidden_dimension: int, output_dimension: int,
        _act: _typing.Optional[str], _dropout: _typing.Optional[float],
        num_graph_features: _typing.Optional[int]
    ):
        """
        :param _final_dimension: width of the last encoder layer's output.
        :param hidden_dimension: width of the MLP hidden layer.
        :param output_dimension: number of target classes.
        :param _act: activation name resolved via _utils.activation.
        :param _dropout: dropout rate; applied only if a float in [0, 1].
        :param num_graph_features: width of optional graph-level features;
            None / non-positive disables the concatenation.
        """
        super(_SumPoolMLPDecoder, self).__init__()
        if (
            isinstance(num_graph_features, int)
            and num_graph_features > 0
        ):
            # Graph features are concatenated, so the first FC layer widens.
            _final_dimension += num_graph_features
            self.__num_graph_features: _typing.Optional[int] = num_graph_features
        else:
            self.__num_graph_features: _typing.Optional[int] = None
        self._fc1: torch.nn.Linear = torch.nn.Linear(
            _final_dimension, hidden_dimension
        )
        self._fc2: torch.nn.Linear = torch.nn.Linear(
            hidden_dimension, output_dimension
        )
        self._act: _typing.Optional[str] = _act
        self._dropout: _typing.Optional[float] = _dropout

    def forward(
        self, features: _typing.Sequence[torch.Tensor],
        data: torch_geometric.data.Data, *__args, **__kwargs
    ):
        """Return per-graph log-probabilities of shape (num_graphs, classes)."""
        feature = features[-1]
        # Sum node features per graph -> (num_graphs, feature_dim).
        feature = global_add_pool(feature, data.batch)
        if (
            isinstance(self.__num_graph_features, int)
            and self.__num_graph_features > 0
        ):
            # Validate that 'gf' holds one row per graph with the expected width.
            if (
                hasattr(data, 'gf') and
                isinstance(data.gf, torch.Tensor) and data.gf.dim() == 2 and
                data.gf.size() == (feature.size(0), self.__num_graph_features)
            ):
                graph_features: torch.Tensor = data.gf
            else:
                raise ValueError(
                    f"The provided data is expected to contain property 'gf' "
                    f"with {self.__num_graph_features} dimensions as graph feature"
                )
            feature: torch.Tensor = torch.cat([feature, graph_features], dim=-1)
        feature: torch.Tensor = self._fc1(feature)
        feature: torch.Tensor = _utils.activation.activation_func(feature, self._act)
        # Dropout is silently skipped when the rate is not a valid float rate.
        if isinstance(self._dropout, float) and 0 <= self._dropout <= 1:
            feature: torch.Tensor = torch.nn.functional.dropout(
                feature, self._dropout, self.training
            )
        feature: torch.Tensor = self._fc2(feature)
        return torch.nn.functional.log_softmax(feature, dim=-1)
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLP'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLPDecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('SumPoolMLP_Decoder'.lower())
class SumPoolMLPDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
    """Maintainer for :class:`_SumPoolMLPDecoder` (sum-pool + 2-layer MLP).

    Exposes ``hidden``, ``act`` and ``dropout`` as tunable hyper-parameters
    and forwards ``num_graph_features`` to the underlying decoder.
    """

    def _initialize(self, encoder: base_encoder.AutoHomogeneousEncoderMaintainer, *args, **kwargs) -> _typing.Optional[bool]:
        """Build the decoder from the encoder's final output dimension."""
        configured = getattr(self, "num_graph_features")
        num_graph_features: _typing.Optional[int] = (
            configured if isinstance(configured, int) and configured > 0 else None
        )
        final_dimension = tuple(encoder.get_output_dimensions())[-1]
        self._decoder = _SumPoolMLPDecoder(
            final_dimension,
            self.hyper_parameters['hidden'],
            self.output_dimension,
            self.hyper_parameters['act'],
            self.hyper_parameters['dropout'],
            num_graph_features,
        ).to(self.device)
        return True

    def __init__(
        self, output_dimension: _typing.Optional[int] = ...,
        device: _typing.Union[torch.device, str, int, None] = ...,
        *args, **kwargs
    ):
        """Set default hyper-parameters and the HPO search space."""
        super(SumPoolMLPDecoderMaintainer, self).__init__(
            output_dimension, device, *args, **kwargs
        )
        # Width of optional graph-level features; 0 means "not used".
        self.num_graph_features = kwargs.get("num_graph_features", 0)
        # Search space consumed by the HPO module.
        self.hyper_parameter_space = [
            {
                "parameterName": "hidden",
                "type": "INTEGER",
                "maxValue": 64,
                "minValue": 8,
                "scalingType": "LINEAR",
            },
            {
                "parameterName": "act",
                "type": "CATEGORICAL",
                "feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
            },
            {
                "parameterName": "dropout",
                "type": "DOUBLE",
                "maxValue": 0.9,
                "minValue": 0.1,
                "scalingType": "LINEAR",
            }
        ]
        # Defaults used until HPO overrides them.
        self.hyper_parameters = {
            "hidden": 32,
            "act": "relu",
            "dropout": 0.5
        }
class _DiffPoolDecoder(torch.nn.Module):
    """Graph-classification decoder with three GraphConv + TopKPooling stages.

    After each stage the node features are summarised by concatenated
    global-max / global-mean pooling; the three per-stage summaries are
    summed and fed through a three-layer MLP ending in a log-softmax.
    """

    def __init__(
        self, input_dimension: int, output_dimension: int,
        _ratio: _typing.Union[float, int], _dropout: _typing.Optional[float],
        _act: _typing.Optional[str], num_graph_features: _typing.Optional[int]
    ):
        """
        :param input_dimension: width of the last encoder layer's output.
        :param output_dimension: number of target classes.
        :param _ratio: TopKPooling keep-ratio for every pooling stage.
        :param _dropout: dropout rate; applied only if a float in [0, 1].
        :param _act: activation name resolved via _utils.activation.
        :param num_graph_features: width of optional graph-level features;
            None / non-positive disables the concatenation.
        """
        super(_DiffPoolDecoder, self).__init__()
        self.input_dimension = input_dimension
        self.output_dimension = output_dimension
        self.ratio: _typing.Union[float, int] = _ratio
        self._act: _typing.Optional[str] = _act
        self.dropout: _typing.Optional[float] = _dropout
        self.num_graph_features: _typing.Optional[int] = num_graph_features
        self.conv1 = GraphConv(self.input_dimension, 128)
        self.pool1 = TopKPooling(128, ratio=self.ratio)
        self.conv2 = GraphConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=self.ratio)
        self.conv3 = GraphConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=self.ratio)
        # Each stage yields max||mean pooling (2 x 128); optional graph-level
        # features widen the first FC layer.
        if (
            isinstance(self.num_graph_features, int)
            and self.num_graph_features > 0
        ):
            self.lin1 = torch.nn.Linear(256 + self.num_graph_features, 128)
        else:
            self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, self.output_dimension)

    def forward(
        self, features: _typing.Sequence[torch.Tensor],
        data: torch_geometric.data.Data, *__args, **__kwargs
    ):
        """Return per-graph log-probabilities of shape (num_graphs, classes)."""
        x: torch.Tensor = features[-1]
        edge_index: torch.LongTensor = data.edge_index
        batch = data.batch
        if (
            isinstance(self.num_graph_features, int)
            and self.num_graph_features > 0
        ):
            # BUGFIX: the previous validation required data.gf to have one row
            # per *node* (x.size(0)), but 'gf' holds one row per *graph* — it
            # is concatenated to the pooled per-graph representation below
            # (cf. _SumPoolMLPDecoder, which checks after pooling). Only the
            # feature dimension can be validated reliably at this point.
            if not (
                hasattr(data, 'gf') and
                isinstance(data.gf, torch.Tensor) and
                data.gf.dim() == 2 and
                data.gf.size(-1) == self.num_graph_features
            ):
                raise ValueError(
                    f"The provided data is expected to contain property 'gf' "
                    f"with {self.num_graph_features} dimensions as graph feature"
                )
        x = torch.nn.functional.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
        x = torch.nn.functional.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
        x = torch.nn.functional.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([global_max_pool(x, batch), global_mean_pool(x, batch)], dim=1)
        # Combine the three pooling stages (jumping-knowledge style sum).
        x = x1 + x2 + x3
        if (
            isinstance(self.num_graph_features, int)
            and self.num_graph_features > 0
        ):
            x = torch.cat([x, data.gf], dim=-1)
        x = self.lin1(x)
        x = _utils.activation.activation_func(x, self._act)
        # Guard against a None/invalid dropout rate (consistent with
        # _SumPoolMLPDecoder): F.dropout would raise on p=None.
        if isinstance(self.dropout, float) and 0 <= self.dropout <= 1:
            x = torch.nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        x = _utils.activation.activation_func(x, self._act)
        x = torch.nn.functional.log_softmax(self.lin3(x), dim=-1)
        return x
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPool'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPoolDecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('DiffPool_decoder'.lower())
class DiffPoolDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
    """Maintainer for :class:`_DiffPoolDecoder` (GraphConv + TopKPooling MLP).

    Exposes ``ratio``, ``dropout`` and ``act`` as tunable hyper-parameters
    and forwards ``num_graph_features`` to the underlying decoder.
    """

    def _initialize(
        self, encoder: base_encoder.AutoHomogeneousEncoderMaintainer, *args, **kwargs
    ) -> _typing.Optional[bool]:
        """Build the decoder from the encoder's last output dimension."""
        # Treat a missing / non-positive num_graph_features as "disabled".
        if (
            isinstance(getattr(self, "num_graph_features"), int) and
            getattr(self, "num_graph_features") > 0
        ):
            num_graph_features: _typing.Optional[int] = getattr(self, "num_graph_features")
        else:
            num_graph_features: _typing.Optional[int] = None
        self._decoder = _DiffPoolDecoder(
            list(encoder.get_output_dimensions())[-1],
            self.output_dimension,
            self.hyper_parameters['ratio'], self.hyper_parameters['dropout'],
            self.hyper_parameters['act'], num_graph_features
        ).to(self.device)
        return True

    def __init__(
        self, output_dimension: _typing.Optional[int] = ...,
        device: _typing.Union[torch.device, str, int, None] = ...,
        *args, **kwargs
    ):
        """Set default hyper-parameters and the HPO search space.

        :param kwargs: may contain ``num_graph_features`` — width of optional
            per-graph features (0 disables).
        """
        super(DiffPoolDecoderMaintainer, self).__init__(
            output_dimension, device, *args, **kwargs
        )
        # Width of optional graph-level features; 0 means "not used".
        self.num_graph_features = kwargs.get("num_graph_features", 0)
        # Search space consumed by the HPO module.
        self.hyper_parameter_space = [
            {
                "parameterName": "ratio",
                "type": "DOUBLE",
                "maxValue": 0.9,
                "minValue": 0.1,
                "scalingType": "LINEAR",
            },
            {
                "parameterName": "dropout",
                "type": "DOUBLE",
                "maxValue": 0.9,
                "minValue": 0.1,
                "scalingType": "LINEAR",
            },
            {
                "parameterName": "act",
                "type": "CATEGORICAL",
                "feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
            },
        ]
        # Defaults used until HPO overrides them.
        self.hyper_parameters = {
            "ratio": 0.8,
            "dropout": 0.5,
            "act": "relu"
        }
class _DotProductLinkPredictonDecoder(torch.nn.Module):
    """Link-prediction decoder scoring node pairs by embedding dot product."""

    def forward(self,
            features: _typing.Sequence[torch.Tensor],
            graph: torch_geometric.data.Data,
            pos_edge: torch.Tensor,
            neg_edge: torch.Tensor,
            **__kwargs
    ):
        """Return one logit per edge: <z_u, z_v> for every (u, v) in
        pos_edge followed by neg_edge (concatenated along the edge axis)."""
        embeddings = features[-1]
        edges = torch.cat([pos_edge, neg_edge], dim=-1)
        source, target = edges[0], edges[1]
        return torch.mul(embeddings[source], embeddings[target]).sum(dim=-1)
@decoder_registry.DecoderUniversalRegistry.register_decoder('lpdecoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('dotproduct'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('lp-decoder'.lower())
@decoder_registry.DecoderUniversalRegistry.register_decoder('dot-product'.lower())
class DotProductLinkPredictionDecoderMaintainer(base_decoder.BaseDecoderMaintainer):
def _initialize(self, *args, **kwargs):
self._decoder = _DotProductLinkPredictonDecoder() | 0.92367 | 0.37014 |
import math
import heapq
import numba as nb
import numpy as np
import copy
def get_id():
    """Infinite generator of consecutive integer IDs, starting at 0."""
    next_id = 0
    while True:
        yield next_id
        next_id += 1
def graph_parse(adj_matrix):
    """Parse a dense (weighted) adjacency matrix.

    Returns ``(num_nodes, total_volume, node_volumes, adjacency_table)``
    where a node's volume is the sum of its incident edge weights and the
    adjacency table maps each node to the set of its neighbours.
    """
    g_num_nodes = adj_matrix.shape[0]
    adj_table = {}
    node_vol = []
    VOL = 0
    for node in range(g_num_nodes):
        neighbours = set()
        volume = 0
        for other in range(g_num_nodes):
            weight = adj_matrix[node, other]
            if weight != 0:
                volume += weight
                neighbours.add(other)
        VOL += volume
        adj_table[node] = neighbours
        node_vol.append(volume)
    return g_num_nodes, VOL, node_vol, adj_table
# numba nopython kernel: keep the plain nested loops — this is the form numba
# compiles directly; do not replace with Python-level helpers.
@nb.jit(nopython=True)
def cut_volume(adj_matrix,p1,p2):
    """Total weight of edges crossing between vertex sets p1 and p2.

    p1 / p2 are integer numpy arrays of vertex indices into adj_matrix.
    """
    c12 = 0
    for i in range(len(p1)):
        for j in range(len(p2)):
            c = adj_matrix[p1[i],p2[j]]
            if c != 0:
                c12 += c
    return c12
def LayerFirst(node_dict, start_id):
    """Yield node ids in breadth-first (layer) order starting from start_id.

    Children are visited in each node's ``children`` set iteration order,
    matching the original implementation.
    """
    # Use an index cursor instead of list.pop(0): popping from the front of a
    # list is O(n) per call, which made the original traversal quadratic.
    queue = [start_id]
    cursor = 0
    while cursor < len(queue):
        node_id = queue[cursor]
        cursor += 1
        yield node_id
        children = node_dict[node_id].children
        if children:
            queue.extend(children)
def merge(new_ID, id1, id2, cut_v, node_dict):
    """Merge tree nodes id1 and id2 under a new parent new_ID in node_dict.

    The parent's volume/partition are the unions of the children's; its cut
    value g discounts the (doubled) cut volume between the two parts.
    """
    left, right = node_dict[id1], node_dict[id2]
    combined_partition = left.partition + right.partition
    combined_vol = left.vol + right.vol
    combined_g = left.g + right.g - 2 * cut_v
    height = 1 + max(left.child_h, right.child_h)
    parent = PartitionTreeNode(
        ID=new_ID, partition=combined_partition, children={id1, id2},
        g=combined_g, vol=combined_vol, child_h=height, child_cut=cut_v
    )
    left.parent = new_ID
    right.parent = new_ID
    node_dict[new_ID] = parent
def compressNode(node_dict, node_id, parent_id):
    """Remove node_id from the tree, attaching its children directly to parent_id.

    Transfers the compressed node's accumulated cut volume to the parent and,
    when the removal may have lowered the parent's subtree height, propagates
    corrected ``child_h`` values up the ancestor chain.
    """
    p_child_h = node_dict[parent_id].child_h
    node_children = node_dict[node_id].children
    node_dict[parent_id].child_cut += node_dict[node_id].child_cut
    node_dict[parent_id].children.remove(node_id)
    node_dict[parent_id].children = node_dict[parent_id].children.union(node_children)
    for c in node_children:
        node_dict[c].parent = parent_id
    com_node_child_h = node_dict[node_id].child_h
    node_dict.pop(node_id)
    # Heights can only shrink when the removed node was (one of) the parent's
    # tallest children, i.e. its child_h was exactly parent's child_h - 1.
    if (p_child_h - com_node_child_h) == 1:
        while True:
            max_child_h = max([node_dict[f_c].child_h for f_c in node_dict[parent_id].children])
            if node_dict[parent_id].child_h == (max_child_h + 1):
                # Height already consistent; stop propagating upward.
                break
            node_dict[parent_id].child_h = max_child_h + 1
            parent_id = node_dict[parent_id].parent
            if parent_id is None:
                break
def child_tree_deepth(node_dict, nid):
    """Depth of nid's subtree measured from the root: number of ancestors
    above nid plus nid's own subtree height (child_h)."""
    current = node_dict[nid]
    ancestors = 0
    while current.parent is not None:
        current = node_dict[current.parent]
        ancestors += 1
    return ancestors + node_dict[nid].child_h
def CompressDelta(node1, p_node):
    """Structural-entropy change from compressing node1 into its parent p_node."""
    return node1.child_cut * math.log(p_node.vol / node1.vol)
def CombineDelta(node1, node2, cut_v, g_vol):
    """Structural-entropy change from merging node1 and node2 (with cut volume
    cut_v between them) inside a graph of total volume g_vol."""
    v1, v2 = node1.vol, node2.vol
    merged_vol = v1 + v2
    term1 = (v1 - node1.g) * math.log(merged_vol / v1, 2)
    term2 = (v2 - node2.g) * math.log(merged_vol / v2, 2)
    term3 = 2 * cut_v * math.log(g_vol / merged_vol, 2)
    return (term1 + term2 - term3) / g_vol
class PartitionTreeNode():
    """One node of an encoding (partition) tree.

    Attributes:
        ID: unique node identifier.
        partition: list of graph vertices covered by this node.
        vol: volume (total weight of edges incident to the partition).
        g: cut value (weight of edges leaving the partition).
        parent / children: tree links (children is a set of node ids).
        child_h: height of the subtree below this node (this node excluded).
        child_cut: accumulated cut volume of merged children.
        merged: scratch flag used during greedy tree construction.
    """

    def __init__(self, ID, partition, vol, g, children: set = None, parent=None, child_h=0, child_cut=0):
        self.ID = ID
        self.partition = partition
        self.parent = parent
        self.children = children
        self.vol = vol
        self.g = g
        self.merged = False
        self.child_h = child_h
        self.child_cut = child_cut

    def __str__(self):
        return "{" + "{}:{}".format(self.__class__.__name__, self.gatherAttrs()) + "}"

    def gatherAttrs(self):
        """Render all instance attributes as 'name=value' joined by commas."""
        return ",".join(
            "{}={}".format(name, value) for name, value in self.__dict__.items()
        )
class PartitionTree():
    """Encoding (partition) tree of a graph built by greedy structural-entropy
    minimisation.

    The tree is stored flat in ``self.tree_node`` (id -> PartitionTreeNode);
    ``self.root_id`` is assigned by :meth:`build_encoding_tree`.
    """

    def __init__(self,adj_matrix):
        """:param adj_matrix: dense (weighted) adjacency matrix (numpy array)."""
        self.adj_matrix = adj_matrix
        self.tree_node = {}
        self.g_num_nodes, self.VOL, self.node_vol, self.adj_table = graph_parse(adj_matrix)
        # Shared generator of fresh node ids.
        self.id_g = get_id()
        self.leaves = []
        self.build_leaves()

    def build_leaves(self):
        """Create one leaf tree-node per graph vertex (g == vol for leaves)."""
        for vertex in range(self.g_num_nodes):
            ID = next(self.id_g)
            v = self.node_vol[vertex]
            leaf_node = PartitionTreeNode(ID=ID, partition=[vertex], g = v, vol=v)
            self.tree_node[ID] = leaf_node
            self.leaves.append(ID)

    def build_sub_leaves(self,node_list,p_vol):
        """Build leaf nodes for the subgraph induced by node_list.

        Volumes/cuts are computed inside the subgraph only. Also returns the
        current entropy contribution of node_list under a parent of volume
        p_vol. NOTE: mutates self.adj_table entries for the listed vertices.
        """
        subgraph_node_dict = {}
        ori_ent = 0
        for vertex in node_list:
            # Entropy contribution of this vertex under its current parent.
            ori_ent += -(self.tree_node[vertex].g / self.VOL)\
                * math.log2(self.tree_node[vertex].vol / p_vol)
            sub_n = set()
            vol = 0
            for vertex_n in node_list:
                c = self.adj_matrix[vertex,vertex_n]
                if c != 0:
                    vol += c
                    sub_n.add(vertex_n)
            sub_leaf = PartitionTreeNode(ID=vertex,partition=[vertex],g=vol,vol=vol)
            subgraph_node_dict[vertex] = sub_leaf
            self.adj_table[vertex] = sub_n
        return subgraph_node_dict,ori_ent

    def build_root_down(self):
        """Copy the root's children into a standalone node dict (for trying a
        root-down expansion) and return it with their current entropy."""
        root_child = self.tree_node[self.root_id].children
        subgraph_node_dict = {}
        ori_en = 0
        g_vol = self.tree_node[self.root_id].vol
        for node_id in root_child:
            node = self.tree_node[node_id]
            ori_en += -(node.g / g_vol) * math.log2(node.vol / g_vol)
            # Restrict adjacency to siblings (other root children).
            new_n = set()
            for nei in self.adj_table[node_id]:
                if nei in root_child:
                    new_n.add(nei)
            self.adj_table[node_id] = new_n
            new_node = PartitionTreeNode(ID=node_id,partition=node.partition,vol=node.vol,g = node.g,children=node.children)
            subgraph_node_dict[node_id] = new_node
        return subgraph_node_dict, ori_en

    def entropy(self,node_dict = None):
        """Structural entropy of the (sub)tree in node_dict (defaults to the
        whole tree); root contributes nothing (it has no parent)."""
        if node_dict is None:
            node_dict = self.tree_node
        ent = 0
        for node_id,node in node_dict.items():
            if node.parent is not None:
                node_p = node_dict[node.parent]
                node_vol = node.vol
                node_g = node.g
                node_p_vol = node_p.vol
                ent += - (node_g / self.VOL) * math.log2(node_vol / node_p_vol)
        return ent

    def __build_k_tree(self,g_vol,nodes_dict:dict,k = None,):
        """Greedily merge nodes_dict into a binary tree minimising structural
        entropy, then (if k is given) compress it down to height <= k.

        min_heap holds candidate pair-merges (delta, id1, id2, cut_volume);
        cmp_heap holds candidate node-compressions (delta, node, parent).
        Returns the id of the resulting root.
        """
        min_heap = []
        cmp_heap = []
        nodes_ids = nodes_dict.keys()
        new_id = None
        # Seed the merge heap with every adjacent pair (counted once: j > i).
        for i in nodes_ids:
            for j in self.adj_table[i]:
                if j > i:
                    n1 = nodes_dict[i]
                    n2 = nodes_dict[j]
                    if len(n1.partition) == 1 and len(n2.partition) == 1:
                        # Singleton-singleton cut is a single matrix entry.
                        cut_v = self.adj_matrix[n1.partition[0],n2.partition[0]]
                    else:
                        cut_v = cut_volume(self.adj_matrix,p1 = np.array(n1.partition),p2=np.array(n2.partition))
                    diff = CombineDelta(nodes_dict[i], nodes_dict[j], cut_v, g_vol)
                    heapq.heappush(min_heap, (diff, i, j, cut_v))
        unmerged_count = len(nodes_ids)
        while unmerged_count > 1:
            if len(min_heap) == 0:
                break
            diff, id1, id2, cut_v = heapq.heappop(min_heap)
            # Stale entries (already-merged endpoints) are lazily discarded.
            if nodes_dict[id1].merged or nodes_dict[id2].merged:
                continue
            nodes_dict[id1].merged = True
            nodes_dict[id2].merged = True
            new_id = next(self.id_g)
            merge(new_id, id1, id2, cut_v, nodes_dict)
            self.adj_table[new_id] = self.adj_table[id1].union(self.adj_table[id2])
            for i in self.adj_table[new_id]:
                self.adj_table[i].add(new_id)
            #compress delta
            if nodes_dict[id1].child_h > 0:
                heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id1],nodes_dict[new_id]),id1,new_id])
            if nodes_dict[id2].child_h > 0:
                heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id2],nodes_dict[new_id]),id2,new_id])
            unmerged_count -= 1
            # Refresh merge candidates between the new node and its neighbours.
            for ID in self.adj_table[new_id]:
                if not nodes_dict[ID].merged:
                    n1 = nodes_dict[ID]
                    n2 = nodes_dict[new_id]
                    cut_v = cut_volume(self.adj_matrix,np.array(n1.partition), np.array(n2.partition))
                    new_diff = CombineDelta(nodes_dict[ID], nodes_dict[new_id], cut_v, g_vol)
                    heapq.heappush(min_heap, (new_diff, ID, new_id, cut_v))
        root = new_id
        if unmerged_count > 1:
            #combine solitary node
            # Disconnected remainder: attach all still-unmerged nodes under a
            # single artificial root with cut value 0.
            # print('processing solitary node')
            assert len(min_heap) == 0
            unmerged_nodes = {i for i, j in nodes_dict.items() if not j.merged}
            new_child_h = max([nodes_dict[i].child_h for i in unmerged_nodes]) + 1
            new_id = next(self.id_g)
            new_node = PartitionTreeNode(ID=new_id,partition=list(nodes_ids),children=unmerged_nodes,
                                         vol=g_vol,g = 0,child_h=new_child_h)
            nodes_dict[new_id] = new_node
            for i in unmerged_nodes:
                nodes_dict[i].merged = True
                nodes_dict[i].parent = new_id
                if nodes_dict[i].child_h > 0:
                    heapq.heappush(cmp_heap, [CompressDelta(nodes_dict[i], nodes_dict[new_id]), i, new_id])
            root = new_id
        if k is not None:
            # Repeatedly compress the cheapest eligible node until the tree
            # height is at most k.
            while nodes_dict[root].child_h > k:
                diff, node_id, p_id = heapq.heappop(cmp_heap)
                if child_tree_deepth(nodes_dict, node_id) <= k:
                    continue
                children = nodes_dict[node_id].children
                compressNode(nodes_dict, node_id, p_id)
                if nodes_dict[root].child_h == k:
                    break
                # Recompute deltas invalidated by the compression: entries for
                # the parent and for the absorbed children.
                for e in cmp_heap:
                    if e[1] == p_id:
                        if child_tree_deepth(nodes_dict, p_id) > k:
                            e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[e[2]])
                    if e[1] in children:
                        if nodes_dict[e[1]].child_h == 0:
                            continue
                        if child_tree_deepth(nodes_dict, e[1]) > k:
                            e[2] = p_id
                            e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[p_id])
                heapq.heapify(cmp_heap)
        return root

    def check_balance(self,node_dict,root_id):
        """Ensure no leaf hangs directly below the root: lift each height-0
        root child by one level via single_up."""
        root_c = copy.deepcopy(node_dict[root_id].children)
        for c in root_c:
            if node_dict[c].child_h == 0:
                self.single_up(node_dict,c)

    def single_up(self,node_dict,node_id):
        """Insert a new intermediate node between node_id and its parent,
        duplicating node_id's partition/vol/g."""
        new_id = next(self.id_g)
        p_id = node_dict[node_id].parent
        grow_node = PartitionTreeNode(ID=new_id, partition=node_dict[node_id].partition, parent=p_id,
                                      children={node_id}, vol=node_dict[node_id].vol, g=node_dict[node_id].g)
        node_dict[node_id].parent = new_id
        node_dict[p_id].children.remove(node_id)
        node_dict[p_id].children.add(new_id)
        node_dict[new_id] = grow_node
        node_dict[new_id].child_h = node_dict[node_id].child_h + 1
        # The inserted node inherits node_id's adjacency.
        self.adj_table[new_id] = self.adj_table[node_id]
        for i in self.adj_table[node_id]:
            self.adj_table[i].add(new_id)

    def root_down_delta(self):
        """Tentatively regroup the root's children into a 2-level subtree and
        return (per-child entropy gain, new subtree root id, subtree dict).
        Returns (0, None, None) when the root has fewer than 3 children."""
        if len(self.tree_node[self.root_id].children) < 3:
            return 0 , None , None
        subgraph_node_dict, ori_entropy = self.build_root_down()
        g_vol = self.tree_node[self.root_id].vol
        new_root = self.__build_k_tree(g_vol=g_vol,nodes_dict=subgraph_node_dict,k=2)
        self.check_balance(subgraph_node_dict,new_root)
        new_entropy = self.entropy(subgraph_node_dict)
        delta = (ori_entropy - new_entropy) / len(self.tree_node[self.root_id].children)
        return delta, new_root, subgraph_node_dict

    def leaf_up_entropy(self,sub_node_dict,sub_root_id,node_id):
        """Entropy of a candidate leaf-up subtree, after rescaling its nodes'
        vol/g from subgraph-local values back to whole-graph values."""
        ent = 0
        for sub_node_id in LayerFirst(sub_node_dict,sub_root_id):
            if sub_node_id == sub_root_id:
                # The subtree root adopts the original node's vol and g.
                sub_node_dict[sub_root_id].vol = self.tree_node[node_id].vol
                sub_node_dict[sub_root_id].g = self.tree_node[node_id].g
            elif sub_node_dict[sub_node_id].child_h == 1:
                # Internal node: recover whole-graph vol from its leaf members.
                node = sub_node_dict[sub_node_id]
                inner_vol = node.vol - node.g
                partition = node.partition
                ori_vol = sum(self.tree_node[i].vol for i in partition)
                ori_g = ori_vol - inner_vol
                node.vol = ori_vol
                node.g = ori_g
                node_p = sub_node_dict[node.parent]
                ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
            else:
                # Leaf: take vol/g from the original tree.
                node = sub_node_dict[sub_node_id]
                node.g = self.tree_node[sub_node_id].g
                node.vol = self.tree_node[sub_node_id].vol
                node_p = sub_node_dict[node.parent]
                ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
        return ent

    def leaf_up(self):
        """Tentatively grow a 2-level subtree under each bottom-layer parent.

        Returns (average entropy gain per graph node, mapping parent-id ->
        new subtree root (None when the parent is too small to split),
        mapping parent-id -> new subtree node dict).
        """
        h1_id = set()
        h1_new_child_tree = {}
        id_mapping = {}
        # Collect the parents of all leaves (the bottom internal layer).
        for l in self.leaves:
            p = self.tree_node[l].parent
            h1_id.add(p)
        delta = 0
        for node_id in h1_id:
            candidate_node = self.tree_node[node_id]
            sub_nodes = candidate_node.partition
            if len(sub_nodes) == 1:
                id_mapping[node_id] = None
            if len(sub_nodes) == 2:
                id_mapping[node_id] = None
            if len(sub_nodes) >= 3:
                # Volume internal to this parent's partition.
                sub_g_vol = candidate_node.vol - candidate_node.g
                subgraph_node_dict,ori_ent = self.build_sub_leaves(sub_nodes,candidate_node.vol)
                sub_root = self.__build_k_tree(g_vol=sub_g_vol,nodes_dict=subgraph_node_dict,k = 2)
                self.check_balance(subgraph_node_dict,sub_root)
                new_ent = self.leaf_up_entropy(subgraph_node_dict,sub_root,node_id)
                delta += (ori_ent - new_ent)
                h1_new_child_tree[node_id] = subgraph_node_dict
                id_mapping[node_id] = sub_root
        delta = delta / self.g_num_nodes
        return delta,id_mapping,h1_new_child_tree

    def leaf_up_update(self,id_mapping,leaf_up_dict):
        """Apply a previously computed leaf-up expansion to the main tree."""
        for node_id,h1_root in id_mapping.items():
            if h1_root is None:
                # Too small to split: lift each child by one level instead.
                children = copy.deepcopy(self.tree_node[node_id].children)
                for i in children:
                    self.single_up(self.tree_node,i)
            else:
                # Splice the new subtree in, replacing its temporary root with
                # the existing parent node_id.
                h1_dict = leaf_up_dict[node_id]
                self.tree_node[node_id].children = h1_dict[h1_root].children
                for h1_c in h1_dict[h1_root].children:
                    assert h1_c not in self.tree_node
                    h1_dict[h1_c].parent = node_id
                h1_dict.pop(h1_root)
                self.tree_node.update(h1_dict)
        self.tree_node[self.root_id].child_h += 1

    def root_down_update(self, new_id , root_down_dict):
        """Apply a previously computed root-down expansion to the main tree."""
        self.tree_node[self.root_id].children = root_down_dict[new_id].children
        for node_id in root_down_dict[new_id].children:
            assert node_id not in self.tree_node
            root_down_dict[node_id].parent = self.root_id
        root_down_dict.pop(new_id)
        self.tree_node.update(root_down_dict)
        self.tree_node[self.root_id].child_h += 1

    def build_encoding_tree(self, k=2, mode='v2'):
        """Build an encoding tree of height (at most) k.

        mode 'v1': single greedy build compressed to height k.
        mode 'v2': build a height-2 tree, then repeatedly apply whichever of
        leaf-up / root-down expansion yields the larger entropy gain until the
        tree reaches height k.
        """
        if k == 1:
            return
        if mode == 'v1' or k is None:
            self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = k)
        elif mode == 'v2':
            self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = 2)
            self.check_balance(self.tree_node,self.root_id)
            if self.tree_node[self.root_id].child_h < 2:
                self.tree_node[self.root_id].child_h = 2
            # flag: 0 = compute both deltas, 1 = refresh leaf-up only,
            # 2 = refresh root-down only (the other delta is still valid).
            flag = 0
            while self.tree_node[self.root_id].child_h < k:
                if flag == 0:
                    leaf_up_delta,id_mapping,leaf_up_dict = self.leaf_up()
                    root_down_delta, new_id , root_down_dict = self.root_down_delta()
                elif flag == 1:
                    leaf_up_delta, id_mapping, leaf_up_dict = self.leaf_up()
                elif flag == 2:
                    root_down_delta, new_id , root_down_dict = self.root_down_delta()
                else:
                    raise ValueError
                if leaf_up_delta < root_down_delta:
                    # print('root down')
                    # root down update and recompute root down delta
                    flag = 2
                    self.root_down_update(new_id,root_down_dict)
                else:
                    # leaf up update
                    # print('leave up')
                    flag = 1
                    # print(self.tree_node[self.root_id].child_h)
                    self.leaf_up_update(id_mapping,leaf_up_dict)
                    # print(self.tree_node[self.root_id].child_h)
                    # update root down leave nodes' children
                    if root_down_delta != 0:
                        for root_down_id, root_down_node in root_down_dict.items():
                            if root_down_node.child_h == 0:
                                root_down_node.children = self.tree_node[root_down_id].children
        # Sanity check: every stored node must be reachable from the root.
        count = 0
        for _ in LayerFirst(self.tree_node, self.root_id):
            count += 1
        assert len(self.tree_node) == count
if __name__ == "__main__":
undirected_adj = [[0, 3, 5, 8, 0], [3, 0, 6, 4, 11],
[5, 6, 0, 2, 0], [8, 4, 2, 0, 10],
[0, 11, 0, 10, 0]]
undirected_adj = [[0, 1], [1, 0]]
undirected_adj = np.array(undirected_adj)
y = PartitionTree(adj_matrix=undirected_adj)
x = y.build_encoding_tree(5)
for k, v in y.tree_node.items():
print(k, v.__dict__) | lib/encoding_tree.py | import math
import heapq
import numba as nb
import numpy as np
import copy
def get_id():
    """Infinite generator of consecutive integer IDs, starting at 0."""
    next_id = 0
    while True:
        yield next_id
        next_id += 1
def graph_parse(adj_matrix):
    """Parse a dense (weighted) adjacency matrix.

    Returns ``(num_nodes, total_volume, node_volumes, adjacency_table)``
    where a node's volume is the sum of its incident edge weights and the
    adjacency table maps each node to the set of its neighbours.
    """
    g_num_nodes = adj_matrix.shape[0]
    adj_table = {}
    node_vol = []
    VOL = 0
    for node in range(g_num_nodes):
        neighbours = set()
        volume = 0
        for other in range(g_num_nodes):
            weight = adj_matrix[node, other]
            if weight != 0:
                volume += weight
                neighbours.add(other)
        VOL += volume
        adj_table[node] = neighbours
        node_vol.append(volume)
    return g_num_nodes, VOL, node_vol, adj_table
# numba nopython kernel: keep the plain nested loops — this is the form numba
# compiles directly; do not replace with Python-level helpers.
@nb.jit(nopython=True)
def cut_volume(adj_matrix,p1,p2):
    """Total weight of edges crossing between vertex sets p1 and p2.

    p1 / p2 are integer numpy arrays of vertex indices into adj_matrix.
    """
    c12 = 0
    for i in range(len(p1)):
        for j in range(len(p2)):
            c = adj_matrix[p1[i],p2[j]]
            if c != 0:
                c12 += c
    return c12
def LayerFirst(node_dict, start_id):
    """Yield node ids in breadth-first (layer) order starting from start_id.

    Children are visited in each node's ``children`` set iteration order,
    matching the original implementation.
    """
    # Use an index cursor instead of list.pop(0): popping from the front of a
    # list is O(n) per call, which made the original traversal quadratic.
    queue = [start_id]
    cursor = 0
    while cursor < len(queue):
        node_id = queue[cursor]
        cursor += 1
        yield node_id
        children = node_dict[node_id].children
        if children:
            queue.extend(children)
def merge(new_ID, id1, id2, cut_v, node_dict):
    """Merge tree nodes id1 and id2 under a new parent new_ID in node_dict.

    The parent's volume/partition are the unions of the children's; its cut
    value g discounts the (doubled) cut volume between the two parts.
    """
    left, right = node_dict[id1], node_dict[id2]
    combined_partition = left.partition + right.partition
    combined_vol = left.vol + right.vol
    combined_g = left.g + right.g - 2 * cut_v
    height = 1 + max(left.child_h, right.child_h)
    parent = PartitionTreeNode(
        ID=new_ID, partition=combined_partition, children={id1, id2},
        g=combined_g, vol=combined_vol, child_h=height, child_cut=cut_v
    )
    left.parent = new_ID
    right.parent = new_ID
    node_dict[new_ID] = parent
def compressNode(node_dict, node_id, parent_id):
    """Remove node_id from the tree, attaching its children directly to parent_id.

    Transfers the compressed node's accumulated cut volume to the parent and,
    when the removal may have lowered the parent's subtree height, propagates
    corrected ``child_h`` values up the ancestor chain.
    """
    p_child_h = node_dict[parent_id].child_h
    node_children = node_dict[node_id].children
    node_dict[parent_id].child_cut += node_dict[node_id].child_cut
    node_dict[parent_id].children.remove(node_id)
    node_dict[parent_id].children = node_dict[parent_id].children.union(node_children)
    for c in node_children:
        node_dict[c].parent = parent_id
    com_node_child_h = node_dict[node_id].child_h
    node_dict.pop(node_id)
    # Heights can only shrink when the removed node was (one of) the parent's
    # tallest children, i.e. its child_h was exactly parent's child_h - 1.
    if (p_child_h - com_node_child_h) == 1:
        while True:
            max_child_h = max([node_dict[f_c].child_h for f_c in node_dict[parent_id].children])
            if node_dict[parent_id].child_h == (max_child_h + 1):
                # Height already consistent; stop propagating upward.
                break
            node_dict[parent_id].child_h = max_child_h + 1
            parent_id = node_dict[parent_id].parent
            if parent_id is None:
                break
def child_tree_deepth(node_dict, nid):
    """Depth of nid's subtree measured from the root: number of ancestors
    above nid plus nid's own subtree height (child_h)."""
    current = node_dict[nid]
    ancestors = 0
    while current.parent is not None:
        current = node_dict[current.parent]
        ancestors += 1
    return ancestors + node_dict[nid].child_h
def CompressDelta(node1, p_node):
    """Structural-entropy change from compressing node1 into its parent p_node."""
    return node1.child_cut * math.log(p_node.vol / node1.vol)
def CombineDelta(node1, node2, cut_v, g_vol):
    """Structural-entropy change from merging node1 and node2 (with cut volume
    cut_v between them) inside a graph of total volume g_vol."""
    v1, v2 = node1.vol, node2.vol
    merged_vol = v1 + v2
    term1 = (v1 - node1.g) * math.log(merged_vol / v1, 2)
    term2 = (v2 - node2.g) * math.log(merged_vol / v2, 2)
    term3 = 2 * cut_v * math.log(g_vol / merged_vol, 2)
    return (term1 + term2 - term3) / g_vol
class PartitionTreeNode():
    """One node of an encoding (partition) tree.

    Attributes:
        ID: unique node identifier.
        partition: list of graph vertices covered by this node.
        vol: volume (total weight of edges incident to the partition).
        g: cut value (weight of edges leaving the partition).
        parent / children: tree links (children is a set of node ids).
        child_h: height of the subtree below this node (this node excluded).
        child_cut: accumulated cut volume of merged children.
        merged: scratch flag used during greedy tree construction.
    """

    def __init__(self, ID, partition, vol, g, children: set = None, parent=None, child_h=0, child_cut=0):
        self.ID = ID
        self.partition = partition
        self.parent = parent
        self.children = children
        self.vol = vol
        self.g = g
        self.merged = False
        self.child_h = child_h
        self.child_cut = child_cut

    def __str__(self):
        return "{" + "{}:{}".format(self.__class__.__name__, self.gatherAttrs()) + "}"

    def gatherAttrs(self):
        """Render all instance attributes as 'name=value' joined by commas."""
        return ",".join(
            "{}={}".format(name, value) for name, value in self.__dict__.items()
        )
class PartitionTree():
def __init__(self,adj_matrix):
self.adj_matrix = adj_matrix
self.tree_node = {}
self.g_num_nodes, self.VOL, self.node_vol, self.adj_table = graph_parse(adj_matrix)
self.id_g = get_id()
self.leaves = []
self.build_leaves()
def build_leaves(self):
for vertex in range(self.g_num_nodes):
ID = next(self.id_g)
v = self.node_vol[vertex]
leaf_node = PartitionTreeNode(ID=ID, partition=[vertex], g = v, vol=v)
self.tree_node[ID] = leaf_node
self.leaves.append(ID)
def build_sub_leaves(self,node_list,p_vol):
subgraph_node_dict = {}
ori_ent = 0
for vertex in node_list:
ori_ent += -(self.tree_node[vertex].g / self.VOL)\
* math.log2(self.tree_node[vertex].vol / p_vol)
sub_n = set()
vol = 0
for vertex_n in node_list:
c = self.adj_matrix[vertex,vertex_n]
if c != 0:
vol += c
sub_n.add(vertex_n)
sub_leaf = PartitionTreeNode(ID=vertex,partition=[vertex],g=vol,vol=vol)
subgraph_node_dict[vertex] = sub_leaf
self.adj_table[vertex] = sub_n
return subgraph_node_dict,ori_ent
def build_root_down(self):
root_child = self.tree_node[self.root_id].children
subgraph_node_dict = {}
ori_en = 0
g_vol = self.tree_node[self.root_id].vol
for node_id in root_child:
node = self.tree_node[node_id]
ori_en += -(node.g / g_vol) * math.log2(node.vol / g_vol)
new_n = set()
for nei in self.adj_table[node_id]:
if nei in root_child:
new_n.add(nei)
self.adj_table[node_id] = new_n
new_node = PartitionTreeNode(ID=node_id,partition=node.partition,vol=node.vol,g = node.g,children=node.children)
subgraph_node_dict[node_id] = new_node
return subgraph_node_dict, ori_en
def entropy(self,node_dict = None):
if node_dict is None:
node_dict = self.tree_node
ent = 0
for node_id,node in node_dict.items():
if node.parent is not None:
node_p = node_dict[node.parent]
node_vol = node.vol
node_g = node.g
node_p_vol = node_p.vol
ent += - (node_g / self.VOL) * math.log2(node_vol / node_p_vol)
return ent
def __build_k_tree(self,g_vol,nodes_dict:dict,k = None,):
min_heap = []
cmp_heap = []
nodes_ids = nodes_dict.keys()
new_id = None
for i in nodes_ids:
for j in self.adj_table[i]:
if j > i:
n1 = nodes_dict[i]
n2 = nodes_dict[j]
if len(n1.partition) == 1 and len(n2.partition) == 1:
cut_v = self.adj_matrix[n1.partition[0],n2.partition[0]]
else:
cut_v = cut_volume(self.adj_matrix,p1 = np.array(n1.partition),p2=np.array(n2.partition))
diff = CombineDelta(nodes_dict[i], nodes_dict[j], cut_v, g_vol)
heapq.heappush(min_heap, (diff, i, j, cut_v))
unmerged_count = len(nodes_ids)
while unmerged_count > 1:
if len(min_heap) == 0:
break
diff, id1, id2, cut_v = heapq.heappop(min_heap)
if nodes_dict[id1].merged or nodes_dict[id2].merged:
continue
nodes_dict[id1].merged = True
nodes_dict[id2].merged = True
new_id = next(self.id_g)
merge(new_id, id1, id2, cut_v, nodes_dict)
self.adj_table[new_id] = self.adj_table[id1].union(self.adj_table[id2])
for i in self.adj_table[new_id]:
self.adj_table[i].add(new_id)
#compress delta
if nodes_dict[id1].child_h > 0:
heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id1],nodes_dict[new_id]),id1,new_id])
if nodes_dict[id2].child_h > 0:
heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id2],nodes_dict[new_id]),id2,new_id])
unmerged_count -= 1
for ID in self.adj_table[new_id]:
if not nodes_dict[ID].merged:
n1 = nodes_dict[ID]
n2 = nodes_dict[new_id]
cut_v = cut_volume(self.adj_matrix,np.array(n1.partition), np.array(n2.partition))
new_diff = CombineDelta(nodes_dict[ID], nodes_dict[new_id], cut_v, g_vol)
heapq.heappush(min_heap, (new_diff, ID, new_id, cut_v))
root = new_id
if unmerged_count > 1:
#combine solitary node
# print('processing solitary node')
assert len(min_heap) == 0
unmerged_nodes = {i for i, j in nodes_dict.items() if not j.merged}
new_child_h = max([nodes_dict[i].child_h for i in unmerged_nodes]) + 1
new_id = next(self.id_g)
new_node = PartitionTreeNode(ID=new_id,partition=list(nodes_ids),children=unmerged_nodes,
vol=g_vol,g = 0,child_h=new_child_h)
nodes_dict[new_id] = new_node
for i in unmerged_nodes:
nodes_dict[i].merged = True
nodes_dict[i].parent = new_id
if nodes_dict[i].child_h > 0:
heapq.heappush(cmp_heap, [CompressDelta(nodes_dict[i], nodes_dict[new_id]), i, new_id])
root = new_id
if k is not None:
while nodes_dict[root].child_h > k:
diff, node_id, p_id = heapq.heappop(cmp_heap)
if child_tree_deepth(nodes_dict, node_id) <= k:
continue
children = nodes_dict[node_id].children
compressNode(nodes_dict, node_id, p_id)
if nodes_dict[root].child_h == k:
break
for e in cmp_heap:
if e[1] == p_id:
if child_tree_deepth(nodes_dict, p_id) > k:
e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[e[2]])
if e[1] in children:
if nodes_dict[e[1]].child_h == 0:
continue
if child_tree_deepth(nodes_dict, e[1]) > k:
e[2] = p_id
e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[p_id])
heapq.heapify(cmp_heap)
return root
def check_balance(self,node_dict,root_id):
root_c = copy.deepcopy(node_dict[root_id].children)
for c in root_c:
if node_dict[c].child_h == 0:
self.single_up(node_dict,c)
def single_up(self,node_dict,node_id):
new_id = next(self.id_g)
p_id = node_dict[node_id].parent
grow_node = PartitionTreeNode(ID=new_id, partition=node_dict[node_id].partition, parent=p_id,
children={node_id}, vol=node_dict[node_id].vol, g=node_dict[node_id].g)
node_dict[node_id].parent = new_id
node_dict[p_id].children.remove(node_id)
node_dict[p_id].children.add(new_id)
node_dict[new_id] = grow_node
node_dict[new_id].child_h = node_dict[node_id].child_h + 1
self.adj_table[new_id] = self.adj_table[node_id]
for i in self.adj_table[node_id]:
self.adj_table[i].add(new_id)
def root_down_delta(self):
    """Trial-build a 2-level re-partition of the root's children.

    Returns (entropy gain per root child, new sub-root id, sub node dict),
    or (0, None, None) when the root has fewer than three children.
    """
    if len(self.tree_node[self.root_id].children) < 3:
        # Too few children to split any further.
        return 0, None, None
    subgraph_node_dict, ori_entropy = self.build_root_down()
    new_root = self.__build_k_tree(g_vol=self.tree_node[self.root_id].vol,
                                   nodes_dict=subgraph_node_dict, k=2)
    self.check_balance(subgraph_node_dict, new_root)
    new_entropy = self.entropy(subgraph_node_dict)
    return ((ori_entropy - new_entropy) / len(self.tree_node[self.root_id].children),
            new_root, subgraph_node_dict)
def leaf_up_entropy(self,sub_node_dict,sub_root_id,node_id):
    """Structural entropy of a candidate leaf-up subtree.

    Rescales the subtree's vol/g values back to the original graph's leaf
    volumes while traversing it layer-first, accumulating the entropy term
    -(g / VOL) * log2(vol / parent_vol) for every non-root node.

    Args:
        sub_node_dict: nodes of the candidate subtree.
        sub_root_id: id of the subtree's root within sub_node_dict.
        node_id: id of the original tree node the subtree replaces.

    Returns:
        The (float) entropy of the rescaled subtree.
    """
    ent = 0
    for sub_node_id in LayerFirst(sub_node_dict,sub_root_id):
        if sub_node_id == sub_root_id:
            # The sub-root inherits the replaced node's totals verbatim.
            sub_node_dict[sub_root_id].vol = self.tree_node[node_id].vol
            sub_node_dict[sub_root_id].g = self.tree_node[node_id].g
        elif sub_node_dict[sub_node_id].child_h == 1:
            # Interior node created on the contracted subgraph: recover its
            # original volume from the leaves it covers, keeping the inner
            # (within-partition) volume fixed.
            node = sub_node_dict[sub_node_id]
            inner_vol = node.vol - node.g
            partition = node.partition
            ori_vol = sum(self.tree_node[i].vol for i in partition)
            ori_g = ori_vol - inner_vol
            node.vol = ori_vol
            node.g = ori_g
            node_p = sub_node_dict[node.parent]
            ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
        else:
            # Leaf of the subtree: copy vol/g straight from the original tree.
            node = sub_node_dict[sub_node_id]
            node.g = self.tree_node[sub_node_id].g
            node.vol = self.tree_node[sub_node_id].vol
            node_p = sub_node_dict[node.parent]
            ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
    return ent
def leaf_up(self):
    """Evaluate the "leaf up" move: re-partition each parent-of-leaves node.

    Returns (average entropy gain per graph node, {node_id: sub_root or None},
    {node_id: rebuilt sub node dict}).  Nodes covering one or two leaves are
    mapped to None (nothing to re-partition).
    """
    parents_of_leaves = {self.tree_node[leaf].parent for leaf in self.leaves}
    new_subtrees = {}
    id_mapping = {}
    delta = 0
    for node_id in parents_of_leaves:
        candidate = self.tree_node[node_id]
        members = candidate.partition
        if 1 <= len(members) <= 2:
            # Too small to split further.
            id_mapping[node_id] = None
        elif len(members) >= 3:
            inner_vol = candidate.vol - candidate.g
            sub_dict, ori_ent = self.build_sub_leaves(members, candidate.vol)
            sub_root = self.__build_k_tree(g_vol=inner_vol, nodes_dict=sub_dict, k=2)
            self.check_balance(sub_dict, sub_root)
            new_ent = self.leaf_up_entropy(sub_dict, sub_root, node_id)
            delta += ori_ent - new_ent
            new_subtrees[node_id] = sub_dict
            id_mapping[node_id] = sub_root
    return delta / self.g_num_nodes, id_mapping, new_subtrees
def leaf_up_update(self,id_mapping,leaf_up_dict):
    """Apply a previously evaluated leaf-up move to the main tree.

    For candidates mapped to None, lift each child one level with single_up;
    otherwise graft the rebuilt subtree (from leaf_up) in place of the
    candidate's children.  Increments the root height afterwards.
    """
    for node_id,h1_root in id_mapping.items():
        if h1_root is None:
            # Candidate was too small to re-partition: just lift each child.
            children = copy.deepcopy(self.tree_node[node_id].children)
            for i in children:
                self.single_up(self.tree_node,i)
        else:
            # Splice the subtree: its root is discarded and node_id adopts
            # the subtree root's children directly.
            h1_dict = leaf_up_dict[node_id]
            self.tree_node[node_id].children = h1_dict[h1_root].children
            for h1_c in h1_dict[h1_root].children:
                # Fresh ids from self.id_g must not collide with existing nodes.
                assert h1_c not in self.tree_node
                h1_dict[h1_c].parent = node_id
            h1_dict.pop(h1_root)
            self.tree_node.update(h1_dict)
    self.tree_node[self.root_id].child_h += 1
def root_down_update(self, new_id, root_down_dict):
    """Graft the 2-level subtree produced by root_down_delta under the root.

    The subtree's own root (new_id) is discarded; the main root adopts its
    children, and the root height grows by one.
    """
    adopted = root_down_dict[new_id].children
    self.tree_node[self.root_id].children = adopted
    for child_id in adopted:
        # New ids come from self.id_g and must not collide with existing nodes.
        assert child_id not in self.tree_node
        root_down_dict[child_id].parent = self.root_id
    root_down_dict.pop(new_id)
    self.tree_node.update(root_down_dict)
    self.tree_node[self.root_id].child_h += 1
def build_encoding_tree(self, k=2, mode='v2'):
    """Build a k-level encoding tree over the graph.

    mode 'v1' (or k=None) builds the k-level tree in one pass; mode 'v2'
    builds a 2-level tree first, then greedily grows it one level at a time,
    applying whichever of "leaf up" / "root down" reduces entropy more.

    Args:
        k: target tree height; k == 1 is a no-op.
        mode: 'v1' or 'v2'.
    """
    if k == 1:
        return
    if mode == 'v1' or k is None:
        self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = k)
    elif mode == 'v2':
        self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = 2)
        self.check_balance(self.tree_node,self.root_id)
        if self.tree_node[self.root_id].child_h < 2:
            self.tree_node[self.root_id].child_h = 2
        # flag: 0 = first pass (compute both deltas); 1 = last step was
        # leaf-up (recompute only leaf-up, reuse the stale root-down delta);
        # 2 = last step was root-down (recompute only root-down).
        flag = 0
        while self.tree_node[self.root_id].child_h < k:
            if flag == 0:
                leaf_up_delta,id_mapping,leaf_up_dict = self.leaf_up()
                root_down_delta, new_id , root_down_dict = self.root_down_delta()
            elif flag == 1:
                leaf_up_delta, id_mapping, leaf_up_dict = self.leaf_up()
            elif flag == 2:
                root_down_delta, new_id , root_down_dict = self.root_down_delta()
            else:
                raise ValueError
            # NOTE(review): if root_down_delta is 0 (new_id is None) and
            # leaf_up_delta is negative, the root-down branch would be taken
            # with new_id=None and fail -- confirm deltas are non-negative.
            if leaf_up_delta < root_down_delta:
                # root down wins: graft the new 2-level subtree under the root.
                flag = 2
                self.root_down_update(new_id,root_down_dict)
            else:
                # leaf up wins: re-partition the parents of the leaves.
                flag = 1
                self.leaf_up_update(id_mapping,leaf_up_dict)
                # update root down leave nodes' children: the cached root-down
                # candidate must track the children just changed by leaf-up.
                if root_down_delta != 0:
                    for root_down_id, root_down_node in root_down_dict.items():
                        if root_down_node.child_h == 0:
                            root_down_node.children = self.tree_node[root_down_id].children
    # Sanity check: every stored node must be reachable from the root.
    count = 0
    for _ in LayerFirst(self.tree_node, self.root_id):
        count += 1
    assert len(self.tree_node) == count
if __name__ == "__main__":
    # Demo: build an encoding tree for a small weighted undirected graph.
    undirected_adj = [[0, 3, 5, 8, 0], [3, 0, 6, 4, 11],
                      [5, 6, 0, 2, 0], [8, 4, 2, 0, 10],
                      [0, 11, 0, 10, 0]]
    # NOTE(review): the 5x5 matrix above is dead code -- immediately replaced.
    undirected_adj = [[0, 1], [1, 0]]
    undirected_adj = np.array(undirected_adj)
    y = PartitionTree(adj_matrix=undirected_adj)
    # build_encoding_tree mutates y in place (its only bare `return` yields None).
    x = y.build_encoding_tree(5)
    for k, v in y.tree_node.items():
        print(k, v.__dict__) | 0.139045 | 0.173288
from actstream import action
from actstream.models import any_stream
from apollo.forms import ToggleStaffForm
from applications.business.models import Business
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
def base(request):
    """Home page: business dashboard for authenticated users, otherwise the
    public prototype page."""
    if not request.user.is_authenticated():
        return base_prototype(request)
    context = {
        'businesses': Business.objects.filter(businessmembership__user=request.user),
    }
    return render_to_response('business/business_home.html', context,
                              context_instance=RequestContext(request))
def base_idea(request):
    """Render the static 'idea' page."""
    ctx = RequestContext(request)
    return render_to_response('base/base_idea.html', {}, context_instance=ctx)
def base_prototype(request):
    """Render the static 'prototype' page."""
    ctx = RequestContext(request)
    return render_to_response('base/base_prototype.html', {}, context_instance=ctx)
def base_contact(request):
    """Render the static 'contact' page."""
    ctx = RequestContext(request)
    return render_to_response('base/base_contact.html', {}, context_instance=ctx)
def ws_demo(request):
    """Render the websocket demo page."""
    ctx = RequestContext(request)
    return render_to_response('demo.html', {}, context_instance=ctx)
def toggle_staff_view(request):
    """Let the current user toggle their own staff flag.

    GET renders an unbound form; a valid POST saves it, records an
    activity-stream action and redirects home.  An invalid POST re-renders
    the *bound* form so validation errors reach the template (previously the
    bound form was discarded and the template received no form at all).
    """
    data = dict()
    if request.method == 'POST':
        form = ToggleStaffForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            messages.success(request, "You have successfully edited your staff privileges.")
            action.send(request.user, verb='toggled staff mode {boolean}'.format(boolean=form.cleaned_data['is_staff']))
            return redirect('/')
        # BUG FIX: keep the bound form so its errors are displayed.
        data['form'] = form
    else:
        data['form'] = ToggleStaffForm(instance=request.user)
    return render_to_response('account/toggle_staff.html', data, context_instance=RequestContext(request))
def view_self_activity(request):
    """
    Return all the actions that the user performed on the site.
    """
    data = dict()
    # actstream query across every stream the user appears in.
    data['activity'] = any_stream(request.user)
    return render_to_response('base/activity_stream.html', data, context_instance=RequestContext(request)) | apollo/views.py | from actstream import action
from actstream.models import any_stream
from apollo.forms import ToggleStaffForm
from applications.business.models import Business
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
def base(request):
if request.user.is_authenticated():
data = dict()
data['businesses'] = Business.objects.filter(businessmembership__user=request.user)
return render_to_response('business/business_home.html', data, context_instance=RequestContext(request))
else:
return base_prototype(request)
def base_idea(request):
return render_to_response('base/base_idea.html', {}, context_instance=RequestContext(request))
def base_prototype(request):
return render_to_response('base/base_prototype.html', {}, context_instance=RequestContext(request))
def base_contact(request):
return render_to_response('base/base_contact.html', {}, context_instance=RequestContext(request))
def ws_demo(request):
return render_to_response('demo.html', {}, context_instance=RequestContext(request))
def toggle_staff_view(request):
data = dict()
if request.method == 'GET':
data['form'] = ToggleStaffForm(instance=request.user)
elif request.method == 'POST':
form = ToggleStaffForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
messages.success(request, "You have successfully edited your staff privileges.")
action.send(request.user, verb='toggled staff mode {boolean}'.format(boolean=form.cleaned_data['is_staff']))
return redirect('/')
return render_to_response('account/toggle_staff.html', data, context_instance=RequestContext(request))
def view_self_activity(request):
"""
Return all the actions that the user performed on the site.
"""
data = dict()
data['activity'] = any_stream(request.user)
return render_to_response('base/activity_stream.html', data, context_instance=RequestContext(request)) | 0.470007 | 0.069668 |
import numpy as np
from skimage.measure import marching_cubes_lewiner
from tqdm import tqdm
import trimesh
import torch
def decode_feature_grid(
    nerf,
    volume,
    weight_mask,
    num_hits,
    sdf_delta,
    min_coords,
    max_coords,
    volume_resolution,
    voxel_size,
    step_size=0.25,
    batch_size=500,
    level=0.,
    path=None
):
    """Extract a triangle mesh from a learned SDF feature grid.

    Densely samples every occupied voxel through `nerf`, runs marching cubes
    per voxel batch and stitches the pieces into a single trimesh.

    Args:
        nerf: callable whose per-point output's last channel is the SDF value.
        volume: feature volume tensor; also fixes the device used.
        weight_mask, sdf_delta: auxiliary inputs forwarded to `nerf`.
        num_hits: tensor whose nonzero entries at [0][0] mark occupied voxels.
        min_coords, max_coords: world-space bounds of the volume.
        volume_resolution: grid resolution forwarded to `nerf`.
        voxel_size: world-space edge length of one voxel.
        step_size: intra-voxel sampling step, in voxel units (must be <= 1).
        batch_size: number of voxels decoded per forward pass.
        level: SDF iso-level to extract.
        path: if given, export the mesh there (returns None); otherwise
            return the trimesh.Trimesh.  Returns None as well when the
            iso-surface is never crossed.
    """
    device = volume.device
    # (N, 3) integer coordinates of voxels that received at least one hit.
    occupied_voxels = torch.nonzero(num_hits[0][0]).cpu().numpy()
    assert step_size <= 1
    all_vertices = []
    all_faces = []
    last_face_id = 0  # running vertex-index offset for the stitched faces
    min_sdf = []
    max_sdf = []
    for i in tqdm(range(0, len(occupied_voxels), batch_size)):
        origin = occupied_voxels[i:i+batch_size]
        n_batches = len(origin)
        # Sample offsets centred on the voxel: [-0.5 .. +0.5] in voxel units.
        range_ = np.arange(0, 1+step_size, step_size) - 0.5
        spacing = [range_[1] - range_[0]] * 3
        voxel_coords = np.stack(
            np.meshgrid(range_, range_, range_, indexing="ij"),
            axis=-1
        )
        voxel_coords = np.tile(voxel_coords, (n_batches, 1, 1, 1, 1))
        voxel_coords += origin[:, None, None, None, :]
        voxel_coords = torch.from_numpy(
            voxel_coords).float().to(device)
        # Voxel-grid coordinates -> world coordinates.
        voxel_pts = voxel_coords * voxel_size + min_coords
        H, W, D = voxel_pts.shape[1:4]
        voxel_pts = voxel_pts.reshape(1, n_batches, -1, 3)
        # View directions are irrelevant for SDF decoding; pass zeros.
        dirs = torch.zeros_like(voxel_pts)
        pts_and_dirs = torch.cat([voxel_pts, dirs], dim=-1)
        out, _ = nerf(
            pts_and_dirs,
            volume,
            weight_mask,
            sdf_delta,
            voxel_size,
            volume_resolution,
            min_coords,
            max_coords,
            active_voxels=None,
        )
        # Last output channel is the SDF value.
        sdf = out[0, :, :, -1].reshape(n_batches, H, W, D)
        sdf = sdf.detach().cpu().numpy()
        min_sdf.append(np.min(sdf))
        max_sdf.append(np.max(sdf))
        for j in range(n_batches):
            # Run marching cubes only where the iso-surface actually crosses.
            if np.max(sdf[j]) > level and np.min(sdf[j]) < level:
                verts, faces, normals, values = \
                    marching_cubes_lewiner(
                        sdf[j],
                        level=level,
                        spacing=spacing
                    )
                verts += origin[j] - 0.5  # back to global voxel coordinates
                all_vertices.append(verts)
                all_faces.append(faces + last_face_id)
                last_face_id += np.max(faces) + 1
    # NOTE(review): np.min/np.max raise on empty lists when no voxel is
    # occupied -- confirm callers guarantee at least one occupied voxel.
    # marching_cubes_lewiner was removed in scikit-image 0.19 (pin < 0.19).
    print(np.min(min_sdf))
    print(np.max(max_sdf))
    if len(all_vertices) == 0:
        return None
    final_vertices = np.concatenate(all_vertices, axis=0)
    final_faces = np.concatenate(all_faces, axis=0)
    # Voxel coordinates -> world coordinates for the merged mesh.
    final_vertices = final_vertices * voxel_size + min_coords.cpu().numpy()
    # all_normals = np.concatenate(all_normals, axis=0)
    mesh = trimesh.Trimesh(
        vertices=final_vertices,
        faces=final_faces,
        # vertex_normals=all_normals,
        process=False
    )
    if path is None:
        return mesh
    else:
        mesh.export(path)
def get_neighbors(points):
    """Return the 8 integer corner coordinates surrounding each point.

    args: points: [b, n_steps, n_samples, 3]
    returns: [b, 8, n_steps, n_samples, 3]

    The eight hand-unrolled torch.stack calls of the original are collapsed
    into a table-driven loop.  The corner ORDER is significant to callers
    and is kept exactly as before (it is not itertools.product order).
    """
    floor, ceil = torch.floor, torch.ceil
    corner_ops = [
        (floor, floor, floor),
        (ceil, floor, floor),
        (floor, ceil, floor),
        (floor, floor, ceil),
        (ceil, ceil, floor),
        (ceil, floor, ceil),
        (floor, ceil, ceil),
        (ceil, ceil, ceil),
    ]
    corners = [
        torch.stack(
            [op_x(points[..., 0]), op_y(points[..., 1]), op_z(points[..., 2])],
            dim=-1,
        )
        for op_x, op_y, op_z in corner_ops
    ]
    return torch.stack(corners, dim=1)
from skimage.measure import marching_cubes_lewiner
from tqdm import tqdm
import trimesh
import torch
def decode_feature_grid(
nerf,
volume,
weight_mask,
num_hits,
sdf_delta,
min_coords,
max_coords,
volume_resolution,
voxel_size,
step_size=0.25,
batch_size=500,
level=0.,
path=None
):
device = volume.device
occupied_voxels = torch.nonzero(num_hits[0][0]).cpu().numpy()
assert step_size <= 1
all_vertices = []
all_faces = []
last_face_id = 0
min_sdf = []
max_sdf = []
for i in tqdm(range(0, len(occupied_voxels), batch_size)):
origin = occupied_voxels[i:i+batch_size]
n_batches = len(origin)
range_ = np.arange(0, 1+step_size, step_size) - 0.5
spacing = [range_[1] - range_[0]] * 3
voxel_coords = np.stack(
np.meshgrid(range_, range_, range_, indexing="ij"),
axis=-1
)
voxel_coords = np.tile(voxel_coords, (n_batches, 1, 1, 1, 1))
voxel_coords += origin[:, None, None, None, :]
voxel_coords = torch.from_numpy(
voxel_coords).float().to(device)
voxel_pts = voxel_coords * voxel_size + min_coords
H, W, D = voxel_pts.shape[1:4]
voxel_pts = voxel_pts.reshape(1, n_batches, -1, 3)
dirs = torch.zeros_like(voxel_pts)
pts_and_dirs = torch.cat([voxel_pts, dirs], dim=-1)
out, _ = nerf(
pts_and_dirs,
volume,
weight_mask,
sdf_delta,
voxel_size,
volume_resolution,
min_coords,
max_coords,
active_voxels=None,
)
sdf = out[0, :, :, -1].reshape(n_batches, H, W, D)
sdf = sdf.detach().cpu().numpy()
min_sdf.append(np.min(sdf))
max_sdf.append(np.max(sdf))
for j in range(n_batches):
if np.max(sdf[j]) > level and np.min(sdf[j]) < level:
verts, faces, normals, values = \
marching_cubes_lewiner(
sdf[j],
level=level,
spacing=spacing
)
verts += origin[j] - 0.5
all_vertices.append(verts)
all_faces.append(faces + last_face_id)
last_face_id += np.max(faces) + 1
print(np.min(min_sdf))
print(np.max(max_sdf))
if len(all_vertices) == 0:
return None
final_vertices = np.concatenate(all_vertices, axis=0)
final_faces = np.concatenate(all_faces, axis=0)
final_vertices = final_vertices * voxel_size + min_coords.cpu().numpy()
# all_normals = np.concatenate(all_normals, axis=0)
mesh = trimesh.Trimesh(
vertices=final_vertices,
faces=final_faces,
# vertex_normals=all_normals,
process=False
)
if path is None:
return mesh
else:
mesh.export(path)
def get_neighbors(points):
"""
args: voxel_coordinates: [b, n_steps, n_samples, 3]
"""
return torch.stack([
torch.stack(
[
torch.floor(points[:, :, :, 0]),
torch.floor(points[:, :, :, 1]),
torch.floor(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.ceil(points[:, :, :, 0]),
torch.floor(points[:, :, :, 1]),
torch.floor(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.floor(points[:, :, :, 0]),
torch.ceil(points[:, :, :, 1]),
torch.floor(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.floor(points[:, :, :, 0]),
torch.floor(points[:, :, :, 1]),
torch.ceil(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.ceil(points[:, :, :, 0]),
torch.ceil(points[:, :, :, 1]),
torch.floor(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.ceil(points[:, :, :, 0]),
torch.floor(points[:, :, :, 1]),
torch.ceil(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.floor(points[:, :, :, 0]),
torch.ceil(points[:, :, :, 1]),
torch.ceil(points[:, :, :, 2])
],
dim=-1
),
torch.stack(
[
torch.ceil(points[:, :, :, 0]),
torch.ceil(points[:, :, :, 1]),
torch.ceil(points[:, :, :, 2])
],
dim=-1
),
], dim=1) | 0.433981 | 0.417746 |
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from ..models import DatabaseResize
from .factory import DatabaseResizeFactory
class DatabaseResizeTestCase(TestCase):
    """Unit tests for the DatabaseResize maintenance model's state machine."""

    def setUp(self):
        self.database_resize = DatabaseResizeFactory()

    def tearDown(self):
        self.database_resize.delete()

    def test_update_step(self):
        """First update_step starts the task; later ones keep started_at."""
        self.assertIsNone(self.database_resize.started_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
        self.assertEqual(self.database_resize.current_step, 0)
        self.database_resize.update_step(1)
        self.assertIsNotNone(self.database_resize.started_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.RUNNING)
        self.assertEqual(self.database_resize.current_step, 1)
        # started_at is set once and must not move on later steps.
        started_at_first = self.database_resize.started_at
        self.database_resize.update_step(2)
        self.assertEqual(self.database_resize.started_at, started_at_first)
        self.assertEqual(self.database_resize.status, DatabaseResize.RUNNING)
        self.assertEqual(self.database_resize.current_step, 2)

    def test_status_error(self):
        """set_error stamps finished_at and flips status to ERROR."""
        self.assertIsNone(self.database_resize.finished_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
        self.database_resize.set_error()
        self.assertIsNotNone(self.database_resize.finished_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.ERROR)

    def test_status_success(self):
        """set_success stamps finished_at and flips status to SUCCESS."""
        self.assertIsNone(self.database_resize.finished_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
        self.database_resize.set_success()
        self.assertIsNotNone(self.database_resize.finished_at)
        self.assertEqual(self.database_resize.status, DatabaseResize.SUCCESS)

    def test_is_status_error(self):
        self.assertFalse(self.database_resize.is_status_error)
        self.database_resize.set_error()
        self.assertTrue(self.database_resize.is_status_error)

    def test_can_do_retry(self):
        self.assertTrue(self.database_resize.can_do_retry)

    def test_can_do_retry_to_other_database(self):
        """A resize on an unrelated database does not block retries here."""
        self.assertTrue(self.database_resize.can_do_retry)
        new_resize = DatabaseResizeFactory()
        self.assertTrue(new_resize.can_do_retry)
        self.assertTrue(self.database_resize.can_do_retry)

    def test_cannot_do_retry(self):
        """A newer resize for the same database/offer supersedes the old one."""
        self.assertTrue(self.database_resize.can_do_retry)
        new_resize = DatabaseResizeFactory(
            database=self.database_resize.database,
            source_offer=self.database_resize.source_offer
        )
        self.assertTrue(new_resize.can_do_retry)
        old_resize = DatabaseResize.objects.get(id=self.database_resize.id)
        self.assertFalse(old_resize.can_do_retry) | dbaas/maintenance/tests/test_database_resize_model.py | from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from ..models import DatabaseResize
from .factory import DatabaseResizeFactory
class DatabaseResizeTestCase(TestCase):
def setUp(self):
self.database_resize = DatabaseResizeFactory()
def tearDown(self):
self.database_resize.delete()
def test_update_step(self):
self.assertIsNone(self.database_resize.started_at)
self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
self.assertEqual(self.database_resize.current_step, 0)
self.database_resize.update_step(1)
self.assertIsNotNone(self.database_resize.started_at)
self.assertEqual(self.database_resize.status, DatabaseResize.RUNNING)
self.assertEqual(self.database_resize.current_step, 1)
started_at_first = self.database_resize.started_at
self.database_resize.update_step(2)
self.assertEqual(self.database_resize.started_at, started_at_first)
self.assertEqual(self.database_resize.status, DatabaseResize.RUNNING)
self.assertEqual(self.database_resize.current_step, 2)
def test_status_error(self):
self.assertIsNone(self.database_resize.finished_at)
self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
self.database_resize.set_error()
self.assertIsNotNone(self.database_resize.finished_at)
self.assertEqual(self.database_resize.status, DatabaseResize.ERROR)
def test_status_success(self):
self.assertIsNone(self.database_resize.finished_at)
self.assertEqual(self.database_resize.status, DatabaseResize.WAITING)
self.database_resize.set_success()
self.assertIsNotNone(self.database_resize.finished_at)
self.assertEqual(self.database_resize.status, DatabaseResize.SUCCESS)
def test_is_status_error(self):
self.assertFalse(self.database_resize.is_status_error)
self.database_resize.set_error()
self.assertTrue(self.database_resize.is_status_error)
def test_can_do_retry(self):
self.assertTrue(self.database_resize.can_do_retry)
def test_can_do_retry_to_other_database(self):
self.assertTrue(self.database_resize.can_do_retry)
new_resize = DatabaseResizeFactory()
self.assertTrue(new_resize.can_do_retry)
self.assertTrue(self.database_resize.can_do_retry)
def test_cannot_do_retry(self):
self.assertTrue(self.database_resize.can_do_retry)
new_resize = DatabaseResizeFactory(
database=self.database_resize.database,
source_offer=self.database_resize.source_offer
)
self.assertTrue(new_resize.can_do_retry)
old_resize = DatabaseResize.objects.get(id=self.database_resize.id)
self.assertFalse(old_resize.can_do_retry) | 0.60964 | 0.294133 |
import requests
from bs4 import BeautifulSoup
from proxy import Random_Proxy
def getMovies(query, page, proxie):
    """Scrape fmovies.to search results.

    Args:
        query: search keyword.
        page: page number (string) or None for the first page.
        proxie: the literal string 'true' to route the request through a
            random proxy; anything else fetches directly.

    Returns:
        dict with 'success', 'query', 'currentPage', 'totalPages' and a
        'data' list of per-title dicts.  On a network error 'success' is
        False and 'error' holds the message.  Per-field scrape failures
        store the exception text in that field (original behaviour kept).
    """
    moviesDictionary = {
        'success': True,
        'query': query,
        'data': [],
    }
    proxy = Random_Proxy()

    base_url = f'https://fmovies.to/search?keyword={query}'
    currentPage = '1'
    if page is not None:
        base_url = f'{base_url}&page={page}'
        currentPage = page
    try:
        if proxie == 'true':
            r = proxy.Proxy_Request(url=base_url, request_type='get')
            content = r.content
        else:
            content = requests.get(base_url).content
    except requests.exceptions.RequestException as e:
        # BUG FIX: these assignments previously ended with trailing commas,
        # storing 1-tuples -- which made 'success' truthy even on failure.
        moviesDictionary['success'] = False
        moviesDictionary['error'] = str(e)
        return moviesDictionary
    soup = BeautifulSoup(content, 'lxml')
    moviesDictionary['currentPage'] = currentPage

    def extract(getter):
        # Keep the original contract: a failed field holds the exception text.
        try:
            return getter()
        except Exception as e:
            return str(e)

    for item in soup.find_all('div', class_='item'):
        link = extract(lambda: f"https://fmovies.to{item.find('a').get('href')}")
        title = extract(lambda: item.find('a').get('title'))
        cover = extract(lambda: item.find('img')['src'])
        quality = extract(lambda: item.find('div', class_='quality').text)
        imdb = extract(lambda: item.find('span', class_='imdb').text)
        media_type = extract(lambda: item.find('i', class_='type').text)

        def meta_parts():
            # Whitespace-split "meta" text: year/runtime for movies,
            # season/episode counts for TV.
            return item.find('div', class_='meta').text.split()

        year = duration = seasons = episodes = 'N/A'
        if media_type == 'Movie':
            year = extract(lambda: meta_parts()[0])
            duration = extract(lambda: meta_parts()[1] + " " + meta_parts()[2])
        elif media_type == 'TV':
            seasons = extract(lambda: meta_parts()[1])
            episodes = extract(lambda: meta_parts()[-2])

        moviesDictionary['data'].append({
            'link': link,
            'cover': cover,
            'quality': quality,
            'imdb': imdb,
            'title': title,
            'type': media_type,
            'year': year,
            'duration': duration,
            'seasons': seasons,
            'episodes': episodes
        })
    moviesDictionary['totalPages'] = getPages(soup, query)
    return moviesDictionary
def getPages(soup, query):
    """Return the total number of result pages as a string.

    Reads the page number out of the last-page ('»') pagination link.
    Falls back to '1' when there is no pagination block at all.

    NOTE(review): if pagination exists but has no '»' link, this falls off
    the end and implicitly returns None -- confirm callers tolerate that.
    """
    try:
        # ul is None when there is no pagination; .find_all then raises.
        ul = soup.find('ul', class_='pagination')
        li = ul.find_all('li')
    except:
        pages = '1'
        return pages
    for l in li:
        a = l.find('a', text='»')
        if a != None:
            # e.g. '...&page=12' -> '12'
            href = a['href']
            hrefSplit = href.split('page=')
            pages = hrefSplit[1]
            return pages | search.py | import requests
from bs4 import BeautifulSoup
from proxy import Random_Proxy
def getMovies(query, page, proxie):
moviesDictionary = {
'success': True,
'query': query,
'data': [],
}
proxy = Random_Proxy()
try:
if proxie == 'true':
if page != None:
base_url = f'https://fmovies.to/search?keyword={query}&page={page}'
currentPage = page
r = proxy.Proxy_Request(url=base_url, request_type='get')
soup = BeautifulSoup(r.content, 'lxml')
else:
base_url = f'https://fmovies.to/search?keyword={query}'
currentPage = '1'
r = proxy.Proxy_Request(url=base_url, request_type='get')
soup = BeautifulSoup(r.content, 'lxml')
else:
if page != None:
base_url = f'https://fmovies.to/search?keyword={query}&page={page}'
currentPage = page
soup = BeautifulSoup(requests.get(base_url).content, 'lxml')
else:
base_url = f'https://fmovies.to/search?keyword={query}'
currentPage = '1'
soup = BeautifulSoup(requests.get(base_url).content, 'lxml')
except requests.exceptions.RequestException as e:
moviesDictionary['success'] = False,
moviesDictionary['error'] = str(e),
return moviesDictionary
moviesDictionary['currentPage'] = currentPage
items = soup.find_all('div', class_='item')
for item in items:
try:
a = item.find('a')
href = a.get('href')
link = f'https://fmovies.to{href}'
except Exception as e:
link = str(e)
try:
a = item.find('a')
title = a.get('title')
except Exception as e:
title = str(e)
try:
img = item.find('img')
cover = img['src']
except Exception as e:
cover = str(e)
try:
quality = item.find('div', class_="quality").text
except Exception as e:
quality = str(e)
try:
imdb = item.find('span', class_='imdb').text
except Exception as e:
imdb = str(e)
try:
type = item.find('i', class_='type').text
except Exception as e:
type = str(e)
try:
if(type == 'Movie'):
rawData = item.find('div', class_='meta').text
listData = rawData.split()
year = listData[0]
else:
year = 'N/A'
except Exception as e:
year = str(e)
try:
if(type == 'Movie'):
rawData = item.find('div', class_='meta').text
listData = rawData.split()
duration = listData[1] + " " + listData[2]
else:
duration = 'N/A'
except Exception as e:
duration = str(e)
try:
if(type == 'TV'):
rawData = item.find('div', class_='meta').text
listData = rawData.split()
seasons = listData[1]
else:
seasons = 'N/A'
except Exception as e:
seasons = str(e)
try:
if(type == 'TV'):
rawData = item.find('div', class_='meta').text
listData = rawData.split()
episodes = listData[-2]
else:
episodes = 'N/A'
except Exception as e:
episodes = str(e)
moviesObject = {
'link': link,
'cover': cover,
'quality': quality,
'imdb': imdb,
'title': title,
'type': type,
'year': year,
'duration': duration,
'seasons': seasons,
'episodes': episodes
}
moviesDictionary['data'].append(moviesObject)
moviesDictionary['totalPages'] = getPages(soup, query)
return moviesDictionary
def getPages(soup, query):
try:
ul = soup.find('ul', class_='pagination')
li = ul.find_all('li')
except:
pages = '1'
return pages
for l in li:
a = l.find('a', text='»')
if a != None:
href = a['href']
hrefSplit = href.split('page=')
pages = hrefSplit[1]
return pages | 0.165965 | 0.068133 |
import pickle
import time
import discord
from discord import Game
from discord.ext.commands import Bot
from lstm_network import create
NEURAL_NET = create()  # sentiment LSTM, loaded once at startup
BOT_PREFIX = '!'
# Get at https://discordapp.com/developers/applications/me
TOKEN = open('../Bot/token.txt', 'r').readline().rstrip()  # NOTE(review): handle left to GC
client = Bot(command_prefix=BOT_PREFIX)
# Positivity scores start at MAX_SCORE, warn at/below WARNING_SCORE,
# and trigger a ban at BAN_SCORE.
MAX_SCORE = 100
WARNING_SCORE = 20
BAN_SCORE = 0
def get_sentiment(sentence):
    """Score a sentence with the LSTM network.

    Returns (composite, report): composite = positive - negative, and a
    human-readable multi-line report string.
    """
    prediction = NEURAL_NET.predict(sentence)
    negative = prediction[0]
    positive = prediction[1]
    composite = positive - negative
    report = (f'Positive: {positive}\n'
              f'Negative: {negative}\n'
              f'Composite: {composite}')
    return composite, report
# Class for user info
class DiscordMember:
    """Per-user positivity state, identified solely by Discord user id and
    persisted via pickle."""

    def __init__(self, uid, last_message_time):
        self.id = uid
        self.score = MAX_SCORE  # everyone starts with a full score
        self.last_message_time = last_message_time

    def __eq__(self, other):
        # FIX: return NotImplemented for foreign types instead of raising
        # AttributeError on a missing .id attribute.
        if not isinstance(other, DiscordMember):
            return NotImplemented
        return self.id == other.id

    def __hash__(self):
        # FIX: defining __eq__ alone sets __hash__ to None (unhashable);
        # hash on the same key equality uses.
        return hash(self.id)

    def __str__(self):
        return f'ID: {self.id}\n' \
               f'Score: {self.score}\n\n'
# Loads data from previous session of bot
try:
    # FIX: use context managers -- the original leaked both file handles.
    with open('users.pickle', 'rb') as fh:
        member_list = pickle.load(fh)
except (OSError, IOError):
    # First run (or unreadable file): start fresh and create the pickle.
    member_list = []
    with open('users.pickle', 'wb') as fh:
        pickle.dump(member_list, fh, protocol=pickle.HIGHEST_PROTOCOL)
@client.event
async def on_ready():
    """Announce login, then seed member_list with every visible member."""
    await client.change_presence(game=Game(name='positively'))
    print(f'Logged in as {client.user.name}\n')
    for server in list(client.servers):
        for member in server.members:
            candidate = DiscordMember(member.id, time.time())
            if candidate not in member_list:
                member_list.append(candidate)
    for known_member in member_list:
        print(known_member)
async def list_servers():
    """Once the client is ready, print the name of every joined server."""
    await client.wait_until_ready()
    names = [server.name for server in client.servers]
    print('Current servers:')
    for name in names:
        print(name)
    print()
@client.event
async def on_message(message):
    """Score each message's sentiment and adjust the author's positivity.

    Scores regenerate at 1 point per 10 minutes since the author's previous
    message (capped at MAX_SCORE); messages with composite sentiment below
    -1 subtract from the score.  Users are warned at WARNING_SCORE and
    banned at BAN_SCORE.
    """
    await client.process_commands(message)
    if message.content and message.content != '!score' and message.author.id != client.user.id:
        score_change, string_format = get_sentiment(message.content)
        # Only clearly negative messages (composite < -1) count against the user.
        score_change = score_change if score_change + 1 < 0 else 0
        # print(string_format)  # For testing
        current_time = time.time()
        temp = DiscordMember(message.author.id, current_time)
        if temp not in member_list:
            member_list.append(temp)
        for user in member_list:
            if user.id == message.author.id:
                time_points = (current_time - user.last_message_time) / 600
                new_score = min(user.score + time_points, MAX_SCORE) + score_change
                user.score = max(new_score, 0)
                user.last_message_time = current_time
                # FIX: context manager -- the original leaked the file handle.
                with open('users.pickle', 'wb') as fh:
                    pickle.dump(member_list, fh, protocol=pickle.HIGHEST_PROTOCOL)
                if new_score <= BAN_SCORE:
                    try:
                        await client.ban(message.server.get_member(message.author.id), delete_message_days=0)
                    except discord.errors.Forbidden:
                        print('Privilege too low')
                    else:
                        # Ban succeeded: drop the member from tracking.
                        member_list.remove(temp)
                elif new_score <= WARNING_SCORE:
                    # FIX: the mention expression was a corrupted placeholder
                    # ({<EMAIL>}, a syntax error); mention the author by id.
                    await client.send_message(message.channel,
                                              f'**WARNING <@{message.author.id}> your positivity score is very low '
                                              f'({"{0:0.1f}".format(new_score)}/{MAX_SCORE})**'
                                              f'\nYou will be banned if your score reaches {BAN_SCORE}.')
                break
@client.command(pass_context=True)
async def score(ctx):
    """Report the invoking user's current score, applying time-based regen."""
    temp = DiscordMember(ctx.message.author.id, time.time())
    if temp not in member_list:
        member_list.append(temp)
    current_time = time.time()
    for user in member_list:
        if user.id == ctx.message.author.id:
            # Regenerate 1 point per 10 minutes since the last message, capped.
            time_points = (current_time - user.last_message_time) / 600
            new_score = min(user.score + time_points, MAX_SCORE)
            user.score = new_score
            user.last_message_time = current_time
            # FIX: context manager -- the original leaked the file handle.
            with open('users.pickle', 'wb') as fh:
                pickle.dump(member_list, fh, protocol=pickle.HIGHEST_PROTOCOL)
            await client.send_message(ctx.message.channel,
                                      f'{ctx.message.author}\'s score is '
                                      f'{"{0:0.1f}".format(new_score)}/{MAX_SCORE}')
            # Ids are unique: stop after the first (only) match.
            break
if __name__ == '__main__':
    # Kick off the background server-listing task, then block in the client loop.
    client.loop.create_task(list_servers())
    client.run(TOKEN) | Bot/bot.py |
import pickle
import time
import discord
from discord import Game
from discord.ext.commands import Bot
from lstm_network import create
NEURAL_NET = create()
BOT_PREFIX = '!'
# Get at https://discordapp.com/developers/applications/me
TOKEN = open('../Bot/token.txt', 'r').readline().rstrip()
client = Bot(command_prefix=BOT_PREFIX)
MAX_SCORE = 100
WARNING_SCORE = 20
BAN_SCORE = 0
def get_sentiment(sentence):
prediction = NEURAL_NET.predict(sentence)
negative_score = prediction[0]
non_negative_score = prediction[1]
string_format = f'Positive: {non_negative_score}\n' \
f'Negative: {negative_score}\n' \
f'Composite: {non_negative_score - negative_score}'
return non_negative_score - negative_score, string_format
# Class for user info
class DiscordMember:
def __init__(self, uid, last_message_time):
self.id = uid
self.score = MAX_SCORE
self.last_message_time = last_message_time
def __eq__(self, other):
return self.id == other.id
def __str__(self):
return f'ID: {self.id}\n' \
f'Score: {self.score}\n\n'
# Loads data from previous session of bot
try:
member_list = pickle.load(open('users.pickle', 'rb'))
except (OSError, IOError) as e:
member_list = []
pickle.dump(member_list, open('users.pickle', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
@client.event
async def on_ready():
    """Announce presence and seed member_list with every visible member."""
    await client.change_presence(game=Game(name='positively'))
    print(f'Logged in as {client.user.name}\n')
    # Register a record for every member we have not seen before.
    for server in list(client.servers):
        for member in server.members:
            candidate = DiscordMember(member.id, time.time())
            if candidate not in member_list:
                member_list.append(candidate)
    for entry in member_list:
        print(entry)
async def list_servers():
    """Print the name of every server the bot is connected to."""
    await client.wait_until_ready()
    print('Current servers:')
    for connected in client.servers:
        print(connected.name)
    print()
@client.event
async def on_message(message):
    """Score each incoming message and warn/ban persistently negative users.

    Every non-command message from another user is run through the
    sentiment model; only strongly negative results (composite < -1)
    reduce the author's score, while time elapsed since the author's
    previous message slowly regenerates it (one point per 10 minutes).
    The updated list is persisted after every change.
    """
    await client.process_commands(message)
    if message.content and message.content != '!score' and message.author.id != client.user.id:
        score_change, string_format = get_sentiment(message.content)
        score_change = score_change if score_change + 1 < 0 else 0  # Only count if score sentiment < -1
        # print(string_format)  # For testing
        # Update score
        current_time = time.time()
        temp = DiscordMember(message.author.id, time.time())
        if temp not in member_list:
            member_list.append(temp)
        for user in member_list:
            if user.id == message.author.id:
                prev_score = user.score
                old_time = user.last_message_time
                # One point of score regenerates per 600 s of silence.
                time_points = (current_time - old_time) / 600
                new_score = min(prev_score + time_points, MAX_SCORE) + score_change
                user.score = max(new_score, 0)
                user.last_message_time = current_time
                # 'with' closes the handle deterministically (the original
                # bare open() call leaked it).
                with open('users.pickle', 'wb') as handle:
                    pickle.dump(member_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
                if new_score <= BAN_SCORE:
                    try:
                        await client.ban(message.server.get_member(message.author.id), delete_message_days=0)
                    except discord.errors.Forbidden:
                        print('Privilege too low')
                    else:
                        # Ban succeeded: drop the record so a rejoin starts fresh.
                        member_list.remove(temp)
                elif new_score <= WARNING_SCORE:
                    # NOTE(review): the source contained a scrubbed
                    # placeholder "<@{<EMAIL>}>" here; reconstructed as the
                    # author's id, matching Discord's <@id> mention syntax.
                    await client.send_message(message.channel,
                                              f'**WARNING <@{message.author.id}> your positivity score is very low '
                                              f'({"{0:0.1f}".format(new_score)}/{MAX_SCORE})**'
                                              f'\nYou will be banned if your score reaches {BAN_SCORE}.')
                break
@client.command(pass_context=True)
async def score(ctx):
    """Report the caller's current positivity score.

    Idle time since the caller's last message regenerates score (one
    point per 10 minutes) up to MAX_SCORE; the refreshed member list is
    persisted before the score is announced in the channel.
    """
    temp = DiscordMember(ctx.message.author.id, time.time())
    if temp not in member_list:
        member_list.append(temp)
    current_time = time.time()
    for user in member_list:
        if user.id == ctx.message.author.id:
            prev_score = user.score
            old_time = user.last_message_time
            # One point of score regenerates per 600 s of silence.
            time_points = (current_time - old_time) / 600
            user.score = min(prev_score + time_points, MAX_SCORE)
            user.last_message_time = current_time
            # 'with' closes the handle deterministically (the original
            # bare open() call leaked it).
            with open('users.pickle', 'wb') as handle:
                pickle.dump(member_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
            await client.send_message(ctx.message.channel,
                                      f'{ctx.message.author}\'s score is '
                                      f'{"{0:0.1f}".format(min(prev_score + time_points, MAX_SCORE))}/{MAX_SCORE}')
if __name__ == '__main__':
    # Schedule the periodic server listing on the bot's event loop, then
    # start the bot; client.run blocks until the process is stopped.
    client.loop.create_task(list_servers())
    client.run(TOKEN)
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, List, Optional, TypeVar

    T = TypeVar('T')
    # Callback signature accepted by every operation's ``cls`` keyword:
    # receives the pipeline response, the deserialized body and the
    # response headers, and returns the value handed back to the caller.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide serializer; validation is performed by the service, so
# client-side validation is disabled for these generated helpers.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_dequeue_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the GET request that retrieves messages from the queue.

    The account endpoint *url* is substituted into the path template;
    all optional query and header values are read from ``kwargs`` and
    anything left over is forwarded to ``HttpRequest``.
    """
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    num_messages = kwargs.pop('number_of_messages', None)  # type: Optional[int]
    visibility = kwargs.pop('visibilitytimeout', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only emit parameters the caller actually supplied.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if num_messages is not None:
        params['numofmessages'] = _SERIALIZER.query("number_of_messages", num_messages, 'int', minimum=1)
    if visibility is not None:
        params['visibilitytimeout'] = _SERIALIZER.query("visibilitytimeout", visibility, 'int', maximum=604800, minimum=0)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="GET", url=resolved_url, params=params, headers=headers, **kwargs)
def build_clear_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the DELETE request that removes every message in the queue."""
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only the optional operation timeout.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="DELETE", url=resolved_url, params=params, headers=headers, **kwargs)
def build_enqueue_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the POST request that adds a message to the queue.

    The serialized XML body is expected in ``kwargs['content']`` and is
    forwarded to ``HttpRequest`` untouched along with any other leftover
    keyword arguments.
    """
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    body_content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    visibility = kwargs.pop('visibilitytimeout', None)  # type: Optional[int]
    ttl = kwargs.pop('message_time_to_live', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only emit parameters the caller actually supplied.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if visibility is not None:
        params['visibilitytimeout'] = _SERIALIZER.query("visibilitytimeout", visibility, 'int', maximum=604800, minimum=0)
    if ttl is not None:
        params['messagettl'] = _SERIALIZER.query("message_time_to_live", ttl, 'int', minimum=-1)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the rest are optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    if body_content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", body_content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="POST", url=resolved_url, params=params, headers=headers, **kwargs)
def build_peek_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the GET request that peeks messages without dequeuing them."""
    peekonly = kwargs.pop('peekonly', "true")  # type: str
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    num_messages = kwargs.pop('number_of_messages', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # 'peekonly' is always sent; it is what distinguishes peek from dequeue.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['peekonly'] = _SERIALIZER.query("peekonly", peekonly, 'str')
    if num_messages is not None:
        params['numofmessages'] = _SERIALIZER.query("number_of_messages", num_messages, 'int', minimum=1)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="GET", url=resolved_url, params=params, headers=headers, **kwargs)
# fmt: on
class MessagesOperations(object):
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.storage.queue.AzureQueueStorage`'s
        :attr:`messages` attribute.
    """
    models = _models

    def __init__(self, *args, **kwargs):
        # Generated-code convention: the four collaborators may arrive
        # positionally (client, config, serializer, deserializer) or as
        # the equivalent keyword arguments.
        args = list(args)
        self._client = args.pop(0) if args else kwargs.pop("client")
        self._config = args.pop(0) if args else kwargs.pop("config")
        self._serialize = args.pop(0) if args else kwargs.pop("serializer")
        self._deserialize = args.pop(0) if args else kwargs.pop("deserializer")

    @distributed_trace
    def dequeue(
        self,
        number_of_messages=None,  # type: Optional[int]
        visibilitytimeout=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.DequeuedMessageItem"]
        """The Dequeue operation retrieves one or more messages from the front of the queue.

        :param number_of_messages: Optional. A nonzero integer value that specifies the number of
         messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible
         messages are returned. By default, a single message is retrieved from the queue with this
         operation. Default value is None.
        :type number_of_messages: int
        :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds,
         relative to server time. The default value is 30 seconds. A specified value must be larger than
         or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol
         versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value
         later than the expiry time.
        :type visibilitytimeout: int
        :param timeout: The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of DequeuedMessageItem, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.DequeuedMessageItem]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.DequeuedMessageItem"]]
        # Map well-known status codes onto typed azure-core exceptions;
        # callers may extend/override the mapping via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build, normalize and send the HTTP request through the pipeline.
        request = build_dequeue_request(
            url=self._config.url,
            version=self._config.version,
            number_of_messages=number_of_messages,
            visibilitytimeout=visibilitytimeout,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.dequeue.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # Surface service diagnostics headers alongside the payload.
        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[DequeuedMessageItem]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    dequeue.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def clear(  # pylint: disable=inconsistent-return-statements
        self,
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """The Clear operation deletes all messages from the specified queue.

        :param timeout: The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_clear_request(
            url=self._config.url,
            version=self._config.version,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.clear.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Clear returns 204 No Content on success; there is no body.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        if cls:
            return cls(pipeline_response, None, response_headers)

    clear.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def enqueue(
        self,
        queue_message,  # type: "_models.QueueMessage"
        visibilitytimeout=None,  # type: Optional[int]
        message_time_to_live=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.EnqueuedMessage"]
        """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout
        can also be specified to make the message invisible until the visibility timeout expires. A
        message must be in a format that can be included in an XML request with UTF-8 encoding. The
        encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size
        for previous versions.

        :param queue_message: A Message object which can be stored in a Queue.
        :type queue_message: ~azure.storage.queue.models.QueueMessage
        :param visibilitytimeout: Optional. If specified, the request must be made using an
         x-ms-version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the
         new visibility timeout value, in seconds, relative to server time. The new value must be larger
         than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message
         cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value
         smaller than the time-to-live value.
        :type visibilitytimeout: int
        :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in
         seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version
         2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1
         indicating that the message does not expire. If this parameter is omitted, the default
         time-to-live is 7 days. Default value is None.
        :type message_time_to_live: int
        :param timeout: The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of EnqueuedMessage, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.EnqueuedMessage]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.EnqueuedMessage"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/xml")  # type: Optional[str]

        # Serialize the message payload to the XML body expected on the wire.
        _content = self._serialize.body(queue_message, 'QueueMessage', is_xml=True)

        request = build_enqueue_request(
            url=self._config.url,
            version=self._config.version,
            content_type=content_type,
            content=_content,
            visibilitytimeout=visibilitytimeout,
            message_time_to_live=message_time_to_live,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.enqueue.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Enqueue returns 201 Created on success.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[EnqueuedMessage]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    enqueue.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def peek(
        self,
        number_of_messages=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.PeekedMessageItem"]
        """The Peek operation retrieves one or more messages from the front of the queue, but does not
        alter the visibility of the message.

        :param number_of_messages: Optional. A nonzero integer value that specifies the number of
         messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible
         messages are returned. By default, a single message is retrieved from the queue with this
         operation. Default value is None.
        :type number_of_messages: int
        :param timeout: The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword peekonly: Peek message(s). Default value is "true". Note that overriding this default
         value may result in unsupported behavior.
        :paramtype peekonly: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of PeekedMessageItem, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.PeekedMessageItem]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.PeekedMessageItem"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        peekonly = kwargs.pop('peekonly', "true")  # type: str

        request = build_peek_request(
            url=self._config.url,
            peekonly=peekonly,
            version=self._config.version,
            number_of_messages=number_of_messages,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.peek.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[PeekedMessageItem]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    peek.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, List, Optional, TypeVar

    T = TypeVar('T')
    # Callback signature accepted by every operation's ``cls`` keyword:
    # receives the pipeline response, the deserialized body and the
    # response headers, and returns the value handed back to the caller.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide serializer; validation is performed by the service, so
# client-side validation is disabled for these generated helpers.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_dequeue_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the GET request that retrieves messages from the queue.

    The account endpoint *url* is substituted into the path template;
    all optional query and header values are read from ``kwargs`` and
    anything left over is forwarded to ``HttpRequest``.
    """
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    num_messages = kwargs.pop('number_of_messages', None)  # type: Optional[int]
    visibility = kwargs.pop('visibilitytimeout', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only emit parameters the caller actually supplied.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if num_messages is not None:
        params['numofmessages'] = _SERIALIZER.query("number_of_messages", num_messages, 'int', minimum=1)
    if visibility is not None:
        params['visibilitytimeout'] = _SERIALIZER.query("visibilitytimeout", visibility, 'int', maximum=604800, minimum=0)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="GET", url=resolved_url, params=params, headers=headers, **kwargs)
def build_clear_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the DELETE request that removes every message in the queue."""
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only the optional operation timeout.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="DELETE", url=resolved_url, params=params, headers=headers, **kwargs)
def build_enqueue_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the POST request that adds a message to the queue.

    The serialized XML body is expected in ``kwargs['content']`` and is
    forwarded to ``HttpRequest`` untouched along with any other leftover
    keyword arguments.
    """
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    body_content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    visibility = kwargs.pop('visibilitytimeout', None)  # type: Optional[int]
    ttl = kwargs.pop('message_time_to_live', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # Query string: only emit parameters the caller actually supplied.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if visibility is not None:
        params['visibilitytimeout'] = _SERIALIZER.query("visibilitytimeout", visibility, 'int', maximum=604800, minimum=0)
    if ttl is not None:
        params['messagettl'] = _SERIALIZER.query("message_time_to_live", ttl, 'int', minimum=-1)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the rest are optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    if body_content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", body_content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="POST", url=resolved_url, params=params, headers=headers, **kwargs)
def build_peek_request(
    url,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Assemble the GET request that peeks messages without dequeuing them."""
    peekonly = kwargs.pop('peekonly', "true")  # type: str
    api_version = kwargs.pop('version', "2018-03-28")  # type: str
    num_messages = kwargs.pop('number_of_messages', None)  # type: Optional[int]
    op_timeout = kwargs.pop('timeout', None)  # type: Optional[int]
    client_request_id = kwargs.pop('request_id_parameter', None)  # type: Optional[str]

    # Resolve the URL template against the account endpoint.
    template = kwargs.pop("template_url", "{url}/{queueName}/messages")
    resolved_url = _format_url_section(
        template,
        url=_SERIALIZER.url("url", url, 'str', skip_quote=True),
    )

    # 'peekonly' is always sent; it is what distinguishes peek from dequeue.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['peekonly'] = _SERIALIZER.query("peekonly", peekonly, 'str')
    if num_messages is not None:
        params['numofmessages'] = _SERIALIZER.query("number_of_messages", num_messages, 'int', minimum=1)
    if op_timeout is not None:
        params['timeout'] = _SERIALIZER.query("timeout", op_timeout, 'int', minimum=0)

    # Headers: the service version is mandatory, the request id optional.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['x-ms-version'] = _SERIALIZER.header("version", api_version, 'str')
    if client_request_id is not None:
        headers['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", client_request_id, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/xml", 'str')

    return HttpRequest(method="GET", url=resolved_url, params=params, headers=headers, **kwargs)
# fmt: on
class MessagesOperations(object):
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.storage.queue.AzureQueueStorage`'s
        :attr:`messages` attribute.
    """
    # Expose the generated models module so callers can reach it as
    # ``client.messages.models``.
    models = _models

    def __init__(self, *args, **kwargs):
        # Accept client / config / serializer / deserializer either
        # positionally (in that order) or by keyword.
        args = list(args)
        self._client = args.pop(0) if args else kwargs.pop("client")
        self._config = args.pop(0) if args else kwargs.pop("config")
        self._serialize = args.pop(0) if args else kwargs.pop("serializer")
        self._deserialize = args.pop(0) if args else kwargs.pop("deserializer")

    @distributed_trace
    def dequeue(
        self,
        number_of_messages=None,  # type: Optional[int]
        visibilitytimeout=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.DequeuedMessageItem"]
        """The Dequeue operation retrieves one or more messages from the front of the queue.

        :param number_of_messages: Optional. A nonzero integer value that specifies the number of
         messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible
         messages are returned. By default, a single message is retrieved from the queue with this
         operation. Default value is None.
        :type number_of_messages: int
        :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds,
         relative to server time. The default value is 30 seconds. A specified value must be larger than
         or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol
         versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value
         later than the expiry time.
        :type visibilitytimeout: int
        :param timeout: The The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of DequeuedMessageItem, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.DequeuedMessageItem]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.DequeuedMessageItem"]]
        # Default mapping of well-known failure codes to typed exceptions;
        # callers may extend/override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_dequeue_request(
            url=self._config.url,
            version=self._config.version,
            number_of_messages=number_of_messages,
            visibilitytimeout=visibilitytimeout,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.dequeue.metadata['url'],
        )
        # Rewrite the templated URL against the configured endpoint before sending.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as a typed StorageError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # Selected service headers are handed to the caller through cls(), if given.
        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[DequeuedMessageItem]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    dequeue.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def clear(  # pylint: disable=inconsistent-return-statements
        self,
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """The Clear operation deletes all messages from the specified queue.

        :param timeout: The The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # See dequeue(): overridable status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_clear_request(
            url=self._config.url,
            version=self._config.version,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.clear.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Clear returns 204 No Content on success; no body to deserialize.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        if cls:
            return cls(pipeline_response, None, response_headers)

    clear.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def enqueue(
        self,
        queue_message,  # type: "_models.QueueMessage"
        visibilitytimeout=None,  # type: Optional[int]
        message_time_to_live=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.EnqueuedMessage"]
        """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout
        can also be specified to make the message invisible until the visibility timeout expires. A
        message must be in a format that can be included in an XML request with UTF-8 encoding. The
        encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size
        for previous versions.

        :param queue_message: A Message object which can be stored in a Queue.
        :type queue_message: ~azure.storage.queue.models.QueueMessage
        :param visibilitytimeout: Optional. If specified, the request must be made using an
         x-ms-version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the
         new visibility timeout value, in seconds, relative to server time. The new value must be larger
         than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message
         cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value
         smaller than the time-to-live value.
        :type visibilitytimeout: int
        :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in
         seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version
         2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1
         indicating that the message does not expire. If this parameter is omitted, the default
         time-to-live is 7 days. Default value is None.
        :type message_time_to_live: int
        :param timeout: The The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of EnqueuedMessage, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.EnqueuedMessage]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.EnqueuedMessage"]]
        # See dequeue(): overridable status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/xml")  # type: Optional[str]

        # The message body is sent as XML (see class docstring on size limits).
        _content = self._serialize.body(queue_message, 'QueueMessage', is_xml=True)

        request = build_enqueue_request(
            url=self._config.url,
            version=self._config.version,
            content_type=content_type,
            content=_content,
            visibilitytimeout=visibilitytimeout,
            message_time_to_live=message_time_to_live,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.enqueue.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Enqueue returns 201 Created on success.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[EnqueuedMessage]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    enqueue.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore

    @distributed_trace
    def peek(
        self,
        number_of_messages=None,  # type: Optional[int]
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.PeekedMessageItem"]
        """The Peek operation retrieves one or more messages from the front of the queue, but does not
        alter the visibility of the message.

        :param number_of_messages: Optional. A nonzero integer value that specifies the number of
         messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible
         messages are returned. By default, a single message is retrieved from the queue with this
         operation. Default value is None.
        :type number_of_messages: int
        :param timeout: The The timeout parameter is expressed in seconds. For more information, see <a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting
         Timeouts for Queue Service Operations.</a>. Default value is None.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
         value is None.
        :type request_id_parameter: str
        :keyword peekonly: Peek message(s). Default value is "true". Note that overriding this default
         value may result in unsupported behavior.
        :paramtype peekonly: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of PeekedMessageItem, or the result of cls(response)
        :rtype: list[~azure.storage.queue.models.PeekedMessageItem]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.PeekedMessageItem"]]
        # See dequeue(): overridable status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        peekonly = kwargs.pop('peekonly', "true")  # type: str

        request = build_peek_request(
            url=self._config.url,
            peekonly=peekonly,
            version=self._config.version,
            number_of_messages=number_of_messages,
            timeout=timeout,
            request_id_parameter=request_id_parameter,
            template_url=self.peek.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        deserialized = self._deserialize('[PeekedMessageItem]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized

    peek.metadata = {'url': "{url}/{queueName}/messages"}  # type: ignore
import builtins
import os
from os.path import join
import sys
import time
import argparse
import random
import pdb
import json
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
from PIL import Image
from PIL import ImageFilter
from simreg import SimReg
from dataloader import get_train_loader
from tools import adjust_learning_rate, AverageMeterv2 as AverageMeter, subset_classes, get_logger
def parse_option():
    """Build the training CLI and return the parsed arguments namespace."""
    parser = argparse.ArgumentParser('argument for training')
    add = parser.add_argument  # shorthand; every option below goes through it

    # data / bookkeeping
    add('data', type=str, help='path to dataset')
    add('--dataset', type=str, default='imagenet',
        choices=['imagenet', 'imagenet100'],
        help='use full or subset of the dataset')
    add('--base_dir', default='./',
        help='experiment root directory')
    add('--exp', default='./outputs',
        help='experiment root directory')
    add('--debug', action='store_true',
        help='whether in debug mode or not')
    add('--print_freq', type=int, default=100,
        help='print frequency')
    add('--save_freq', type=int, default=10,
        help='save frequency')
    add('--batch_size', type=int, default=256,
        help='batch_size')
    add('--num_workers', type=int, default=24,
        help='num of workers to use')
    add('--epochs', type=int, default=130,
        help='number of training epochs')

    # optimization
    add('--learning_rate', type=float, default=0.05,
        help='learning rate')
    add('--weight_decay', type=float, default=1e-4,
        help='weight decay')
    add('--sgd_momentum', type=float, default=0.9,
        help='SGD momentum')

    # model definition
    add('--arch_teacher', type=str, default='resnet50',
        choices=['resnet50', 'byol_resnet50', 'resnet50x4', 'sup_resnet50'])
    add('--arch_student', type=str, default='resnet50',
        choices=['resnet18', 'resnet50', 'mobilenet', 'byol_resnet50'])
    add('--n_mlp_layers', type=int, default=4,
        help='number of layers in prediction MLP head')
    add('--linear_pred', action='store_true',
        help='use linear prediction layer for student')
    add('--use_cache', action='store_true',
        help='use cached features for teacher instead of loading network')
    add('--teacher_fc', action='store_true',
        help='use pretrained projection head for teacher')

    # Augmentations
    add('--single_aug', action='store_true',
        help='use single augmentation (same aug for both nets)')
    add('--weak_strong', action='store_true',
        help='whether to strong/strong or weak/strong augmentation')
    add('--weak_weak', action='store_true',
        help='whether to use weak/weak augmentation')
    add('--mse_nonorm', action='store_true',
        help='calculate mse loss from un-normalized vectors')

    # Load model
    add('--weights', type=str,
        help='path to weights file to initialize the student model from')
    add('--teacher_weights', type=str,
        help='path to weights(trained model) file to initialize the teacher model from')
    add('--teacher_feats', type=str,
        help='path to stored teacher training features, used instead of loading weights')
    add('--resume', default='', type=str,
        help='path to latest checkpoint (default: none)')
    add('--restart', action='store_true',
        help='restart training using ckpt - do not load optim parameters')

    return parser.parse_args()
def main():
    """Entry point: set up experiment dirs/logging, build SimReg, run training."""
    args = parse_option()

    # Experiment layout: <base_dir>/exp/<exp>/{checkpoints,logs}
    save_dir = join(args.base_dir, 'exp')
    args.ckpt_dir = join(save_dir, args.exp, 'checkpoints')
    args.logs_dir = join(save_dir, args.exp, 'logs')
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.logs_dir):
        os.makedirs(args.logs_dir)

    # Append this run's arguments to the args log; the '*'*50 string acts as a
    # separator between runs (the file accumulates JSON fragments, it is not a
    # single valid JSON document).
    args_file = join(args.logs_dir, 'train_args.json')
    s = '*' * 50
    with open(args_file, 'a') as f:
        json.dump(s, f)
        json.dump(vars(args), f, indent=4)

    if not args.debug:
        # Outside debug mode: disable breakpoint() and reroute print() through
        # the file logger so all console output is captured.
        os.environ['PYTHONBREAKPOINT'] = '0'
        logger = get_logger(
            logpath=os.path.join(args.ckpt_dir, 'logs'),
            filepath=os.path.abspath(__file__)
        )

        def print_pass(*arg):
            logger.info(*arg)
        builtins.print = print_pass
    print(args)

    train_loader = get_train_loader(args)

    # Student/teacher pair; teacher weights are loaded inside SimReg.
    simreg = SimReg(
        args,
        args.arch_teacher,
        args.arch_student,
        args.teacher_weights,
    )
    simreg.data_parallel()
    simreg = simreg.cuda()
    print(simreg)

    # Only parameters with requires_grad are optimized (the frozen teacher is excluded).
    params = [p for p in simreg.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.learning_rate,
                                momentum=args.sgd_momentum,
                                weight_decay=args.weight_decay)
    cudnn.benchmark = True

    args.start_epoch = 1
    if args.weights:
        print('==> load weights from checkpoint: {}'.format(args.weights))
        ckpt = torch.load(args.weights)
        print('==> resume from epoch: {}'.format(ckpt['epoch']))
        # Accept either 'model' or 'state_dict' as the weights key.
        if 'model' in ckpt:
            sd = ckpt['model']
        else:
            sd = ckpt['state_dict']
        msg = simreg.load_state_dict(sd, strict=False)
        # NOTE(review): --weights also restores optimizer state and start_epoch,
        # not just the model weights — confirm this is intended (there is no
        # --restart escape hatch on this path, unlike --resume below).
        optimizer.load_state_dict(ckpt['optimizer'])
        args.start_epoch = ckpt['epoch'] + 1
        print(msg)

    if args.resume:
        print('==> resume from checkpoint: {}'.format(args.resume))
        ckpt = torch.load(args.resume)
        print('==> resume from epoch: {}'.format(ckpt['epoch']))
        msg = simreg.load_state_dict(ckpt['state_dict'], strict=True)
        print(msg)
        # --restart keeps the weights but starts optimization from scratch.
        if not args.restart:
            optimizer.load_state_dict(ckpt['optimizer'])
            args.start_epoch = ckpt['epoch'] + 1

    # routine
    if args.use_cache:
        print('Using cached features!!!')
    time0 = time.time()
    for epoch in range(args.start_epoch, args.epochs + 1):
        print(args.exp)
        adjust_learning_rate(epoch, args, optimizer)
        print("==> training...")

        time1 = time.time()
        train(epoch, train_loader, simreg, optimizer, args)
        time2 = time.time()
        # epoch time in minutes, total time in hours
        print('epoch {}, epoch time {:.2f}, total time {:.2f}'.format(epoch, (time2 - time1)/60.,
                                                                      (time2 - time0)/(60*60.)))

        # saving the model
        if epoch % args.save_freq == 0:
            print('==> Saving...')
            state = {
                'opt': args,
                'state_dict': simreg.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }
            save_file = os.path.join(args.ckpt_dir, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            torch.save(state, save_file)

            # help release GPU memory
            del state
        torch.cuda.empty_cache()
def train(epoch, train_loader, simreg, optimizer, opt):
    """
    one epoch training for SimReg
    """
    # Student trains; the teacher encoder stays in eval mode unless the run
    # uses cached teacher features (no live teacher forward pass then).
    simreg.train()
    if not opt.use_cache:
        simreg.encoder_t.eval()

    bt_meter = AverageMeter()   # per-batch wall time
    dt_meter = AverageMeter()   # data-loading time
    l_meter = AverageMeter()    # running loss

    tic = time.time()
    for step, (indices, names, (im_q, im_t), labels) in enumerate(train_loader):
        dt_meter.update(time.time() - tic)

        im_q = im_q.cuda(non_blocking=True)
        im_t = im_t.cuda(non_blocking=True)

        # ===================forward=====================
        loss = simreg(im_q=im_q, im_t=im_t, names=names)

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        l_meter.update(loss.item(), im_q.size(0))
        torch.cuda.synchronize()
        bt_meter.update(time.time() - tic)
        tic = time.time()

        # print info
        if (step + 1) % opt.print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'loss {loss.val:.3f} ({loss.avg:.3f})\t'.format(
                      epoch, step + 1, len(train_loader), batch_time=bt_meter,
                      data_time=dt_meter,
                      loss=l_meter))
            sys.stdout.flush()

    sys.stdout.flush()
    return l_meter.avg
if __name__ == '__main__':
main() | main.py | import builtins
import os
from os.path import join
import sys
import time
import argparse
import random
import pdb
import json
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
from PIL import Image
from PIL import ImageFilter
from simreg import SimReg
from dataloader import get_train_loader
from tools import adjust_learning_rate, AverageMeterv2 as AverageMeter, subset_classes, get_logger
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('data', type=str, help='path to dataset')
parser.add_argument('--dataset', type=str, default='imagenet',
choices=['imagenet', 'imagenet100'],
help='use full or subset of the dataset')
parser.add_argument('--base_dir', default='./',
help='experiment root directory')
parser.add_argument('--exp', default='./outputs',
help='experiment root directory')
parser.add_argument('--debug', action='store_true',
help='whether in debug mode or not')
parser.add_argument('--print_freq', type=int, default=100,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=10,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=24,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=130,
help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.05,
help='learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--sgd_momentum', type=float, default=0.9,
help='SGD momentum')
# model definition
parser.add_argument('--arch_teacher', type=str, default='resnet50',
choices=['resnet50', 'byol_resnet50', 'resnet50x4', 'sup_resnet50'])
parser.add_argument('--arch_student', type=str, default='resnet50',
choices=['resnet18', 'resnet50', 'mobilenet', 'byol_resnet50'])
parser.add_argument('--n_mlp_layers', type=int, default=4,
help='number of layers in prediction MLP head')
parser.add_argument('--linear_pred', action='store_true',
help='use linear prediction layer for student')
parser.add_argument('--use_cache', action='store_true',
help='use cached features for teacher instead of loading network')
parser.add_argument('--teacher_fc', action='store_true',
help='use pretrained projection head for teacher')
# Augmentations
parser.add_argument('--single_aug', action='store_true',
help='use single augmentation (same aug for both nets)')
parser.add_argument('--weak_strong', action='store_true',
help='whether to strong/strong or weak/strong augmentation')
parser.add_argument('--weak_weak', action='store_true',
help='whether to use weak/weak augmentation')
parser.add_argument('--mse_nonorm', action='store_true',
help='calculate mse loss from un-normalized vectors')
# Load model
parser.add_argument('--weights', type=str,
help='path to weights file to initialize the student model from')
parser.add_argument('--teacher_weights', type=str,
help='path to weights(trained model) file to initialize the teacher model from')
parser.add_argument('--teacher_feats', type=str,
help='path to stored teacher training features, used instead of loading weights')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--restart', action='store_true',
help='restart training using ckpt - do not load optim parameters')
opt = parser.parse_args()
return opt
def main():
args = parse_option()
save_dir = join(args.base_dir, 'exp')
args.ckpt_dir = join(save_dir, args.exp, 'checkpoints')
args.logs_dir = join(save_dir, args.exp, 'logs')
if not os.path.exists(args.ckpt_dir):
os.makedirs(args.ckpt_dir)
if not os.path.exists(args.logs_dir):
os.makedirs(args.logs_dir)
args_file = join(args.logs_dir, 'train_args.json')
s = '*' * 50
with open(args_file, 'a') as f:
json.dump(s, f)
json.dump(vars(args), f, indent=4)
if not args.debug:
os.environ['PYTHONBREAKPOINT'] = '0'
logger = get_logger(
logpath=os.path.join(args.ckpt_dir, 'logs'),
filepath=os.path.abspath(__file__)
)
def print_pass(*arg):
logger.info(*arg)
builtins.print = print_pass
print(args)
train_loader = get_train_loader(args)
simreg = SimReg(
args,
args.arch_teacher,
args.arch_student,
args.teacher_weights,
)
simreg.data_parallel()
simreg = simreg.cuda()
print(simreg)
params = [p for p in simreg.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params,
lr=args.learning_rate,
momentum=args.sgd_momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
args.start_epoch = 1
if args.weights:
print('==> load weights from checkpoint: {}'.format(args.weights))
ckpt = torch.load(args.weights)
print('==> resume from epoch: {}'.format(ckpt['epoch']))
if 'model' in ckpt:
sd = ckpt['model']
else:
sd = ckpt['state_dict']
msg = simreg.load_state_dict(sd, strict=False)
optimizer.load_state_dict(ckpt['optimizer'])
args.start_epoch = ckpt['epoch'] + 1
print(msg)
if args.resume:
print('==> resume from checkpoint: {}'.format(args.resume))
ckpt = torch.load(args.resume)
print('==> resume from epoch: {}'.format(ckpt['epoch']))
msg = simreg.load_state_dict(ckpt['state_dict'], strict=True)
print(msg)
if not args.restart:
optimizer.load_state_dict(ckpt['optimizer'])
args.start_epoch = ckpt['epoch'] + 1
# routine
if args.use_cache:
print('Using cached features!!!')
time0 = time.time()
for epoch in range(args.start_epoch, args.epochs + 1):
print(args.exp)
adjust_learning_rate(epoch, args, optimizer)
print("==> training...")
time1 = time.time()
train(epoch, train_loader, simreg, optimizer, args)
time2 = time.time()
print('epoch {}, epoch time {:.2f}, total time {:.2f}'.format(epoch, (time2 - time1)/60.,
(time2 - time0)/(60*60.)))
# saving the model
if epoch % args.save_freq == 0:
print('==> Saving...')
state = {
'opt': args,
'state_dict': simreg.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
}
save_file = os.path.join(args.ckpt_dir, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
torch.save(state, save_file)
# help release GPU memory
del state
torch.cuda.empty_cache()
def train(epoch, train_loader, simreg, optimizer, opt):
"""
one epoch training for SimReg
"""
simreg.train()
if not opt.use_cache:
simreg.encoder_t.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
loss_meter = AverageMeter()
end = time.time()
for idx, (indices, names, (im_q, im_t), labels) in enumerate(train_loader):
data_time.update(time.time() - end)
im_q = im_q.cuda(non_blocking=True)
im_t = im_t.cuda(non_blocking=True)
# ===================forward=====================
loss = simreg(im_q=im_q, im_t=im_t, names=names)
# ===================backward=====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ===================meters=====================
loss_meter.update(loss.item(), im_q.size(0))
torch.cuda.synchronize()
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time,
loss=loss_meter))
sys.stdout.flush()
sys.stdout.flush()
return loss_meter.avg
if __name__ == '__main__':
main() | 0.608361 | 0.070752 |
import re
import subprocess
import os.path
def extractFileFromIncludes( include ):
    """Return the quoted path from a ``#include "..."`` line.

    The trailing newline is optional: previously the pattern required ``\\n``,
    so the last line of a file that does not end with a newline failed to
    match and ``.group(1)`` raised ``AttributeError`` on ``None``.
    """
    return re.match(r'#include \"(.+)\"(.*)\n?', include).group(1)
def extractFilesFromIncludes( fileContent ):
    """Collect the quoted targets of every ``#include "..."`` line in *fileContent*."""
    return [extractFileFromIncludes(line)
            for line in fileContent
            if line.startswith('#include \"')]
def getIncludeFiles( proxyHeader ):
    """Return *proxyHeader*'s local include targets, resolved relative to its directory."""
    base = os.path.dirname(proxyHeader)
    with open(proxyHeader,'r') as fh:
        names = extractFilesFromIncludes(fh.readlines())
    return [base + '/' + name for name in names]
def headerContent( fileContent, ns ):
    """Split one header's lines into (system includes, body lines).

    Dropped: ``#pragma once``, local ``#include "..."`` lines, the opening
    ``namespace <ns>`` line, the closing brace of that namespace, and blank
    lines outside nested scopes.  ``#include <...>`` lines are collected
    separately; everything else is body text.  Asserts document the expected
    brace depth at each kind of line.
    """
    sysIncludes = []
    body = []
    depth = 0
    for line in fileContent:
        # Track brace nesting so we can tell namespace-level lines apart
        # from lines inside nested scopes.
        depth += line.count('{') - line.count('}')
        if line.startswith('#pragma once'):
            assert depth==0
            continue
        if line.startswith('#include \"'):
            assert depth==0
            continue
        if line.startswith('#include <'):
            assert depth==0
            sysIncludes.append(line)
            continue
        if line.startswith('namespace ' + ns):
            assert depth==1
            continue
        if line.startswith('}') and depth == 0: # end of ns scope
            continue
        if line =='\n' and depth <= 1:
            continue
        body.append(line)
    body.append('\n')
    assert depth==0
    return sysIncludes,body
def headerContents( headers, ns ):
    """Concatenate the system includes and bodies of several header files."""
    allIncludes = []
    allBodies = []
    for path in headers:
        with open(path) as fh:
            fileIncludes, fileBody = headerContent(fh.readlines(), ns)
        allIncludes.extend(fileIncludes)
        allBodies.extend(fileBody)
    return allIncludes, allBodies
def collapsNamespace( ns, lines ):
    """Join *lines* and remove redundant ``} ... namespace <ns> {`` re-openings
    left between the merged headers, yielding one continuous namespace block."""
    text = ''.join(lines)
    pattern = '\n}[\n| ].*\n*namespace ' + ns + ' {.*\n'
    return re.sub(pattern, '', text)
def assembleHeader(includes, body, ns) :
    """Render the single-header text: pragma, system includes, ns-wrapped body."""
    parts = [
        '#pragma once\n',
        '\n',
        includes,
        '\n',
        'namespace ' + ns + ' {\n',
        '\n',
        body,
        '\n',
        '} // namespace ' + ns + '\n',
        '\n',
    ]
    return ''.join(parts)
def writeHeader(singleHeaderTarget, includes, codeJoined, ns):
    """Assemble the single header and write it to *singleHeaderTarget*."""
    content = assembleHeader(''.join(includes), codeJoined, ns)
    with open(singleHeaderTarget,'w') as out:
        out.write(content)
# Built-in clang-format style presets, for passing as the `style` argument
# of clang_format_inplace() below.
LLVM = 'LLVM'
Google = 'Google'
Chromium = 'Chromium'
Mozilla = 'Mozilla'
WebKit = 'WebKit'
def clang_format_inplace(file, style, clang_format='clang-format-5.0'):
    """Reformat *file* in place with clang-format.

    :param file: path of the file to rewrite.
    :param style: a style preset name (see the constants above).
    :param clang_format: executable to invoke; defaults to the pinned
        ``clang-format-5.0`` binary for reproducible output, but can now be
        overridden for systems where a different version is installed.
    :raises subprocess.CalledProcessError: if clang-format exits non-zero.
    """
    subprocess.check_output( [clang_format, '-i', '-style=' + style, file] )
import subprocess
import os.path
def extractFileFromIncludes( include ):
return re.match('#include \"(.+)\"(.*)\n', include).group(1)
def extractFilesFromIncludes( fileContent ):
res = []
for line in fileContent:
if line.startswith('#include \"'):
res.append(extractFileFromIncludes(line))
return res
def getIncludeFiles( proxyHeader ):
dir_ = os.path.dirname(proxyHeader)
with open(proxyHeader,'r') as f:
headers = extractFilesFromIncludes(f.readlines())
return [dir_ + '/' + header for header in headers]
def headerContent( fileContent, ns ):
includes = []
dst = []
bracketLevel = 0
for line in fileContent:
bracketLevel += line.count('{') - line.count('}')
if line.startswith('#pragma once'):
assert bracketLevel==0
continue
if line.startswith('#include \"'):
assert bracketLevel==0
continue
if line.startswith('#include <'):
assert bracketLevel==0
includes.append(line)
continue
if line.startswith('namespace ' + ns):
assert bracketLevel==1
continue
if line.startswith('}') and bracketLevel == 0: # end of ns scope
continue
if line =='\n' and bracketLevel <= 1:
continue
dst.append(line)
dst.append('\n')
assert bracketLevel==0
return includes,dst
def headerContents( headers, ns ):
inc = []
dst_ = []
for headerFile in headers:
with open(headerFile) as f:
i, d = headerContent(f.readlines(), ns)
inc.extend(i)
dst_.extend(d)
return inc, dst_
def collapsNamespace( ns, lines ):
return re.sub('\n}[\n| ].*\n*namespace ' + ns + ' {.*\n', '', ''.join(lines))
def assembleHeader(includes, body, ns) :
r = ''
r = r + '#pragma once\n'
r = r + '\n'
r = r + includes
r = r + '\n'
r = r + 'namespace ' + ns + ' {\n'
r = r + '\n'
r = r + body
r = r + '\n'
r = r + '} // namespace ' + ns + '\n'
r = r + '\n'
return r
def writeHeader(singleHeaderTarget, includes, codeJoined, ns):
with open(singleHeaderTarget,'w') as f:
f.write(assembleHeader(''.join(includes),codeJoined,ns))
LLVM = 'LLVM'
Google = 'Google'
Chromium = 'Chromium'
Mozilla = 'Mozilla'
WebKit = 'WebKit'
def clang_format_inplace(file, style):
subprocess.check_output( ['clang-format-5.0', '-i', '-style=' + style, file] ) | 0.103601 | 0.177098 |
from import_export import resources
from import_export.fields import Field
from import_export.admin import ImportExportModelAdmin
from apps.import_excel.models import PartsAuthority,Shopify
class PartsAuthorityResource(resources.ModelResource):
class Meta:
model = PartsAuthority
class ShopifyResource(resources.ModelResource):
handle = Field(attribute='handle', column_name='Handle')
bodyHTML = Field(attribute='bodyHTML', column_name='Body (HTML)')
title = Field(attribute='title', column_name='Title')
vendor = Field(attribute='vendor',column_name="Vendor")
standard_product_type = Field(attribute='standard_product_type',column_name="Standard Product Type")
custom_product_type = Field(attribute='custom_product_type',column_name="Custom Product Type")
tags = Field(attribute='tags',column_name="Tags")
published = Field(attribute='published',column_name="Published")
option1_name = Field(attribute='option1_name',column_name="Option 1 Name")
option1_value = Field(attribute='option1_value',column_name="Option 1 Value")
option2_name = Field(attribute='option2_name',column_name="Option 2 Name")
option2_value = Field(attribute='option2_value',column_name="Option 2 Value")
option3_name = Field(attribute='option3_name',column_name="Option 3 Name")
option3_value = Field(attribute='option3_value',column_name="Option 3 Value")
variant_sku = Field(attribute='variant_sku',column_name="Variant SKU")
variant_grams = Field(attribute='variant_grams',column_name="Variant Grams")
variant_inventory_tracker = Field(attribute='variant_inventory_tracker',column_name="Variant Inventory Tracker")
variant_inventory_qty = Field(attribute='variant_inventory_qty',column_name="Variant Inventory Qty")
variant_inventory_policy = Field(attribute='variant_inventory_policy',column_name="Variant Inventory Policy")
variant_fulfillment_service = Field(attribute='variant_fulfillment_service',column_name="Variant Fulfillment Service")
variant_price = Field(attribute='variant_price',column_name="Variant Price")
variant_compare_at_price = Field(attribute='variant_compare_at_price',column_name="Variant Compare At Price")
variant_requires_shipping = Field(attribute='variant_requires_shipping',column_name="Variant Requires Shipping")
variant_taxable = Field(attribute='variant_taxable',column_name="Variant Taxable")
variant_barcode = Field(attribute='variant_barcode',column_name="Variant Barcode")
image_src = Field(attribute='image_src',column_name="Image Src")
image_position = Field(attribute='image_position',column_name="Image Position")
image_alt_text = Field(attribute='image_alt_text',column_name="Image Alt Text")
gift_card = Field(attribute='gift_card',column_name="Gift Card")
seo_title = Field(attribute='seo_title',column_name="SEO Title")
seo_description = Field(attribute='seo_description',column_name="SEO Description")
google_shopping_google_product_category = Field(attribute='google_shopping_google_product_category',column_name="Google Shopping / Google Product Category")
google_shopping_gender = Field(attribute='google_shopping_gender',column_name="Google Shopping / Gender")
google_shopping_age_group = Field(attribute='google_shopping_age_group',column_name="Google Shopping Age Group")
google_shopping_MPN = Field(attribute='google_shopping_MPN',column_name="Google Shopping MPN")
google_shopping_adWords_grouping = Field(attribute='google_shopping_adWords_grouping',column_name="Google Shopping AdWords Grouping")
google_shopping_adWords_labels = Field(attribute='google_shopping_adWords_labels',column_name="Google Shopping AdWords Labels")
google_shopping_condition = Field(attribute='google_shopping_condition',column_name="Google Shopping Condition")
google_shopping_custom_product = Field(attribute='google_shopping_custom_product',column_name="Google Shopping Custom Product")
google_shopping_custom_label_0 = Field(attribute='google_shopping_custom_label_0',column_name="Google Shopping Custom Label 0")
google_shopping_custom_label_1 = Field(attribute='google_shopping_custom_label_1',column_name="Google Shopping Custom Label 1")
google_shopping_custom_label_2 = Field(attribute='google_shopping_custom_label_2',column_name="Google Shopping Custom Label 2")
google_shopping_custom_label_3 = Field(attribute='google_shopping_custom_label_3',column_name="Google Shopping Custom Label 3")
google_shopping_custom_label_4 = Field(attribute='google_shopping_custom_label_4',column_name="Google Shopping Custom Label 4")
variant_image = Field(attribute='variant_image',column_name="Variant Image")
variant_weight_unit = Field(attribute='variant_weight_unit',column_name="Variant Weight Unit")
variant_tax_code = Field(attribute='variant_tax_code',column_name="Variant Tax Code")
cost_per_item = Field(attribute='cost_per_item',column_name="Cost per item")
status = Field(attribute='status',column_name="Status")
class Meta:
model = Shopify
export_order = (
'handle',
)
exclude=('id','date') | apps/import_excel/resources.py |
from import_export import resources
from import_export.fields import Field
from import_export.admin import ImportExportModelAdmin
from apps.import_excel.models import PartsAuthority,Shopify
class PartsAuthorityResource(resources.ModelResource):
class Meta:
model = PartsAuthority
class ShopifyResource(resources.ModelResource):
handle = Field(attribute='handle', column_name='Handle')
bodyHTML = Field(attribute='bodyHTML', column_name='Body (HTML)')
title = Field(attribute='title', column_name='Title')
vendor = Field(attribute='vendor',column_name="Vendor")
standard_product_type = Field(attribute='standard_product_type',column_name="Standard Product Type")
custom_product_type = Field(attribute='custom_product_type',column_name="Custom Product Type")
tags = Field(attribute='tags',column_name="Tags")
published = Field(attribute='published',column_name="Published")
option1_name = Field(attribute='option1_name',column_name="Option 1 Name")
option1_value = Field(attribute='option1_value',column_name="Option 1 Value")
option2_name = Field(attribute='option2_name',column_name="Option 2 Name")
option2_value = Field(attribute='option2_value',column_name="Option 2 Value")
option3_name = Field(attribute='option3_name',column_name="Option 3 Name")
option3_value = Field(attribute='option3_value',column_name="Option 3 Value")
variant_sku = Field(attribute='variant_sku',column_name="Variant SKU")
variant_grams = Field(attribute='variant_grams',column_name="Variant Grams")
variant_inventory_tracker = Field(attribute='variant_inventory_tracker',column_name="Variant Inventory Tracker")
variant_inventory_qty = Field(attribute='variant_inventory_qty',column_name="Variant Inventory Qty")
variant_inventory_policy = Field(attribute='variant_inventory_policy',column_name="Variant Inventory Policy")
variant_fulfillment_service = Field(attribute='variant_fulfillment_service',column_name="Variant Fulfillment Service")
variant_price = Field(attribute='variant_price',column_name="Variant Price")
variant_compare_at_price = Field(attribute='variant_compare_at_price',column_name="Variant Compare At Price")
variant_requires_shipping = Field(attribute='variant_requires_shipping',column_name="Variant Requires Shipping")
variant_taxable = Field(attribute='variant_taxable',column_name="Variant Taxable")
variant_barcode = Field(attribute='variant_barcode',column_name="Variant Barcode")
image_src = Field(attribute='image_src',column_name="Image Src")
image_position = Field(attribute='image_position',column_name="Image Position")
image_alt_text = Field(attribute='image_alt_text',column_name="Image Alt Text")
gift_card = Field(attribute='gift_card',column_name="Gift Card")
seo_title = Field(attribute='seo_title',column_name="SEO Title")
seo_description = Field(attribute='seo_description',column_name="SEO Description")
google_shopping_google_product_category = Field(attribute='google_shopping_google_product_category',column_name="Google Shopping / Google Product Category")
google_shopping_gender = Field(attribute='google_shopping_gender',column_name="Google Shopping / Gender")
google_shopping_age_group = Field(attribute='google_shopping_age_group',column_name="Google Shopping Age Group")
google_shopping_MPN = Field(attribute='google_shopping_MPN',column_name="Google Shopping MPN")
google_shopping_adWords_grouping = Field(attribute='google_shopping_adWords_grouping',column_name="Google Shopping AdWords Grouping")
google_shopping_adWords_labels = Field(attribute='google_shopping_adWords_labels',column_name="Google Shopping AdWords Labels")
google_shopping_condition = Field(attribute='google_shopping_condition',column_name="Google Shopping Condition")
google_shopping_custom_product = Field(attribute='google_shopping_custom_product',column_name="Google Shopping Custom Product")
google_shopping_custom_label_0 = Field(attribute='google_shopping_custom_label_0',column_name="Google Shopping Custom Label 0")
google_shopping_custom_label_1 = Field(attribute='google_shopping_custom_label_1',column_name="Google Shopping Custom Label 1")
google_shopping_custom_label_2 = Field(attribute='google_shopping_custom_label_2',column_name="Google Shopping Custom Label 2")
google_shopping_custom_label_3 = Field(attribute='google_shopping_custom_label_3',column_name="Google Shopping Custom Label 3")
google_shopping_custom_label_4 = Field(attribute='google_shopping_custom_label_4',column_name="Google Shopping Custom Label 4")
variant_image = Field(attribute='variant_image',column_name="Variant Image")
variant_weight_unit = Field(attribute='variant_weight_unit',column_name="Variant Weight Unit")
variant_tax_code = Field(attribute='variant_tax_code',column_name="Variant Tax Code")
cost_per_item = Field(attribute='cost_per_item',column_name="Cost per item")
status = Field(attribute='status',column_name="Status")
class Meta:
model = Shopify
export_order = (
'handle',
)
exclude=('id','date') | 0.482673 | 0.098469 |
from ConversionUtil import wrapClass
from RegisterContext import registerContext
from pyspark.sql import DataFrame,SQLContext
class CaffeOnSpark:
"""CaffeOnSpark is the main class for distributed deep learning.
It will launch multiple Caffe cores within Spark executors, and conduct coordinated learning from HDFS datasets.
:ivar SparkContext, SQLContext: The spark and sql context of the current spark session
"""
def __init__(self,sc):
registerContext(sc)
spark_major_version = int(sc.version.split('.')[0])
if spark_major_version >= 2:
wrapClass("org.apache.spark.sql.Dataset")
else:
wrapClass("org.apache.spark.sql.DataFrame")
self.__dict__['caffeonspark']=wrapClass("com.yahoo.ml.caffe.CaffeOnSpark")
self.__dict__['cos']=self.__dict__.get('caffeonspark')(sc)
self.__dict__['sqlcontext']=SQLContext(sc,self.__dict__['cos'].sqlContext)
def train(self,train_source):
"""Training with a specific data source
:param DataSource: the source for training data
"""
self.__dict__.get('cos').train(train_source)
def test(self,test_source):
"""Test with a specific data source.
:param DataSource: the source for the test data
"""
return self.__dict__.get('cos').test(test_source)
def features(self,source):
"""Extract features from a specific data source.
:param DataSource: the features to extract
"""
extracted_df = self.__dict__.get('cos').features(source)
extracted_pydf = DataFrame(extracted_df.javaInstance,self.__dict__.get('sqlcontext'))
return extracted_pydf
def trainWithValidation(self,train_source, validation_source):
"""Training with interleaved validation
:param DataSource: source for training data
:param DataSource: source for validation data
"""
validation_df = self.__dict__.get('cos').trainWithValidation(train_source, validation_source)
validation_pydf = DataFrame(validation_df.javaInstance,self.__dict__.get('sqlcontext'))
return validation_pydf | caffe-grid/src/main/python/com/yahoo/ml/caffe/CaffeOnSpark.py | from ConversionUtil import wrapClass
from RegisterContext import registerContext
from pyspark.sql import DataFrame,SQLContext
class CaffeOnSpark:
"""CaffeOnSpark is the main class for distributed deep learning.
It will launch multiple Caffe cores within Spark executors, and conduct coordinated learning from HDFS datasets.
:ivar SparkContext, SQLContext: The spark and sql context of the current spark session
"""
def __init__(self,sc):
registerContext(sc)
spark_major_version = int(sc.version.split('.')[0])
if spark_major_version >= 2:
wrapClass("org.apache.spark.sql.Dataset")
else:
wrapClass("org.apache.spark.sql.DataFrame")
self.__dict__['caffeonspark']=wrapClass("com.yahoo.ml.caffe.CaffeOnSpark")
self.__dict__['cos']=self.__dict__.get('caffeonspark')(sc)
self.__dict__['sqlcontext']=SQLContext(sc,self.__dict__['cos'].sqlContext)
def train(self,train_source):
"""Training with a specific data source
:param DataSource: the source for training data
"""
self.__dict__.get('cos').train(train_source)
def test(self,test_source):
"""Test with a specific data source.
:param DataSource: the source for the test data
"""
return self.__dict__.get('cos').test(test_source)
def features(self,source):
"""Extract features from a specific data source.
:param DataSource: the features to extract
"""
extracted_df = self.__dict__.get('cos').features(source)
extracted_pydf = DataFrame(extracted_df.javaInstance,self.__dict__.get('sqlcontext'))
return extracted_pydf
def trainWithValidation(self,train_source, validation_source):
"""Training with interleaved validation
:param DataSource: source for training data
:param DataSource: source for validation data
"""
validation_df = self.__dict__.get('cos').trainWithValidation(train_source, validation_source)
validation_pydf = DataFrame(validation_df.javaInstance,self.__dict__.get('sqlcontext'))
return validation_pydf | 0.830388 | 0.394318 |
from __future__ import print_function #pylint bug workaround
import argparse
import os
import numpy as np
import pandas
import obj_tools
import neuralnets.grammar as grammar
SMILES_COL_NAME = "structure"
MAX_WORD_LENGTH = 120
ITERATIONS = 2
def get_arguments():
parser = argparse.ArgumentParser(description="Wavefront .obj shape sampling and string conversion")
parser.add_argument("in_folder", type=str, help="The folder containing the input .obj files.")
parser.add_argument("out_filepath", type=str, help="The output file path in HDF5 format.")
parser.add_argument("out_grammarpath", type=str, help="The tiling grammar export path in HDF5 format.")
parser.add_argument('--num_iterations', type=int, metavar='N', default=ITERATIONS, help="Number of iterations for creating random variations out of pairs of objects in the input folder.")
parser.add_argument("--smiles_column", type=str, default = SMILES_COL_NAME, help="Name of the column that contains the SMILES strings. Default: %s" % SMILES_COL_NAME)
parser.add_argument('--fix_variations', dest='fix_variations', action='store_true',
help='Try to fix local part orientations and remove variations if attempt fails.')
return parser.parse_args()
def process_folder(folder_name, file_list = []):
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
process_folder(subfolfer_name, file_list)
if not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
file_list.append(folder_name + "/" + item_name)
def augment_folder(file_list=[], word_list=[]):
for item_id in range(len(file_list) - 1):
item_name_1 = file_list[item_id]
sample_id = np.random.randint(item_id, len(file_list))
item_name_2 = file_list[sample_id]
current_str = obj_tools.create_variations(item_name_1, item_name_2)
current_words = current_str.split("\n")
for w in current_words:
word_list.append(str(w))
#if(len(str(w)) <= MAX_WORD_LENGTH and len(str(w)) > 0):
#word_list.append(str(w))
def fix_variations(folder_name, exclude_file_list, inputA, inputB):
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
fix_variations(subfolfer_name, exclude_file_list, inputA, inputB)
if not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
file_path = folder_name + "/" + item_name
if file_path != inputA and file_path != inputB and file_path not in exclude_file_list:
fixed = obj_tools.fix_variation(inputA, inputB, file_path, file_path)
if fixed != 0:
fixed = obj_tools.fix_variation(inputA, inputB, file_path, file_path)
if fixed != 0:
os.remove(file_path)
base_path, extension = os.path.splitext(file_path)
os.remove(base_path + ".mtl")
def remove_duplicates(tile_grammar, folder_name, inputA, inputB, word_list = []):
current_words = []
for old_str in word_list:
current_words.append(old_str)
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
remove_duplicates(tile_grammar, subfolfer_name, inputA, inputB, word_list)
file_path = folder_name + "/" + item_name
if file_path != inputA and file_path != inputB and not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
current_str = obj_tools.obj2string(file_path)
base_path, extension = os.path.splitext(file_path)
os.remove(base_path + "_coll_graph.obj")
os.remove(base_path + "_coll_graph.mtl")
if len(current_str) > 8 * MAX_WORD_LENGTH or not tile_grammar.check_word(current_str):
os.remove(file_path)
os.remove(base_path + ".mtl")
continue
current_words.append(current_str)
for i in range(len(current_words) - 1):
if tile_grammar.similar_words(current_words[i], current_str):
os.remove(file_path)
os.remove(base_path + ".mtl")
current_words.pop()
break
def main():
args = get_arguments()
initial_file_list = []
process_folder(args.in_folder, initial_file_list)
if len(initial_file_list) == 0:
print("Did not find a valid input file in " + args.in_folder)
exit()
if len(initial_file_list) == 1:
initial_file_list.append(initial_file_list[0])
else:
initial_file_list = sorted(initial_file_list)
inputA = initial_file_list[0]
inputB = initial_file_list[len(initial_file_list) - 1]
initial_smiles_strings = []
initial_smiles_strings.append(str(obj_tools.obj2string(inputA)))
initial_smiles_strings.append(str(obj_tools.obj2string(inputB)))
tile_grammar = grammar.TilingGrammar(initial_smiles_strings)
print("max # neighbors: " + str(tile_grammar.max_degree()))
tile_grammar.store(args.out_grammarpath)
if args.fix_variations:
print("fixing variations...")
fix_variations(args.in_folder, [], inputA, inputB)
print("removing duplicates...")
remove_duplicates(tile_grammar, args.in_folder, inputA, inputB, initial_smiles_strings)
smiles_strings = []
for i in range(args.num_iterations):
current_file_list = []
process_folder(args.in_folder, current_file_list)
print("Current # of variations: " + str(len(current_file_list)))
if len(current_file_list) == 1:
current_file_list.append(current_file_list[0])
augment_folder(current_file_list, smiles_strings)
smiles_strings = list(set(smiles_strings))
if args.fix_variations:
print("fixing variations...")
fix_variations(args.in_folder, current_file_list, inputA, inputB)
print("removing duplicates...")
remove_duplicates(tile_grammar, args.in_folder, inputA, inputB, initial_smiles_strings)
print("Iteration " + str(i) + " # of strings: " + str(len(smiles_strings)))
loaded_grammar = grammar.TilingGrammar([])
loaded_grammar.load(args.out_grammarpath)
valid_strings = []
for w in smiles_strings:
if(loaded_grammar.check_word(w) == True):
if len(str(w)) > 0 :
valid_strings.append(w)
print("# valid strings: " + str(len(valid_strings)))
df = pandas.DataFrame({args.smiles_column : valid_strings})
df.to_hdf(args.out_filepath, "table", format = "table", data_columns = True)
if __name__ == "__main__":
main() | augment_dataset.py | from __future__ import print_function #pylint bug workaround
import argparse
import os
import numpy as np
import pandas
import obj_tools
import neuralnets.grammar as grammar
SMILES_COL_NAME = "structure"
MAX_WORD_LENGTH = 120
ITERATIONS = 2
def get_arguments():
parser = argparse.ArgumentParser(description="Wavefront .obj shape sampling and string conversion")
parser.add_argument("in_folder", type=str, help="The folder containing the input .obj files.")
parser.add_argument("out_filepath", type=str, help="The output file path in HDF5 format.")
parser.add_argument("out_grammarpath", type=str, help="The tiling grammar export path in HDF5 format.")
parser.add_argument('--num_iterations', type=int, metavar='N', default=ITERATIONS, help="Number of iterations for creating random variations out of pairs of objects in the input folder.")
parser.add_argument("--smiles_column", type=str, default = SMILES_COL_NAME, help="Name of the column that contains the SMILES strings. Default: %s" % SMILES_COL_NAME)
parser.add_argument('--fix_variations', dest='fix_variations', action='store_true',
help='Try to fix local part orientations and remove variations if attempt fails.')
return parser.parse_args()
def process_folder(folder_name, file_list = []):
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
process_folder(subfolfer_name, file_list)
if not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
file_list.append(folder_name + "/" + item_name)
def augment_folder(file_list=[], word_list=[]):
for item_id in range(len(file_list) - 1):
item_name_1 = file_list[item_id]
sample_id = np.random.randint(item_id, len(file_list))
item_name_2 = file_list[sample_id]
current_str = obj_tools.create_variations(item_name_1, item_name_2)
current_words = current_str.split("\n")
for w in current_words:
word_list.append(str(w))
#if(len(str(w)) <= MAX_WORD_LENGTH and len(str(w)) > 0):
#word_list.append(str(w))
def fix_variations(folder_name, exclude_file_list, inputA, inputB):
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
fix_variations(subfolfer_name, exclude_file_list, inputA, inputB)
if not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
file_path = folder_name + "/" + item_name
if file_path != inputA and file_path != inputB and file_path not in exclude_file_list:
fixed = obj_tools.fix_variation(inputA, inputB, file_path, file_path)
if fixed != 0:
fixed = obj_tools.fix_variation(inputA, inputB, file_path, file_path)
if fixed != 0:
os.remove(file_path)
base_path, extension = os.path.splitext(file_path)
os.remove(base_path + ".mtl")
def remove_duplicates(tile_grammar, folder_name, inputA, inputB, word_list = []):
current_words = []
for old_str in word_list:
current_words.append(old_str)
for item_name in os.listdir(folder_name):
subfolfer_name = os.path.join(folder_name, item_name)
if os.path.isdir(subfolfer_name):
remove_duplicates(tile_grammar, subfolfer_name, inputA, inputB, word_list)
file_path = folder_name + "/" + item_name
if file_path != inputA and file_path != inputB and not item_name.endswith("_coll_graph.obj") and item_name.endswith(".obj"):
current_str = obj_tools.obj2string(file_path)
base_path, extension = os.path.splitext(file_path)
os.remove(base_path + "_coll_graph.obj")
os.remove(base_path + "_coll_graph.mtl")
if len(current_str) > 8 * MAX_WORD_LENGTH or not tile_grammar.check_word(current_str):
os.remove(file_path)
os.remove(base_path + ".mtl")
continue
current_words.append(current_str)
for i in range(len(current_words) - 1):
if tile_grammar.similar_words(current_words[i], current_str):
os.remove(file_path)
os.remove(base_path + ".mtl")
current_words.pop()
break
def main():
args = get_arguments()
initial_file_list = []
process_folder(args.in_folder, initial_file_list)
if len(initial_file_list) == 0:
print("Did not find a valid input file in " + args.in_folder)
exit()
if len(initial_file_list) == 1:
initial_file_list.append(initial_file_list[0])
else:
initial_file_list = sorted(initial_file_list)
inputA = initial_file_list[0]
inputB = initial_file_list[len(initial_file_list) - 1]
initial_smiles_strings = []
initial_smiles_strings.append(str(obj_tools.obj2string(inputA)))
initial_smiles_strings.append(str(obj_tools.obj2string(inputB)))
tile_grammar = grammar.TilingGrammar(initial_smiles_strings)
print("max # neighbors: " + str(tile_grammar.max_degree()))
tile_grammar.store(args.out_grammarpath)
if args.fix_variations:
print("fixing variations...")
fix_variations(args.in_folder, [], inputA, inputB)
print("removing duplicates...")
remove_duplicates(tile_grammar, args.in_folder, inputA, inputB, initial_smiles_strings)
smiles_strings = []
for i in range(args.num_iterations):
current_file_list = []
process_folder(args.in_folder, current_file_list)
print("Current # of variations: " + str(len(current_file_list)))
if len(current_file_list) == 1:
current_file_list.append(current_file_list[0])
augment_folder(current_file_list, smiles_strings)
smiles_strings = list(set(smiles_strings))
if args.fix_variations:
print("fixing variations...")
fix_variations(args.in_folder, current_file_list, inputA, inputB)
print("removing duplicates...")
remove_duplicates(tile_grammar, args.in_folder, inputA, inputB, initial_smiles_strings)
print("Iteration " + str(i) + " # of strings: " + str(len(smiles_strings)))
loaded_grammar = grammar.TilingGrammar([])
loaded_grammar.load(args.out_grammarpath)
valid_strings = []
for w in smiles_strings:
if(loaded_grammar.check_word(w) == True):
if len(str(w)) > 0 :
valid_strings.append(w)
print("# valid strings: " + str(len(valid_strings)))
df = pandas.DataFrame({args.smiles_column : valid_strings})
df.to_hdf(args.out_filepath, "table", format = "table", data_columns = True)
if __name__ == "__main__":
main() | 0.228587 | 0.084455 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import re
import six
import json
import requests
import jsonpointer
from . import config
from . import exceptions
# Get descriptor base path
def get_descriptor_base_path(descriptor):
"""Get descriptor base path if string or return None.
"""
# Infer from path/url
if isinstance(descriptor, six.string_types):
if os.path.exists(descriptor):
base_path = os.path.dirname(os.path.abspath(descriptor))
else:
# suppose descriptor is a URL
base_path = os.path.dirname(descriptor)
# Current dir by default
else:
base_path = '.'
return base_path
# Retrieve descriptor
def retrieve_descriptor(descriptor):
"""Retrieve descriptor.
"""
the_descriptor = descriptor
if the_descriptor is None:
the_descriptor = {}
if isinstance(the_descriptor, six.string_types):
try:
if os.path.isfile(the_descriptor):
with open(the_descriptor, 'r') as f:
the_descriptor = json.load(f)
else:
req = requests.get(the_descriptor)
req.raise_for_status()
# Force UTF8 encoding for 'text/plain' sources
req.encoding = 'utf8'
the_descriptor = req.json()
except (IOError, requests.exceptions.RequestException) as error:
message = 'Unable to load JSON at "%s"' % descriptor
six.raise_from(exceptions.DataPackageException(message), error)
except ValueError as error:
# Python2 doesn't have json.JSONDecodeError (use ValueErorr)
message = 'Unable to parse JSON at "%s". %s' % (descriptor, error)
six.raise_from(exceptions.DataPackageException(message), error)
if hasattr(the_descriptor, 'read'):
try:
the_descriptor = json.load(the_descriptor)
except ValueError as e:
six.raise_from(exceptions.DataPackageException(str(e)), e)
if not isinstance(the_descriptor, dict):
msg = 'Data must be a \'dict\', but was a \'{0}\''
raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__))
return the_descriptor
# Dereference descriptor
def dereference_package_descriptor(descriptor, base_path):
"""Dereference data package descriptor (IN-PLACE FOR NOW).
"""
for resource in descriptor.get('resources', []):
dereference_resource_descriptor(resource, base_path, descriptor)
return descriptor
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):
"""Dereference resource descriptor (IN-PLACE FOR NOW).
"""
PROPERTIES = ['schema', 'dialect']
if base_descriptor is None:
base_descriptor = descriptor
for property in PROPERTIES:
value = descriptor.get(property)
# URI -> No
if not isinstance(value, six.string_types):
continue
# URI -> Pointer
if value.startswith('#'):
try:
pointer = jsonpointer.JsonPointer(value[1:])
descriptor[property] = pointer.resolve(base_descriptor)
except Exception as error:
message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Remote
elif base_path.startswith('http') or value.startswith('http'):
try:
fullpath = value
if not value.startswith('http'):
fullpath = os.path.join(base_path, value)
response = requests.get(fullpath)
response.raise_for_status()
descriptor[property] = response.json()
except Exception as error:
message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Local
else:
if not is_safe_path(value):
raise exceptions.DataPackageException(
'Not safe path in Local URI "%s" '
'for resource.%s' % (value, property))
if not base_path:
raise exceptions.DataPackageException(
'Local URI "%s" requires base path '
'for resource.%s' % (value, property))
fullpath = os.path.join(base_path, value)
try:
with io.open(fullpath, encoding='utf-8') as file:
descriptor[property] = json.load(file)
except Exception as error:
message = 'Not resolved Local URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
return descriptor
# Expand descriptor
def expand_package_descriptor(descriptor):
"""Apply defaults to data package descriptor (IN-PLACE FOR NOW).
"""
descriptor.setdefault('profile', config.DEFAULT_DATA_PACKAGE_PROFILE)
for resource in descriptor.get('resources', []):
expand_resource_descriptor(resource)
return descriptor
def expand_resource_descriptor(descriptor):
"""Apply defaults to resource descriptor (IN-PLACE FOR NOW).
"""
descriptor.setdefault('profile', config.DEFAULT_RESOURCE_PROFILE)
if descriptor['profile'] == 'tabular-data-resource':
# Schema
schema = descriptor.get('schema')
if schema is not None:
for field in schema.get('fields', []):
field.setdefault('type', config.DEFAULT_FIELD_TYPE)
field.setdefault('format', config.DEFAULT_FIELD_FORMAT)
schema.setdefault('missingValues', config.DEFAULT_MISSING_VALUES)
# Dialect
dialect = descriptor.get('dialect')
if dialect is not None:
for key, value in config.DEFAULT_DIALECT.items():
dialect.setdefault(key, value)
return descriptor
# Miscellaneous
def ensure_dir(path):
"""Ensure directory exists.
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
def is_safe_path(path):
"""Check if path is safe and allowed.
"""
contains_windows_var = lambda val: re.match(r'%.+%', val)
contains_posix_var = lambda val: re.match(r'\$.+', val)
unsafeness_conditions = [
os.path.isabs(path),
('..%s' % os.path.sep) in path,
path.startswith('~'),
os.path.expandvars(path) != path,
contains_windows_var(path),
contains_posix_var(path),
]
return not any(unsafeness_conditions)
def extract_sha256_hash(hash):
"""Extrach SHA256 hash or return None
"""
prefix = 'sha256:'
if hash and hash.startswith(prefix):
return hash.replace(prefix, '')
return None | datapackage/helpers.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import re
import six
import json
import requests
import jsonpointer
from . import config
from . import exceptions
# Get descriptor base path
def get_descriptor_base_path(descriptor):
    """Return the base path for resolving references in a descriptor.

    A string descriptor is treated as a local file path (when it exists on
    disk) or otherwise as a URL; any non-string descriptor falls back to
    the current directory.
    """
    if not isinstance(descriptor, six.string_types):
        # Dicts / file objects carry no location of their own.
        return '.'
    if os.path.exists(descriptor):
        # Local file: anchor relative references next to it.
        return os.path.dirname(os.path.abspath(descriptor))
    # suppose descriptor is a URL
    return os.path.dirname(descriptor)
# Retrieve descriptor
def retrieve_descriptor(descriptor):
    """Retrieve descriptor.

    Accepts a dict (returned as-is), None (treated as an empty dict), a
    local file path or URL to a JSON document, or a file-like object with
    a ``read`` method. Raises DataPackageException when the source cannot
    be loaded or parsed, or when the final result is not a dict.
    """
    the_descriptor = descriptor
    if the_descriptor is None:
        the_descriptor = {}
    if isinstance(the_descriptor, six.string_types):
        try:
            if os.path.isfile(the_descriptor):
                with open(the_descriptor, 'r') as f:
                    the_descriptor = json.load(f)
            else:
                # Not a local file: treat the string as a URL.
                req = requests.get(the_descriptor)
                req.raise_for_status()
                # Force UTF8 encoding for 'text/plain' sources
                req.encoding = 'utf8'
                the_descriptor = req.json()
        except (IOError, requests.exceptions.RequestException) as error:
            message = 'Unable to load JSON at "%s"' % descriptor
            six.raise_from(exceptions.DataPackageException(message), error)
        except ValueError as error:
            # Python2 doesn't have json.JSONDecodeError (use ValueError)
            message = 'Unable to parse JSON at "%s". %s' % (descriptor, error)
            six.raise_from(exceptions.DataPackageException(message), error)
    if hasattr(the_descriptor, 'read'):
        # File-like object: parse its contents as JSON.
        try:
            the_descriptor = json.load(the_descriptor)
        except ValueError as e:
            six.raise_from(exceptions.DataPackageException(str(e)), e)
    if not isinstance(the_descriptor, dict):
        msg = 'Data must be a \'dict\', but was a \'{0}\''
        raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__))
    return the_descriptor
# Dereference descriptor
def dereference_package_descriptor(descriptor, base_path):
    """Resolve referenced sub-descriptors of a data package (mutates it).

    Each resource descriptor is dereferenced against *base_path*, with the
    package descriptor itself as the root for JSON pointer references.
    """
    resources = descriptor.get('resources', [])
    for resource_descriptor in resources:
        dereference_resource_descriptor(resource_descriptor, base_path, descriptor)
    return descriptor
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):
    """Dereference resource descriptor (IN-PLACE FOR NOW).

    String values of 'schema' and 'dialect' are resolved and replaced by
    the referenced JSON document:

    - '#/...'   -> JSON pointer into *base_descriptor* (the package)
    - 'http...' -> remote document (also when *base_path* itself is remote)
    - otherwise -> local file relative to *base_path* (must be a safe path)

    Raises DataPackageException when a reference cannot be resolved.
    """
    # NOTE(review): the loop variable shadows the builtin ``property``.
    PROPERTIES = ['schema', 'dialect']
    if base_descriptor is None:
        base_descriptor = descriptor
    for property in PROPERTIES:
        value = descriptor.get(property)
        # URI -> No
        if not isinstance(value, six.string_types):
            continue
        # URI -> Pointer
        if value.startswith('#'):
            try:
                pointer = jsonpointer.JsonPointer(value[1:])
                descriptor[property] = pointer.resolve(base_descriptor)
            except Exception as error:
                message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )
        # URI -> Remote
        elif base_path.startswith('http') or value.startswith('http'):
            try:
                fullpath = value
                if not value.startswith('http'):
                    # Relative reference inside a remote package.
                    fullpath = os.path.join(base_path, value)
                response = requests.get(fullpath)
                response.raise_for_status()
                descriptor[property] = response.json()
            except Exception as error:
                message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )
        # URI -> Local
        else:
            if not is_safe_path(value):
                raise exceptions.DataPackageException(
                    'Not safe path in Local URI "%s" '
                    'for resource.%s' % (value, property))
            if not base_path:
                raise exceptions.DataPackageException(
                    'Local URI "%s" requires base path '
                    'for resource.%s' % (value, property))
            fullpath = os.path.join(base_path, value)
            try:
                with io.open(fullpath, encoding='utf-8') as file:
                    descriptor[property] = json.load(file)
            except Exception as error:
                message = 'Not resolved Local URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )
    return descriptor
# Expand descriptor
def expand_package_descriptor(descriptor):
    """Apply defaults to data package descriptor (IN-PLACE FOR NOW).

    Sets the default 'profile' and expands every resource descriptor.
    Returns the same (mutated) mapping for convenience.
    """
    descriptor.setdefault('profile', config.DEFAULT_DATA_PACKAGE_PROFILE)
    for resource in descriptor.get('resources', []):
        expand_resource_descriptor(resource)
    return descriptor
def expand_resource_descriptor(descriptor):
    """Apply defaults to resource descriptor (IN-PLACE FOR NOW).

    Only tabular resources get schema/dialect defaults; other profiles are
    left untouched beyond the 'profile' key itself.
    """
    descriptor.setdefault('profile', config.DEFAULT_RESOURCE_PROFILE)
    if descriptor['profile'] == 'tabular-data-resource':
        # Schema
        schema = descriptor.get('schema')
        if schema is not None:
            for field in schema.get('fields', []):
                field.setdefault('type', config.DEFAULT_FIELD_TYPE)
                field.setdefault('format', config.DEFAULT_FIELD_FORMAT)
            schema.setdefault('missingValues', config.DEFAULT_MISSING_VALUES)
        # Dialect
        dialect = descriptor.get('dialect')
        if dialect is not None:
            for key, value in config.DEFAULT_DIALECT.items():
                dialect.setdefault(key, value)
    return descriptor
# Miscellaneous
def ensure_dir(path):
    """Ensure the parent directory of *path* exists.

    Safe against concurrent creation: if another process creates the
    directory between the existence check and ``os.makedirs``, the
    resulting EEXIST error is ignored (py2-compatible equivalent of
    ``exist_ok=True``). Any other OSError propagates.
    """
    import errno
    dirpath = os.path.dirname(path)
    if dirpath and not os.path.exists(dirpath):
        try:
            os.makedirs(dirpath)
        except OSError as error:
            # Lost the race with another writer; any other failure is real.
            if error.errno != errno.EEXIST:
                raise
def is_safe_path(path):
    """Check if path is safe and allowed.

    A path is rejected when it is absolute, contains a '../' component,
    starts with '~', or references environment variables (POSIX or
    Windows style).
    """
    def has_windows_var(val):
        return re.match(r'%.+%', val)
    def has_posix_var(val):
        return re.match(r'\$.+', val)
    parent_reference = '..%s' % os.path.sep
    if os.path.isabs(path):
        return False
    if parent_reference in path:
        return False
    if path.startswith('~'):
        return False
    if os.path.expandvars(path) != path:
        return False
    if has_windows_var(path) or has_posix_var(path):
        return False
    return True
def extract_sha256_hash(hash):
    """Extract the hex digest from a 'sha256:<digest>' string.

    Returns None when *hash* is falsy or does not carry the prefix.
    """
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        # Slice instead of str.replace: replace() would also drop any later
        # occurrence of 'sha256:' inside the digest itself.
        return hash[len(prefix):]
    return None
import pprint
from nose.tools import eq_
from .. import doi
from ...identifier import Identifier
INPUT_TEXT = """
This is a doi randomly placed in the text 10.0000/m1
Here's a typo that might be construed as a doi 10.60 people were there.
{{cite|...|doi=10.0000/m2|pmid=10559875}}
<ref><NAME>., <NAME>., <NAME>., & <NAME>. (2012).
The rise and decline of an open collaboration system: How Wikipedia’s
reaction to popularity is causing its decline.
American Behavioral Scientist,
0002764212469365 doi: 10.1177/0002764212469365</ref>. Hats pants and banana
[http://dx.doi.org/10.1170/foo<bar>(herp)derp]
[http://dx.doi.org/10.1170/foo<bar>(herp)derp[waffles]]
{{cite|...|doi=10.1098/rspb.2008.1131|issue=1656}}
http://www.google.com/sky/#latitude=3.362&longitude=160.1238441&zoom=
10.2387/234310.2347/39423
<!--
10.2387/234310.2347/39423-->
"""
EXPECTED = [
Identifier('doi', "10.0000/m1"),
Identifier('doi', "10.0000/m2"),
Identifier('doi', "10.1177/0002764212469365"),
Identifier('doi', "10.1170/foo<bar>(herp)derp"),
Identifier('doi', "10.1170/foo<bar>(herp)derp[waffles]"),
Identifier('doi', "10.1098/rspb.2008.1131"),
Identifier('doi', "10.2387/234310.2347/39423"),
Identifier('doi', "10.2387/234310.2347/39423")
]
"""
def test_extract_regex():
ids = list(doi.extract_regex(INPUT_TEXT))
pprint.pprint(ids)
pprint.pprint(EXPECTED)
eq_(ids, EXPECTED)
def test_extract_mwp():
ids = list(doi.extract_mwp(INPUT_TEXT))
pprint.pprint(ids)
pprint.pprint(EXPECTED)
eq_(ids, EXPECTED)
"""
def test_extract():
    # doi.extract must recover every DOI from the fixture, in document order.
    found = list(doi.extract(INPUT_TEXT))
    pprint.pprint(found)
    pprint.pprint(EXPECTED)
    eq_(found, EXPECTED)
def test_extract_island():
    # Island-parser variant; same fixture and expectations as test_extract.
    found = list(doi.extract_island(INPUT_TEXT))
    pprint.pprint(found)
    pprint.pprint(EXPECTED)
    eq_(found, EXPECTED)
def test_extract_search():
    # Search-based variant; the commented tokenizer dump is kept for debugging.
    ids = list(doi.extract_search(INPUT_TEXT))
    pprint.pprint(ids)
    pprint.pprint(EXPECTED)
    #pprint.pprint(list(doi.tokenize_finditer(INPUT_TEXT)))
    eq_(ids, EXPECTED) | mwcites/extractors/tests/test_doi.py | import pprint
from nose.tools import eq_
from .. import doi
from ...identifier import Identifier
INPUT_TEXT = """
This is a doi randomly placed in the text 10.0000/m1
Here's a typo that might be construed as a doi 10.60 people were there.
{{cite|...|doi=10.0000/m2|pmid=10559875}}
<ref><NAME>., <NAME>., <NAME>., & <NAME>. (2012).
The rise and decline of an open collaboration system: How Wikipedia’s
reaction to popularity is causing its decline.
American Behavioral Scientist,
0002764212469365 doi: 10.1177/0002764212469365</ref>. Hats pants and banana
[http://dx.doi.org/10.1170/foo<bar>(herp)derp]
[http://dx.doi.org/10.1170/foo<bar>(herp)derp[waffles]]
{{cite|...|doi=10.1098/rspb.2008.1131|issue=1656}}
http://www.google.com/sky/#latitude=3.362&longitude=160.1238441&zoom=
10.2387/234310.2347/39423
<!--
10.2387/234310.2347/39423-->
"""
EXPECTED = [
Identifier('doi', "10.0000/m1"),
Identifier('doi', "10.0000/m2"),
Identifier('doi', "10.1177/0002764212469365"),
Identifier('doi', "10.1170/foo<bar>(herp)derp"),
Identifier('doi', "10.1170/foo<bar>(herp)derp[waffles]"),
Identifier('doi', "10.1098/rspb.2008.1131"),
Identifier('doi', "10.2387/234310.2347/39423"),
Identifier('doi', "10.2387/234310.2347/39423")
]
"""
def test_extract_regex():
ids = list(doi.extract_regex(INPUT_TEXT))
pprint.pprint(ids)
pprint.pprint(EXPECTED)
eq_(ids, EXPECTED)
def test_extract_mwp():
ids = list(doi.extract_mwp(INPUT_TEXT))
pprint.pprint(ids)
pprint.pprint(EXPECTED)
eq_(ids, EXPECTED)
"""
def test_extract():
    # The combined extractor must reproduce EXPECTED exactly (order matters).
    ids = list(doi.extract(INPUT_TEXT))
    pprint.pprint(ids)
    pprint.pprint(EXPECTED)
    eq_(ids, EXPECTED)
def test_extract_island():
    # Island-parser variant over the same fixture.
    ids = list(doi.extract_island(INPUT_TEXT))
    pprint.pprint(ids)
    pprint.pprint(EXPECTED)
    eq_(ids, EXPECTED)
def test_extract_search():
    # Search-based variant; the commented tokenizer dump is kept for debugging.
    ids = list(doi.extract_search(INPUT_TEXT))
    pprint.pprint(ids)
    pprint.pprint(EXPECTED)
    #pprint.pprint(list(doi.tokenize_finditer(INPUT_TEXT)))
    eq_(ids, EXPECTED) | 0.47317 | 0.36139
import os
import cv2
import math
import numpy as np
from typing import Optional, Tuple
__all__ = ['HeadPoseEstimator']
class HeadPoseEstimator(object):
    """Estimate head pose angles (pitch, yaw, roll, in degrees) from 2D
    facial landmarks by fitting a 5-point mean face shape with EPnP.
    """
    def __init__(self, mean_shape_path: str = os.path.join(os.path.dirname(__file__),
                                                           'data', 'bfm_lms.npy')) -> None:
        """Load the mean shape and reduce it to 5 reference points
        (two eye centres, nose tip, two mouth corners).
        """
        # Load the 68-point mean shape derived from BFM
        mean_shape = np.load(mean_shape_path)
        # Calculate the 5-points mean shape
        left_eye = mean_shape[[37, 38, 40, 41]].mean(axis=0)
        right_eye = mean_shape[[43, 44, 46, 47]].mean(axis=0)
        # Indices follow the standard 68-point layout: 30 = nose tip,
        # 48/54 = mouth corners.
        self._mean_shape_5pts = np.vstack((left_eye, right_eye, mean_shape[[30, 48, 54]]))
        # Flip the y coordinates of the mean shape to match that of the image coordinate system
        self._mean_shape_5pts[:, 1] = -self._mean_shape_5pts[:, 1]
    def __call__(self, landmarks: np.ndarray, image_width: int = 0, image_height: int = 0,
                 camera_matrix: Optional[np.ndarray] = None, dist_coeffs: Optional[np.ndarray] = None,
                 output_preference: int = 0) -> Tuple[float, float, float]:
        """Estimate (pitch, yaw, roll) in degrees for one landmark set.

        landmarks: 68-, 51-, 49- or 5-point landmark array in image
            coordinates -- assumed (N, 2); TODO confirm against callers.
        image_width / image_height: used to build an approximate camera
            matrix when *camera_matrix* is not supplied.
        output_preference: 1/2/3 keep pitch/yaw/roll inside (-90, 90);
            any other value minimises |pitch| + |yaw| + |roll|.
        """
        # Form the camera matrix
        if camera_matrix is None:
            if image_width <= 0 or image_height <= 0:
                raise ValueError(
                    'image_width and image_height must be specified when camera_matrix is not given directly')
            else:
                # Rough pinhole model: focal length ~ width + height,
                # principal point at the image centre.
                camera_matrix = np.array([[image_width + image_height, 0, image_width / 2.0],
                                          [0, image_width + image_height, image_height / 2.0],
                                          [0, 0, 1]], dtype=float)
        # Prepare the landmarks
        if landmarks.shape[0] == 68:
            # Drop the first 17 points (the contour in the 68-point scheme).
            landmarks = landmarks[17:]
        if landmarks.shape[0] in [49, 51]:
            # Collapse to the 5 reference points matching the mean shape.
            left_eye = landmarks[[20, 21, 23, 24]].mean(axis=0)
            right_eye = landmarks[[26, 27, 29, 30]].mean(axis=0)
            landmarks = np.vstack((left_eye, right_eye, landmarks[[13, 31, 37]]))
        # Use EPnP to estimate pitch, yaw, and roll
        _, rvec, _ = cv2.solvePnP(self._mean_shape_5pts, np.expand_dims(landmarks, axis=1),
                                  camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
        rot_mat, _ = cv2.Rodrigues(rvec)
        # Rotation matrix -> Euler angles; the first two branches handle the
        # gimbal-lock cases rot_mat[2, 0] == -1 / +1 (yaw == +90 / -90).
        if 1.0 + rot_mat[2, 0] < 1e-9:
            pitch = 0.0
            yaw = 90.0
            roll = -math.atan2(rot_mat[0, 1], rot_mat[0, 2]) / math.pi * 180.0
        elif 1.0 - rot_mat[2, 0] < 1e-9:
            pitch = 0.0
            yaw = -90.0
            roll = math.atan2(-rot_mat[0, 1], -rot_mat[0, 2]) / math.pi * 180.0
        else:
            pitch = math.atan2(rot_mat[2, 1], rot_mat[2, 2]) / math.pi * 180.0
            yaw = -math.asin(rot_mat[2, 0]) / math.pi * 180.0
            roll = math.atan2(rot_mat[1, 0], rot_mat[0, 0]) / math.pi * 180.0
        # Respond to output_preference:
        # output_preference == 1: limit pitch to the range of -90.0 ~ 90.0
        # output_preference == 2: limit yaw to the range of -90.0 ~ 90.0 (already satisfied)
        # output_preference == 3: limit roll to the range of -90.0 ~ 90.0
        # otherwise: minimise total rotation, min(abs(pitch) + abs(yaw) + abs(roll))
        if output_preference != 2:
            # The alternative triple represents the same rotation with all
            # three angles shifted by 180 degrees.
            alt_pitch = pitch - 180.0 if pitch > 0.0 else pitch + 180.0
            alt_yaw = -180.0 - yaw if yaw < 0.0 else 180.0 - yaw
            alt_roll = roll - 180.0 if roll > 0.0 else roll + 180.0
            if (output_preference == 1 and -90.0 < alt_pitch < 90.0 or
                output_preference == 3 and -90.0 < alt_roll < 90.0 or
                output_preference not in (1, 2, 3) and
                abs(alt_pitch) + abs(alt_yaw) + abs(alt_roll) < abs(pitch) + abs(yaw) + abs(roll)):
                pitch, yaw, roll = alt_pitch, alt_yaw, alt_roll
        # Pitch sign is flipped on return -- presumably so that positive
        # pitch means looking up; TODO confirm the convention.
        return -pitch, yaw, roll | ibug/face_detection/utils/head_pose_estimator.py | import os
import cv2
import math
import numpy as np
from typing import Optional, Tuple
__all__ = ['HeadPoseEstimator']
class HeadPoseEstimator(object):
    """Estimate head pose angles (pitch, yaw, roll, in degrees) from 2D
    facial landmarks by fitting a 5-point mean face shape with EPnP.
    """
    def __init__(self, mean_shape_path: str = os.path.join(os.path.dirname(__file__),
                                                           'data', 'bfm_lms.npy')) -> None:
        """Load the mean shape and reduce it to 5 reference points
        (two eye centres, nose tip, two mouth corners).
        """
        # Load the 68-point mean shape derived from BFM
        mean_shape = np.load(mean_shape_path)
        # Calculate the 5-points mean shape
        left_eye = mean_shape[[37, 38, 40, 41]].mean(axis=0)
        right_eye = mean_shape[[43, 44, 46, 47]].mean(axis=0)
        # Indices follow the standard 68-point layout: 30 = nose tip,
        # 48/54 = mouth corners.
        self._mean_shape_5pts = np.vstack((left_eye, right_eye, mean_shape[[30, 48, 54]]))
        # Flip the y coordinates of the mean shape to match that of the image coordinate system
        self._mean_shape_5pts[:, 1] = -self._mean_shape_5pts[:, 1]
    def __call__(self, landmarks: np.ndarray, image_width: int = 0, image_height: int = 0,
                 camera_matrix: Optional[np.ndarray] = None, dist_coeffs: Optional[np.ndarray] = None,
                 output_preference: int = 0) -> Tuple[float, float, float]:
        """Estimate (pitch, yaw, roll) in degrees for one landmark set.

        landmarks: 68-, 51-, 49- or 5-point landmark array in image
            coordinates -- assumed (N, 2); TODO confirm against callers.
        image_width / image_height: used to build an approximate camera
            matrix when *camera_matrix* is not supplied.
        output_preference: 1/2/3 keep pitch/yaw/roll inside (-90, 90);
            any other value minimises |pitch| + |yaw| + |roll|.
        """
        # Form the camera matrix
        if camera_matrix is None:
            if image_width <= 0 or image_height <= 0:
                raise ValueError(
                    'image_width and image_height must be specified when camera_matrix is not given directly')
            else:
                # Rough pinhole model: focal length ~ width + height,
                # principal point at the image centre.
                camera_matrix = np.array([[image_width + image_height, 0, image_width / 2.0],
                                          [0, image_width + image_height, image_height / 2.0],
                                          [0, 0, 1]], dtype=float)
        # Prepare the landmarks
        if landmarks.shape[0] == 68:
            # Drop the first 17 points (the contour in the 68-point scheme).
            landmarks = landmarks[17:]
        if landmarks.shape[0] in [49, 51]:
            # Collapse to the 5 reference points matching the mean shape.
            left_eye = landmarks[[20, 21, 23, 24]].mean(axis=0)
            right_eye = landmarks[[26, 27, 29, 30]].mean(axis=0)
            landmarks = np.vstack((left_eye, right_eye, landmarks[[13, 31, 37]]))
        # Use EPnP to estimate pitch, yaw, and roll
        _, rvec, _ = cv2.solvePnP(self._mean_shape_5pts, np.expand_dims(landmarks, axis=1),
                                  camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
        rot_mat, _ = cv2.Rodrigues(rvec)
        # Rotation matrix -> Euler angles; the first two branches handle the
        # gimbal-lock cases rot_mat[2, 0] == -1 / +1 (yaw == +90 / -90).
        if 1.0 + rot_mat[2, 0] < 1e-9:
            pitch = 0.0
            yaw = 90.0
            roll = -math.atan2(rot_mat[0, 1], rot_mat[0, 2]) / math.pi * 180.0
        elif 1.0 - rot_mat[2, 0] < 1e-9:
            pitch = 0.0
            yaw = -90.0
            roll = math.atan2(-rot_mat[0, 1], -rot_mat[0, 2]) / math.pi * 180.0
        else:
            pitch = math.atan2(rot_mat[2, 1], rot_mat[2, 2]) / math.pi * 180.0
            yaw = -math.asin(rot_mat[2, 0]) / math.pi * 180.0
            roll = math.atan2(rot_mat[1, 0], rot_mat[0, 0]) / math.pi * 180.0
        # Respond to output_preference:
        # output_preference == 1: limit pitch to the range of -90.0 ~ 90.0
        # output_preference == 2: limit yaw to the range of -90.0 ~ 90.0 (already satisfied)
        # output_preference == 3: limit roll to the range of -90.0 ~ 90.0
        # otherwise: minimise total rotation, min(abs(pitch) + abs(yaw) + abs(roll))
        if output_preference != 2:
            # The alternative triple represents the same rotation with all
            # three angles shifted by 180 degrees.
            alt_pitch = pitch - 180.0 if pitch > 0.0 else pitch + 180.0
            alt_yaw = -180.0 - yaw if yaw < 0.0 else 180.0 - yaw
            alt_roll = roll - 180.0 if roll > 0.0 else roll + 180.0
            if (output_preference == 1 and -90.0 < alt_pitch < 90.0 or
                output_preference == 3 and -90.0 < alt_roll < 90.0 or
                output_preference not in (1, 2, 3) and
                abs(alt_pitch) + abs(alt_yaw) + abs(alt_roll) < abs(pitch) + abs(yaw) + abs(roll)):
                pitch, yaw, roll = alt_pitch, alt_yaw, alt_roll
        # Pitch sign is flipped on return -- presumably so that positive
        # pitch means looking up; TODO confirm the convention.
        return -pitch, yaw, roll | 0.871721 | 0.505554
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import glob
import random
import collections
import math
from helper import *
from layer import *
from consts import *
Examples = collections.namedtuple("Examples", "paths, inputs, targets, labels, count, steps_per_epoch")
Examples_inf = collections.namedtuple("Examples_inf",
"paths, inputs, targets, labels, labels2, weight, count, steps_per_epoch")
def load_examples(input_dir, mode, lab_colorization,
                  which_direction, flip, scale_size, batch_size, png16bits, scop_name, mix_weight=False,
                  style_ref=False):
    """Build a TF1 queue-based input pipeline over the images in *input_dir*.

    Based on https://github.com/eric-guerin/pix2pix-tensorflow/blob/png16bits-support/pix2pix.py,
    see LICENSE file.

    Returns a batched ``Examples`` namedtuple, or ``Examples_inf`` (with the
    extra label2/weight fields) when *mix_weight* is True. Class labels and
    mixing weights are parsed from '_'-separated fields of each filename.
    """
    if input_dir is None or not os.path.exists(input_dir):
        raise Exception("input_dir does not exist")
    # Prefer jpg; fall back to png (decoder is switched accordingly).
    input_paths = glob.glob(os.path.join(input_dir, "*.jpg"))
    decode = tf.image.decode_jpeg
    if len(input_paths) == 0:
        input_paths = glob.glob(os.path.join(input_dir, "*.png"))
        decode = tf.image.decode_png
    if len(input_paths) == 0:
        raise Exception("input_dir contains no image files")
    def get_name(path):
        # Filename without directory and extension.
        name, _ = os.path.splitext(os.path.basename(path))
        return name
    # If the image names are numbers, sort by the value rather than asciibetically
    # having sorted inputs means that the outputs are sorted in test mode.
    if all(get_name(path).isdigit() for path in input_paths):
        input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))
    else:
        input_paths = sorted(input_paths)
    input_paths_t = tf.convert_to_tensor(input_paths, dtype=tf.string)
    if not style_ref:
        # Label is the last '_'-separated token; [:-4] strips a 4-char
        # extension ('.jpg'/'.png').
        input_labels = [int(path.split('_')[-1][:-4]) for path in input_paths]
    else:
        # Style references carry no class label.
        input_labels = [0 for _ in input_paths]
    input_labels_t = tf.convert_to_tensor(input_labels, dtype=tf.int32)
    if mix_weight:
        # Second label / mixing weight come from the 2nd and 3rd filename
        # fields counted from the end.
        input_labels2 = [int(path.split('_')[-2]) for path in input_paths]
        input_weight = [float(path.split('_')[-3]) for path in input_paths]
        input_labels2_t = tf.convert_to_tensor(input_labels2, dtype=tf.int32)
        input_weight_t = tf.convert_to_tensor(input_weight, dtype=tf.float32)
        input_queue = tf.train.slice_input_producer([input_paths_t, input_labels_t, input_labels2_t, input_weight_t],
                                                    shuffle=mode == "train")
    else:
        input_queue = tf.train.slice_input_producer([input_paths_t, input_labels_t], shuffle=mode == "train")
    with tf.name_scope(scop_name):
        if mix_weight:
            paths, contents, labels, labels2, weight = read_images_from_disk(input_queue, combine_weight=True)
        else:
            paths, contents, labels = read_images_from_disk(input_queue)
        # NOTE(review): ``raw_input`` shadows the Python 2 builtin.
        if png16bits:
            raw_input = decode(contents, dtype=tf.uint16)
        else:
            raw_input = decode(contents)
        raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)
        assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message="image does not have 3 channels")
        with tf.control_dependencies([assertion]):
            raw_input = tf.identity(raw_input)
        raw_input.set_shape([None, None, 3])
        if lab_colorization:
            # load color and brightness from image, no B image exists here
            lab = rgb_to_lab(raw_input)
            L_chan, a_chan, b_chan = preprocess_lab(lab)
            a_images = tf.expand_dims(L_chan, axis=2)
            b_images = tf.stack([a_chan, b_chan], axis=2)
        else:
            # Break apart image pair and move to range [-1, 1]:
            width = tf.shape(raw_input)[1] # [height, width, channels]
            a_images = preprocess(raw_input[:, :width // 2, :])
            b_images = preprocess(raw_input[:, width // 2:, :])
    if which_direction == "AtoB":
        inputs, targets = [a_images, b_images]
    elif which_direction == "BtoA":
        inputs, targets = [b_images, a_images]
    else:
        raise Exception("invalid direction")
    # Synchronize seed for image operations so that we do the same operations to both
    # input and output images.
    seed = random.randint(0, 2 ** 31 - 1)
    def transform(image):
        # Shared augmentation: optional flip, resize to scale_size, then a
        # random crop down to CROP_SIZE (deterministic given ``seed``).
        r = image
        if flip:
            r = tf.image.random_flip_left_right(r, seed=seed)
        # Area produces a nice downscaling, but does nearest neighbor for upscaling
        # assume we're going to be doing downscaling here.
        r = tf.image.resize_images(r, [scale_size, scale_size], method=tf.image.ResizeMethod.AREA)
        offset = tf.cast(tf.floor(tf.random_uniform([2], 0, scale_size - CROP_SIZE + 1, seed=seed)), dtype=tf.int32)
        if scale_size > CROP_SIZE:
            r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], CROP_SIZE, CROP_SIZE)
        elif scale_size < CROP_SIZE:
            raise Exception("Scale size cannot be less than crop size.")
        return r
    with tf.name_scope("input_images"):
        input_images = transform(inputs)
    with tf.name_scope("target_images"):
        target_images = transform(targets)
    if mix_weight:
        paths_batch, inputs_batch, targets_batch, labels_batch, labels2_batch, weight_batch = tf.train.batch(
            [paths, input_images, target_images, labels, labels2, weight],
            batch_size=batch_size)
        steps_per_epoch = int(math.ceil(len(input_paths) / batch_size))
        return Examples_inf(
            paths=paths_batch,
            inputs=inputs_batch,
            targets=targets_batch,
            labels=labels_batch,
            labels2=labels2_batch,
            weight=weight_batch,
            count=len(input_paths),
            steps_per_epoch=steps_per_epoch,
        )
    else:
        paths_batch, inputs_batch, targets_batch, labels_batch = tf.train.batch(
            [paths, input_images, target_images, labels],
            batch_size=batch_size)
        steps_per_epoch = int(math.ceil(len(input_paths) / batch_size))
        return Examples(
            paths=paths_batch,
            inputs=inputs_batch,
            targets=targets_batch,
            labels=labels_batch,
            count=len(input_paths),
            steps_per_epoch=steps_per_epoch,
        ) | dataloader.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import glob
import random
import collections
import math
from helper import *
from layer import *
from consts import *
Examples = collections.namedtuple("Examples", "paths, inputs, targets, labels, count, steps_per_epoch")
Examples_inf = collections.namedtuple("Examples_inf",
"paths, inputs, targets, labels, labels2, weight, count, steps_per_epoch")
def load_examples(input_dir, mode, lab_colorization,
                  which_direction, flip, scale_size, batch_size, png16bits, scop_name, mix_weight=False,
                  style_ref=False):
    """Build a TF1 queue-based input pipeline over the images in *input_dir*.

    Based on https://github.com/eric-guerin/pix2pix-tensorflow/blob/png16bits-support/pix2pix.py,
    see LICENSE file.

    Returns a batched ``Examples`` namedtuple, or ``Examples_inf`` (with the
    extra label2/weight fields) when *mix_weight* is True. Class labels and
    mixing weights are parsed from '_'-separated fields of each filename.
    """
    if input_dir is None or not os.path.exists(input_dir):
        raise Exception("input_dir does not exist")
    # Prefer jpg; fall back to png (decoder is switched accordingly).
    input_paths = glob.glob(os.path.join(input_dir, "*.jpg"))
    decode = tf.image.decode_jpeg
    if len(input_paths) == 0:
        input_paths = glob.glob(os.path.join(input_dir, "*.png"))
        decode = tf.image.decode_png
    if len(input_paths) == 0:
        raise Exception("input_dir contains no image files")
    def get_name(path):
        # Filename without directory and extension.
        name, _ = os.path.splitext(os.path.basename(path))
        return name
    # If the image names are numbers, sort by the value rather than asciibetically
    # having sorted inputs means that the outputs are sorted in test mode.
    if all(get_name(path).isdigit() for path in input_paths):
        input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))
    else:
        input_paths = sorted(input_paths)
    input_paths_t = tf.convert_to_tensor(input_paths, dtype=tf.string)
    if not style_ref:
        # Label is the last '_'-separated token; [:-4] strips a 4-char
        # extension ('.jpg'/'.png').
        input_labels = [int(path.split('_')[-1][:-4]) for path in input_paths]
    else:
        # Style references carry no class label.
        input_labels = [0 for _ in input_paths]
    input_labels_t = tf.convert_to_tensor(input_labels, dtype=tf.int32)
    if mix_weight:
        # Second label / mixing weight come from the 2nd and 3rd filename
        # fields counted from the end.
        input_labels2 = [int(path.split('_')[-2]) for path in input_paths]
        input_weight = [float(path.split('_')[-3]) for path in input_paths]
        input_labels2_t = tf.convert_to_tensor(input_labels2, dtype=tf.int32)
        input_weight_t = tf.convert_to_tensor(input_weight, dtype=tf.float32)
        input_queue = tf.train.slice_input_producer([input_paths_t, input_labels_t, input_labels2_t, input_weight_t],
                                                    shuffle=mode == "train")
    else:
        input_queue = tf.train.slice_input_producer([input_paths_t, input_labels_t], shuffle=mode == "train")
    with tf.name_scope(scop_name):
        if mix_weight:
            paths, contents, labels, labels2, weight = read_images_from_disk(input_queue, combine_weight=True)
        else:
            paths, contents, labels = read_images_from_disk(input_queue)
        # NOTE(review): ``raw_input`` shadows the Python 2 builtin.
        if png16bits:
            raw_input = decode(contents, dtype=tf.uint16)
        else:
            raw_input = decode(contents)
        raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)
        assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message="image does not have 3 channels")
        with tf.control_dependencies([assertion]):
            raw_input = tf.identity(raw_input)
        raw_input.set_shape([None, None, 3])
        if lab_colorization:
            # load color and brightness from image, no B image exists here
            lab = rgb_to_lab(raw_input)
            L_chan, a_chan, b_chan = preprocess_lab(lab)
            a_images = tf.expand_dims(L_chan, axis=2)
            b_images = tf.stack([a_chan, b_chan], axis=2)
        else:
            # Break apart image pair and move to range [-1, 1]:
            width = tf.shape(raw_input)[1] # [height, width, channels]
            a_images = preprocess(raw_input[:, :width // 2, :])
            b_images = preprocess(raw_input[:, width // 2:, :])
    if which_direction == "AtoB":
        inputs, targets = [a_images, b_images]
    elif which_direction == "BtoA":
        inputs, targets = [b_images, a_images]
    else:
        raise Exception("invalid direction")
    # Synchronize seed for image operations so that we do the same operations to both
    # input and output images.
    seed = random.randint(0, 2 ** 31 - 1)
    def transform(image):
        # Shared augmentation: optional flip, resize to scale_size, then a
        # random crop down to CROP_SIZE (deterministic given ``seed``).
        r = image
        if flip:
            r = tf.image.random_flip_left_right(r, seed=seed)
        # Area produces a nice downscaling, but does nearest neighbor for upscaling
        # assume we're going to be doing downscaling here.
        r = tf.image.resize_images(r, [scale_size, scale_size], method=tf.image.ResizeMethod.AREA)
        offset = tf.cast(tf.floor(tf.random_uniform([2], 0, scale_size - CROP_SIZE + 1, seed=seed)), dtype=tf.int32)
        if scale_size > CROP_SIZE:
            r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], CROP_SIZE, CROP_SIZE)
        elif scale_size < CROP_SIZE:
            raise Exception("Scale size cannot be less than crop size.")
        return r
    with tf.name_scope("input_images"):
        input_images = transform(inputs)
    with tf.name_scope("target_images"):
        target_images = transform(targets)
    if mix_weight:
        paths_batch, inputs_batch, targets_batch, labels_batch, labels2_batch, weight_batch = tf.train.batch(
            [paths, input_images, target_images, labels, labels2, weight],
            batch_size=batch_size)
        steps_per_epoch = int(math.ceil(len(input_paths) / batch_size))
        return Examples_inf(
            paths=paths_batch,
            inputs=inputs_batch,
            targets=targets_batch,
            labels=labels_batch,
            labels2=labels2_batch,
            weight=weight_batch,
            count=len(input_paths),
            steps_per_epoch=steps_per_epoch,
        )
    else:
        paths_batch, inputs_batch, targets_batch, labels_batch = tf.train.batch(
            [paths, input_images, target_images, labels],
            batch_size=batch_size)
        steps_per_epoch = int(math.ceil(len(input_paths) / batch_size))
        return Examples(
            paths=paths_batch,
            inputs=inputs_batch,
            targets=targets_batch,
            labels=labels_batch,
            count=len(input_paths),
            steps_per_epoch=steps_per_epoch,
        ) | 0.860149 | 0.341953
import argparse
import pandas as pd
from preprocessing.labels import encode_label
from preprocessing.tcga.utils import read_clinical_file
def main(
    valid_tiles_file,
    clinical_file,
    output_tiles_labels_file,
    patient_col_tiles_file,
    patient_col_clinical_file,
    label_cols,
):
    """Attach clinical labels to each tile and write the merged table.

    Reads the tiles CSV and the TCGA clinical file, validates that the
    requested label and patient-id columns exist, left-joins labels onto
    tiles by patient, adds a '<label>_encoded' column per label, and writes
    the result to *output_tiles_labels_file*.

    Raises ValueError when required columns are missing from either file.
    """
    tiles = pd.read_csv(valid_tiles_file)
    clinical = read_clinical_file(clinical_file)
    required = set(label_cols + [patient_col_clinical_file])
    if not required <= set(clinical.columns):
        missing_cols = required - set(clinical.columns)
        raise ValueError(
            f"Columns {', '.join(missing_cols)} are missing from the clinical file"
        )
    if patient_col_tiles_file not in tiles.columns:
        raise ValueError(
            f"Patient column {patient_col_tiles_file} not present in tiles file"
        )
    # Chain instead of inplace-on-a-selection: the previous version mutated a
    # slice of `clinical`, which triggers pandas' SettingWithCopyWarning.
    labels_w_patient = (
        clinical[[*label_cols, patient_col_clinical_file]]
        .dropna(subset=label_cols)
        .drop_duplicates()
    )
    tiles_w_labels = pd.merge(
        tiles,
        labels_w_patient,
        left_on=patient_col_tiles_file,
        right_on=patient_col_clinical_file,
        how="left",
    ).drop(patient_col_clinical_file, axis=1)
    # A left merge must keep exactly one row per tile; this fails if the
    # clinical table still has conflicting rows per patient after dedup.
    assert len(tiles_w_labels) == len(tiles)
    for label in label_cols:
        tiles_w_labels, _ = encode_label(tiles_w_labels, label, f"{label}_encoded")
    tiles_w_labels.to_csv(output_tiles_labels_file, index=False)
if __name__ == "__main__":
    # CLI entry point: collect paths/column names and delegate to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "valid_tiles_file", type=str, help="Path to the valid tiles summary file"
    )
    parser.add_argument(
        "clinical_file",
        type=str,
        help="Path to the clinical file (TSV), downloaded from TCGA portal",
    )
    parser.add_argument(
        "output_tiles_labels_file",
        type=str,
        help="Path to the output file with tiles summary and labels",
    )
    parser.add_argument(
        "--patient_col_tiles_file",
        type=str,
        default="patient",
        help="Column name representing the patient "
        "- code, name, id, etc - in the tiles summary file",
    )
    parser.add_argument(
        "--patient_col_clinical_file",
        type=str,
        default="case_submitter_id",
        help="Column name representing the patient "
        "- code, name, id, etc - in the clinical file",
    )
    parser.add_argument(
        "--label_cols",
        type=str,
        nargs="+",
        help="Column(s) to be used as labels. Must be present in the clinical file",
    )
    args = parser.parse_args()
    valid_tiles_file = args.valid_tiles_file
    clinical_file = args.clinical_file
    output_tiles_labels_file = args.output_tiles_labels_file
    patient_col_tiles_file = args.patient_col_tiles_file
    patient_col_clinical_file = args.patient_col_clinical_file
    label_cols = args.label_cols
    main(
        valid_tiles_file,
        clinical_file,
        output_tiles_labels_file,
        patient_col_tiles_file,
        patient_col_clinical_file,
        label_cols,
    ) | preprocessing_prepare_labels_tcga.py | import argparse
import pandas as pd
from preprocessing.labels import encode_label
from preprocessing.tcga.utils import read_clinical_file
def main(
    valid_tiles_file,
    clinical_file,
    output_tiles_labels_file,
    patient_col_tiles_file,
    patient_col_clinical_file,
    label_cols,
):
    """Merge clinical label columns onto the per-tile summary table.

    Reads the tiles CSV and the TCGA clinical file, validates that the
    requested label and patient-id columns exist, left-joins labels onto
    tiles by patient, adds a '<label>_encoded' column per label, and writes
    the result to *output_tiles_labels_file*. Raises ValueError on missing
    columns.
    """
    tiles = pd.read_csv(valid_tiles_file)
    clinical = read_clinical_file(clinical_file)
    if not set(label_cols + [patient_col_clinical_file]) <= set(clinical.columns):
        missing_cols = set(label_cols + [patient_col_clinical_file]) - set(
            clinical.columns
        )
        raise ValueError(
            f"Columns {' ,'.join(missing_cols)} are missing from the clinical file"
        )
    if not patient_col_tiles_file in tiles.columns:
        raise ValueError(
            f"Patient column {patient_col_tiles_file} not present in tiles file"
        )
    labels_w_patient = clinical[[*label_cols, patient_col_clinical_file]]
    # NOTE(review): in-place dropna/drop_duplicates on a column selection can
    # trigger pandas' SettingWithCopyWarning; consider taking a .copy() first.
    labels_w_patient.dropna(subset=label_cols, inplace=True)
    labels_w_patient.drop_duplicates(inplace=True)
    tiles_w_labels = pd.merge(
        tiles,
        labels_w_patient,
        left_on=patient_col_tiles_file,
        right_on=patient_col_clinical_file,
        how="left",
    ).drop(patient_col_clinical_file, axis=1)
    # Left join must keep exactly one row per tile.
    assert len(tiles_w_labels) == len(tiles)
    for label in label_cols:
        tiles_w_labels, _ = encode_label(tiles_w_labels, label, f"{label}_encoded")
    tiles_w_labels.to_csv(output_tiles_labels_file, index=None)
if __name__ == "__main__":
    # CLI entry point: collect paths/column names and delegate to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "valid_tiles_file", type=str, help="Path to the valid tiles summary file"
    )
    parser.add_argument(
        "clinical_file",
        type=str,
        help="Path to the clinical file (TSV), downloaded from TCGA portal",
    )
    parser.add_argument(
        "output_tiles_labels_file",
        type=str,
        help="Path to the output file with tiles summary and labels",
    )
    parser.add_argument(
        "--patient_col_tiles_file",
        type=str,
        default="patient",
        help="Column name representing the patient "
        "- code, name, id, etc - in the tiles summary file",
    )
    parser.add_argument(
        "--patient_col_clinical_file",
        type=str,
        default="case_submitter_id",
        help="Column name representing the patient "
        "- code, name, id, etc - in the clinical file",
    )
    parser.add_argument(
        "--label_cols",
        type=str,
        nargs="+",
        help="Column(s) to be used as labels. Must be present in the clinical file",
    )
    args = parser.parse_args()
    valid_tiles_file = args.valid_tiles_file
    clinical_file = args.clinical_file
    output_tiles_labels_file = args.output_tiles_labels_file
    patient_col_tiles_file = args.patient_col_tiles_file
    patient_col_clinical_file = args.patient_col_clinical_file
    label_cols = args.label_cols
    main(
        valid_tiles_file,
        clinical_file,
        output_tiles_labels_file,
        patient_col_tiles_file,
        patient_col_clinical_file,
        label_cols,
    ) | 0.570331 | 0.387516
import docker
from twisted.python import log
RETRIES = 5
class NeverLocked(Exception):
pass
class AlreadyLocked(Exception):
pass
class Containers(object):
"""
Operations on the set of containers which pertain to dvol. Also maintain
state on which containers we stopped so that we can start them again.
@ivar stopped: mapping from volume name for which we stopped containers to
set of container ids, so that we can attempt to start them again.
"""
def __init__(self, volume_driver_name):
self.volume_driver_name = volume_driver_name
self.stopped = dict()
self.client = docker.client.Client(version="1.20")
def get_related_containers(self, volume):
"""
Find running containers using the dvol plugin that are using the given
volume.
"""
all_containers = self.client.containers()
containers = []
for container in all_containers:
# race condition: a container is deleted during the following
# iteration; catch and log exceptions but otherwise ignore; this is
# a best-effort snapshot of current docker state
try:
container = self.client.inspect_container(container['Id'])
running = container['State']['Running']
if self._is_container_related(container, volume) and running:
containers.append(container)
except:
log.err(None, "while fetching container state %s, "
"maybe it was deleted" % (container['Id'],))
return containers
def stop(self, volume):
"""
Stop containers which are using this volume, and remember which
containers were stopped.
"""
if volume in self.stopped:
raise AlreadyLocked("already locked %s, can't lock it" % (volume,))
containers = self.get_related_containers(volume)
self.stopped[volume] = set()
def attempt_stop(container):
for attempt in range(RETRIES):
try:
self.client.stop(container['Id'])
return
except:
if attempt < RETRIES - 1:
log.msg(
"Failed to stop container %s, retrying..." %
(container['Id'],))
else:
log.err(
None, "while trying to stop container %s" % (container,))
for container in containers:
attempt_stop(container)
self.stopped[volume] = set(c['Id'] for c in containers)
def start(self, volume):
if volume not in self.stopped:
raise NeverLocked("never locked %s, can't unlock it" % (volume,))
for cid in self.stopped[volume]:
try:
self.client.start(cid)
except:
log.err(None, "while trying to start container %s" % (cid,))
del self.stopped[volume]
def remove_related_containers(self, volume):
"""
Remove containers using the dvol plugin that are using the given
volume.
"""
all_containers = self.client.containers(all=True)
for container in all_containers:
# race condition: a container is deleted during the following
# iteration; catch and log exceptions but otherwise ignore; this is
# a best-effort snapshot of current docker state
try:
container = self.client.inspect_container(container['Id'])
except:
log.err(None, "while fetching container state %s, "
"maybe it was deleted" % (container['Id']))
if self._is_container_related(container, volume):
log.msg(None, "Deleting container %s" % (container['Id']))
self.client.remove_container(container['Id'], v=True)
def _is_container_related(self, container, volume):
volume_driver_matches = (
container['Config'].get('VolumeDriver') == self.volume_driver_name
or
container['HostConfig'].get('VolumeDriver') == self.volume_driver_name
)
if not volume_driver_matches:
return False
using_volume = False
aggregated_volumes = container.get('Volumes', {}).values()
# docker 1.8.2 seems to have new Mounts attribute, list of
# objects.
aggregated_volumes += [mount['Source'] for mount in container.get('Mounts', {})]
# e.g. {u'/data': u'/var/lib/dvol/volumes/frob_mysql/branches/master'}
for volume_path in aggregated_volumes:
# XXX implementation detail-y, will need refactoring when
# we support multiple backends
if volume_path.startswith("/var/lib/dvol/volumes"):
parts = volume_path.split("/")
volume_name = parts[-2]
if volume_name == volume:
using_volume = True
break
return using_volume | dvol_python/dockercontainers.py | import docker
from twisted.python import log
RETRIES = 5
class NeverLocked(Exception):
pass
class AlreadyLocked(Exception):
pass
class Containers(object):
"""
Operations on the set of containers which pertain to dvol. Also maintain
state on which containers we stopped so that we can start them again.
@ivar stopped: mapping from volume name for which we stopped containers to
set of container ids, so that we can attempt to start them again.
"""
def __init__(self, volume_driver_name):
self.volume_driver_name = volume_driver_name
self.stopped = dict()
self.client = docker.client.Client(version="1.20")
def get_related_containers(self, volume):
"""
Find running containers using the dvol plugin that are using the given
volume.
"""
all_containers = self.client.containers()
containers = []
for container in all_containers:
# race condition: a container is deleted during the following
# iteration; catch and log exceptions but otherwise ignore; this is
# a best-effort snapshot of current docker state
try:
container = self.client.inspect_container(container['Id'])
running = container['State']['Running']
if self._is_container_related(container, volume) and running:
containers.append(container)
except:
log.err(None, "while fetching container state %s, "
"maybe it was deleted" % (container['Id'],))
return containers
def stop(self, volume):
"""
Stop containers which are using this volume, and remember which
containers were stopped.
"""
if volume in self.stopped:
raise AlreadyLocked("already locked %s, can't lock it" % (volume,))
containers = self.get_related_containers(volume)
self.stopped[volume] = set()
def attempt_stop(container):
for attempt in range(RETRIES):
try:
self.client.stop(container['Id'])
return
except:
if attempt < RETRIES - 1:
log.msg(
"Failed to stop container %s, retrying..." %
(container['Id'],))
else:
log.err(
None, "while trying to stop container %s" % (container,))
for container in containers:
attempt_stop(container)
self.stopped[volume] = set(c['Id'] for c in containers)
def start(self, volume):
if volume not in self.stopped:
raise NeverLocked("never locked %s, can't unlock it" % (volume,))
for cid in self.stopped[volume]:
try:
self.client.start(cid)
except:
log.err(None, "while trying to start container %s" % (cid,))
del self.stopped[volume]
def remove_related_containers(self, volume):
"""
Remove containers using the dvol plugin that are using the given
volume.
"""
all_containers = self.client.containers(all=True)
for container in all_containers:
# race condition: a container is deleted during the following
# iteration; catch and log exceptions but otherwise ignore; this is
# a best-effort snapshot of current docker state
try:
container = self.client.inspect_container(container['Id'])
except:
log.err(None, "while fetching container state %s, "
"maybe it was deleted" % (container['Id']))
if self._is_container_related(container, volume):
log.msg(None, "Deleting container %s" % (container['Id']))
self.client.remove_container(container['Id'], v=True)
def _is_container_related(self, container, volume):
volume_driver_matches = (
container['Config'].get('VolumeDriver') == self.volume_driver_name
or
container['HostConfig'].get('VolumeDriver') == self.volume_driver_name
)
if not volume_driver_matches:
return False
using_volume = False
aggregated_volumes = container.get('Volumes', {}).values()
# docker 1.8.2 seems to have new Mounts attribute, list of
# objects.
aggregated_volumes += [mount['Source'] for mount in container.get('Mounts', {})]
# e.g. {u'/data': u'/var/lib/dvol/volumes/frob_mysql/branches/master'}
for volume_path in aggregated_volumes:
# XXX implementation detail-y, will need refactoring when
# we support multiple backends
if volume_path.startswith("/var/lib/dvol/volumes"):
parts = volume_path.split("/")
volume_name = parts[-2]
if volume_name == volume:
using_volume = True
break
return using_volume | 0.481454 | 0.256116 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import numpy as np
import sar_data as sd
import test_sar_data as tsd
import os
import math
import time
import argparse
import scipy as sp
import scipy.stats
import scipy.io
from PIL import Image
import random
from network import CNNEncoder, RelationNetwork
from sklearn.metrics import confusion_matrix
import rgb
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
parser = argparse.ArgumentParser(description="hsi few-shot classification")
parser.add_argument("--num_epoch", type=int, default=1)
parser.add_argument("--train_n_way", type=int, default=7)
parser.add_argument("--train_n_shot", type=int, default=5)
parser.add_argument("--train_n_query", type=int, default=15)
parser.add_argument("--test_n_way", type=int, default=7)
parser.add_argument("--test_n_shot", type=int, default=5)
parser.add_argument("--test_n_query", type=int, default=1)
parser.add_argument("--test_epoch", type=int, default=100)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--data_folder", type=str, default='./data/')
parser.add_argument("--data_name", type=str, default='rs_data') # flevoland
parser.add_argument("--sar_size1", type=int, default=5, help="flip the picture to 5x5 size")
parser.add_argument("--sar_size2", type=int, default=11, help="flip the picture to 11x11 size")
parser.add_argument("--sar_size3", type=int, default=17, help="flip the picture to 13x13 size")
parser.add_argument("--trainset_ratio", type=float, default=0.7)
parser.add_argument("--out_dim", type=int, default=32, help="cnn_net_out_dim")
parser.add_argument("--hidden_size", type=int, default=10, help="relation_net_hidden_size")
parser.add_argument("--loss_model", type=int, default=3, help="0: ce_loss;1: mse_loss;2: focal_loss;3: MSE_IIRL_loss")
parser.add_argument("--test_num", type=int, default=0)
parser.add_argument("--test_switch",type=bool, default=False)
parser.add_argument("--paint_switch",type=bool,default=False)
args = parser.parse_args()
def weights_init(m):
"""
initial model.
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif classname.find('Linear') != -1:
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data = torch.ones(m.bias.data.size())
def one_hot(args, indices):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
"""
encoded_indicate = torch.zeros(args.train_n_way*args.train_n_query, args.train_n_way).cuda()
index = indices.long().view(-1,1)
encoded_indicate = encoded_indicate.scatter_(1,index,1)
return encoded_indicate
def kappa(confusion_matrix):
"""kappa系数
:param: confusion_matrix--混淆矩阵
:return: Kappa系数
"""
pe_rows = np.sum(confusion_matrix, axis=0)
pe_cols = np.sum(confusion_matrix, axis=1)
sum_total = sum(pe_cols)
pe = np.dot(pe_rows, pe_cols) / float(sum_total ** 2)
po = np.trace(confusion_matrix) / float(sum_total)
return (po - pe) / (1 - pe)
def main():
rgb_colors = rgb.ncolors(args.train_n_way)
print(rgb_colors)
start_time = time.time()
# rgb_colors = np.array([[248, 49, 49], [200, 248, 9], [42, 248, 124], [36, 123, 254], [204, 4, 254]])
if args.paint_switch:
print("painting img_gt")
_, gts = sd.mat_data(args)
wait
gts -= 1
img_h = gts.shape[0]-16
img_v = gts.shape[1]-16
img_gt = Image.new("RGB", (img_h, img_v), "white")
for h in range(img_h):
for v in range(img_v):
for i in range(args.test_n_way):
if gts[h+8,v+8] == i:
img_gt.putpixel([h, v], (rgb_colors[i][0], rgb_colors[i][1], rgb_colors[i][2]))
break
img_gt.save("./img_result/"+ str(args.data_name) + "_img_gt.jpg")
if args.test_switch:
# 184170 load
que_labels = scipy.io.loadmat("./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['que_labels'].squeeze(0).astype(int)
pre_labels = scipy.io.loadmat("./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['pre_labels'].squeeze(0)
# perpare
class_correct = np.zeros(args.test_n_way).astype(int)
class_num = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
for i in range(len(que_labels)):
if pre_labels[i]==que_labels[i]:
class_correct[que_labels[i]] += 1
class_num[que_labels[i]] += 1
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
oa = total_correct/1.0 / total_labels/1.0
print("OA: %.2f %%" %(oa*100))
return print("test finished!")
print("loading sar_dataset")
if os.path.exists('./data/' + args.data_name + '/stacks_1.npy') == False:
print("making dataset")
os.makedirs(("./data/"+args.data_name+"/"), exist_ok= True)
tsd.sar_datesets(args)
test_stacks_1 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_1.npy')) # (182656,27,5,5)
test_stacks_2 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_2.npy'))
test_stacks_3 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_3.npy'))
test_gts = torch.Tensor(np.load('./data/' + args.data_name + '/gts.npy'))
test_gts -= 1
load_time = time.time()
print("%sset load successfully, and spend time: %.2f"%(args.data_name, load_time-start_time))
print("init network")
cnn_sup = CNNEncoder(test_stacks_1.size(1), args.out_dim)
cnn_que = CNNEncoder(test_stacks_1.size(1), args.out_dim)
relation_net = RelationNetwork(2*args.out_dim, args.hidden_size)
# 初始化模型
cnn_sup.apply(weights_init)
cnn_que.apply(weights_init)
relation_net.apply(weights_init)
cnn_sup.cuda()
cnn_que.cuda()
relation_net.cuda()
# scheduler
# Adam 对网络参数进行优化,学习率10000次循环后降为原来的0.5倍
cnn_sup_optim = torch.optim.Adam(cnn_sup.parameters(), lr=args.lr)
cnn_sup_scheduler = StepLR(cnn_sup_optim, step_size=20000, gamma=0.5)
cnn_que_optim = torch.optim.Adam(cnn_que.parameters(), lr=args.lr)
cnn_que_scheduler = StepLR(cnn_que_optim, step_size=20000, gamma=0.5)
relation_net_optim = torch.optim.Adam(relation_net.parameters(), lr=args.lr)
relation_net_scheduler = StepLR(relation_net_optim, step_size=20000, gamma=0.1)
test_result = open("./test_result/%s_%d_loss_%d_shot_%d_log.txt"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num), 'w')
cnn_sup_folder = "./model/" + str(args.data_name) + "/cnn_sup/"
cnn_que_folder = "./model/" + str(args.data_name) + "/cnn_que/"
relation_net_folder = "./model/" + str(args.data_name) + "/relation_net/"
os.makedirs(cnn_sup_folder, exist_ok=True)
os.makedirs(cnn_que_folder, exist_ok=True)
os.makedirs(relation_net_folder, exist_ok=True)
if os.path.exists(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_sup.load_state_dict(torch.load(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_sup successfully")
if os.path.exists(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_que.load_state_dict(torch.load(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_que successfully")
if os.path.exists(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
relation_net.load_state_dict(torch.load(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load relation_net successfully")
'''
cnn_sup.eval()
cnn_que.eval()
relation_net.eval()
'''
for epoch in range(args.num_epoch):
print("start testing")
#------------------------------prepare------------------------------
test_time = time.time()
total_correct = 0
class_correct = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
pre_labels = []
que_labels = []
gts_class = np.arange(args.test_n_way)
h_img = 750 -16
v_img = 1024 -16
img_out = Image.new("RGB", (h_img, v_img), "white")
#------------------------------test------------------------------
test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3, test_sup_gts, class_num = tsd.sar_dataloader(args, gts_class, test_gts, test_stacks_1, test_stacks_2, test_stacks_3, split='test',form='support', shuffle=False)
class_num_max = np.max(class_num)
print("class_num_max: ", class_num_max)
index_i = np.zeros(args.test_n_way).astype(int)
index_j = np.zeros(args.test_n_way).astype(int)
for i in range(class_num_max):
#-------------------------------------------------------------------------
stack_index = np.arange(0, test_gts.size(0)) # 生成stack的索引
# print("stack_index: ", len(stack_index))
index = np.zeros(1, dtype=int) # 生成一个零数组,方便for循环
for i in gts_class:
stack_index_i = stack_index[test_gts == i]
if index_j[i] >= len(stack_index_i):
index_j[i] = 0
# print(i, ":", len(stack_index_i))
stack_index_i = [stack_index_i[index_j[i]]]
index = np.concatenate((index, stack_index_i), axis=0)
index_j[i] += 1
index = np.delete(index, 0 , 0) # 不打乱顺序
test_que_stacks_1 = []
test_que_stacks_2 = []
test_que_stacks_3 = []
test_que_gts = []
for item in list(index):
# 每一行需要增加一维,拼接时保证维度正确
test_que_stacks_1.append(test_stacks_1[item].unsqueeze(0))
test_que_stacks_2.append(test_stacks_2[item].unsqueeze(0))
test_que_stacks_3.append(test_stacks_3[item].unsqueeze(0))
test_que_gts.append(test_gts[item].unsqueeze(0))
test_que_stacks_1 = torch.cat(test_que_stacks_1, dim=0) # (25,27,5,5)
test_que_stacks_2 = torch.cat(test_que_stacks_2, dim=0) # (25,27,11,11)
test_que_stacks_3 = torch.cat(test_que_stacks_3, dim=0) # (25,27,17,17)
test_que_gts = torch.cat(test_que_gts, dim=0)
#-------------------------------------------------------------------------
test_sup_stacks_1 = test_sup_stacks_1.cuda()
test_sup_stacks_2 = test_sup_stacks_2.cuda()
test_sup_stacks_3 = test_sup_stacks_3.cuda()
test_sup_gts = test_sup_gts.cuda()
test_que_stacks_1 = test_que_stacks_1.cuda()
test_que_stacks_2 = test_que_stacks_2.cuda()
test_que_stacks_3 = test_que_stacks_3.cuda()
test_que_gts = test_que_gts.cuda()
mult_sup_feature = cnn_sup(test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3)
mult_que_feature = cnn_que(test_que_stacks_1, test_que_stacks_2, test_que_stacks_3)
mult_relation_pairs = []
for i in range(3):
# 支持集按类取平均
sup_feature = mult_sup_feature[i]
que_feature = mult_que_feature[i]
sup_feature = sup_feature.view(args.test_n_way, args.test_n_shot, -1, sup_feature.shape[2], sup_feature.shape[3])
sup_feature = torch.mean(sup_feature,1).squeeze(1)
# relations
sup_feature_ext = sup_feature.unsqueeze(0).repeat(args.test_n_way*args.test_n_query, 1, 1, 1, 1)
que_feature_ext = torch.transpose(que_feature.unsqueeze(0).repeat(args.test_n_way,1,1, 1, 1),0,1)
relation_pairs = torch.cat((sup_feature_ext, que_feature_ext), 2).view(-1, 2*args.out_dim, sup_feature.shape[2], sup_feature.shape[3])
mult_relation_pairs.append(relation_pairs)
relations = relation_net(mult_relation_pairs[0], mult_relation_pairs[1], mult_relation_pairs[2]).view(-1, args.test_n_way)
# calculate relations
_, predict_gts = torch.max(relations.data, 1)
for j in range(args.test_n_way):
h_j = index[j] // v_img
v_j = index[j] % v_img
img_out.putpixel([h_j, v_j], (rgb_colors[predict_gts[j]][0], rgb_colors[predict_gts[j]][1], rgb_colors[predict_gts[j]][2]))
if index_i[j] > class_num[j]:
continue
if predict_gts[j]== test_que_gts[j]:
class_correct[j] += 1
pre_labels.append(predict_gts[j].item())
que_labels.append(test_que_gts[j].item())
index_i[j] +=1
# painting
img_out.save("./img_result/" + "%s_%d_loss_%d_shot_%d_img_out.jpg"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))
# labels save
que_save = "./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
pre_save = "./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
scipy.io.savemat(que_save, mdict={"que_labels": que_labels})
scipy.io.savemat(pre_save, mdict={"pre_labels": pre_labels})
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
test_result.write("Kappa: %.2f %%\n" %(kappa_score*100))
test_result.flush()
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
# print(i, "_class_correct: ", class_correct[i])
# print(i, "_class_num: ", class_num[i])
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
test_result.write("class_%d_acc: %.2f %%\n" %(i, class_acc[i]*100))
test_result.flush()
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
test_result.write("AA: %.2f %%\n" %(aa*100))
test_result.flush()
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
# print("total_labels: ", total_labels)
# print("total_correct: ", total_correct)
oa = total_correct / total_labels
print("OA: %.2f %%" %(oa*100))
test_result.write("OA: %.2f %%\n" %(oa*100))
test_result.flush()
end_time = time.time()
print("test finished, and spend time: ", end_time - test_time)
if __name__ == "__main__":
main() | optical/test.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import numpy as np
import sar_data as sd
import test_sar_data as tsd
import os
import math
import time
import argparse
import scipy as sp
import scipy.stats
import scipy.io
from PIL import Image
import random
from network import CNNEncoder, RelationNetwork
from sklearn.metrics import confusion_matrix
import rgb
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
parser = argparse.ArgumentParser(description="hsi few-shot classification")
parser.add_argument("--num_epoch", type=int, default=1)
parser.add_argument("--train_n_way", type=int, default=7)
parser.add_argument("--train_n_shot", type=int, default=5)
parser.add_argument("--train_n_query", type=int, default=15)
parser.add_argument("--test_n_way", type=int, default=7)
parser.add_argument("--test_n_shot", type=int, default=5)
parser.add_argument("--test_n_query", type=int, default=1)
parser.add_argument("--test_epoch", type=int, default=100)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--data_folder", type=str, default='./data/')
parser.add_argument("--data_name", type=str, default='rs_data') # flevoland
parser.add_argument("--sar_size1", type=int, default=5, help="flip the picture to 5x5 size")
parser.add_argument("--sar_size2", type=int, default=11, help="flip the picture to 11x11 size")
parser.add_argument("--sar_size3", type=int, default=17, help="flip the picture to 13x13 size")
parser.add_argument("--trainset_ratio", type=float, default=0.7)
parser.add_argument("--out_dim", type=int, default=32, help="cnn_net_out_dim")
parser.add_argument("--hidden_size", type=int, default=10, help="relation_net_hidden_size")
parser.add_argument("--loss_model", type=int, default=3, help="0: ce_loss;1: mse_loss;2: focal_loss;3: MSE_IIRL_loss")
parser.add_argument("--test_num", type=int, default=0)
parser.add_argument("--test_switch",type=bool, default=False)
parser.add_argument("--paint_switch",type=bool,default=False)
args = parser.parse_args()
def weights_init(m):
"""
initial model.
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif classname.find('Linear') != -1:
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data = torch.ones(m.bias.data.size())
def one_hot(args, indices):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
"""
encoded_indicate = torch.zeros(args.train_n_way*args.train_n_query, args.train_n_way).cuda()
index = indices.long().view(-1,1)
encoded_indicate = encoded_indicate.scatter_(1,index,1)
return encoded_indicate
def kappa(confusion_matrix):
"""kappa系数
:param: confusion_matrix--混淆矩阵
:return: Kappa系数
"""
pe_rows = np.sum(confusion_matrix, axis=0)
pe_cols = np.sum(confusion_matrix, axis=1)
sum_total = sum(pe_cols)
pe = np.dot(pe_rows, pe_cols) / float(sum_total ** 2)
po = np.trace(confusion_matrix) / float(sum_total)
return (po - pe) / (1 - pe)
def main():
rgb_colors = rgb.ncolors(args.train_n_way)
print(rgb_colors)
start_time = time.time()
# rgb_colors = np.array([[248, 49, 49], [200, 248, 9], [42, 248, 124], [36, 123, 254], [204, 4, 254]])
if args.paint_switch:
print("painting img_gt")
_, gts = sd.mat_data(args)
wait
gts -= 1
img_h = gts.shape[0]-16
img_v = gts.shape[1]-16
img_gt = Image.new("RGB", (img_h, img_v), "white")
for h in range(img_h):
for v in range(img_v):
for i in range(args.test_n_way):
if gts[h+8,v+8] == i:
img_gt.putpixel([h, v], (rgb_colors[i][0], rgb_colors[i][1], rgb_colors[i][2]))
break
img_gt.save("./img_result/"+ str(args.data_name) + "_img_gt.jpg")
if args.test_switch:
# 184170 load
que_labels = scipy.io.loadmat("./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['que_labels'].squeeze(0).astype(int)
pre_labels = scipy.io.loadmat("./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['pre_labels'].squeeze(0)
# perpare
class_correct = np.zeros(args.test_n_way).astype(int)
class_num = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
for i in range(len(que_labels)):
if pre_labels[i]==que_labels[i]:
class_correct[que_labels[i]] += 1
class_num[que_labels[i]] += 1
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
oa = total_correct/1.0 / total_labels/1.0
print("OA: %.2f %%" %(oa*100))
return print("test finished!")
print("loading sar_dataset")
if os.path.exists('./data/' + args.data_name + '/stacks_1.npy') == False:
print("making dataset")
os.makedirs(("./data/"+args.data_name+"/"), exist_ok= True)
tsd.sar_datesets(args)
test_stacks_1 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_1.npy')) # (182656,27,5,5)
test_stacks_2 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_2.npy'))
test_stacks_3 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_3.npy'))
test_gts = torch.Tensor(np.load('./data/' + args.data_name + '/gts.npy'))
test_gts -= 1
load_time = time.time()
print("%sset load successfully, and spend time: %.2f"%(args.data_name, load_time-start_time))
print("init network")
cnn_sup = CNNEncoder(test_stacks_1.size(1), args.out_dim)
cnn_que = CNNEncoder(test_stacks_1.size(1), args.out_dim)
relation_net = RelationNetwork(2*args.out_dim, args.hidden_size)
# 初始化模型
cnn_sup.apply(weights_init)
cnn_que.apply(weights_init)
relation_net.apply(weights_init)
cnn_sup.cuda()
cnn_que.cuda()
relation_net.cuda()
# scheduler
# Adam 对网络参数进行优化,学习率10000次循环后降为原来的0.5倍
cnn_sup_optim = torch.optim.Adam(cnn_sup.parameters(), lr=args.lr)
cnn_sup_scheduler = StepLR(cnn_sup_optim, step_size=20000, gamma=0.5)
cnn_que_optim = torch.optim.Adam(cnn_que.parameters(), lr=args.lr)
cnn_que_scheduler = StepLR(cnn_que_optim, step_size=20000, gamma=0.5)
relation_net_optim = torch.optim.Adam(relation_net.parameters(), lr=args.lr)
relation_net_scheduler = StepLR(relation_net_optim, step_size=20000, gamma=0.1)
test_result = open("./test_result/%s_%d_loss_%d_shot_%d_log.txt"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num), 'w')
cnn_sup_folder = "./model/" + str(args.data_name) + "/cnn_sup/"
cnn_que_folder = "./model/" + str(args.data_name) + "/cnn_que/"
relation_net_folder = "./model/" + str(args.data_name) + "/relation_net/"
os.makedirs(cnn_sup_folder, exist_ok=True)
os.makedirs(cnn_que_folder, exist_ok=True)
os.makedirs(relation_net_folder, exist_ok=True)
if os.path.exists(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_sup.load_state_dict(torch.load(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_sup successfully")
if os.path.exists(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_que.load_state_dict(torch.load(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_que successfully")
if os.path.exists(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
relation_net.load_state_dict(torch.load(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load relation_net successfully")
'''
cnn_sup.eval()
cnn_que.eval()
relation_net.eval()
'''
for epoch in range(args.num_epoch):
print("start testing")
#------------------------------prepare------------------------------
test_time = time.time()
total_correct = 0
class_correct = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
pre_labels = []
que_labels = []
gts_class = np.arange(args.test_n_way)
h_img = 750 -16
v_img = 1024 -16
img_out = Image.new("RGB", (h_img, v_img), "white")
#------------------------------test------------------------------
test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3, test_sup_gts, class_num = tsd.sar_dataloader(args, gts_class, test_gts, test_stacks_1, test_stacks_2, test_stacks_3, split='test',form='support', shuffle=False)
class_num_max = np.max(class_num)
print("class_num_max: ", class_num_max)
index_i = np.zeros(args.test_n_way).astype(int)
index_j = np.zeros(args.test_n_way).astype(int)
for i in range(class_num_max):
#-------------------------------------------------------------------------
stack_index = np.arange(0, test_gts.size(0)) # 生成stack的索引
# print("stack_index: ", len(stack_index))
index = np.zeros(1, dtype=int) # 生成一个零数组,方便for循环
for i in gts_class:
stack_index_i = stack_index[test_gts == i]
if index_j[i] >= len(stack_index_i):
index_j[i] = 0
# print(i, ":", len(stack_index_i))
stack_index_i = [stack_index_i[index_j[i]]]
index = np.concatenate((index, stack_index_i), axis=0)
index_j[i] += 1
index = np.delete(index, 0 , 0) # 不打乱顺序
test_que_stacks_1 = []
test_que_stacks_2 = []
test_que_stacks_3 = []
test_que_gts = []
for item in list(index):
# 每一行需要增加一维,拼接时保证维度正确
test_que_stacks_1.append(test_stacks_1[item].unsqueeze(0))
test_que_stacks_2.append(test_stacks_2[item].unsqueeze(0))
test_que_stacks_3.append(test_stacks_3[item].unsqueeze(0))
test_que_gts.append(test_gts[item].unsqueeze(0))
test_que_stacks_1 = torch.cat(test_que_stacks_1, dim=0) # (25,27,5,5)
test_que_stacks_2 = torch.cat(test_que_stacks_2, dim=0) # (25,27,11,11)
test_que_stacks_3 = torch.cat(test_que_stacks_3, dim=0) # (25,27,17,17)
test_que_gts = torch.cat(test_que_gts, dim=0)
#-------------------------------------------------------------------------
test_sup_stacks_1 = test_sup_stacks_1.cuda()
test_sup_stacks_2 = test_sup_stacks_2.cuda()
test_sup_stacks_3 = test_sup_stacks_3.cuda()
test_sup_gts = test_sup_gts.cuda()
test_que_stacks_1 = test_que_stacks_1.cuda()
test_que_stacks_2 = test_que_stacks_2.cuda()
test_que_stacks_3 = test_que_stacks_3.cuda()
test_que_gts = test_que_gts.cuda()
mult_sup_feature = cnn_sup(test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3)
mult_que_feature = cnn_que(test_que_stacks_1, test_que_stacks_2, test_que_stacks_3)
mult_relation_pairs = []
for i in range(3):
# 支持集按类取平均
sup_feature = mult_sup_feature[i]
que_feature = mult_que_feature[i]
sup_feature = sup_feature.view(args.test_n_way, args.test_n_shot, -1, sup_feature.shape[2], sup_feature.shape[3])
sup_feature = torch.mean(sup_feature,1).squeeze(1)
# relations
sup_feature_ext = sup_feature.unsqueeze(0).repeat(args.test_n_way*args.test_n_query, 1, 1, 1, 1)
que_feature_ext = torch.transpose(que_feature.unsqueeze(0).repeat(args.test_n_way,1,1, 1, 1),0,1)
relation_pairs = torch.cat((sup_feature_ext, que_feature_ext), 2).view(-1, 2*args.out_dim, sup_feature.shape[2], sup_feature.shape[3])
mult_relation_pairs.append(relation_pairs)
relations = relation_net(mult_relation_pairs[0], mult_relation_pairs[1], mult_relation_pairs[2]).view(-1, args.test_n_way)
# calculate relations
_, predict_gts = torch.max(relations.data, 1)
for j in range(args.test_n_way):
h_j = index[j] // v_img
v_j = index[j] % v_img
img_out.putpixel([h_j, v_j], (rgb_colors[predict_gts[j]][0], rgb_colors[predict_gts[j]][1], rgb_colors[predict_gts[j]][2]))
if index_i[j] > class_num[j]:
continue
if predict_gts[j]== test_que_gts[j]:
class_correct[j] += 1
pre_labels.append(predict_gts[j].item())
que_labels.append(test_que_gts[j].item())
index_i[j] +=1
# painting
img_out.save("./img_result/" + "%s_%d_loss_%d_shot_%d_img_out.jpg"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))
# labels save
que_save = "./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
pre_save = "./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
scipy.io.savemat(que_save, mdict={"que_labels": que_labels})
scipy.io.savemat(pre_save, mdict={"pre_labels": pre_labels})
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
test_result.write("Kappa: %.2f %%\n" %(kappa_score*100))
test_result.flush()
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
# print(i, "_class_correct: ", class_correct[i])
# print(i, "_class_num: ", class_num[i])
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
test_result.write("class_%d_acc: %.2f %%\n" %(i, class_acc[i]*100))
test_result.flush()
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
test_result.write("AA: %.2f %%\n" %(aa*100))
test_result.flush()
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
# print("total_labels: ", total_labels)
# print("total_correct: ", total_correct)
oa = total_correct / total_labels
print("OA: %.2f %%" %(oa*100))
test_result.write("OA: %.2f %%\n" %(oa*100))
test_result.flush()
end_time = time.time()
print("test finished, and spend time: ", end_time - test_time)
if __name__ == "__main__":
main() | 0.665628 | 0.159217 |
import unittest
from unittest import TestCase, mock
from unittest.mock import MagicMock
from http.client import HTTPResponse
from vivino.geocoder.helpers import get_coordinates
class TestHelpers(TestCase):
    """
    Unit tests for vivino.geocoder.helpers.get_coordinates.

    urllib.request.urlopen is patched where the helpers module looks it
    up (mock's "where to patch" rule).  The previous bare
    'helpers.urllib.request.urlopen' target only resolves when the
    package directory itself happens to be on sys.path; given the
    absolute import above, the fully-qualified path is the reliable one.
    (Assumes helpers references urllib.request as an attribute chain —
    TODO confirm against the helpers module.)
    """

    @staticmethod
    def _stub_response(mocked_urlopen, status, payload=None):
        """Wire the context-managed urlopen() result to report *status*
        and, when *payload* is given, read()-then-decode() to that text."""
        response = MagicMock(status=status)
        if payload is not None:
            body = MagicMock()
            body.decode.return_value = payload
            response.read.return_value = body
        mocked_urlopen.return_value.__enter__.return_value = response

    @mock.patch('vivino.geocoder.helpers.urllib.request.urlopen')
    def test_get_coordinates_valid_json_response(self, mocked_request):
        # Canned Nominatim-style payload carrying both lat and lon.
        response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ODbL 1.0. ' \
                        'https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", "boundingbox": ' \
                        '["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lat": 51.1576661, "lon": ' \
                        '-1.4458572, "display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD,' \
                        ' UK", "class": "waterway", "type": "river", "importance": 0.46204844474975}]'
        self._stub_response(mocked_request, 200, response_json)
        coords = get_coordinates('test', 'test')
        self.assertEqual(coords, (51.1576661, -1.4458572))

    @mock.patch('vivino.geocoder.helpers.urllib.request.urlopen')
    def test_get_coordinates_bad_json_response(self, mocked_request):
        # Valid JSON, but not the expected list-of-results shape.
        self._stub_response(mocked_request, 200, '{"test":1}')
        coords = get_coordinates('test', 'test')
        self.assertEqual(coords, (0, 0))

    @mock.patch('vivino.geocoder.helpers.urllib.request.urlopen')
    def test_get_coordinates_bad_http_status(self, mocked_request):
        # Non-200 status: no body is ever read.
        self._stub_response(mocked_request, 300)
        coords = get_coordinates('test', 'test')
        self.assertEqual(coords, (0, 0))

    @mock.patch('vivino.geocoder.helpers.urllib.request.urlopen')
    def test_get_coordinates_missing_lat(self, mocked_request):
        response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ' \
                        'ODbL 1.0. https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", ' \
                        '"boundingbox": ["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lon": -1.4458572, ' \
                        '"display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD, UK", ' \
                        '"class": "waterway", "type": "river", "importance": 0.46204844474975}]'
        self._stub_response(mocked_request, 200, response_json)
        coords = get_coordinates('test', 'test')
        self.assertEqual(coords, (0, 0))

    # NOTE: spec=HTTPResponse removed from the patch below — it specced the
    # urlopen *function* mock as a response object, unlike the other tests.
    @mock.patch('vivino.geocoder.helpers.urllib.request.urlopen')
    def test_get_coordinates_missing_lon(self, mocked_request):
        response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ODbL 1.0. ' \
                        'https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", "boundingbox": ' \
                        '["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lat": 51.1576661, ' \
                        '"display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD, UK", "class": ' \
                        '"waterway", "type": "river", "importance": 0.46204844474975}]'
        self._stub_response(mocked_request, 200, response_json)
        coords = get_coordinates('test', 'test')
        self.assertEqual(coords, (0, 0))
if __name__ == '__main__':
unittest.main() | vivino/geocoder/test_helpers.py | import unittest
from unittest import TestCase, mock
from unittest.mock import MagicMock
from http.client import HTTPResponse
from vivino.geocoder.helpers import get_coordinates
class TestHelpers(TestCase):
@mock.patch('helpers.urllib.request.urlopen')
def test_get_coordinates_valid_json_response(self, mocked_request):
response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ODbL 1.0. ' \
'https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", "boundingbox": ' \
'["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lat": 51.1576661, "lon": ' \
'-1.4458572, "display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD,' \
' UK", "class": "waterway", "type": "river", "importance": 0.46204844474975}]'
a = MagicMock(status=200)
b = MagicMock()
b.decode.return_value = response_json
a.read.return_value = b
mocked_request.return_value.__enter__.return_value = a
coords = get_coordinates('test', 'test')
self.assertEqual(coords, (51.1576661, -1.4458572))
@mock.patch('helpers.urllib.request.urlopen')
def test_get_coordinates_bad_json_response(self, mocked_request):
a = MagicMock(status=200)
b = MagicMock()
b.decode.return_value = '{"test":1}'
a.read.return_value = b
mocked_request.return_value.__enter__.return_value = a
coords = get_coordinates('test', 'test')
self.assertEqual(coords, (0, 0))
@mock.patch('helpers.urllib.request.urlopen')
def test_get_coordinates_bad_http_status(self, mocked_request):
a = MagicMock(status=300)
mocked_request.return_value.__enter__.return_value = a
coords = get_coordinates('test', 'test')
self.assertEqual(coords, (0, 0))
@mock.patch('helpers.urllib.request.urlopen')
def test_get_coordinates_missing_lat(self, mocked_request):
response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ' \
'ODbL 1.0. https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", ' \
'"boundingbox": ["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lon": -1.4458572, ' \
'"display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD, UK", ' \
'"class": "waterway", "type": "river", "importance": 0.46204844474975}]'
a = MagicMock(status=200)
b = MagicMock()
b.decode.return_value = response_json
a.read.return_value = b
mocked_request.return_value.__enter__.return_value = a
coords = get_coordinates('test', 'test')
self.assertEqual(coords, (0, 0))
@mock.patch('helpers.urllib.request.urlopen', spec=HTTPResponse)
def test_get_coordinates_missing_lon(self, mocked_request):
response_json = '[{"place_id": "94242929", "licence": "Data © OpenStreetMap contributors, ODbL 1.0. ' \
'https://osm.org/copyright", "osm_type": "way", "osm_id": "114823817", "boundingbox": ' \
'["51.1525635", "51.1614997", "-1.4508447", "-1.4408037"], "lat": 51.1576661, ' \
'"display_name": "Test, Test Valley, Hampshire, South East, England, SO20 6BD, UK", "class": ' \
'"waterway", "type": "river", "importance": 0.46204844474975}]'
a = MagicMock(status=200)
b = MagicMock()
b.decode.return_value = response_json
a.read.return_value = b
mocked_request.return_value.__enter__.return_value = a
coords = get_coordinates('test', 'test')
self.assertEqual(coords, (0, 0))
if __name__ == '__main__':
unittest.main() | 0.656768 | 0.493897 |
from datetime import datetime, date
from uuid import uuid4
from django.db.models import Model
from elasticsearch.exceptions import NotFoundError
from django_elasticsearch_model_binder.exceptions import (
ElasticSearchFailure,
UnableToCastESNominatedFieldException,
UnableToDeleteModelFromElasticSearch,
UnableToSaveModelToElasticSearch,
)
from django_elasticsearch_model_binder.utils import (
build_document_from_model, get_es_client,
get_index_names_from_alias, queryset_iterator,
)
class ESBoundModel(Model):
    """
    Abstract model mixin that mirrors nominated fields into an
    Elasticsearch index.

    The ES document for each instance tracks the row's lifecycle: it is
    (re)indexed on save() and removed on delete().  Reads and writes go
    through two aliases so the backing index can be rebuilt without
    downtime (see rebuild_es_index()).
    """
    class Meta:
        abstract = True

    # Concrete model fields to mirror into the ES document.
    es_cached_model_fields = []
    # Non-field names whose methods derive extra document fields.
    es_cached_extra_fields = []
    # Alias postfixes used to discern the write alias from the read one.
    es_index_alias_read_postfix = 'read'
    es_index_alias_write_postfix = 'write'

    @classmethod
    def get_index_base_name(cls) -> str:
        """
        Return the base index name: an explicit ``index_name`` class
        attribute when present, otherwise the dotted module path plus
        the class name, lower-cased and dash-separated.
        """
        if hasattr(cls, 'index_name'):
            return cls.index_name
        return '-'.join(
            cls.__module__.lower().split('.') + [cls.__name__.lower()]
        )

    @classmethod
    def convert_model_field_to_es_format(cls, value):
        """
        Cast *value* into a representation Elasticsearch can index.

        Model instances collapse to their pk, date/datetime values to a
        ``%d-%m-%Y %H:%M:%S`` string and numbers pass through; anything
        else falls back to ``str()``.  Extend via ``super()`` for custom
        types.

        Raises UnableToCastESNominatedFieldException when the ``str()``
        fallback fails.
        """
        if isinstance(value, Model):
            return value.pk
        if isinstance(value, (datetime, date)):
            # Bug fix: '%M' (minutes) was used where '%m' (month) was
            # intended, producing day-MINUTE-year strings.
            return value.strftime('%d-%m-%Y %H:%M:%S')
        if isinstance(value, (int, float)):
            return value
        try:
            return str(value)
        except Exception as exc:
            raise UnableToCastESNominatedFieldException(exc)

    def save(self, *args, **kwargs):
        """
        Persist the model, then (re)index its nominated fields in ES.

        The document id mirrors the pk, so re-saves overwrite the
        previous document.  NOTE: the database write has already
        committed if the ES call fails; callers catching
        UnableToSaveModelToElasticSearch should expect the row to exist.
        """
        super().save(*args, **kwargs)
        try:
            get_es_client().index(
                id=self.pk,
                index=self.get_write_alias_name(),
                body=build_document_from_model(self),
            )
        except Exception as exc:
            # Chain the underlying error so the real cause is preserved.
            raise UnableToSaveModelToElasticSearch(
                'Attempted to save/update the {} related es document '
                'from index {}, please check your '
                'connection and status of your ES cluster.'.format(
                    str(self), self.get_index_base_name()
                )
            ) from exc

    def delete(self, *args, **kwargs):
        """
        Delete the model row, then remove its ES document.

        The pk is captured up front because Django clears it during
        delete().
        """
        document_id = self.pk
        super().delete(*args, **kwargs)
        try:
            get_es_client().delete(
                index=self.get_write_alias_name(), id=document_id,
            )
        except Exception as exc:
            # Chain the underlying error so the real cause is preserved.
            raise UnableToDeleteModelFromElasticSearch(
                'Attempted to remove {} related es document '
                'from index {}, please check your '
                'connection and status of your ES cluster.'.format(
                    str(self), self.get_index_base_name()
                )
            ) from exc

    @staticmethod
    def get_index_mapping() -> dict:
        """
        Index definition used when creating a new index.  Override to
        declare searchable fields, analyzers and settings.
        """
        return {'settings': {}, 'mappings': {}}

    @classmethod
    def get_read_alias_name(cls) -> str:
        """Read alias name: ``{base index name}-{read postfix}``."""
        return '{}-{}'.format(
            cls.get_index_base_name(), cls.es_index_alias_read_postfix
        )

    @classmethod
    def get_write_alias_name(cls) -> str:
        """Write alias name: ``{base index name}-{write postfix}``."""
        return '{}-{}'.format(
            cls.get_index_base_name(), cls.es_index_alias_write_postfix
        )

    @classmethod
    def generate_index(cls) -> str:
        """
        Create a fresh, uniquely named index for this model in
        Elasticsearch and return its name.
        """
        index = '{}-{}'.format(cls.get_index_base_name(), uuid4().hex)
        get_es_client().indices.create(
            index=index, body=cls.get_index_mapping()
        )
        return index

    @classmethod
    def bind_alias(cls, index: str, alias: str):
        """
        Point *alias* at *index*, detaching it from any indices it
        currently references, in a single atomic alias update.
        """
        actions = []
        if get_es_client().indices.exists_alias(name=alias):
            actions = [
                {'remove': {'index': old_index, 'alias': alias}}
                for old_index in get_index_names_from_alias(alias)
            ]
        actions.append({'add': {'index': index, 'alias': alias}})
        get_es_client().indices.update_aliases(body={'actions': actions})

    @classmethod
    def rebuild_es_index(cls, queryset=None, drop_old_index=True):
        """
        Rebuild the model's index behind the aliases.

        A new index is created and the write alias flipped to it first,
        so live saves land in the new index while the bulk reindex runs;
        the read alias is flipped once the data is in.

        :param queryset: restrict the rebuild to a slice of the table
            (defaults to the whole table).
        :param drop_old_index: delete the previous index afterwards;
            pass False to keep it (reachable via the ES API only).

        NOTE(review): assumes the read alias already exists — the [0]
        lookup raises on a never-bootstrapped model; confirm first-run
        behaviour.
        """
        old_index = get_index_names_from_alias(cls.get_read_alias_name())[0]
        new_index = cls.generate_index()
        cls.bind_alias(new_index, cls.get_write_alias_name())
        for qs_chunk in queryset_iterator(queryset or cls.objects.all()):
            qs_chunk.reindex_into_es()
        cls.bind_alias(new_index, cls.get_read_alias_name())
        if drop_old_index:
            get_es_client().indices.delete(old_index)

    def retrive_es_fields(self, only_include_fields=True):
        """
        Fetch this instance's ES document via the read alias.

        :param only_include_fields: return just the indexed ``_source``
            fields (default) instead of the full ES hit envelope.

        Raises ElasticSearchFailure when no document exists for the pk.
        NOTE: method name (sic) kept for backward compatibility.
        """
        try:
            results = get_es_client().get(
                id=self.pk, index=self.get_read_alias_name(),
            )
        except NotFoundError:
            raise ElasticSearchFailure(
                f'Model {repr(self)} is not found in '
                f'{self.get_index_base_name()}, model requires '
                f'indexing to retrieve fields back.'
            )
        if only_include_fields:
            return results['_source']
        return results
return results | django_elasticsearch_model_binder/models.py | from datetime import datetime, date
from uuid import uuid4
from django.db.models import Model
from elasticsearch.exceptions import NotFoundError
from django_elasticsearch_model_binder.exceptions import (
ElasticSearchFailure,
UnableToCastESNominatedFieldException,
UnableToDeleteModelFromElasticSearch,
UnableToSaveModelToElasticSearch,
)
from django_elasticsearch_model_binder.utils import (
build_document_from_model, get_es_client,
get_index_names_from_alias, queryset_iterator,
)
class ESBoundModel(Model):
"""
Mixin that binds a models nominated field to an Elasticsearch index.
Nominated fields will maintain persistency with the models existence
and configuration within the database.
"""
class Meta:
abstract = True
# Fields to be cached in ES.
es_cached_model_fields = []
# nonfields containing methods for custom field insertion.
es_cached_extra_fields = []
# Alias postfix values, used to decern write aliases from read.
es_index_alias_read_postfix = 'read'
es_index_alias_write_postfix = 'write'
@classmethod
def get_index_base_name(cls) -> str:
"""
Retrieve the model defined index name from self.index_name defaulting
to generated name based on app module directory and model name.
"""
if hasattr(cls, 'index_name'):
return cls.index_name
else:
return '-'.join(
cls.__module__.lower().split('.')
+ [cls.__name__.lower()]
)
@classmethod
def convert_model_field_to_es_format(cls, value):
"""
Helper method to cast an incoming value into a format that
is indexable within ElasticSearch. extend with your own super
implentation if there are custom types you'd like handled differently.
"""
if isinstance(value, Model):
return value.pk
elif isinstance(value, datetime) or isinstance(value, date):
return value.strftime('%d-%M-%Y %H:%M:%S')
elif isinstance(value, int) or isinstance(value, float):
return value
else:
# Catch all try to cast value to string raising
# an exception explicitly if that fails.
try:
return str(value)
except Exception as e:
raise UnableToCastESNominatedFieldException(e)
def save(self, *args, **kwargs):
"""
Override model save to index those fields nominated by
es_cached_model_fields storring them in elasticsearch.
"""
super().save(*args, **kwargs)
try:
get_es_client().index(
id=self.pk,
index=self.get_write_alias_name(),
body=build_document_from_model(self),
)
except Exception:
raise UnableToSaveModelToElasticSearch(
'Attempted to save/update the {} related es document '
'from index {}, please check your '
'connection and status of your ES cluster.'.format(
str(self), self.get_index_base_name()
)
)
def delete(self, *args, **kwargs):
"""
Same as save but in reverse, remove the model instances cached
fields in Elasticsearch.
"""
# We temporarily cache the model pk here so we can delete the model
# instance first before we remove from Elasticsearch.
author_document_id = self.pk
super().delete(*args, **kwargs)
try:
get_es_client().delete(
index=self.get_write_alias_name(), id=author_document_id,
)
except Exception:
# Catch failure and reraise with specific exception.
raise UnableToDeleteModelFromElasticSearch(
'Attempted to remove {} related es document '
'from index {}, please check your '
'connection and status of your ES cluster.'.format(
str(self), self.get_index_base_name()
)
)
@staticmethod
def get_index_mapping() -> dict:
"""
Stub mapping of how the index should be created, override this with
the specific implementation of what fields should be searchable
and how.
"""
return {'settings': {}, 'mappings': {}}
@classmethod
def get_read_alias_name(cls) -> str:
"""
Generates a unique alias name using either set explicitly by
overridding this method or in the default format of a
combination of {index_name}-read.
"""
return (
cls.get_index_base_name()
+ '-' + cls.es_index_alias_read_postfix
)
@classmethod
def get_write_alias_name(cls) -> str:
"""
Generates a unique alias name using either set explicitly by
overridding this method or in the default format of a
combination of {index_name}-write.
"""
return (
cls.get_index_base_name()
+ '-' + cls.es_index_alias_write_postfix
)
@classmethod
def generate_index(cls) -> str:
"""
Generates a new index in Elasticsearch for the
model returning the index name.
"""
index = cls.get_index_base_name() + '-' + uuid4().hex
get_es_client().indices.create(
index=index, body=cls.get_index_mapping()
)
return index
@classmethod
def bind_alias(cls, index: str, alias: str):
"""
Connect an alias to a specified index by default removes alias
from any other indices if present.
"""
old_indicy_names = []
if get_es_client().indices.exists_alias(name=alias):
old_indicy_names = get_index_names_from_alias(alias)
alias_updates = [
{'remove': {'index': indicy, 'alias': alias}}
for indicy in old_indicy_names
]
alias_updates.append({'add': {'index': index, 'alias': alias}})
get_es_client().indices.update_aliases(body={'actions': alias_updates})
@classmethod
def rebuild_es_index(cls, queryset=None, drop_old_index=True):
"""
Rebuilds the entire ESIndex for the model, utilizes Aliases to
preserve access to the old index while the new is being built.
By default will rebuild the entire database table in Elasticsearch,
define a queryset to only rebuild a slice of this.
Set drop_old_index to False if you want to preserve the old index for
future use, this will no longer have the aliases tied to it but will
still be accessable through the Elasticsearch API.
"""
old_indicy = get_index_names_from_alias(cls.get_read_alias_name())[0]
new_indicy = cls.generate_index()
cls.bind_alias(new_indicy, cls.get_write_alias_name())
chunked_qs_generator = queryset_iterator(queryset or cls.objects.all())
for qs_chunk in chunked_qs_generator:
qs_chunk.reindex_into_es()
cls.bind_alias(new_indicy, cls.get_read_alias_name())
if drop_old_index:
get_es_client().indices.delete(old_indicy)
def retrive_es_fields(self, only_include_fields=True):
"""
Returns the currently indexed fields within ES for the model.
"""
try:
results = get_es_client().get(
id=self.pk, index=self.get_read_alias_name(),
)
except NotFoundError:
raise ElasticSearchFailure(
f'Model {repr(self)} is not found in '
f'{self.get_index_base_name()}, model requires '
f'indexing to retrieve fields back.'
)
if only_include_fields:
return results['_source']
return results | 0.751375 | 0.162579 |
import numpy as np
import matplotlib.pyplot as plt
class SnapshotProcessing:
    """
    Post-processing for one atmosphere snapshot: converts mass fractions
    to mole fractions (actual and chemical-equilibrium) and offers
    plotting and text export.
    """

    def __init__(self, y, chem_obj, grid):
        """
        :param y: mass fractions, shape (n_species, grid.N)
        :param chem_obj: chemistry wrapper exposing .gas and .equilibrate()
        :param grid: vertical grid with z, T, PG (plus _ref profiles) and N
        """
        self.y = y
        self.y_eq = chem_obj.equilibrate()
        self.gas = chem_obj.gas
        self.grid = grid
        self.x = np.zeros((self.gas.n_species, self.grid.N))
        self.x_eq = self.x.copy()
        # Mass fraction -> mole fraction: X_i = Y_i / W_i * W_mean.
        # NOTE(review): gas.mean_molecular_weight reflects the gas
        # object's *current* state, i.e. the same value for every
        # column here — confirm a per-column mean weight is not intended.
        for i in range(self.grid.N):
            self.x[:, i] = self.y[:, i] / self.gas.molecular_weights * self.gas.mean_molecular_weight
            self.x_eq[:, i] = self.y_eq[:, i] / self.gas.molecular_weights * self.gas.mean_molecular_weight

    def Plot(self, species_names, output_fig_path):
        """
        Plot the T-P profile (left panel) and per-species mole-fraction
        profiles (right panel; solid = actual, dashed = equilibrium),
        then save the figure to *output_fig_path*.
        """
        species_indices = [
            self.gas.species_index(name) for name in species_names
        ]
        fig = plt.figure(figsize=(10, 5))
        # Left panel: temperature-pressure profile vs. the reference one.
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.plot(self.grid.T, self.grid.PG, '-',
                 self.grid.T_ref, self.grid.PG_ref, '--')
        ax1.set_yscale('log')
        ax1.invert_yaxis()
        ax1.set_xlabel('T(K)')
        ax1.set_ylabel('P(Pa)')
        colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
        ax2 = fig.add_subplot(1, 2, 2)
        for color_index, (species_index, species_name) in enumerate(
                zip(species_indices, species_names)):
            # Cycle the palette so more than len(colors) species no
            # longer raises IndexError.
            color = colors[color_index % len(colors)]
            ax2.plot(self.x[species_index, :], self.grid.PG,
                     linestyle='-', color=color, label=species_name)
            ax2.plot(self.x_eq[species_index, :], self.grid.PG,
                     linestyle='--', color=color)
        ax2.set_xscale('log')
        ax2.set_yscale('log')
        ax2.invert_yaxis()
        ax2.set_xlabel('X')
        ax2.set_ylabel('P(Pa)')
        ax2.legend(loc=3)
        plt.savefig(output_fig_path)
        # print() form keeps the module importable under Python 3.
        print("snapshot saved successfully")

    def SaveData(self, species_names, output_file_path, header_variable_names):
        """
        Write z, P, T plus the selected species' mole fractions as text
        columns (one row per grid level) to *output_file_path*.
        """
        x_selected = np.zeros((len(species_names), self.grid.N))
        for row, species_name in enumerate(species_names):
            x_selected[row, :] = self.x[self.gas.species_index(species_name), :]
        data = np.array([self.grid.z, self.grid.PG, self.grid.T])
        data = np.transpose(np.concatenate((data, x_selected), axis=0))
        np.savetxt(output_file_path, data, fmt='%.8e', header=header_variable_names)
        # print() form keeps the module importable under Python 3.
        print("File written successfully")
print "File written successfully" | diffusion_kinetics/snapshot_processing.py | import numpy as np
import matplotlib.pyplot as plt
class SnapshotProcessing:
def __init__(self,y,chem_obj,grid):
self.y = y
self.y_eq = chem_obj.equilibrate()
self.gas = chem_obj.gas
self.grid = grid
self.x = np.zeros((self.gas.n_species,self.grid.N))
self.x_eq = self.x.copy()
for i in range(self.grid.N):
self.x[:,i] = self.y[:,i]/self.gas.molecular_weights*self.gas.mean_molecular_weight
self.x_eq[:,i] = self.y_eq[:,i]/self.gas.molecular_weights*self.gas.mean_molecular_weight
def Plot(self,species_names,output_fig_path):
n = len(species_names)
species_indices = []
for species_name in species_names:
species_indices.append(self.gas.species_index(species_name))
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(1,2,1)
ax1.plot(self.grid.T,self.grid.PG,'-',self.grid.T_ref,self.grid.PG_ref,'--')
ax1.set_yscale('log')
ax1.invert_yaxis()
ax1.set_xlabel('T(K)')
ax1.set_ylabel('P(Pa)')
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
color_index = 0
ax2 = fig.add_subplot(1,2,2)
for species_index, species_name in zip(species_indices,species_names):
ax2.plot(self.x[species_index,:],self.grid.PG,linestyle='-',color = colors[color_index],label = species_name)
ax2.plot(self.x_eq[species_index,:],self.grid.PG,linestyle='--',color = colors[color_index])
color_index = color_index + 1
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.invert_yaxis()
ax2.set_xlabel('X')
ax2.set_ylabel('P(Pa)')
ax2.legend(loc=3)
plt.savefig(output_fig_path)
print "snapshot saved successfully"
def SaveData(self,species_names,output_file_path,header_variable_names):
# change to mole fractions
x_selected_species = np.zeros((len(species_names),self.grid.N))
i=0
for species_name in species_names:
x_selected_species[i,:] = self.x[self.gas.species_index(species_name),:]
i=i+1
data = np.array([self.grid.z,self.grid.PG,self.grid.T])
data = np.transpose(np.concatenate((data,x_selected_species),axis=0))
np.savetxt(output_file_path,data,fmt='%.8e',header = header_variable_names)
print "File written successfully" | 0.386185 | 0.452838 |
import base64
import getopt
import os
import json
import re
import sys
import urllib
from urllib import request
import bakthread
import requests
banner = '''
_ _ _ _
| |_ ___ | |__ ___ | |_ ___ ___ | |__
| . \<_> || / // | '| . |/ ._>/ | '| / /
|___/<___||_\_\\_|_.|_|_|\___.\_|_.|_\_\\n
author:mfsva v 1.2
please input email,key
example:
python3 bakscan.py -s 'body="thinkphp" && after="2021-01-01"'
'''
headers = {
'Upgrade-Insecure-Requests': '1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
}
search = ''
def getParam(argv):
    """
    Parse command-line arguments and return the FOFA search expression.

    Returns '' when no -s option is supplied (previously this raised
    UnboundLocalError).  -h prints the banner and exits; on a malformed
    option the usage line is printed and the process exits with 2.
    """
    search = ''
    try:
        # 'h' was missing from the optstring, so the -h branch below
        # could never be reached (getopt rejected the flag first).
        opts, args = getopt.getopt(argv, "hs:", ["ifile="])
    except getopt.GetoptError:
        print('bakscan.py -s <search>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(banner)
            sys.exit()
        elif opt in ("-s", "--Fofa 查询参数"):
            search = arg
    return search
def FofaSearch(name, size):
    """
    Query the FOFA search API and return the decoded JSON response.

    :param name: FOFA query expression (e.g. 'body="thinkphp"')
    :param size: maximum number of results to request
    :return: parsed JSON dict (expected keys: 'query', 'size', 'results')
    """
    from urllib.parse import quote
    email = ""
    key = ""
    # Encode the query exactly as the API expects: base64, then
    # percent-encode everything.  The old str(bytes) + replace() dance
    # only escaped '=' and silently corrupted any '+' or '/' in the
    # base64 output.
    qbase64 = quote(base64.b64encode(name.encode('UTF-8')).decode('ascii'), safe='')
    url = (
        "https://fofa.so/api/v1/search/all?email=" + email
        + "&key=" + key
        + "&size=" + str(size)
        + "&qbase64=" + qbase64
    )
    req = urllib.request.Request(url, headers=headers)
    # Context manager closes the connection even on JSON errors.
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())
def alive_cc(name):
    # Stub: host liveness check not implemented yet; kept as a no-op.
    return
def bakfilescan():
    """Kick off the threaded backup-file scan (delegates to bakthread)."""
    bakthread.run()
def setagreement(name):
    """
    Normalise a FOFA result host into newline-terminated URL line(s).

    Hosts that already carry an http(s) scheme are returned as a single
    line; bare hosts expand to both an http:// and an https:// line.
    """
    # Anchor on the full scheme: the old r'http' prefix match wrongly
    # treated hosts such as 'httpd.example.com' as already schemed.
    if re.match(r'https?://', name):
        return name + "\n"
    return "http://" + name + "\n" + "https://" + name + "\n"
# bakfilescan(n[0])
if __name__ == '__main__':
    # parameter =
    print(banner)
    search=getParam(sys.argv[1:])
    # print(search)
    # Maximum number of FOFA results to request per query.
    size = 10000
    # Start from a clean output file each run.
    if os.path.exists("runoob.txt"):
        os.remove("runoob.txt")
    fo = open("runoob.txt", "w+")
    try:
        info = FofaSearch(search,size)
        # print(info)
        search_id = info['query']
        search_size= str(info['size'])
        search_results = info['results']
        # Each result row's first column is the host; append its URL
        # form(s) to the target list.
        for n in search_results:
            fo.seek(0, 2)
            fo.write(setagreement(n[0]))
        fo.close()
        print("查询参数:"+search_id)
        print("查询条数:" + search_size )
        print("文件runoob.txt写入成功")
    # NOTE(review): except BaseException silently swallows every error
    # (including KeyboardInterrupt) and exits — consider narrowing and
    # reporting the failure.
    except BaseException :
        sys.exit()
bakfilescan() | bakscan.py | import base64
import getopt
import os
import json
import re
import sys
import urllib
from urllib import request
import bakthread
import requests
banner = '''
_ _ _ _
| |_ ___ | |__ ___ | |_ ___ ___ | |__
| . \<_> || / // | '| . |/ ._>/ | '| / /
|___/<___||_\_\\_|_.|_|_|\___.\_|_.|_\_\\n
author:mfsva v 1.2
please input email,key
example:
python3 bakscan.py -s 'body="thinkphp" && after="2021-01-01"'
'''
headers = {
'Upgrade-Insecure-Requests': '1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
}
search = ''
def getParam(argv):
try:
opts, args = getopt.getopt(argv, "s:", ["ifile="])
except getopt.GetoptError:
print('bakscan.py -s <search>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(banner)
sys.exit()
elif opt in ("-s", "--Fofa 查询参数"):
search= arg
# print(search)
return search
def FofaSearch(name,size):
size=str(size)
email = ""
key = ""
b64 =base64.b64encode(name.encode('UTF-8'))
url="https://fofa.so/api/v1/search/all?email="+email+"&key="+key+"&size="+size+"&qbase64="+str(b64).replace("b'",'').replace("'",'').replace('=','%3D')
# print(url)
request = urllib.request.Request(url, headers=headers)
req = urllib.request.urlopen(request).read()
req = json.loads(req)
return req
def alive_cc(name):
return
def bakfilescan():
bakthread.run()
return
def setagreement(name):
if re.match(r'http', name):
return (name+"\n")
else:
return ("http://" + name+"\n"+"https://" + name+"\n")
# bakfilescan(n[0])
if __name__ == '__main__':
# parameter =
print(banner)
search=getParam(sys.argv[1:])
# print(search)
size = 10000
if os.path.exists("runoob.txt"):
os.remove("runoob.txt")
fo = open("runoob.txt", "w+")
try:
info = FofaSearch(search,size)
# print(info)
search_id = info['query']
search_size= str(info['size'])
search_results = info['results']
for n in search_results:
fo.seek(0, 2)
fo.write(setagreement(n[0]))
fo.close()
print("查询参数:"+search_id)
print("查询条数:" + search_size )
print("文件runoob.txt写入成功")
except BaseException :
sys.exit()
bakfilescan() | 0.035153 | 0.067824 |
from decimal import Decimal
class VaultHelper(object):
    """
    In-memory double of the vault service used by the BDD tests.

    Accounts live under ``self.tenants[tenant][account]`` and carry a
    committed 'balance', a 'blocking' counter of in-flight amounts and a
    'promised' map of transaction id -> promised amount.  Reply strings
    ('P1', 'C1', 'R1', 'P2 ...', 'EE') mirror the vault wire protocol.
    """

    def __init__(self, context):
        self.tenants = dict()
        self.context = context

    def reset(self):
        """Drop all tenants and accounts."""
        self.tenants = dict()

    def get_account(self, tenant, account):
        """Return the account record, or {} when it does not exist."""
        if not self.account_exist(tenant, account):
            return {}
        return self.tenants[tenant][account]

    def account_exist(self, tenant, account):
        """True when *account* exists under *tenant*."""
        return tenant in self.tenants and account in self.tenants[tenant]

    def create_account(self, tenant, account, format, currency, is_balance_check):
        """
        Register a new zero-balance account; returns False when it
        already exists.  (``format`` shadows the builtin but is kept for
        caller compatibility.)
        """
        if self.account_exist(tenant, account):
            return False
        if tenant not in self.tenants:
            self.tenants[tenant] = dict()
        self.tenants[tenant][account] = {
            'format': format,
            'currency': currency,
            'is_balance_check': is_balance_check,
            'balance': Decimal('0'),
            'blocking': Decimal('0'),
            'promised': dict(),
        }
        return True

    def __process_promise_order(self, tenant, account, transaction, amount, currency):
        """
        Phase 1 (promise): reserve *amount* on the account.

        'P1' on success and for an already-promised transaction (making
        the call idempotent), 'P2 <reason>' on rejection, 'EE' for an
        unknown account.
        """
        if not self.account_exist(tenant, account):
            return 'EE'
        acct = self.tenants[tenant][account]
        if transaction in acct['promised']:
            return 'P1'
        if currency != acct['currency']:
            return 'P2 CURRENCY_MISMATCH'
        want = Decimal(amount)
        # is_signed() flags a negative resulting balance.
        if acct['is_balance_check'] and (want + acct['balance']).is_signed():
            return 'P2 INSUFFICIENT_FUNDS'
        acct['promised'][transaction] = want
        acct['balance'] += want
        acct['blocking'] -= want
        return 'P1'

    def __process_commit_order(self, tenant, account, transaction):
        """
        Phase 2 (commit): finalize a promise, releasing its blocking
        amount.  'C1' on success and for an unknown transaction
        (idempotent); 'EE' for an unknown account.
        """
        if not self.account_exist(tenant, account):
            return 'EE'
        acct = self.tenants[tenant][account]
        if transaction not in acct['promised']:
            return 'C1'
        acct['blocking'] += acct['promised'].pop(transaction)
        return 'C1'

    def __process_rollback_order(self, tenant, account, transaction):
        """
        Phase 2 (rollback): undo a promise, restoring balance and
        blocking.  Always replies 'R1' (rolling back nothing is a no-op).
        """
        if not self.account_exist(tenant, account):
            return 'R1'
        acct = self.tenants[tenant][account]
        if transaction not in acct['promised']:
            return 'R1'
        promised = acct['promised'].pop(transaction)
        acct['balance'] -= promised
        acct['blocking'] += promised
        return 'R1'

    def process_account_event(self, tenant, account, kind, transaction, amount, currency):
        """
        Dispatch a vault event by *kind*: 'NP' promise, 'NC' commit,
        'NR' rollback; anything else replies 'EE'.
        """
        if kind == 'NP':
            return self.__process_promise_order(tenant, account, transaction, amount, currency)
        elif kind == 'NC':
            return self.__process_commit_order(tenant, account, transaction)
        elif kind == 'NR':
            return self.__process_rollback_order(tenant, account, transaction)
        else:
            return 'EE'
return 'EE' | bbtest/helpers/vault.py |
from decimal import Decimal
class VaultHelper(object):
    """In-memory stand-in for the vault service used by behaviour tests.

    Accounts live under ``self.tenants[tenant][account]`` and follow the
    promise (NP) / commit (NC) / rollback (NR) transaction workflow.
    Reply codes ('P1', 'C1', 'R1', 'EE', ...) mirror the wire protocol of
    the real service — do not change them.
    """

    def __init__(self, context):
        # context: behave test context (kept for parity with other helpers)
        self.tenants = {}
        self.context = context

    def reset(self):
        """Forget every tenant and account."""
        self.tenants = {}

    def get_account(self, tenant, account):
        """Return the account record, or {} when it does not exist."""
        if not self.account_exist(tenant, account):
            return {}
        return self.tenants[tenant][account]

    def account_exist(self, tenant, account):
        """True when *account* exists under *tenant*."""
        return tenant in self.tenants and account in self.tenants[tenant]

    def create_account(self, tenant, account, format, currency, is_balance_check):
        """Register a zero-balance account; returns False when it already exists."""
        if self.account_exist(tenant, account):
            return False
        self.tenants.setdefault(tenant, {})[account] = {
            'format': format,
            'currency': currency,
            'is_balance_check': is_balance_check,
            'balance': Decimal('0'),
            'blocking': Decimal('0'),
            'promised': {},  # transaction id -> promised Decimal amount
        }
        return True

    def __process_promise_order(self, tenant, account, transaction, amount, currency):
        """Reserve *amount* on the account (NP event); returns a reply code."""
        if not self.account_exist(tenant, account):
            return 'EE'
        acct = self.tenants[tenant][account]
        if transaction in acct['promised']:
            # Duplicate promise is acknowledged idempotently.
            return 'P1'
        if currency != acct['currency']:
            return 'P2 CURRENCY_MISMATCH'
        want = Decimal(amount)
        # Decimal.is_signed() is True for negative values: on balance-checked
        # accounts, reject promises that would drive the balance below zero.
        if acct['is_balance_check'] and (want + acct['balance']).is_signed():
            return 'P2 INSUFFICIENT_FUNDS'
        acct['promised'][transaction] = want
        acct['balance'] += want
        acct['blocking'] -= want
        return 'P1'

    def __process_commit_order(self, tenant, account, transaction):
        """Settle a promised transaction (NC event); returns a reply code."""
        if not self.account_exist(tenant, account):
            return 'EE'
        acct = self.tenants[tenant][account]
        if transaction not in acct['promised']:
            # Unknown transaction: acknowledge without touching balances.
            return 'C1'
        acct['blocking'] += acct['promised'].pop(transaction)
        return 'C1'

    def __process_rollback_order(self, tenant, account, transaction):
        """Undo a promised transaction (NR event); returns a reply code.

        Note: unlike commit, a rollback on a missing account answers 'R1',
        not 'EE' — this matches the original protocol behaviour.
        """
        if not self.account_exist(tenant, account):
            return 'R1'
        acct = self.tenants[tenant][account]
        if transaction not in acct['promised']:
            return 'R1'
        promised = acct['promised'].pop(transaction)
        acct['balance'] -= promised
        acct['blocking'] += promised
        return 'R1'

    def process_account_event(self, tenant, account, kind, transaction, amount, currency):
        """Dispatch an account event by *kind* (NP/NC/NR); 'EE' for anything else."""
        if kind == 'NP':
            return self.__process_promise_order(tenant, account, transaction, amount, currency)
        elif kind == 'NC':
            return self.__process_commit_order(tenant, account, transaction)
        elif kind == 'NR':
            return self.__process_rollback_order(tenant, account, transaction)
        else:
            return 'EE'
import asyncio
import json
import random
import secrets
from email.message import EmailMessage
import aiosmtplib
import discord
from redbot.core import Config, commands
from redbot.core.data_manager import bundled_data_path, cog_data_path
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
def chunks(l, n):
    """Yield successive n-sized chunks from l (the last one may be shorter)."""
    offsets = range(0, len(l), n)
    for off in offsets:
        chunk = l[off : off + n]
        yield chunk
class Verify(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=95932766180343808, force_registration=True)
self.config.register_global(
username=None, password=<PASSWORD>, verified_emails=[], welcome_messages=[]
)
self.config.register_user(code=None, verified=False, email=None, verified_by=None)
self._init_task = self.bot.loop.create_task(self.initialize())
async def initialize(self):
"""This will load all the bundled data into respective variables."""
await self.bot.wait_until_red_ready()
guild = self.bot.get_guild(713522800081764392)
self.roles = {
"case4": guild.get_role(713541535085494312),
"case3": guild.get_role(713541403904442438),
"case2": guild.get_role(713539660936118282),
"ca": guild.get_role(713538655817564250),
"case": guild.get_role(713538335984975943),
"alumni": guild.get_role(713538175456247828),
}
def cog_unload(self):
if self._init_task:
self._init_task.cancel()
@commands.command()
@commands.admin()
async def unverify(self, ctx, *, user: discord.User):
"""Unverify someone"""
data = await self.config.user(user).all()
if not data["verified"]:
return await ctx.send("This user isn't verified.")
async with self.config.verified_emails() as emails:
if data["email"] in emails:
emails.remove(data["email"])
await self.config.user(user).code.set(None)
await self.config.user(user).verified.set(False)
await self.config.user(user).email.set(None)
await ctx.send("User has been un-verified.")
@commands.group()
async def verify(self, ctx):
"""Verification process"""
pass
@verify.command(name="email")
@commands.dm_only()
async def verify_email(self, ctx, email: str):
"""Verify your DCU email"""
if email.lower().endswith("@dcu.ie"):
await (self.bot.get_channel(713522800081764395)).send(
f"{ctx.author} with the email {email} has tried to verify and can potentionally be a staff member."
)
return await ctx.send(
"An error occured trying to verify your account. This error has been raised to the mod team."
)
if not email.lower().endswith("@mail.dcu.ie"):
return await ctx.send("This doesn't seem to be a valid DCU email.")
if await self.config.user(ctx.author).verified():
await ctx.send("You have already been verified.")
await (self.bot.get_channel(713522800081764395)).send(
f"{ctx.author} with the email {email} has tried to verify with an email that has already been verified."
)
return
emails = await self.config.verified_emails()
if email in emails:
await ctx.send("This email has already been verified.")
return
code = secrets.token_hex(3)
await self.config.user(ctx.author).code.set(code)
await self.config.user(ctx.author).email.set(email)
await self.send_email(email, code)
await ctx.send(
f"You will recieve an email shortly. Once it arrived you may complete your verification process by typing:\n{ctx.clean_prefix}verify code <code from email>"
)
@verify.command(name="code")
@commands.dm_only()
async def verify_code(self, ctx, code):
"""Verify the code from your email"""
usercode = await self.config.user(ctx.author).code()
verified = await self.config.user(ctx.author).verified()
if verified:
await ctx.send("You are already verified.")
return
if usercode is None:
await ctx.send(
"You haven't started the verification process yet. Get started by invoking the .verify email command."
)
return
if code == usercode:
roles = []
verified = await self.config.user(ctx.author).verified.set(True)
await self.config.user(ctx.author).verified_by.set("System")
email = await self.config.user(ctx.author).email()
async with self.config.verified_emails() as emails:
emails.append(email)
guild = self.bot.get_guild(713522800081764392)
role = guild.get_role(713538570824187968)
user = guild.get_member(ctx.author.id)
mod, general = self.bot.get_channel(713522800081764395), self.bot.get_channel(
713524886840279042
)
greeting_msgs = await self.config.welcome_messages()
# Set user nickname to real name if not already there
user_email = await self.config.user(ctx.author).email()
first_name = user_email.split(".")[0]
name_len = 32 - len(f" ({first_name})")
name = user.display_name[:name_len] + f" ({first_name.title()})"
if first_name.lower() not in user.display_name.lower():
await user.edit(nick=name)
roles.append(role)
# Check a private cog with student data.
cog = self.bot.get_cog("Students")
rolemsg = "We were unable to determine your year of study. Please contact an admin to have a year role assigned to you."
if cog is not None:
if email.lower() in cog.students["ca"]:
rolemsg = "We've automatically determined you as a CA1 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["ca"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case2"]:
rolemsg = "We've automatically determined you as a CASE2 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case2"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case3"]:
rolemsg = "We've automatically determined you as a CASE3 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case3"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case4"]:
rolemsg = "We've automatically determined you as a CASE4 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case4"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["alumni"]:
rolemsg = "We've automatically determined you as an Alumni. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["alumni"])
roles.append(self.roles["case"])
# Add roles and greet
await user.add_roles(
*roles,
reason=f"Automatically verified - Email: {user_email}",
)
await ctx.send(f"Your account has been verified!\n{rolemsg}")
await mod.send(
f"User <@{user.id}> joined the server!",
allowed_mentions=discord.AllowedMentions(everyone=True),
)
await general.send(random.choice(greeting_msgs).format(name=f"<@{user.id}>"))
else:
await ctx.send(
"That code doesn't match the one sent via the email. Try again or request a new code."
)
@verify.command(name="other")
@commands.dm_only()
async def verify_other(self, ctx, *, message: str):
"""Verification process for external/alumni members."""
verified = await self.config.user(ctx.author).verified()
if verified:
await ctx.send("You are already verified.")
return
guild = self.bot.get_guild(713522800081764392)
channel = guild.get_channel(713522800081764395)
embed = discord.Embed(description=message, colour=discord.Color.red())
embed.set_author(name=f"{ctx.author} | {ctx.author.id}", icon_url=ctx.author.avatar_url)
await channel.send(embed=embed)
await ctx.send("Your verification request has been sent.")
@verify.command()
@commands.admin()
async def user(self, ctx, type: str, *, user: discord.Member):
"""Verify a user.
Valid types are internal, external and alumni."""
if ctx.guild.id != 713522800081764392:
await ctx.send("This must be used in the CASE++ server.")
if type.lower() == "external":
roles = [
ctx.guild.get_role(713538609017258025),
ctx.guild.get_role(713538570824187968),
]
elif type.lower() == "internal":
roles = [ctx.guild.get_role(713538570824187968)]
elif type.lower() == "alumni":
roles = [ctx.guild.get_role(713538175456247828)]
else:
await ctx.send("Type must be internal or external.")
return
await user.add_roles(*roles, reason=f"Manually verified by: {ctx.author}")
await self.config.user(user).verified_by.set(ctx.author.name)
await self.config.user(user).verified.set(True)
await self.config.user(user).email.set(type.title())
await user.send(f"Your account has been verified on CASE++ by {ctx.author}")
await ctx.tick()
@commands.is_owner()
@commands.command()
@commands.dm_only()
async def verifyset(self, ctx, email, password):
"""Credential settings"""
await self.config.username.set(email)
await self.config.password.set(password)
await ctx.tick()
async def send_email(self, email, code):
message = EmailMessage()
message["From"] = "<EMAIL>"
message["To"] = email
message["Subject"] = "Discord Verification"
message.set_content(code)
await aiosmtplib.send(
message,
recipients=[email],
hostname="smtp.gmail.com",
port=465,
username=await self.config.username(),
password=await self.config.password(),
use_tls=True,
)
@commands.command()
@commands.admin()
async def profile(self, ctx, user: discord.Member):
"""Show a users profile information."""
embed = discord.Embed(color=user.color, title=f"Profile for {user}")
useri = await self.config.user(user).verified_by()
verif = await self.config.user(user).verified()
email = await self.config.user(user).email()
embed.add_field(name="Verified", value=str(verif))
if not verif:
await ctx.send(embed=embed)
return
veri_by = useri if useri is not None else "None"
emaill = email if email is not None else "None"
embed.add_field(name="Verified By", value=veri_by)
embed.add_field(name="Email", value=emaill)
await ctx.send(embed=embed)
@commands.command()
@commands.admin()
async def addwelcomemsg(self, ctx, *, msgtoadd: str):
"""Add welcome message strings to existing list"""
if "{name}" not in msgtoadd:
await ctx.send(
"String must contain the phrase '{name}' to format in place of the users' username."
)
return
await ctx.send(
"Please confirm that the greeting message is valid with a 'yes' or 'no': \n\n{}".format(
msgtoadd
)
)
try:
pred = MessagePredicate.yes_or_no(ctx, user=ctx.author)
await ctx.bot.wait_for("message", check=pred, timeout=20)
except asyncio.TimeoutError:
await ctx.send("Exiting operation.")
return
if pred.result:
async with self.config.welcome_messages() as messages:
messages.append(msgtoadd)
await ctx.send("Appended greeting message to existing list successfully!")
else:
await ctx.send("Operation cancelled.")
@commands.command()
@commands.admin()
async def listmessages(self, ctx):
"""List welcome messages."""
msgs = await self.config.welcome_messages()
if not msgs:
return await ctx.send("No custom responses available.")
a = chunks(msgs, 10)
embeds = []
i = 0
for item in a:
items = []
for strings in item:
items.append(f"Reply {i}: {strings}")
i += 1
embed = discord.Embed(colour=discord.Color.red(), description="\n".join(items))
embeds.append(embed)
if len(embeds) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.command()
@commands.admin()
async def removemessage(self, ctx, index: int):
"""Remove a message by reply ID"""
async with self.config.welcome_messages() as msgs:
if index + 1 > len(msgs):
return await ctx.send("Not a valid ID!")
msgs.pop(index)
await ctx.tick()
@commands.command()
@commands.admin()
async def recheck(self, ctx):
"""Recheck users roles."""
async with ctx.typing():
rolesa = {
"case4": ctx.guild.get_role(713541535085494312),
"case3": ctx.guild.get_role(713541403904442438),
"case2": ctx.guild.get_role(713539660936118282),
"ca": ctx.guild.get_role(713538655817564250),
"case": ctx.guild.get_role(713538335984975943),
}
msg = ""
for user in ctx.guild.members:
if not await self.config.user(user).verified():
continue
email = await self.config.user(user).email()
cogs = self.bot.get_cog("Students")
roles = []
if cogs is not None:
if email.lower() in cogs.students["ca"]:
roles.append(rolesa["ca"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case2"]:
roles.append(rolesa["case2"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case3"]:
roles.append(rolesa["case3"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case4"]:
roles.append(rolesa["case4"])
roles.append(rolesa["case"])
if roles:
removed_roles = [
role
for role in user.roles
if role not in roles and role in rolesa.values()
]
await user.remove_roles(*removed_roles)
await user.add_roles(*roles, reason="updated")
msg += (
f"Updated {user}s roles - New roles: {','.join([x.name for x in roles])}\n"
)
if msg:
for page in pagify(msg):
await ctx.send(page)
else:
await ctx.send("No users updated") | verify/verify.py | import asyncio
import json
import random
import secrets
from email.message import EmailMessage
import aiosmtplib
import discord
from redbot.core import Config, commands
from redbot.core.data_manager import bundled_data_path, cog_data_path
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
def chunks(l, n):
    """Yield successive n-sized chunks from l (the last one may be shorter)."""
    offsets = range(0, len(l), n)
    for off in offsets:
        chunk = l[off : off + n]
        yield chunk
class Verify(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=95932766180343808, force_registration=True)
self.config.register_global(
username=None, password=<PASSWORD>, verified_emails=[], welcome_messages=[]
)
self.config.register_user(code=None, verified=False, email=None, verified_by=None)
self._init_task = self.bot.loop.create_task(self.initialize())
async def initialize(self):
"""This will load all the bundled data into respective variables."""
await self.bot.wait_until_red_ready()
guild = self.bot.get_guild(713522800081764392)
self.roles = {
"case4": guild.get_role(713541535085494312),
"case3": guild.get_role(713541403904442438),
"case2": guild.get_role(713539660936118282),
"ca": guild.get_role(713538655817564250),
"case": guild.get_role(713538335984975943),
"alumni": guild.get_role(713538175456247828),
}
def cog_unload(self):
if self._init_task:
self._init_task.cancel()
@commands.command()
@commands.admin()
async def unverify(self, ctx, *, user: discord.User):
"""Unverify someone"""
data = await self.config.user(user).all()
if not data["verified"]:
return await ctx.send("This user isn't verified.")
async with self.config.verified_emails() as emails:
if data["email"] in emails:
emails.remove(data["email"])
await self.config.user(user).code.set(None)
await self.config.user(user).verified.set(False)
await self.config.user(user).email.set(None)
await ctx.send("User has been un-verified.")
@commands.group()
async def verify(self, ctx):
"""Verification process"""
pass
@verify.command(name="email")
@commands.dm_only()
async def verify_email(self, ctx, email: str):
"""Verify your DCU email"""
if email.lower().endswith("@dcu.ie"):
await (self.bot.get_channel(713522800081764395)).send(
f"{ctx.author} with the email {email} has tried to verify and can potentionally be a staff member."
)
return await ctx.send(
"An error occured trying to verify your account. This error has been raised to the mod team."
)
if not email.lower().endswith("@mail.dcu.ie"):
return await ctx.send("This doesn't seem to be a valid DCU email.")
if await self.config.user(ctx.author).verified():
await ctx.send("You have already been verified.")
await (self.bot.get_channel(713522800081764395)).send(
f"{ctx.author} with the email {email} has tried to verify with an email that has already been verified."
)
return
emails = await self.config.verified_emails()
if email in emails:
await ctx.send("This email has already been verified.")
return
code = secrets.token_hex(3)
await self.config.user(ctx.author).code.set(code)
await self.config.user(ctx.author).email.set(email)
await self.send_email(email, code)
await ctx.send(
f"You will recieve an email shortly. Once it arrived you may complete your verification process by typing:\n{ctx.clean_prefix}verify code <code from email>"
)
@verify.command(name="code")
@commands.dm_only()
async def verify_code(self, ctx, code):
"""Verify the code from your email"""
usercode = await self.config.user(ctx.author).code()
verified = await self.config.user(ctx.author).verified()
if verified:
await ctx.send("You are already verified.")
return
if usercode is None:
await ctx.send(
"You haven't started the verification process yet. Get started by invoking the .verify email command."
)
return
if code == usercode:
roles = []
verified = await self.config.user(ctx.author).verified.set(True)
await self.config.user(ctx.author).verified_by.set("System")
email = await self.config.user(ctx.author).email()
async with self.config.verified_emails() as emails:
emails.append(email)
guild = self.bot.get_guild(713522800081764392)
role = guild.get_role(713538570824187968)
user = guild.get_member(ctx.author.id)
mod, general = self.bot.get_channel(713522800081764395), self.bot.get_channel(
713524886840279042
)
greeting_msgs = await self.config.welcome_messages()
# Set user nickname to real name if not already there
user_email = await self.config.user(ctx.author).email()
first_name = user_email.split(".")[0]
name_len = 32 - len(f" ({first_name})")
name = user.display_name[:name_len] + f" ({first_name.title()})"
if first_name.lower() not in user.display_name.lower():
await user.edit(nick=name)
roles.append(role)
# Check a private cog with student data.
cog = self.bot.get_cog("Students")
rolemsg = "We were unable to determine your year of study. Please contact an admin to have a year role assigned to you."
if cog is not None:
if email.lower() in cog.students["ca"]:
rolemsg = "We've automatically determined you as a CA1 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["ca"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case2"]:
rolemsg = "We've automatically determined you as a CASE2 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case2"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case3"]:
rolemsg = "We've automatically determined you as a CASE3 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case3"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["case4"]:
rolemsg = "We've automatically determined you as a CASE4 student. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["case4"])
roles.append(self.roles["case"])
elif email.lower() in cog.students["alumni"]:
rolemsg = "We've automatically determined you as an Alumni. If this is an error, you can correct this by contacting an admin."
roles.append(self.roles["alumni"])
roles.append(self.roles["case"])
# Add roles and greet
await user.add_roles(
*roles,
reason=f"Automatically verified - Email: {user_email}",
)
await ctx.send(f"Your account has been verified!\n{rolemsg}")
await mod.send(
f"User <@{user.id}> joined the server!",
allowed_mentions=discord.AllowedMentions(everyone=True),
)
await general.send(random.choice(greeting_msgs).format(name=f"<@{user.id}>"))
else:
await ctx.send(
"That code doesn't match the one sent via the email. Try again or request a new code."
)
@verify.command(name="other")
@commands.dm_only()
async def verify_other(self, ctx, *, message: str):
"""Verification process for external/alumni members."""
verified = await self.config.user(ctx.author).verified()
if verified:
await ctx.send("You are already verified.")
return
guild = self.bot.get_guild(713522800081764392)
channel = guild.get_channel(713522800081764395)
embed = discord.Embed(description=message, colour=discord.Color.red())
embed.set_author(name=f"{ctx.author} | {ctx.author.id}", icon_url=ctx.author.avatar_url)
await channel.send(embed=embed)
await ctx.send("Your verification request has been sent.")
@verify.command()
@commands.admin()
async def user(self, ctx, type: str, *, user: discord.Member):
"""Verify a user.
Valid types are internal, external and alumni."""
if ctx.guild.id != 713522800081764392:
await ctx.send("This must be used in the CASE++ server.")
if type.lower() == "external":
roles = [
ctx.guild.get_role(713538609017258025),
ctx.guild.get_role(713538570824187968),
]
elif type.lower() == "internal":
roles = [ctx.guild.get_role(713538570824187968)]
elif type.lower() == "alumni":
roles = [ctx.guild.get_role(713538175456247828)]
else:
await ctx.send("Type must be internal or external.")
return
await user.add_roles(*roles, reason=f"Manually verified by: {ctx.author}")
await self.config.user(user).verified_by.set(ctx.author.name)
await self.config.user(user).verified.set(True)
await self.config.user(user).email.set(type.title())
await user.send(f"Your account has been verified on CASE++ by {ctx.author}")
await ctx.tick()
@commands.is_owner()
@commands.command()
@commands.dm_only()
async def verifyset(self, ctx, email, password):
"""Credential settings"""
await self.config.username.set(email)
await self.config.password.set(password)
await ctx.tick()
async def send_email(self, email, code):
message = EmailMessage()
message["From"] = "<EMAIL>"
message["To"] = email
message["Subject"] = "Discord Verification"
message.set_content(code)
await aiosmtplib.send(
message,
recipients=[email],
hostname="smtp.gmail.com",
port=465,
username=await self.config.username(),
password=await self.config.password(),
use_tls=True,
)
@commands.command()
@commands.admin()
async def profile(self, ctx, user: discord.Member):
"""Show a users profile information."""
embed = discord.Embed(color=user.color, title=f"Profile for {user}")
useri = await self.config.user(user).verified_by()
verif = await self.config.user(user).verified()
email = await self.config.user(user).email()
embed.add_field(name="Verified", value=str(verif))
if not verif:
await ctx.send(embed=embed)
return
veri_by = useri if useri is not None else "None"
emaill = email if email is not None else "None"
embed.add_field(name="Verified By", value=veri_by)
embed.add_field(name="Email", value=emaill)
await ctx.send(embed=embed)
@commands.command()
@commands.admin()
async def addwelcomemsg(self, ctx, *, msgtoadd: str):
"""Add welcome message strings to existing list"""
if "{name}" not in msgtoadd:
await ctx.send(
"String must contain the phrase '{name}' to format in place of the users' username."
)
return
await ctx.send(
"Please confirm that the greeting message is valid with a 'yes' or 'no': \n\n{}".format(
msgtoadd
)
)
try:
pred = MessagePredicate.yes_or_no(ctx, user=ctx.author)
await ctx.bot.wait_for("message", check=pred, timeout=20)
except asyncio.TimeoutError:
await ctx.send("Exiting operation.")
return
if pred.result:
async with self.config.welcome_messages() as messages:
messages.append(msgtoadd)
await ctx.send("Appended greeting message to existing list successfully!")
else:
await ctx.send("Operation cancelled.")
@commands.command()
@commands.admin()
async def listmessages(self, ctx):
"""List welcome messages."""
msgs = await self.config.welcome_messages()
if not msgs:
return await ctx.send("No custom responses available.")
a = chunks(msgs, 10)
embeds = []
i = 0
for item in a:
items = []
for strings in item:
items.append(f"Reply {i}: {strings}")
i += 1
embed = discord.Embed(colour=discord.Color.red(), description="\n".join(items))
embeds.append(embed)
if len(embeds) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.command()
@commands.admin()
async def removemessage(self, ctx, index: int):
"""Remove a message by reply ID"""
async with self.config.welcome_messages() as msgs:
if index + 1 > len(msgs):
return await ctx.send("Not a valid ID!")
msgs.pop(index)
await ctx.tick()
@commands.command()
@commands.admin()
async def recheck(self, ctx):
"""Recheck users roles."""
async with ctx.typing():
rolesa = {
"case4": ctx.guild.get_role(713541535085494312),
"case3": ctx.guild.get_role(713541403904442438),
"case2": ctx.guild.get_role(713539660936118282),
"ca": ctx.guild.get_role(713538655817564250),
"case": ctx.guild.get_role(713538335984975943),
}
msg = ""
for user in ctx.guild.members:
if not await self.config.user(user).verified():
continue
email = await self.config.user(user).email()
cogs = self.bot.get_cog("Students")
roles = []
if cogs is not None:
if email.lower() in cogs.students["ca"]:
roles.append(rolesa["ca"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case2"]:
roles.append(rolesa["case2"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case3"]:
roles.append(rolesa["case3"])
roles.append(rolesa["case"])
elif email.lower() in cogs.students["case4"]:
roles.append(rolesa["case4"])
roles.append(rolesa["case"])
if roles:
removed_roles = [
role
for role in user.roles
if role not in roles and role in rolesa.values()
]
await user.remove_roles(*removed_roles)
await user.add_roles(*roles, reason="updated")
msg += (
f"Updated {user}s roles - New roles: {','.join([x.name for x in roles])}\n"
)
if msg:
for page in pagify(msg):
await ctx.send(page)
else:
await ctx.send("No users updated") | 0.468061 | 0.102125 |
import argparse
import sys
import random
from pathlib import Path
import sampling.conll as conll
import sampling.wikiner as wikiner
import sampling.wikinews as wikinews
import sampling.text as text
import sampling.apil as apil
def guess_format(pathname):
    """Infer the corpus format from a path.

    Directories are wikinews dumps; ``*.conllu`` / ``*.conllu.txt`` are
    CoNLL-U; any other ``*.txt`` is plain text. Raises ValueError otherwise.
    """
    if pathname.is_dir():
        return "wikinews"
    # Check the conllu variants before the generic .txt suffix.
    if str(pathname).endswith((".conllu", ".conllu.txt")):
        return "conllu"
    if pathname.suffix == ".txt":
        return "text"
    raise ValueError("Unhandled file format: {}".format(pathname.suffix))
def main(infilename, corpus_format="guess", sample_size=1000, output_dir="."):
    """Sample at least *sample_size* tokens of whole sentences from a corpus.

    Writes three files to *output_dir*, named after the input file:
    ``<base>.sample.txt`` (sentence texts), ``<base>.ids.txt`` (sentence
    ids) and ``<base>.report.txt`` (sentence/token/proper-noun counts).

    Raises ValueError when *corpus_format* is not a known format.
    """
    infilepath = Path(infilename)
    if corpus_format == "guess":
        corpus_format = guess_format(infilepath)
    # POS tag marking proper nouns, per format; empty for untagged formats.
    PN_tag = {"conllu": "PROPN", "wikiner": "NAM"}
    PN = PN_tag.get(corpus_format, "")
    basename = None
    if corpus_format == "conllu":
        corpus = conll.read_corpus(infilename)
    elif corpus_format == "wikiner":
        corpus = wikiner.read_corpus(infilename)
    elif corpus_format == "wikinews":
        corpus = wikinews.read_corpus(infilepath)
        # Wikinews corpora are directories: keep the parent name in the base.
        basename = "{}-{}".format(Path(infilepath.parent).stem, infilepath.stem)
    elif corpus_format == "text":
        corpus = text.read_corpus(infilename)
    elif corpus_format == "apil":
        corpus = apil.read_corpus(infilename)
    else:
        # Previously an unknown format fell through and crashed later with
        # a NameError on `corpus`; fail explicitly instead.
        raise ValueError("Unknown corpus format: {}".format(corpus_format))
    random.shuffle(corpus)
    # Pop whole sentences until the token budget is met; stop early instead
    # of raising IndexError when the corpus is smaller than the budget.
    n_toks = 0
    selected = []
    while n_toks < sample_size and corpus:
        selected.append(corpus.pop())
        n_toks += len(selected[-1])
    basename = basename or infilepath.stem
    textfile = Path(output_dir) / (basename + ".sample.txt")
    with open(textfile, "w") as output_stream:
        for sentence in selected:
            output_stream.write(f"{sentence.text}\n")
    idfile = Path(output_dir) / (basename + ".ids.txt")
    with open(idfile, "w") as output_stream:
        for sentence in selected:
            output_stream.write(f"{sentence.id}\n")
    reportfile = Path(output_dir) / (basename + ".report.txt")
    with open(reportfile, "w") as output_stream:
        output_stream.write(f"{len(selected)} sentences\n")
        output_stream.write(f"{n_toks} tokens\n")
        if PN:
            # Only tagged formats can report a proper-noun count.
            n_propn = sum(sent.count_pos(PN) for sent in selected)
            output_stream.write(f"{n_propn} proper nouns\n")
        else:
            output_stream.write("No POS tags available.\n")
def parse_cl(argv=None):
    """Parse command-line arguments and run :func:`main`.

    argv: optional explicit argument list (defaults to ``sys.argv[1:]``),
    which makes the CLI testable.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "infilename",
        type=str,
        help="The input file name."
    )
    parser.add_argument(
        "-f", "--corpus-format",
        choices=("guess", "conllu", "wikinews", "wikiner", "text", "apil"),
        default="guess",
        help="The format of the corpus."
    )
    parser.add_argument(
        "-s", "--sample-size",
        type=int,
        default=1000,
        help="The size of the sample in number of tokens (default: %(default)s)."
    )
    parser.add_argument(
        "-o", "--output-dir",
        type=str,
        default=".",
        help="Output directory."
    )
    args = parser.parse_args(argv)
    # argparse dest names line up with main()'s keyword parameters.
    main(**vars(args))


if __name__ == "__main__":
    parse_cl()
    sys.exit(0)
import sys
import random
from pathlib import Path
import sampling.conll as conll
import sampling.wikiner as wikiner
import sampling.wikinews as wikinews
import sampling.text as text
import sampling.apil as apil
def guess_format(pathname):
    """Infer the corpus format from a path.

    Directories are wikinews dumps; ``*.conllu`` / ``*.conllu.txt`` are
    CoNLL-U; any other ``*.txt`` is plain text. Raises ValueError otherwise.
    """
    if pathname.is_dir():
        return "wikinews"
    # Check the conllu variants before the generic .txt suffix.
    if str(pathname).endswith((".conllu", ".conllu.txt")):
        return "conllu"
    if pathname.suffix == ".txt":
        return "text"
    raise ValueError("Unhandled file format: {}".format(pathname.suffix))
def main(infilename, corpus_format="guess", sample_size=1000, output_dir="."):
    """Sample at least *sample_size* tokens of whole sentences from a corpus.

    Writes three files to *output_dir*, named after the input file:
    ``<base>.sample.txt`` (sentence texts), ``<base>.ids.txt`` (sentence
    ids) and ``<base>.report.txt`` (sentence/token/proper-noun counts).

    Raises ValueError when *corpus_format* is not a known format.
    """
    infilepath = Path(infilename)
    if corpus_format == "guess":
        corpus_format = guess_format(infilepath)
    # POS tag marking proper nouns, per format; empty for untagged formats.
    PN_tag = {"conllu": "PROPN", "wikiner": "NAM"}
    PN = PN_tag.get(corpus_format, "")
    basename = None
    if corpus_format == "conllu":
        corpus = conll.read_corpus(infilename)
    elif corpus_format == "wikiner":
        corpus = wikiner.read_corpus(infilename)
    elif corpus_format == "wikinews":
        corpus = wikinews.read_corpus(infilepath)
        # Wikinews corpora are directories: keep the parent name in the base.
        basename = "{}-{}".format(Path(infilepath.parent).stem, infilepath.stem)
    elif corpus_format == "text":
        corpus = text.read_corpus(infilename)
    elif corpus_format == "apil":
        corpus = apil.read_corpus(infilename)
    else:
        # Previously an unknown format fell through and crashed later with
        # a NameError on `corpus`; fail explicitly instead.
        raise ValueError("Unknown corpus format: {}".format(corpus_format))
    random.shuffle(corpus)
    # Pop whole sentences until the token budget is met; stop early instead
    # of raising IndexError when the corpus is smaller than the budget.
    n_toks = 0
    selected = []
    while n_toks < sample_size and corpus:
        selected.append(corpus.pop())
        n_toks += len(selected[-1])
    basename = basename or infilepath.stem
    textfile = Path(output_dir) / (basename + ".sample.txt")
    with open(textfile, "w") as output_stream:
        for sentence in selected:
            output_stream.write(f"{sentence.text}\n")
    idfile = Path(output_dir) / (basename + ".ids.txt")
    with open(idfile, "w") as output_stream:
        for sentence in selected:
            output_stream.write(f"{sentence.id}\n")
    reportfile = Path(output_dir) / (basename + ".report.txt")
    with open(reportfile, "w") as output_stream:
        output_stream.write(f"{len(selected)} sentences\n")
        output_stream.write(f"{n_toks} tokens\n")
        if PN:
            # Only tagged formats can report a proper-noun count.
            n_propn = sum(sent.count_pos(PN) for sent in selected)
            output_stream.write(f"{n_propn} proper nouns\n")
        else:
            output_stream.write("No POS tags available.\n")
def parse_cl(argv=None):
    """Parse command-line arguments and run :func:`main`.

    argv: optional explicit argument list (defaults to ``sys.argv[1:]``),
    which makes the CLI testable.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "infilename",
        type=str,
        help="The input file name."
    )
    parser.add_argument(
        "-f", "--corpus-format",
        choices=("guess", "conllu", "wikinews", "wikiner", "text", "apil"),
        default="guess",
        help="The format of the corpus."
    )
    parser.add_argument(
        "-s", "--sample-size",
        type=int,
        default=1000,
        help="The size of the sample in number of tokens (default: %(default)s)."
    )
    parser.add_argument(
        "-o", "--output-dir",
        type=str,
        default=".",
        help="Output directory."
    )
    args = parser.parse_args(argv)
    # argparse dest names line up with main()'s keyword parameters.
    main(**vars(args))


if __name__ == "__main__":
    parse_cl()
    sys.exit(0)
import pytest
from flask import url_for
from . import days_from_now_millis
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated(client, config, event_factory):
    """A page of the event collection is capped at PAGE_SIZE and carries pagination metadata."""
    event_factory.create_batch(5)
    url = url_for("api.event_collection")
    rv = client.get(url)
    assert rv.status_code == 200
    assert len(rv.json["collection"]) <= config["PAGE_SIZE"]
    assert "pagination" in rv.json
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_archive(client, config, event_factory):
    """The archive view (current="n") paginates; this batch yields 3 pages at PAGE_SIZE=2."""
    event_factory.create_batch(5, with_archived=True)
    url = url_for("api.event_collection", current="n")
    rv = client.get(url)
    assert rv.status_code == 200
    assert len(rv.json["collection"]) <= config["PAGE_SIZE"]
    assert rv.json["pagination"]["numPages"] == 3
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_last_page(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page=3)
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 1
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_past_lastpage(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page=4)
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 0
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_wrong_page(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page="invalid")
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 2
def test_create_event(client, login, user_factory):
email = "<EMAIL>"
password = "<PASSWORD>"
user_factory(email=email, password=password)
tokens = login(client, email, password)
headers = {"X-CSRF-TOKEN": tokens.csrf_access_token}
url = url_for("api.event_collection")
data = {
"name": "event name",
"location": "Brok",
"date": days_from_now_millis(16),
"length": 20,
}
rv = client.post(url, json=data, headers=headers)
assert rv.status_code == 201
assert "item" in rv.json | tests/test_resource_event_collection.py | import pytest
from flask import url_for
from . import days_from_now_millis
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated(client, config, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection")
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) <= config["PAGE_SIZE"]
assert "pagination" in rv.json
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_archive(client, config, event_factory):
event_factory.create_batch(5, with_archived=True)
url = url_for("api.event_collection", current="n")
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) <= config["PAGE_SIZE"]
assert rv.json["pagination"]["numPages"] == 3
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_last_page(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page=3)
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 1
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_past_lastpage(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page=4)
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 0
@pytest.mark.options(PAGE_SIZE=2)
def test_get_paginated_wrong_page(client, event_factory):
event_factory.create_batch(5)
url = url_for("api.event_collection", page="invalid")
rv = client.get(url)
assert rv.status_code == 200
assert len(rv.json["collection"]) == 2
def test_create_event(client, login, user_factory):
email = "<EMAIL>"
password = "<PASSWORD>"
user_factory(email=email, password=password)
tokens = login(client, email, password)
headers = {"X-CSRF-TOKEN": tokens.csrf_access_token}
url = url_for("api.event_collection")
data = {
"name": "event name",
"location": "Brok",
"date": days_from_now_millis(16),
"length": 20,
}
rv = client.post(url, json=data, headers=headers)
assert rv.status_code == 201
assert "item" in rv.json | 0.420957 | 0.390185 |
from datetime import date, datetime, timezone
from unittest import TestCase
import pandas
from freezegun import freeze_time
from pandas.testing import assert_frame_equal
from petri_dish.app import Dish
from petri_dish.connectors import DummyConnector
class GetAllSubjectsTestCase(TestCase):
@freeze_time('2017-10-17 17:21')
def setUp(self):
self.empty_source = DummyConnector(
pandas.DataFrame(
columns=[
'id',
'name',
'dob',
'colour',
],
)
)
self.empty_sink = DummyConnector(
pandas.DataFrame(
columns=[
'id',
'name',
'dob',
'colour',
Dish.GROUP_COLUMN_NAME,
Dish.STAGE_COLUMN_NAME,
Dish.JOINED_COLUMN_NAME,
],
)
)
self.partial_source = DummyConnector(
pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
})
)
self.grouped_sink = DummyConnector(
pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
Dish.GROUP_COLUMN_NAME: ['A', 'B'],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3'],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
],
})
)
@freeze_time('2017-10-17 17:21')
def test_new_subjects_only(self):
now = datetime.now(timezone.utc)
dish = Dish(
subject_source=DummyConnector(),
subject_sink=self.empty_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7, 100, 18],
'name': ['Alice', 'Bob', 'Charlie', 'Dave'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
date(2010, 1, 1),
date(1985, 1, 1),
],
'colour': ['Purple', 'Red', 'Blue', 'Green'],
Dish.GROUP_COLUMN_NAME: [None, None, None, None],
Dish.STAGE_COLUMN_NAME: [None, None, None, None],
Dish.JOINED_COLUMN_NAME: [now, now, now, now],
})
assert_frame_equal(subjects, expected, check_like=True)
def test_grouped_subjects_only(self):
dish = Dish(
subject_source=self.partial_source,
subject_sink=self.grouped_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
Dish.GROUP_COLUMN_NAME: ['A', 'B'],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3'],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
],
})
assert_frame_equal(subjects, expected, check_like=True)
@freeze_time('2017-10-17 17:21')
def test_mixed_subjects(self):
now = datetime.now(timezone.utc)
dish = Dish(
subject_source=DummyConnector(),
subject_sink=self.grouped_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7, 100, 18],
'name': ['Alice', 'Bob', 'Charlie', 'Dave'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
date(2010, 1, 1),
date(1985, 1, 1),
],
'colour': ['Purple', 'Red', 'Blue', 'Green'],
Dish.GROUP_COLUMN_NAME: ['A', 'B', None, None],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3', None, None],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
now,
now,
],
})
assert_frame_equal(subjects, expected, check_like=True) | tests/test_app.py | from datetime import date, datetime, timezone
from unittest import TestCase
import pandas
from freezegun import freeze_time
from pandas.testing import assert_frame_equal
from petri_dish.app import Dish
from petri_dish.connectors import DummyConnector
class GetAllSubjectsTestCase(TestCase):
@freeze_time('2017-10-17 17:21')
def setUp(self):
self.empty_source = DummyConnector(
pandas.DataFrame(
columns=[
'id',
'name',
'dob',
'colour',
],
)
)
self.empty_sink = DummyConnector(
pandas.DataFrame(
columns=[
'id',
'name',
'dob',
'colour',
Dish.GROUP_COLUMN_NAME,
Dish.STAGE_COLUMN_NAME,
Dish.JOINED_COLUMN_NAME,
],
)
)
self.partial_source = DummyConnector(
pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
})
)
self.grouped_sink = DummyConnector(
pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
Dish.GROUP_COLUMN_NAME: ['A', 'B'],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3'],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
],
})
)
@freeze_time('2017-10-17 17:21')
def test_new_subjects_only(self):
now = datetime.now(timezone.utc)
dish = Dish(
subject_source=DummyConnector(),
subject_sink=self.empty_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7, 100, 18],
'name': ['Alice', 'Bob', 'Charlie', 'Dave'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
date(2010, 1, 1),
date(1985, 1, 1),
],
'colour': ['Purple', 'Red', 'Blue', 'Green'],
Dish.GROUP_COLUMN_NAME: [None, None, None, None],
Dish.STAGE_COLUMN_NAME: [None, None, None, None],
Dish.JOINED_COLUMN_NAME: [now, now, now, now],
})
assert_frame_equal(subjects, expected, check_like=True)
def test_grouped_subjects_only(self):
dish = Dish(
subject_source=self.partial_source,
subject_sink=self.grouped_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7],
'name': ['Alice', 'Bob'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
],
'colour': ['Purple', 'Red'],
Dish.GROUP_COLUMN_NAME: ['A', 'B'],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3'],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
],
})
assert_frame_equal(subjects, expected, check_like=True)
@freeze_time('2017-10-17 17:21')
def test_mixed_subjects(self):
now = datetime.now(timezone.utc)
dish = Dish(
subject_source=DummyConnector(),
subject_sink=self.grouped_sink,
group_balancer=None,
stages=1,
)
subjects = dish.get_all_subjects()
expected = pandas.DataFrame({
'id': [1, 7, 100, 18],
'name': ['Alice', 'Bob', 'Charlie', 'Dave'],
'dob': [
date(1997, 1, 1),
date(1990, 1, 1),
date(2010, 1, 1),
date(1985, 1, 1),
],
'colour': ['Purple', 'Red', 'Blue', 'Green'],
Dish.GROUP_COLUMN_NAME: ['A', 'B', None, None],
Dish.STAGE_COLUMN_NAME: ['stage1', 'stage3', None, None],
Dish.JOINED_COLUMN_NAME: [
datetime(2017, 9, 30, 12, 30, tzinfo=timezone.utc),
datetime(2017, 10, 1, tzinfo=timezone.utc),
now,
now,
],
})
assert_frame_equal(subjects, expected, check_like=True) | 0.692122 | 0.327144 |
from typing import Any, ByteString
from aimm.plugins import common
from aimm.plugins import decorators
def exec_data_access(name: str,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to access data"""
plugin = decorators.get_data_access(name)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
return plugin.function(*args, **kwargs)
def exec_instantiate(model_type: str,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to create a model instance"""
plugin = decorators.get_instantiate(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
return plugin.function(*args, **kwargs)
def exec_fit(model_type: str,
instance: Any,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to fit a model instance"""
plugin = decorators.get_fit(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
args, kwargs = _args_add_instance(plugin.instance_arg_name, instance, args,
kwargs)
return plugin.function(*args, **kwargs)
def exec_predict(model_type: str,
instance: Any,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to perform a prediction with a given model
instance"""
plugin = decorators.get_predict(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
args, kwargs = _args_add_instance(plugin.instance_arg_name, instance, args,
kwargs)
return plugin.function(*args, **kwargs)
def exec_serialize(model_type: str,
instance: Any) -> ByteString:
"""Uses a loaded plugin to convert model into bytes"""
plugin = decorators.get_serialize(model_type)
return plugin.function(instance)
def exec_deserialize(model_type: str,
instance_bytes: ByteString) -> Any:
"""Uses a loaded plugin to convert bytes into a model instance"""
plugin = decorators.get_deserialize(model_type)
return plugin.function(instance_bytes)
def _kwargs_add_state_cb(state_cb_arg_name, cb, kwargs):
if state_cb_arg_name:
if state_cb_arg_name in kwargs:
raise Exception('state cb already set')
kwargs = dict(kwargs, **{state_cb_arg_name: cb})
return kwargs
def _args_add_instance(instance_arg_name, instance, args, kwargs):
if instance_arg_name:
if instance_arg_name in kwargs:
raise Exception('instance already set')
kwargs = dict(kwargs, **{instance_arg_name: instance})
return args, kwargs
return (instance, *args), kwargs | aimm/plugins/execute.py | from typing import Any, ByteString
from aimm.plugins import common
from aimm.plugins import decorators
def exec_data_access(name: str,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to access data"""
plugin = decorators.get_data_access(name)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
return plugin.function(*args, **kwargs)
def exec_instantiate(model_type: str,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to create a model instance"""
plugin = decorators.get_instantiate(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
return plugin.function(*args, **kwargs)
def exec_fit(model_type: str,
instance: Any,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to fit a model instance"""
plugin = decorators.get_fit(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
args, kwargs = _args_add_instance(plugin.instance_arg_name, instance, args,
kwargs)
return plugin.function(*args, **kwargs)
def exec_predict(model_type: str,
instance: Any,
state_cb: common.StateCallback = lambda state: None,
*args: Any,
**kwargs: Any) -> Any:
"""Uses a loaded plugin to perform a prediction with a given model
instance"""
plugin = decorators.get_predict(model_type)
kwargs = _kwargs_add_state_cb(plugin.state_cb_arg_name, state_cb, kwargs)
args, kwargs = _args_add_instance(plugin.instance_arg_name, instance, args,
kwargs)
return plugin.function(*args, **kwargs)
def exec_serialize(model_type: str,
instance: Any) -> ByteString:
"""Uses a loaded plugin to convert model into bytes"""
plugin = decorators.get_serialize(model_type)
return plugin.function(instance)
def exec_deserialize(model_type: str,
instance_bytes: ByteString) -> Any:
"""Uses a loaded plugin to convert bytes into a model instance"""
plugin = decorators.get_deserialize(model_type)
return plugin.function(instance_bytes)
def _kwargs_add_state_cb(state_cb_arg_name, cb, kwargs):
if state_cb_arg_name:
if state_cb_arg_name in kwargs:
raise Exception('state cb already set')
kwargs = dict(kwargs, **{state_cb_arg_name: cb})
return kwargs
def _args_add_instance(instance_arg_name, instance, args, kwargs):
if instance_arg_name:
if instance_arg_name in kwargs:
raise Exception('instance already set')
kwargs = dict(kwargs, **{instance_arg_name: instance})
return args, kwargs
return (instance, *args), kwargs | 0.848549 | 0.231028 |
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Car(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
name = models.CharField(max_length = 300)
image = models.ImageField(upload_to='carimage/', null=True)
description = models.CharField(max_length = 300,default='car!!!')
rating = models.CharField(max_length = 30, default = 0)
av_usability = models.CharField(max_length = 30, default = 0)
av_design = models.CharField(max_length = 30, default = 0)
def __str__(self):
return self.name
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE,related_name='profile')
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
bio = models.CharField(max_length=100)
profile_pic = models.ImageField(upload_to='profile/')
pub_date_created = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.first_name
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
@classmethod
def get_profiles(cls):
profiles = cls.objects.all()
return profiles
class Location(models.Model):
name = models.CharField(max_length=30)
def save_location(self):
self.save()
def delete_location(self):
self.delete()
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=30)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def __str__(self):
return self.name
class Rating(models.Model):
car_name = models.CharField(max_length = 30, default = '')
poster = models.ForeignKey(User,on_delete=models.CASCADE)
usability = models.IntegerField(choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)), blank=True)
design = models.IntegerField(choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)), blank=True)
def __str__(self):
return self.poster
average = models.IntegerField(blank = True, default=0)
class CarEvaluate(models.Model):
evaluater = models.CharField(default='My Project', max_length = 80)
evaluated = models.CharField(default='My Project', max_length = 80)
published_date = models.DateField(auto_now_add=True, null=True)
design = models.PositiveIntegerField(default=1, choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)))
usability = models.PositiveIntegerField(default=1, choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)))
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return f'{self.design} marks' | carsell/models.py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Car(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
name = models.CharField(max_length = 300)
image = models.ImageField(upload_to='carimage/', null=True)
description = models.CharField(max_length = 300,default='car!!!')
rating = models.CharField(max_length = 30, default = 0)
av_usability = models.CharField(max_length = 30, default = 0)
av_design = models.CharField(max_length = 30, default = 0)
def __str__(self):
return self.name
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE,related_name='profile')
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
bio = models.CharField(max_length=100)
profile_pic = models.ImageField(upload_to='profile/')
pub_date_created = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.first_name
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
@classmethod
def get_profiles(cls):
profiles = cls.objects.all()
return profiles
class Location(models.Model):
name = models.CharField(max_length=30)
def save_location(self):
self.save()
def delete_location(self):
self.delete()
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=30)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def __str__(self):
return self.name
class Rating(models.Model):
car_name = models.CharField(max_length = 30, default = '')
poster = models.ForeignKey(User,on_delete=models.CASCADE)
usability = models.IntegerField(choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)), blank=True)
design = models.IntegerField(choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)), blank=True)
def __str__(self):
return self.poster
average = models.IntegerField(blank = True, default=0)
class CarEvaluate(models.Model):
evaluater = models.CharField(default='My Project', max_length = 80)
evaluated = models.CharField(default='My Project', max_length = 80)
published_date = models.DateField(auto_now_add=True, null=True)
design = models.PositiveIntegerField(default=1, choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)))
usability = models.PositiveIntegerField(default=1, choices=((1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6), (7, 7),(8, 8), (9, 9), (10, 10)))
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return f'{self.design} marks' | 0.589953 | 0.17824 |
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import AbstractAsyncContextManager
import random
from typing import Any, cast
from aiohttp import ClientResponse, ClientSession, ClientTimeout
from .consts import API_URL, LOGIN_KEY, TIMEOUT
from .exceptions import (
SleepIQAPIException,
SleepIQLoginException,
SleepIQTimeoutException,
)
def random_user_agent() -> str:
"""Create a randomly generated sorta valid User Agent string."""
uas = {
"Edge": (
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43"
),
"Chrome": (
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/97.0.4692.99 Safari/537.36"
),
"Firefox": "Gecko/20100101 Firefox/96.0",
"iphone": (
"AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/15.2 Mobile/15E148 Safari/604.1"
),
"Safari": (
"AppleWebKit/605.1.15 (KHTML, like Gecko) " "Version/11.1.2 Safari/605.1.15"
),
}
os = {
"windows": "Windows NT 10.0; Win64; x64",
"iphone": "iPhone; CPU iPhone OS 15_2_1 like Mac OS X",
"mac": "Macintosh; Intel Mac OS X 10_11_6",
}
template = "Mozilla/5.0 ({os}) {ua}"
return template.format(
os=random.choice(list(os.values())), ua=random.choice(list(uas.values()))
)
class SleepIQAPI:
"""API interface base class."""
def __init__(
self,
email: str | None = None,
password: str | None = None,
login_method: int = LOGIN_KEY,
client_session: ClientSession | None = None,
) -> None:
"""Initialize AsyncSleepIQ API Interface."""
self.email = email
self.password = password
self.key = ""
self._session = client_session or ClientSession()
self._headers = {"User-Agent": random_user_agent()}
self._login_method = login_method
async def close_session(self) -> None:
"""Close the API session."""
if self._session:
await self._session.close()
async def login(
self, email: str | None = None, password: str | None = None
) -> None:
"""Login using the with the email/password provided or stored."""
if not email:
email = self.email
if not password:
password = <PASSWORD>
if not email or not password:
raise SleepIQLoginException("username/password not set")
try:
if self._login_method == LOGIN_KEY:
await self.login_key(email, password)
else:
await self.login_cookie(email, password)
except asyncio.TimeoutError as ex:
# timed out
raise SleepIQTimeoutException("API call timed out") from ex
except SleepIQTimeoutException as ex:
raise ex
except Exception as ex:
raise SleepIQLoginException(f"Connection failure: {ex}") from ex
# store in case we need to login again
self.email = email
self.password = password
async def login_key(self, email: str, password: str) -> None:
"""Login using the key authentication method with the email/password provided."""
self.key = ""
auth_data = {"login": email, "password": password}
async with self._session.put(
API_URL + "/login", headers=self._headers, timeout=TIMEOUT, json=auth_data
) as resp:
if resp.status == 401:
raise SleepIQLoginException("Incorrect username or password")
if resp.status == 403:
raise SleepIQLoginException(
"User Agent is blocked. May need to update GenUserAgent data?"
)
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
json = await resp.json()
self.key = json["key"]
async def login_cookie(self, email: str, password: str) -> None:
"""Login using the cookie authentication method with the email/password provided."""
auth_data = {
"Email": email,
"Password": password,
"ClientID": "2oa5825venq9kek1dnrhfp7rdh",
}
async with self._session.post(
"https://l06it26kuh.execute-api.us-east-1.amazonaws.com/Prod/v1/token",
headers=self._headers,
timeout=TIMEOUT,
json=auth_data,
) as resp:
if resp.status == 401:
raise SleepIQLoginException("Incorrect username or password")
if resp.status == 403:
raise SleepIQLoginException(
"User Agent is blocked. May need to update GenUserAgent data?"
)
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
json = await resp.json()
token = json["data"]["AccessToken"]
self._headers["Authorization"] = token
async with self._session.get(
API_URL + "/user/jwt", headers=self._headers, timeout=TIMEOUT
) as resp:
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
async def put(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> None:
"""Make a PUT request to the API."""
await self.__make_request(self._session.put, url, json, params)
async def get(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> dict[str, Any] | Any:
"""Make a GET request to the API."""
return await self.__make_request(self._session.get, url, json, params)
async def check(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> bool:
"""Check if a GET request to the API would be successful."""
return cast(
bool,
await self.__make_request(self._session.get, url, json, params, check=True),
)
async def __make_request(
self,
make_request: Callable[..., AbstractAsyncContextManager[ClientResponse]],
url: str,
json: dict[str, Any] = {},
params: dict[str, Any] = {},
retry: bool = True,
check: bool = False,
) -> bool | dict[str, Any] | Any:
"""Make a request to the API."""
timeout = ClientTimeout(total=TIMEOUT)
params["_k"] = self.key
try:
async with make_request(
API_URL + "/" + url,
headers=self._headers,
timeout=timeout,
json=json,
params=params,
) as resp:
if check:
return resp.status == 200
if resp.status != 200:
if retry and resp.status in (401, 404):
# login and try again
await self.login()
return await self.__make_request(
make_request, url, json, params, False
)
raise SleepIQAPIException(
f"API call error response {resp.status}\n{resp.text}"
)
return await resp.json()
except asyncio.TimeoutError as ex:
# timed out
raise SleepIQTimeoutException("API call timed out") from ex | asyncsleepiq/api.py | from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import AbstractAsyncContextManager
import random
from typing import Any, cast
from aiohttp import ClientResponse, ClientSession, ClientTimeout
from .consts import API_URL, LOGIN_KEY, TIMEOUT
from .exceptions import (
SleepIQAPIException,
SleepIQLoginException,
SleepIQTimeoutException,
)
def random_user_agent() -> str:
"""Create a randomly generated sorta valid User Agent string."""
uas = {
"Edge": (
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43"
),
"Chrome": (
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/97.0.4692.99 Safari/537.36"
),
"Firefox": "Gecko/20100101 Firefox/96.0",
"iphone": (
"AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/15.2 Mobile/15E148 Safari/604.1"
),
"Safari": (
"AppleWebKit/605.1.15 (KHTML, like Gecko) " "Version/11.1.2 Safari/605.1.15"
),
}
os = {
"windows": "Windows NT 10.0; Win64; x64",
"iphone": "iPhone; CPU iPhone OS 15_2_1 like Mac OS X",
"mac": "Macintosh; Intel Mac OS X 10_11_6",
}
template = "Mozilla/5.0 ({os}) {ua}"
return template.format(
os=random.choice(list(os.values())), ua=random.choice(list(uas.values()))
)
class SleepIQAPI:
"""API interface base class."""
def __init__(
self,
email: str | None = None,
password: str | None = None,
login_method: int = LOGIN_KEY,
client_session: ClientSession | None = None,
) -> None:
"""Initialize AsyncSleepIQ API Interface."""
self.email = email
self.password = password
self.key = ""
self._session = client_session or ClientSession()
self._headers = {"User-Agent": random_user_agent()}
self._login_method = login_method
async def close_session(self) -> None:
"""Close the API session."""
if self._session:
await self._session.close()
async def login(
self, email: str | None = None, password: str | None = None
) -> None:
"""Login using the with the email/password provided or stored."""
if not email:
email = self.email
if not password:
password = <PASSWORD>
if not email or not password:
raise SleepIQLoginException("username/password not set")
try:
if self._login_method == LOGIN_KEY:
await self.login_key(email, password)
else:
await self.login_cookie(email, password)
except asyncio.TimeoutError as ex:
# timed out
raise SleepIQTimeoutException("API call timed out") from ex
except SleepIQTimeoutException as ex:
raise ex
except Exception as ex:
raise SleepIQLoginException(f"Connection failure: {ex}") from ex
# store in case we need to login again
self.email = email
self.password = password
async def login_key(self, email: str, password: str) -> None:
"""Login using the key authentication method with the email/password provided."""
self.key = ""
auth_data = {"login": email, "password": password}
async with self._session.put(
API_URL + "/login", headers=self._headers, timeout=TIMEOUT, json=auth_data
) as resp:
if resp.status == 401:
raise SleepIQLoginException("Incorrect username or password")
if resp.status == 403:
raise SleepIQLoginException(
"User Agent is blocked. May need to update GenUserAgent data?"
)
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
json = await resp.json()
self.key = json["key"]
async def login_cookie(self, email: str, password: str) -> None:
"""Login using the cookie authentication method with the email/password provided."""
auth_data = {
"Email": email,
"Password": password,
"ClientID": "2oa5825venq9kek1dnrhfp7rdh",
}
async with self._session.post(
"https://l06it26kuh.execute-api.us-east-1.amazonaws.com/Prod/v1/token",
headers=self._headers,
timeout=TIMEOUT,
json=auth_data,
) as resp:
if resp.status == 401:
raise SleepIQLoginException("Incorrect username or password")
if resp.status == 403:
raise SleepIQLoginException(
"User Agent is blocked. May need to update GenUserAgent data?"
)
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
json = await resp.json()
token = json["data"]["AccessToken"]
self._headers["Authorization"] = token
async with self._session.get(
API_URL + "/user/jwt", headers=self._headers, timeout=TIMEOUT
) as resp:
if resp.status not in (200, 201):
raise SleepIQLoginException(
"Unexpected response code: {code}\n{body}".format(
code=resp.status,
body=resp.text,
)
)
async def put(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> None:
"""Make a PUT request to the API."""
await self.__make_request(self._session.put, url, json, params)
async def get(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> dict[str, Any] | Any:
"""Make a GET request to the API."""
return await self.__make_request(self._session.get, url, json, params)
async def check(
self, url: str, json: dict[str, Any] = {}, params: dict[str, Any] = {}
) -> bool:
"""Check if a GET request to the API would be successful."""
return cast(
bool,
await self.__make_request(self._session.get, url, json, params, check=True),
)
async def __make_request(
self,
make_request: Callable[..., AbstractAsyncContextManager[ClientResponse]],
url: str,
json: dict[str, Any] = {},
params: dict[str, Any] = {},
retry: bool = True,
check: bool = False,
) -> bool | dict[str, Any] | Any:
"""Make a request to the API."""
timeout = ClientTimeout(total=TIMEOUT)
params["_k"] = self.key
try:
async with make_request(
API_URL + "/" + url,
headers=self._headers,
timeout=timeout,
json=json,
params=params,
) as resp:
if check:
return resp.status == 200
if resp.status != 200:
if retry and resp.status in (401, 404):
# login and try again
await self.login()
return await self.__make_request(
make_request, url, json, params, False
)
raise SleepIQAPIException(
f"API call error response {resp.status}\n{resp.text}"
)
return await resp.json()
except asyncio.TimeoutError as ex:
# timed out
raise SleepIQTimeoutException("API call timed out") from ex | 0.730001 | 0.055209 |
from math import sqrt
from typing import Dict, Union
import pandas as pd
from gs_quant.api.gs.data import GsDataApi
from gs_quant.data.core import DataContext
from gs_quant.datetime import date
from gs_quant.errors import MqValueError
from gs_quant.models.risk_model import FactorRiskModel, ReturnFormat
from gs_quant.target.data import DataQuery
class Factor:
    """A single factor of a risk model, resolved by its display name.

    Wraps dated covariance / variance / volatility / correlation queries
    against the RISK_MODEL_COVARIANCE_MATRIX dataset for one factor.
    """

    def __init__(self, risk_model_id: str, factor_name: str):
        """Resolve *factor_name* within *risk_model_id*.

        Raises:
            MqValueError: if no factor with that name exists in the model.
        """
        risk_model = FactorRiskModel(risk_model_id)
        factor_data = risk_model.get_factor_data(format=ReturnFormat.JSON)
        name_matches = [factor for factor in factor_data if factor['name'] == factor_name]
        if not name_matches:
            # Message wording fixed (was: "does not in exist in").
            raise MqValueError(f'Factor with name {factor_name} does not exist in risk model {risk_model_id}')
        factor = name_matches.pop()
        self.__risk_model_id: str = risk_model_id
        self.__id = factor['identifier']
        self.__name: str = factor['name']
        self.__type: str = factor['type']
        # May be None when the payload has no 'factorCategory' key.
        self.__category: str = factor.get('factorCategory')

    @property
    def id(self):
        # Identifier used as the covariance-matrix column key.
        return self.__id

    @property
    def name(self):
        return self.__name

    @property
    def type(self):
        return self.__type

    @property
    def category(self):
        return self.__category

    @property
    def risk_model_id(self):
        return self.__risk_model_id

    def covariance(self,
                   factor,
                   start_date: date = DataContext.current.start_date,
                   end_date: date = DataContext.current.end_date,
                   format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->covariance values between this factor and another for a date
        range

        NOTE(review): the default start/end dates are captured from
        DataContext.current at import time, not at call time — confirm that
        is intended.
        """
        covariance_data_raw = GsDataApi.execute_query(
            'RISK_MODEL_COVARIANCE_MATRIX',
            DataQuery(
                where={"riskModel": self.risk_model_id, "factorId": self.id},
                start_date=start_date,
                end_date=end_date
            )
        ).get('data', [])
        # date -> column key of the other factor in the covariance matrix.
        date_to_matrix_order = factor.__matrix_order(start_date, end_date)
        covariance_data = {}
        for data in covariance_data_raw:
            # Renamed from `date` to avoid shadowing the imported `date` type.
            _date = data['date']
            if date_to_matrix_order.get(_date):
                matrix_order_on_date = date_to_matrix_order[_date]
                covariance_data[_date] = data[matrix_order_on_date]
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(covariance_data, orient='index', columns=['covariance'])
        return covariance_data

    def variance(self,
                 start_date: date = DataContext.current.start_date,
                 end_date: date = DataContext.current.end_date,
                 format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->variance values for a factor over a date range """
        # Variance is the covariance of the factor with itself.
        variance_data = self.covariance(self, start_date, end_date, ReturnFormat.JSON)
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(variance_data, orient='index', columns=['variance'])
        return variance_data

    def volatility(self,
                   start_date: date = DataContext.current.start_date,
                   end_date: date = DataContext.current.end_date,
                   format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->volatility values for a factor over a date range """
        variance = self.variance(start_date, end_date, ReturnFormat.JSON)
        # Volatility is the square root of variance, date by date.
        volatility_data = {k: sqrt(v) for k, v in variance.items()}
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(volatility_data, orient='index', columns=['volatility'])
        return volatility_data

    def correlation(self,
                    other_factor,
                    start_date: date = DataContext.current.start_date,
                    end_date: date = DataContext.current.end_date,
                    format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->correlation values between this factor and another for a date
        range """
        factor_vol = self.volatility(start_date, end_date, ReturnFormat.JSON)
        other_factor_vol = other_factor.volatility(start_date, end_date, ReturnFormat.JSON)
        covariance = self.covariance(other_factor, start_date, end_date, ReturnFormat.JSON)
        correlation_data = {}
        for _date, covar in covariance.items():
            # Only dates with both volatilities defined and non-zero are kept.
            if _date in factor_vol and _date in other_factor_vol:
                denominator = factor_vol[_date] * other_factor_vol[_date]
                if denominator != 0:
                    correlation_data[_date] = covar / denominator
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(correlation_data, orient='index', columns=['correlation'])
        return correlation_data

    def __matrix_order(self, start_date: date, end_date: date) -> Dict:
        """ Retrieve Dictionary of date->matrix_order for the factor in the covariance matrix """
        query_results = GsDataApi.execute_query(
            'RISK_MODEL_COVARIANCE_MATRIX',
            DataQuery(
                where={"riskModel": self.risk_model_id, "factorId": self.id},
                fields=['matrixOrder'],
                start_date=start_date,
                end_date=end_date
            )
        ).get('data', [])
        return {data['date']: str(data['matrixOrder']) for data in query_results}
from typing import Dict, Union
import pandas as pd
from gs_quant.api.gs.data import GsDataApi
from gs_quant.data.core import DataContext
from gs_quant.datetime import date
from gs_quant.errors import MqValueError
from gs_quant.models.risk_model import FactorRiskModel, ReturnFormat
from gs_quant.target.data import DataQuery
class Factor:
    """A single factor of a risk model, resolved by its display name.

    Wraps dated covariance / variance / volatility / correlation queries
    against the RISK_MODEL_COVARIANCE_MATRIX dataset for one factor.
    """
    def __init__(self, risk_model_id: str, factor_name: str):
        # Resolve the human-readable factor name to its identifier via the
        # model's factor metadata; raises MqValueError when not found.
        risk_model = FactorRiskModel(risk_model_id)
        factor_data = risk_model.get_factor_data(format=ReturnFormat.JSON)
        name_matches = [factor for factor in factor_data if factor['name'] == factor_name]
        if not name_matches:
            raise MqValueError(f'Factor with name {factor_name} does not in exist in risk model {risk_model_id}')
        factor = name_matches.pop()
        self.__risk_model_id: str = risk_model_id
        self.__id = factor['identifier']
        self.__name: str = factor['name']
        self.__type: str = factor['type']
        # .get(): may be None when the payload has no 'factorCategory' key.
        self.__category: str = factor.get('factorCategory')
    @property
    def id(self):
        # Identifier used as the covariance-matrix column key.
        return self.__id
    @property
    def name(self):
        return self.__name
    @property
    def type(self):
        return self.__type
    @property
    def category(self):
        return self.__category
    @property
    def risk_model_id(self):
        return self.__risk_model_id
    def covariance(self,
                   factor,
                   start_date: date = DataContext.current.start_date,
                   end_date: date = DataContext.current.end_date,
                   format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->covariance values between this factor and another for a date
        range

        NOTE(review): the default start/end dates are captured from
        DataContext.current at import time, not at call time — confirm that
        is intended.
        """
        covariance_data_raw = GsDataApi.execute_query(
            'RISK_MODEL_COVARIANCE_MATRIX',
            DataQuery(
                where={"riskModel": self.risk_model_id, "factorId": self.id},
                start_date=start_date,
                end_date=end_date
            )
        ).get('data', [])
        # date -> column key of the other factor in the covariance matrix.
        date_to_matrix_order = factor.__matrix_order(start_date, end_date)
        covariance_data = {}
        for data in covariance_data_raw:
            date = data['date']  # NOTE: shadows the imported `date` type.
            if date_to_matrix_order.get(date):
                matrix_order_on_date = date_to_matrix_order[date]
                covariance_data[date] = data[matrix_order_on_date]
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(covariance_data, orient='index', columns=['covariance'])
        return covariance_data
    def variance(self,
                 start_date: date = DataContext.current.start_date,
                 end_date: date = DataContext.current.end_date,
                 format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->variance values for a factor over a date range """
        # Variance is the covariance of the factor with itself.
        variance_data = self.covariance(self, start_date, end_date, ReturnFormat.JSON)
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(variance_data, orient='index', columns=['variance'])
        return variance_data
    def volatility(self,
                   start_date: date = DataContext.current.start_date,
                   end_date: date = DataContext.current.end_date,
                   format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->volatility values for a factor over a date range """
        variance = self.variance(start_date, end_date, ReturnFormat.JSON)
        # Volatility is the square root of variance, date by date.
        volatility_data = {k: sqrt(v) for k, v in variance.items()}
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(volatility_data, orient='index', columns=['volatility'])
        return volatility_data
    def correlation(self,
                    other_factor,
                    start_date: date = DataContext.current.start_date,
                    end_date: date = DataContext.current.end_date,
                    format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
        """ Retrieve a Dataframe or Dictionary of date->correlation values between this factor and another for a date
        range """
        factor_vol = self.volatility(start_date, end_date, ReturnFormat.JSON)
        other_factor_vol = other_factor.volatility(start_date, end_date, ReturnFormat.JSON)
        covariance = self.covariance(other_factor, start_date, end_date, ReturnFormat.JSON)
        correlation_data = {}
        for _date, covar in covariance.items():
            # Only dates with both volatilities defined and non-zero are kept.
            if _date in factor_vol and _date in other_factor_vol:
                denominator = factor_vol[_date] * other_factor_vol[_date]
                if denominator != 0:
                    correlation_data[_date] = covar / denominator
        if format == ReturnFormat.DATA_FRAME:
            return pd.DataFrame.from_dict(correlation_data, orient='index', columns=['correlation'])
        return correlation_data
    def __matrix_order(self, start_date: date, end_date: date) -> Dict:
        """ Retrieve Dictionary of date->matrix_order for the factor in the covariance matrix """
        query_results = GsDataApi.execute_query(
            'RISK_MODEL_COVARIANCE_MATRIX',
            DataQuery(
                where={"riskModel": self.risk_model_id, "factorId": self.id},
                fields=['matrixOrder'],
                start_date=start_date,
                end_date=end_date
            )
        ).get('data', [])
        return {data['date']: str(data['matrixOrder']) for data in query_results}
from worker import Crawler
import pymysql
from selenium import webdriver
# Site root and the paginated listing entry point.
baseUrl = "http://www.sxfj.gov.cn/"
indexUrl = baseUrl + "PageShowNext.aspx?ID=24"
# Shared browser instance, quit at the bottom of the script.
# NOTE(review): the chromedriver path is hard-coded to a Windows location.
browser = webdriver.Chrome("d:/chromedriver.exe")
class HNCrawler(Crawler.CrawlerInterface):
    """Crawler for the sxfj.gov.cn discipline-inspection news listing."""

    def get_num(self):
        """Return the page count parsed from the index page's counter div."""
        page = self.get_soup(indexUrl)
        counter = page.find("div", attrs={"style": "float:left;width:15%;padding-top:6px;"})
        # The count follows a fixed 4-character prefix in the element text.
        return int(counter.text[4:])

    def get_index(self):
        """Return the URL of the first listing page."""
        return indexUrl

    def join_url(self, i):
        """Build the URL of listing page ``i`` (zero-based)."""
        return baseUrl + "articles/news/subindex/ID:24/page:" + str(i + 1)

    def get_urls(self, url):
        """Collect absolute article URLs from one listing page."""
        page = self.get_soup(url)
        anchors = page.find("ul", id="li").find_all("a")
        # hrefs are site-relative; drop the leading '.' and prepend the root.
        return [baseUrl + anchor.get("href")[1:] for anchor in anchors]

    def get_info(self, url):
        """Open an article in the shared browser and extract its fields."""
        info = Crawler.Info()
        browser.get(url)
        info.url = url
        info.title = browser.find_element_by_class_name("title").text
        info.time = browser.find_element_by_id("edate").text
        info.source = browser.find_element_by_id("efrom").text
        article = browser.find_element_by_id("frameContent")
        body = "".join(
            paragraph.text.replace("\t", "") + "\n"
            for paragraph in article.find_elements_by_tag_name("p")
        )
        self.get_resum_description_from_text(body, info)
        return info

    def process_info(self, info):
        """Normalise province/source/time fields before storage."""
        info.province = "湖南"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "").replace("发表时间", "")
        info.postion = "审查调查"
        return info
# Module-level driver: the crawl runs as a side effect of executing the file.
c = HNCrawler()
# NOTE(review): '<PASSWORD>' is a scrubbed placeholder — supply credentials
# via configuration/environment rather than source.
conns = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='<PASSWORD>', db='data', charset='utf8')
c.start(conns)
conns.close()
browser.quit()
# print(c.get_num())
# print(c.join_url(1))
# print(c.get_urls("http://www.sxfj.gov.cn/articles/news/subindex/ID:24/page:10"))
# c.get_info("http://www.hbjwjc.gov.cn/ajcc/101809.htm") | worker/HuNanCrawler.py | from worker import Crawler
import pymysql
from selenium import webdriver
# Site root and the paginated listing entry point.
baseUrl = "http://www.sxfj.gov.cn/"
indexUrl = baseUrl + "PageShowNext.aspx?ID=24"
# Shared browser instance, quit at the bottom of the script.
# NOTE(review): the chromedriver path is hard-coded to a Windows location.
browser = webdriver.Chrome("d:/chromedriver.exe")
class HNCrawler(Crawler.CrawlerInterface):
    """Crawler for the sxfj.gov.cn discipline-inspection news listing."""
    def get_num(self):
        """Return the page count parsed from the index page's counter div."""
        soup = self.get_soup(indexUrl)
        a = soup.find("div", attrs={"style": "float:left;width:15%;padding-top:6px;"})
        # The count follows a fixed 4-character prefix in the element text.
        href = a.text[4:]
        return int(href)
    def get_index(self):
        """Return the URL of the first listing page."""
        return indexUrl
    def join_url(self, i):
        """Build the URL of listing page ``i`` (zero-based)."""
        url = baseUrl + "articles/news/subindex/ID:24/page:" + str(i+1)
        return url
    def get_urls(self, url):
        """Collect absolute article URLs from one listing page."""
        soup = self.get_soup(url)
        lists = soup.find("ul", id="li")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            # hrefs are site-relative; drop the leading '.' and prepend root.
            info_url = baseUrl + tag.get("href")[1:]
            urls.append(info_url)
        return urls
    def get_info(self, url):
        """Open an article in the shared browser and extract its fields."""
        info_result = Crawler.Info()
        browser.get(url)
        info_result.url = url
        title = browser.find_element_by_class_name("title")
        info_result.title = title.text
        info_result.time = browser.find_element_by_id("edate").text
        info_result.source = browser.find_element_by_id("efrom").text
        article = browser.find_element_by_id("frameContent")
        ps = article.find_elements_by_tag_name("p")
        text = ""
        for p in ps:
            # Strip tabs; keep one article paragraph per line.
            text = text + p.text.replace("\t", "") + "\n"
        self.get_resum_description_from_text(text, info_result)
        return info_result
    def process_info(self, info):
        """Normalise province/source/time fields before storage."""
        info.province = "湖南"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "").replace("发表时间", "")
        # NOTE(review): 'postion' is a misspelling of 'position' but may be
        # what downstream consumers expect — confirm before renaming.
        info.postion = "审查调查"
        return info
# Module-level driver: the crawl runs as a side effect of executing the file.
c = HNCrawler()
# NOTE(review): '<PASSWORD>' is a scrubbed placeholder — supply credentials
# via configuration/environment rather than source.
conns = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='<PASSWORD>', db='data', charset='utf8')
c.start(conns)
conns.close()
browser.quit()
# print(c.get_num())
# print(c.join_url(1))
# print(c.get_urls("http://www.sxfj.gov.cn/articles/news/subindex/ID:24/page:10"))
# c.get_info("http://www.hbjwjc.gov.cn/ajcc/101809.htm") | 0.168241 | 0.067824 |
from pyvdp.visadirect import VisaDirectDispatcher
def send(data):
    """Submits a MultiPullFundsTransactions (AFT) request.

    :param data: **Required**.
        Instance of :func:`~pyvdp.visadirect.fundstransfer.MultiPullFundsTransactionsModel`.
    :return: Dictionary with VDP API response.

    See the module documentation for a full construction example of the
    request model (card acceptor, sender card details, etc.).
    """
    dispatcher = VisaDirectDispatcher(
        resource='visadirect',
        api='fundstransfer',
        method='multipullfundstransactions',
        http_verb='POST',
        data=data,
    )
    return dispatcher.send()
def get(status_id):
    """Fetches a status of previously submitted
    :func:`~pyvdp.visadirect.fundstransfer.multipullfundstransactions` request.

    :param str status_id: **Required**. Transaction status identifier, as
        returned with a 202 response (e.g. ``'1491819372_186_81_l73c003_VDP_ARM'``).
    :return: Dictionary with VDP API response.
    """
    dispatcher = VisaDirectDispatcher(
        resource='visadirect',
        api='fundstransfer',
        method='multipullfundstransactions',
        http_verb='GET',
        query_string='/' + status_id,
    )
    return dispatcher.send()
def send(data):
    """Submits a MultiPullFundsTransactions (AFT) request.
    :param data: **Required**.
        Instance of :func:`~pyvdp.visadirect.fundstransfer.MultiPullFundsTransactionsModel`.
    :return: Dictionary with VDP API response.
    **Usage:**
        .. code:: python
            from pyvdp.visadirect import CardAcceptorModel
            from pyvdp.visadirect.fundstransfer import multipullfundstransactions, MultiPullFundsTransactionsModel
            address_kwargs = {
                "country": "USA",
                "county": "San Mateo",
                "state": "CA",
                "zipCode": "94404"
            }
            card_acceptor_kwargs = {
                "address": CardAcceptorModel.CardAcceptorAddress(**ca_address_kwargs),
                "idCode": "ABCD1234ABCD123",
                "name": "Visa Inc. USA-Foster City",
                "terminalId": "ABCD1234"
            }
            request = {
                "amount": 124.02,
                "cardAcceptor": CardAcceptorModel(**card_acceptor_kwargs),
                "cavv": "0700020718799100000002980179911000000000",
                "localTransactionDateTime": "2017-04-20T05:16:05",
                "retrievalReferenceNumber": "401010101011",
                "senderCardExpiryDate": "2020-12",
                "senderCurrencyCode": "USD",
                "senderPrimaryAccountNumber": "4895140000066666",
                "systemsTraceAuditNumber": "101011"
            }
            data_kwargs = {
                "acquirerCountryCode": "608",
                "acquiringBin": "408999",
                "businessApplicationId": "AA",
                "localTransactionDateTime": "2017-04-20T05:16:05",
                "merchantCategoryCode": "6012",
                "request": [
                    request
                ]
            }
            data = MultiPullFundsTransactionsModel(**data_kwargs)
            result = multipullfundstransactions.send(data)
            print(result)
    """
    # POST the model through the shared VisaDirect dispatcher.
    c = VisaDirectDispatcher(resource='visadirect',
                             api='fundstransfer',
                             method='multipullfundstransactions',
                             http_verb='POST',
                             data=data)
    return c.send()
def get(status_id):
    """Fetches a status of previously submitted :func:`~pyvdp.visadirect.fundstransfer.multipullfundstransactions`
    request.
    Returns a status of :func:`~pyvdp.visadirect.fundstransfer.MultiPullFundsTransactionsModel` request by
    transaction identifier, returned with 202 response.
    :param str status_id: **Required**. Transaction status identifier.
    :return: Dictionary with VDP API response.
    **Usage:**
        .. code:: python
            from pyvdp.visadirect.fundstransfer import multipullfundstransactions
            status_id = '1491819372_186_81_l73c003_VDP_ARM'
            result = multipullfundstransactions.get(status_id)
            print(result)
    """
    # The status id is appended as a path segment of the GET request.
    query_string = '/' + status_id
    c = VisaDirectDispatcher(resource='visadirect',
                             api='fundstransfer',
                             method='multipullfundstransactions',
                             http_verb='GET',
                             query_string=query_string)
    return c.send()
import logging
import numpy as np
# scipy is an optional dependency: record availability here and defer the
# hard failure to the constructor that actually needs curve_fit.
try:
    from scipy.optimize import curve_fit
    enable_scipy = True
except ImportError:  # was a bare `except:`, which also swallowed SystemExit
    enable_scipy = False
from chainerpruner import Graph
from chainerpruner.masks import NormMask
from chainerpruner.rebuild.rebuild import rebuild
logger = logging.getLogger(__name__)
class ProgressiveSoftFilterPruning():
    def __init__(self, model, args, target_layers,
                 pruning_rate, stop_trigger, pruning_rate_decay=1 / 8):
        """ Progressive Deep Neural Networks Acceleration via Soft Filter Pruning
        https://arxiv.org/abs/1808.07471
        Args:
            model (chainer.Chain):
            target_layers (list):
            pruning_rate (float): target sparsity, applied uniformly to all
                layers in target_layers; in [0, 1), larger means more compression
            pruning_rate_decay (float): shapes the progressive sparsity schedule
                (paper default 1/8): the fraction of max_iteration/epoch at
                which the sparsity reaches 3/4 of pruning_rate
            trigger (tuple): how often weights are zeroed, given as e.g.
                (500, 'iteration'); the paper default is (1, 'epoch')
            stop_trigger (int): total number of training iterations/epochs
        """
        if not enable_scipy:
            raise ImportError("please install scipy")
        self.model = model
        self.target_layers = target_layers
        self.pruning_rate = pruning_rate
        self.pruning_rate_decay = pruning_rate_decay
        self.stop_trigger = stop_trigger
        self.graph = Graph(model, args)
        # Start from zero sparsity; the fitted schedule raises it over training.
        initial_pruning_rate = 0.
        self.mask = NormMask(model, self.graph, target_layers, percent=initial_pruning_rate, norm='l2')
        self._pruning_rate_fn = self._init_pruning_rate_fn(pruning_rate,
                                                           pruning_rate_decay,
                                                           stop_trigger)
    def _init_pruning_rate_fn(self, pruning_rate, pruning_rate_decay, max_step):
        """Build the function that progressively raises the pruning ratio.
        curve-fitting to y = a * exp(-k * x) + b
        (0, 0), (max_step * pruning_rate_decay, pruning_rate / 4), (max_step, pruning_rate)
        NOTE(review): the code below anchors 3/4 * pruning_rate (see inline
        comment), not 1/4 as this docstring's anchor list suggests — confirm
        which matches the paper.
        Args:
            pruning_rate:
            pruning_rate_decay:
            max_step:
        Returns:
            fn: callable mapping a step number to a sparsity fraction
        """
        pruning_rate *= 100
        def f(x, a, k, b):
            return a * np.exp(-k * x) + b
        # using fp64
        xdata = np.array([0, max_step * pruning_rate_decay, max_step], dtype=np.float64)
        ydata = np.array([0, pruning_rate * 3 / 4, pruning_rate], dtype=np.float64)  # paper = 1/4 ?
        p0 = np.array([0, 0, 0], dtype=np.float32)
        popt, _ = curve_fit(f, xdata, ydata, p0=p0)
        logger.info('(sparsity[%]): {}'.format([(x, y) for x, y in zip(xdata, ydata)]))
        return lambda x: f(x, *popt) * 0.01  # 10% -> 0.1
    def __call__(self, step):
        # update pruning_rate
        for key in self.mask.percent.keys():
            self.mask.percent[key] = self._pruning_rate_fn(step)
        info = self.mask()
        logger.info(info)
    def rebuild(self):
        # Physically shrink the model according to the accumulated masks.
        info = rebuild(self.model, self.graph, self.target_layers)
        logger.debug(info)
import logging
import numpy as np
try:
    from scipy.optimize import curve_fit
    enable_scipy = True
except:
    # NOTE(review): bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # `except ImportError:` would be the targeted form. The constructor raises
    # ImportError later when scipy is actually needed.
    enable_scipy = False
from chainerpruner import Graph
from chainerpruner.masks import NormMask
from chainerpruner.rebuild.rebuild import rebuild
logger = logging.getLogger(__name__)
class ProgressiveSoftFilterPruning():
    """Progressive Soft Filter Pruning (https://arxiv.org/abs/1808.07471).

    Gradually raises the per-layer sparsity of ``target_layers`` over
    training by fitting an exponential schedule and re-applying an L2-norm
    mask at a fixed trigger.
    """

    def __init__(self, model, args, target_layers,
                 pruning_rate, stop_trigger, pruning_rate_decay=1 / 8):
        """Set up the masks and the progressive sparsity schedule.

        Args:
            model (chainer.Chain): model to prune.
            target_layers (list): names of layers to prune.
            pruning_rate (float): final sparsity in [0, 1), applied uniformly
                to every target layer; larger means more compression.
            stop_trigger (int): total number of training iterations/epochs.
            pruning_rate_decay (float): fraction of ``stop_trigger`` at which
                the sparsity reaches 3/4 of ``pruning_rate`` (paper: 1/8).

        Raises:
            ImportError: if scipy is not available.
        """
        if not enable_scipy:
            raise ImportError("please install scipy")
        self.model = model
        self.target_layers = target_layers
        self.pruning_rate = pruning_rate
        self.pruning_rate_decay = pruning_rate_decay
        self.stop_trigger = stop_trigger
        self.graph = Graph(model, args)
        # Start at zero sparsity; the schedule raises it over training.
        self.mask = NormMask(model, self.graph, target_layers, percent=0., norm='l2')
        self._pruning_rate_fn = self._init_pruning_rate_fn(
            pruning_rate, pruning_rate_decay, stop_trigger)

    def _init_pruning_rate_fn(self, pruning_rate, pruning_rate_decay, max_step):
        """Fit y = a * exp(-k * x) + b through the schedule anchor points.

        Anchors: (0, 0), (max_step * pruning_rate_decay, 3/4 * rate),
        (max_step, rate), all in percent.

        Returns:
            callable: maps a step number to a sparsity fraction in [0, 1).
        """
        pruning_rate *= 100  # fit in percent space

        def schedule(x, a, k, b):
            return a * np.exp(-k * x) + b

        # Anchor points in fp64 for a stable fit.
        xdata = np.array([0, max_step * pruning_rate_decay, max_step], dtype=np.float64)
        ydata = np.array([0, pruning_rate * 3 / 4, pruning_rate], dtype=np.float64)  # paper = 1/4 ?
        initial_guess = np.array([0, 0, 0], dtype=np.float32)
        popt, _ = curve_fit(schedule, xdata, ydata, p0=initial_guess)
        logger.info('(sparsity[%]): {}'.format([(x, y) for x, y in zip(xdata, ydata)]))
        # Convert percent back to a fraction when evaluating.
        return lambda x: schedule(x, *popt) * 0.01  # 10% -> 0.1

    def __call__(self, step):
        """Update every layer's sparsity for ``step`` and re-apply the mask."""
        rate = self._pruning_rate_fn(step)
        for key in self.mask.percent:
            self.mask.percent[key] = rate
        info = self.mask()
        logger.info(info)

    def rebuild(self):
        """Physically shrink the model according to the accumulated masks."""
        result = rebuild(self.model, self.graph, self.target_layers)
        logger.debug(result)
import logging
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
import boto3
logger = logging.getLogger(__name__)
rekognition_client = boto3.client('rekognition')
class RekognitionText:
    """One text element detected by Amazon Rekognition."""

    def __init__(self, text_data):
        """Capture the fields of interest from one detection record.

        :param text_data: Text data, in the format returned by Amazon
            Rekognition functions. Missing keys become ``None``.
        """
        read = text_data.get
        self.text = read('DetectedText')
        self.kind = read('Type')
        self.id = read('Id')
        self.parent_id = read('ParentId')
        self.confidence = read('Confidence')
        self.geometry = read('Geometry')

    def to_dict(self):
        """Render text, kind and polygon (when present) as a dict.

        :return: A dict that contains the text data.
        """
        out = {}
        for key, value in (('text', self.text), ('kind', self.kind)):
            if value is not None:
                out[key] = value
        if self.geometry is not None:
            out['polygon'] = self.geometry.get('Polygon')
        return out
client = boto3.client('rekognition')
import meilisearch
# NOTE(review): this rebinds `client` from the boto3 Rekognition client to
# the Meilisearch client, so the binding above is dead — the module uses
# `rekognition_client` for AWS calls; confirm nothing relies on the old name.
client = meilisearch.Client('http://127.0.0.1:7700', 'masterKey')
# An index is where the documents are stored.
index = client.index('cards')
class CardDeterminer:
    """Matches text detected in a card image against the 'cards' search index."""

    def __init__(self) -> None:
        pass

    def detect_text(self, img_file_name):
        """Detect text in the image and look the first text up in the index.

        :param img_file_name: Path to the image file to analyse.
        :return: The first search result for the first detected text.
            (The old ``-> None`` annotation was wrong: a value is returned.)
        :raises ClientError: If the Rekognition call fails.
        """
        try:
            with open(img_file_name, 'rb') as img_file:
                image = {'Bytes': img_file.read()}
            response = rekognition_client.detect_text(Image=image)
            texts = [RekognitionText(text)
                     for text in response['TextDetections']]
            logger.info("Found %s texts in %s.", len(texts), img_file_name)
        except ClientError:
            logger.exception("Couldn't detect text in %s.", img_file_name)
            raise
        else:
            # NOTE(review): raises IndexError when no text was detected, and
            # `results[0]` presumes the search response is indexable —
            # meilisearch typically returns a dict with a 'hits' list; confirm
            # against the client version in use.
            results = index.search(texts[0].text)
            print(results)
            return results[0]
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
import boto3
logger = logging.getLogger(__name__)
rekognition_client = boto3.client('rekognition')
class RekognitionText:
    """Encapsulates an Amazon Rekognition text element."""
    def __init__(self, text_data):
        """
        Initializes the text object.
        :param text_data: Text data, in the format returned by Amazon Rekognition
                          functions. Missing keys become ``None`` (``.get``).
        """
        self.text = text_data.get('DetectedText')
        self.kind = text_data.get('Type')
        self.id = text_data.get('Id')
        self.parent_id = text_data.get('ParentId')
        self.confidence = text_data.get('Confidence')
        self.geometry = text_data.get('Geometry')
    def to_dict(self):
        """
        Renders some of the text data to a dict.
        :return: A dict that contains the text data (text, kind and the
                 geometry's polygon — only keys that are present).
        """
        rendering = {}
        if self.text is not None:
            rendering['text'] = self.text
        if self.kind is not None:
            rendering['kind'] = self.kind
        if self.geometry is not None:
            # Only the polygon outline is exposed, not the bounding box.
            rendering['polygon'] = self.geometry.get('Polygon')
        return rendering
client = boto3.client('rekognition')
import meilisearch
# NOTE(review): this rebinds `client` from the boto3 Rekognition client to
# the Meilisearch client, so the binding above is dead — the module uses
# `rekognition_client` for AWS calls; confirm nothing relies on the old name.
client = meilisearch.Client('http://127.0.0.1:7700', 'masterKey')
# An index is where the documents are stored.
index = client.index('cards')
class CardDeterminer:
    """ Stuff """
    def __init__(self) -> None:
        pass
    def detect_text(self, img_file_name) -> None:
        """
        Detects text in the image.
        :return The list of text elements found in the image.
        """
        # NOTE(review): despite the `-> None` annotation and the docstring,
        # this returns the first index search result for the first detected
        # text — align annotation/docstring with the actual behaviour.
        try:
            with open(img_file_name, 'rb') as img_file:
                image = {'Bytes': img_file.read()}
            response = rekognition_client.detect_text(Image=image)
            texts = [RekognitionText(text)
                     for text in response['TextDetections']]
            logger.info("Found %s texts in %s.", len(texts), img_file_name)
        except ClientError:
            logger.exception("Couldn't detect text in %s.", img_file_name)
            raise
        else:
            # NOTE(review): raises IndexError when no text was detected, and
            # `results[0]` presumes the search response is indexable —
            # meilisearch typically returns a dict with 'hits'; confirm.
            results = index.search(texts[0].text)
            print(results)
            return results[0]
import os
import docker
import socket
import logging
from docker.utils import kwargs_from_env
class SAIDaemon:
    """
    The appearance of the SIA: builds and (re)starts the daemon container.
    """
    def build(self, path_dockerfile=''):
        """(Re)start the ``c_sai_daemon`` Docker container.

        Args:
            path_dockerfile: Directory containing the Dockerfile; defaults to
                the current working directory.

        Returns:
            0 on success, -1 on any failure (the error is logged).
        """
        if path_dockerfile == '':
            path_dockerfile = os.getcwd()
        client = None
        api_client = None
        try:
            client = docker.from_env()
            # TODO only if images changes
            #img = client.images.build(path=path_dockerfile, tag="sai_daemon")
            kwargs = kwargs_from_env()
            # @source : https://github.com/qazbnm456/tsaotun/blob/master/tsaotun/lib/docker_client.py
            api_client = docker.APIClient(**kwargs)
            print(api_client.version())
            print(os.getcwd()[2:])
            print("Docker run ---------->")
            # Kill a running c_sai_daemon, if any, so the name is reusable.
            for c in client.containers.list():
                if c.__getattribute__("name") == "c_sai_daemon":
                    api_client.kill("c_sai_daemon")
            # Remove any stopped c_sai_daemon container as well.
            for c in client.containers.list(all=True):
                if c.__getattribute__("name") == "c_sai_daemon":
                    api_client.remove_container("c_sai_daemon")
            # @source : http://www.geo.mtu.edu/geoschem/docs/putty_install.html
            # @source : https://github.com/asweigart/pyautogui/issues/124
            # https://github.com/niranjanshr13/Automate_Linux_with_GAssistant probably use or not
            # TODO test if the ip is the real ip
            IPAddr = socket.gethostbyname_ex(socket.gethostname())[-1][-1]  # socket.gethostbyname(socket.gethostname())
            print("Is is the real ip ?", IPAddr)
            # Forward the X display to the host for GUI automation.
            environment = {"DISPLAY": IPAddr + ':0.0'}
            # NOTE(review): host path is hard-coded to one developer machine.
            volumes = {"/c/Users/johdu/PycharmProjects/SAI":
                           {'bind': '/code/', 'mode': 'rw'}
                       }
            # volume : src:dest
            print(client.containers.run(image="sai_daemon",
                                        name="c_sai_daemon",
                                        volumes=volumes,
                                        environment=environment).decode('utf8'))
        except Exception as e:
            logging.error("Build function don't work because " + str(e))
            return -1
        finally:
            # Close only the clients that were actually created: the old code
            # called .close() unconditionally in the except path, raising
            # AttributeError (client is None) when docker.from_env() itself
            # failed — masking the original error.
            if client is not None:
                client.close()
            if api_client is not None:
                api_client.close()
        return 0
    def hello_world(self):
        """Return a fixed greeting (simple liveness check)."""
        return "hello world"
import docker
import socket
import logging
from docker.utils import kwargs_from_env
class SAIDaemon:
"""
The appearence of the SIA
"""
def build(self, path_dockerfile=''):
    """Build/refresh the ``sai_daemon`` container and run it.

    Kills and removes any existing ``c_sai_daemon`` container, then runs a
    new one from the ``sai_daemon`` image with the project directory bind
    mounted and DISPLAY exported for X forwarding.

    Args:
        path_dockerfile (str): directory holding the Dockerfile; defaults
            to the current working directory when empty.

    Returns:
        int: 0 on success, -1 on any failure.
    """
    if path_dockerfile == '':
        path_dockerfile = os.getcwd()
    client = None
    api_client = None
    try:
        client = docker.from_env()
        # TODO only rebuild the image if its inputs changed
        # img = client.images.build(path=path_dockerfile, tag="sai_daemon")
        kwargs = kwargs_from_env()
        # @source : https://github.com/qazbnm456/tsaotun/blob/master/tsaotun/lib/docker_client.py
        api_client = docker.APIClient(**kwargs)
        print(api_client.version())
        print(os.getcwd()[2:])
        print("Docker run ---------->")
        # Stop any running container holding our name so it can be re-run.
        for c in client.containers.list():
            if c.name == "c_sai_daemon":
                api_client.kill("c_sai_daemon")
        # Remove the (now stopped) container so the name can be reused.
        for c in client.containers.list(all=True):
            if c.name == "c_sai_daemon":
                api_client.remove_container("c_sai_daemon")
        # TODO test if the ip is the real ip
        IPAddr = socket.gethostbyname_ex(socket.gethostname())[-1][-1]
        print("Is is the real ip ?", IPAddr)
        environment = {"DISPLAY": IPAddr + ':0.0'}
        # volume mapping: host path -> container path
        volumes = {"/c/Users/johdu/PycharmProjects/SAI":
                       {'bind': '/code/', 'mode': 'rw'}
                   }
        print(client.containers.run(image="sai_daemon",
                                    name="c_sai_daemon",
                                    volumes=volumes,
                                    environment=environment).decode('utf8'))
    except Exception as e:
        logging.error("Build function don't work because " + str(e))
        return -1
    finally:
        # Close only the clients that were actually created: either may
        # still be None if docker.from_env()/APIClient raised above, and the
        # old unconditional close() hid the real error with AttributeError.
        if client is not None:
            client.close()
        if api_client is not None:
            api_client.close()
    # TODO the daemon has been correctly build
    return 0
def hello_world(self):
    """Return a fixed greeting proving the daemon responds."""
    # (the original line was corrupted by dataset residue after the literal)
    return "hello world"
import os
class QTRun(object):
    """Run Ixia QuickTest suites for the current pytest test function."""

    def __init__(self, request, tg):
        """Initialize QTRun.

        Args:
            request(pytest.request): pytest request (function scope only)
            tg(Environment instance): Ixia TG object configured for IxNetwork

        Raises:
            Exception: incorrect fixture scope, wrong TG type, or TG not
                configured to use the IxNetwork API.
        """
        if request.scope != "function":
            raise Exception("This fixture has to be used only in function scope.")
        # Passed tg object has to be Ixia
        if "ixia" not in tg.type:
            raise Exception("Provided TG object isn't Ixia.")
        if not tg.is_protocol_emulation_present:
            raise Exception("Provided Ixia TG object isn't configured to use IxNetwork API.")
        self.tg = tg
        self.__name__ = request.function.__name__
        self.qtpath = request.config.option.qtpath
        if self.qtpath is None:
            # Default: <test dir>/ixncfg/<test module name>.ixncfg
            _filename = request.function.__code__.co_filename
            _dir = os.path.dirname(_filename)
            _basefilename = os.path.splitext(os.path.basename(_filename))[0]
            self.qtpath = os.path.join(_dir, "ixncfg", _basefilename + ".ixncfg")

    def _load_cfg(self):
        """Load the ixncfg file unless one with the same name is loaded."""
        if self.tg.ixncfg_file is None or os.path.basename(self.tg.ixncfg_file) != os.path.basename(self.qtpath):
            self.tg.load_ixncfg(self.qtpath)

    def run(self, qt_name=None, qt_id=None, pdf=True):
        """Execute QuickTest(s) and wait for results.

        Args:
            qt_name(str): QuickTest name
            qt_id(str): QuickTest id
            pdf(bool): Enable/Disable PDF report

        Returns:
            list: paths to results on the IxNetwork host
        """
        # Load config if it isn't loaded yet.
        self._load_cfg()
        # Enable/disable pdf reports as requested.
        self.tg.qt.report(pdf=pdf)
        # Run either the single requested QT or every QT in the config
        # (the original trailing return line was corrupted by dataset residue).
        if qt_name is None or qt_id is None:
            qts = self.tg.qt.tc_list
        else:
            qts = [(qt_name, qt_id)]
        return [self.tg.qt.run(qt_n, qt_i, self.__name__) for qt_n, qt_i in qts]
class QTRun(object):
    """Run Ixia QuickTest suites for the current pytest test function."""

    def __init__(self, request, tg):
        """Initialize QTRun.

        Args:
            request(pytest.request): pytest request (function scope only)
            tg(Environment instance): Ixia TG object configured for IxNetwork

        Raises:
            Exception: incorrect fixture scope, wrong TG type, or TG not
                configured to use the IxNetwork API.
        """
        if request.scope != "function":
            raise Exception("This fixture has to be used only in function scope.")
        # Passed tg object has to be Ixia
        if "ixia" not in tg.type:
            raise Exception("Provided TG object isn't Ixia.")
        if not tg.is_protocol_emulation_present:
            raise Exception("Provided Ixia TG object isn't configured to use IxNetwork API.")
        self.tg = tg
        self.__name__ = request.function.__name__
        self.qtpath = request.config.option.qtpath
        if self.qtpath is None:
            # Default: <test dir>/ixncfg/<test module name>.ixncfg
            _filename = request.function.__code__.co_filename
            _dir = os.path.dirname(_filename)
            _basefilename = os.path.splitext(os.path.basename(_filename))[0]
            self.qtpath = os.path.join(_dir, "ixncfg", _basefilename + ".ixncfg")

    def _load_cfg(self):
        """Load the ixncfg file unless one with the same name is loaded."""
        if self.tg.ixncfg_file is None or os.path.basename(self.tg.ixncfg_file) != os.path.basename(self.qtpath):
            self.tg.load_ixncfg(self.qtpath)

    def run(self, qt_name=None, qt_id=None, pdf=True):
        """Execute QuickTest(s) and wait for results.

        Args:
            qt_name(str): QuickTest name
            qt_id(str): QuickTest id
            pdf(bool): Enable/Disable PDF report

        Returns:
            list: paths to results on the IxNetwork host
        """
        # Load config if it isn't loaded yet.
        self._load_cfg()
        # Enable/disable pdf reports as requested.
        self.tg.qt.report(pdf=pdf)
        # Run either the single requested QT or every QT in the config
        # (the original trailing return line was corrupted by dataset residue).
        if qt_name is None or qt_id is None:
            qts = self.tg.qt.tc_list
        else:
            qts = [(qt_name, qt_id)]
        return [self.tg.qt.run(qt_n, qt_i, self.__name__) for qt_n, qt_i in qts]
import argparse
import random
import socket
import sys
import urlparse  # NOTE(review): Python 2 module; the py3 equivalent is urllib.parse
import json
from wsgiref.simple_server import make_server
TIMEZONE = "US/Central"  # timezone label; not referenced elsewhere in this chunk
def validate_parameters(query_dict, parameters):
    """
    Check parameters in query_dict using the parameters specified

    :param query_dict: a dictionary with key / value (list) pairs to test
    :param parameters: a dictionary with parameter name / type
                       specifying the type of parameters in the query_dict
    :return: true or false depending on whether the parameters are valid
    """
    # Accepted spellings for boolean-valued query parameters.
    bool_literals = ('true', 'false', '1', '0', 'yes', 'no')
    for key, val in parameters.items():  # .items() works on both py2 and py3
        if key not in query_dict:
            return False
        if val == int:
            try:
                int(query_dict[key][0])
            except ValueError:
                return False
        elif val == bool:
            # Bug fix: bool(str) never raises ValueError, so the original
            # try/except was dead code that accepted any value; check a
            # known boolean spelling instead.
            if str(query_dict[key][0]).lower() not in bool_literals:
                return False
    return True
def delete_job(environ):
    """
    Remove a job from being processed
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    response = {"status": 200,
                "result": "success"}
    status = '200 OK'
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str,
                  'jobid': int}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    if random.random() > 0.9:
        # give an error in 10% of the cases (placeholder behaviour)
        response = {'status': 500,
                    'result': "Server Error"}
        return json.dumps(response), '500 Server Error'
    return json.dumps(response), status
def get_user_params(environ):
    """
    Get user id and security token from CGI query string

    :param environ: dictionary with environment variables (See PEP 333)
    :return: tuple with userid, security_token (empty strings when absent)
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    if 'userid' not in query_dict or 'token' not in query_dict:
        return '', ''
    # Bug fix: parse_qs maps each key to a *list* of values; the docstring
    # (and the empty-string fallback above) promise scalars, so return the
    # first value of each.
    user_id = query_dict['userid'][0]
    token = query_dict['token'][0]
    return user_id, token
def validate_user(userid, token):
    """
    Given an userid and security token, validate this against database

    :param userid: string with user id
    :param token: security token
    :return: True if credentials are valid, false otherwise
    """
    import random
    # Placeholder: randomly reject ~10% of requests instead of a real check.
    return not random.random() > 0.9
def get_current_jobs(environ):
    """
    Get status for all jobs submitted by user in last week
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    userid, secret = get_user_params(environ)
    if not validate_user(userid, secret):
        response = {'status': 401,
                    'result': "invalid user"}
        return json.dumps(response), '401 Not Authorized'
    # Canned payload until real job tracking exists.
    response = {'status': 200,
                'jobs': [{'id': 1,
                          'input': 'subj_1.mgz',
                          'name': 'job_name1',
                          'status': 'PROCESSING',
                          'output': 'http://test.url/output_1.mgz'},
                         {'id': 23,
                          'input': 'subj_182.mgz',
                          'name': 'my_job2',
                          'status': 'COMPLETED',
                          'output': 'http://test.url/output_182.mgz'}]}
    status = '200 OK'
    return json.dumps(response), status
def submit_job(environ):
    """
    Submit a job to be processed
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str,
                  'filename': str,
                  'singlecore': bool,
                  'jobname': str}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    if random.random() > 0.9:
        # give an error in 10% of the cases (placeholder behaviour)
        response = {'status': 500,
                    'result': "Server Error"}
        return json.dumps(response), '500 Server Error'
    response = {"status": 200,
                "result": "success"}
    return json.dumps(response), '200 OK'
def application(environ, start_response):
    """
    Dispatch a WSGI request to the matching job handler.

    :param environ: dictionary with environment variables (See PEP 333)
    :param start_response: callable function to handle responses (see PEP 333)
    :return: a list with the response_body to return to client
    """
    if 'REQUEST_METHOD' not in environ:
        response_body = "No request method"
        response_headers = [('Content-Type', 'text/html'),
                            ('Content-Length', str(len(response_body)))]
        start_response('200 OK', response_headers)
        print(response_body)  # py3-compatible print (was a py2 print statement)
        return [response_body]
    if environ['REQUEST_METHOD'] == 'GET':
        response_body, status = get_current_jobs(environ)
    elif environ['REQUEST_METHOD'] == 'POST':
        response_body, status = submit_job(environ)
    elif environ['REQUEST_METHOD'] == 'DELETE':
        response_body, status = delete_job(environ)
    else:
        # Unsupported method: echo a server-error body and status.
        response_body = '500 Server Error'
        status = '500 Server Error'
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    print(response_body)
    return [response_body]
if __name__ == '__main__':
    # Stand-alone development server (the original last line was corrupted
    # by dataset residue): parse CLI options and serve until interrupted.
    parser = argparse.ArgumentParser(description='Parse request and act appropriately')
    parser.add_argument('--host', dest='hostname', default=socket.getfqdn(),
                        help='hostname of server')
    args = parser.parse_args(sys.argv[1:])
    srv = make_server(args.hostname, 8080, application)
    srv.serve_forever()
import argparse
import random
import socket
import sys
import urlparse  # NOTE(review): Python 2 module; the py3 equivalent is urllib.parse
import json
from wsgiref.simple_server import make_server
TIMEZONE = "US/Central"  # timezone label; not referenced elsewhere in this chunk
def validate_parameters(query_dict, parameters):
    """
    Check parameters in query_dict using the parameters specified

    :param query_dict: a dictionary with key / value (list) pairs to test
    :param parameters: a dictionary with parameter name / type
                       specifying the type of parameters in the query_dict
    :return: true or false depending on whether the parameters are valid
    """
    # Accepted spellings for boolean-valued query parameters.
    bool_literals = ('true', 'false', '1', '0', 'yes', 'no')
    for key, val in parameters.items():  # .items() works on both py2 and py3
        if key not in query_dict:
            return False
        if val == int:
            try:
                int(query_dict[key][0])
            except ValueError:
                return False
        elif val == bool:
            # Bug fix: bool(str) never raises ValueError, so the original
            # try/except was dead code that accepted any value; check a
            # known boolean spelling instead.
            if str(query_dict[key][0]).lower() not in bool_literals:
                return False
    return True
def delete_job(environ):
    """
    Remove a job from being processed
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    response = {"status": 200,
                "result": "success"}
    status = '200 OK'
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str,
                  'jobid': int}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    if random.random() > 0.9:
        # give an error in 10% of the cases (placeholder behaviour)
        response = {'status': 500,
                    'result': "Server Error"}
        return json.dumps(response), '500 Server Error'
    return json.dumps(response), status
def get_user_params(environ):
    """
    Get user id and security token from CGI query string

    :param environ: dictionary with environment variables (See PEP 333)
    :return: tuple with userid, security_token (empty strings when absent)
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    if 'userid' not in query_dict or 'token' not in query_dict:
        return '', ''
    # Bug fix: parse_qs maps each key to a *list* of values; the docstring
    # (and the empty-string fallback above) promise scalars, so return the
    # first value of each.
    user_id = query_dict['userid'][0]
    token = query_dict['token'][0]
    return user_id, token
def validate_user(userid, token):
    """
    Given an userid and security token, validate this against database

    :param userid: string with user id
    :param token: security token
    :return: True if credentials are valid, false otherwise
    """
    import random
    # Placeholder: randomly reject ~10% of requests instead of a real check.
    return not random.random() > 0.9
def get_current_jobs(environ):
    """
    Get status for all jobs submitted by user in last week
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    userid, secret = get_user_params(environ)
    if not validate_user(userid, secret):
        response = {'status': 401,
                    'result': "invalid user"}
        return json.dumps(response), '401 Not Authorized'
    # Canned payload until real job tracking exists.
    response = {'status': 200,
                'jobs': [{'id': 1,
                          'input': 'subj_1.mgz',
                          'name': 'job_name1',
                          'status': 'PROCESSING',
                          'output': 'http://test.url/output_1.mgz'},
                         {'id': 23,
                          'input': 'subj_182.mgz',
                          'name': 'my_job2',
                          'status': 'COMPLETED',
                          'output': 'http://test.url/output_182.mgz'}]}
    status = '200 OK'
    return json.dumps(response), status
def submit_job(environ):
    """
    Submit a job to be processed
    TODO: placeholder for now

    :param environ: dictionary with environment variables (See PEP 333)
    :return: a tuple with response_body, status
    """
    # urlparse moved in Python 3; fall back so this works on both versions.
    try:
        from urllib.parse import parse_qs
    except ImportError:
        from urlparse import parse_qs
    query_dict = parse_qs(environ['QUERY_STRING'])
    parameters = {'userid': str,
                  'token': str,
                  'filename': str,
                  'singlecore': bool,
                  'jobname': str}
    if not validate_parameters(query_dict, parameters):
        response = {'status': 400,
                    'result': "invalid or missing parameter"}
        return json.dumps(response), '400 Bad Request'
    if random.random() > 0.9:
        # give an error in 10% of the cases (placeholder behaviour)
        response = {'status': 500,
                    'result': "Server Error"}
        return json.dumps(response), '500 Server Error'
    response = {"status": 200,
                "result": "success"}
    return json.dumps(response), '200 OK'
def application(environ, start_response):
    """
    Dispatch a WSGI request to the matching job handler.

    :param environ: dictionary with environment variables (See PEP 333)
    :param start_response: callable function to handle responses (see PEP 333)
    :return: a list with the response_body to return to client
    """
    if 'REQUEST_METHOD' not in environ:
        response_body = "No request method"
        response_headers = [('Content-Type', 'text/html'),
                            ('Content-Length', str(len(response_body)))]
        start_response('200 OK', response_headers)
        print(response_body)  # py3-compatible print (was a py2 print statement)
        return [response_body]
    if environ['REQUEST_METHOD'] == 'GET':
        response_body, status = get_current_jobs(environ)
    elif environ['REQUEST_METHOD'] == 'POST':
        response_body, status = submit_job(environ)
    elif environ['REQUEST_METHOD'] == 'DELETE':
        response_body, status = delete_job(environ)
    else:
        # Unsupported method: echo a server-error body and status.
        response_body = '500 Server Error'
        status = '500 Server Error'
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    print(response_body)
    return [response_body]
if __name__ == '__main__':
    # Stand-alone development server (the original last line was corrupted
    # by dataset residue): parse CLI options and serve until interrupted.
    parser = argparse.ArgumentParser(description='Parse request and act appropriately')
    parser.add_argument('--host', dest='hostname', default=socket.getfqdn(),
                        help='hostname of server')
    args = parser.parse_args(sys.argv[1:])
    srv = make_server(args.hostname, 8080, application)
    srv.serve_forever()
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from PIL import Image
import cv2
def affine_elastic_transform(image, mask=None, alpha=100, sigma=11,
                             alpha_affine=40, random_state=None):
    """Apply a random affine warp followed by an elastic deformation.

    :param image: input image (PIL image or array)
    :param mask: optional mask transformed with the same warp/deformation
    :param alpha: elastic deformation intensity factor
    :param sigma: gaussian smoothing sigma for the displacement fields
    :param alpha_affine: max absolute jitter of the affine control points
    :param random_state: optional np.random.RandomState for reproducibility
    :return: transformed PIL image, or (image, mask) pair when mask is given
    """
    image = np.array(image)
    if mask is not None:
        mask = np.array(mask)
        assert image.shape == mask.shape
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]
    # Random affine: three control points placed around the image centre.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    # pts1: control points before the affine transform
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size,
                        center_square[1] - square_size],
                       center_square - square_size])
    # pts2: the same points after a random jitter within +-alpha_affine
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,
                                       size=pts1.shape).astype(np.float32)
    # Affine matrix mapping pts1 -> pts2.
    M = cv2.getAffineTransform(pts1, pts2)
    imageB = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Random displacement fields drawn uniformly from [-1, 1], gaussian
    # smoothed, then scaled by the deformation factor alpha.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
    # Bilinear interpolation of the affine-warped image at displaced coords.
    imageC = map_coordinates(imageB, indices, order=1, mode='constant').reshape(shape)
    image_elastic = Image.fromarray(imageC.astype('uint8'))
    if mask is not None:
        # Bug fix: the mask used to be warped unconditionally, which raised
        # inside cv2.warpAffine when mask was None (the documented default).
        maskB = cv2.warpAffine(mask, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
        maskC = map_coordinates(maskB, indices, order=1, mode='constant').reshape(shape)
        mask_elastic = Image.fromarray(maskC.astype('uint8'))
        return image_elastic, mask_elastic
    return image_elastic
def elastic_transform(image, mask=None, alpha=100, sigma=11, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.

    .. [Simard2003] <NAME> Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    :param image: 2-D (single-channel) image, PIL image or array
    :param mask: optional mask deformed with the same displacement field
    :param alpha: deformation intensity factor
    :param sigma: gaussian smoothing sigma for the displacement fields
    :param random_state: optional np.random.RandomState for reproducibility
    :return: deformed PIL image, or (image, mask) pair when mask is given
    """
    image = np.array(image)
    if mask is not None:
        mask = np.array(mask)
        assert image.shape == mask.shape
    # Only 2-D (single-channel) input is supported by this variant.
    assert len(image.shape) == 2
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    # Smoothed random displacement fields, scaled by alpha.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
    # Bilinear interpolation at the displaced coordinates.
    image_elastic = map_coordinates(image, indices, order=1).reshape(shape)
    image_elastic = Image.fromarray(image_elastic.astype('uint8'))
    if mask is not None:
        mask_elastic = map_coordinates(mask, indices, order=1).reshape(shape)
        mask_elastic = Image.fromarray(mask_elastic.astype('uint8'))
        return image_elastic, mask_elastic
    return image_elastic
if __name__ == '__main__':
    # Manual smoke test on a sample ultrasound image (hard-coded local path).
    # The original last line was corrupted by dataset residue.
    img_ori = Image.open('/home/gy/ultrasound_dataset/T_BUSIS/test_gray/4B_60.bmp')
    img_elastic = affine_elastic_transform(img_ori, alpha=20, sigma=11)
    img_elastic.show()
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from PIL import Image
import cv2
def affine_elastic_transform(image, mask=None, alpha=100, sigma=11,
                             alpha_affine=40, random_state=None):
    """Apply a random affine warp followed by an elastic deformation.

    :param image: input image (PIL image or array)
    :param mask: optional mask transformed with the same warp/deformation
    :param alpha: elastic deformation intensity factor
    :param sigma: gaussian smoothing sigma for the displacement fields
    :param alpha_affine: max absolute jitter of the affine control points
    :param random_state: optional np.random.RandomState for reproducibility
    :return: transformed PIL image, or (image, mask) pair when mask is given
    """
    image = np.array(image)
    if mask is not None:
        mask = np.array(mask)
        assert image.shape == mask.shape
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]
    # Random affine: three control points placed around the image centre.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    # pts1: control points before the affine transform
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size,
                        center_square[1] - square_size],
                       center_square - square_size])
    # pts2: the same points after a random jitter within +-alpha_affine
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,
                                       size=pts1.shape).astype(np.float32)
    # Affine matrix mapping pts1 -> pts2.
    M = cv2.getAffineTransform(pts1, pts2)
    imageB = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Random displacement fields drawn uniformly from [-1, 1], gaussian
    # smoothed, then scaled by the deformation factor alpha.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
    # Bilinear interpolation of the affine-warped image at displaced coords.
    imageC = map_coordinates(imageB, indices, order=1, mode='constant').reshape(shape)
    image_elastic = Image.fromarray(imageC.astype('uint8'))
    if mask is not None:
        # Bug fix: the mask used to be warped unconditionally, which raised
        # inside cv2.warpAffine when mask was None (the documented default).
        maskB = cv2.warpAffine(mask, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
        maskC = map_coordinates(maskB, indices, order=1, mode='constant').reshape(shape)
        mask_elastic = Image.fromarray(maskC.astype('uint8'))
        return image_elastic, mask_elastic
    return image_elastic
def elastic_transform(image, mask=None, alpha=100, sigma=11, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.

    .. [Simard2003] <NAME> Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    :param image: 2-D (single-channel) image, PIL image or array
    :param mask: optional mask deformed with the same displacement field
    :param alpha: deformation intensity factor
    :param sigma: gaussian smoothing sigma for the displacement fields
    :param random_state: optional np.random.RandomState for reproducibility
    :return: deformed PIL image, or (image, mask) pair when mask is given
    """
    image = np.array(image)
    if mask is not None:
        mask = np.array(mask)
        assert image.shape == mask.shape
    # Only 2-D (single-channel) input is supported by this variant.
    assert len(image.shape) == 2
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    # Smoothed random displacement fields, scaled by alpha.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
    # Bilinear interpolation at the displaced coordinates.
    image_elastic = map_coordinates(image, indices, order=1).reshape(shape)
    image_elastic = Image.fromarray(image_elastic.astype('uint8'))
    if mask is not None:
        mask_elastic = map_coordinates(mask, indices, order=1).reshape(shape)
        mask_elastic = Image.fromarray(mask_elastic.astype('uint8'))
        return image_elastic, mask_elastic
    return image_elastic
if __name__ == '__main__':
    # Manual smoke test on a sample ultrasound image (hard-coded local path).
    # The original last line was corrupted by dataset residue.
    img_ori = Image.open('/home/gy/ultrasound_dataset/T_BUSIS/test_gray/4B_60.bmp')
    img_elastic = affine_elastic_transform(img_ori, alpha=20, sigma=11)
    img_elastic.show()
from datetime import datetime
from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from forms import SubmissionForm
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)
SECRET_KEY = 'hrifrgtkghgt'  # TODO: load from environment; never hard-code secrets
UPLOAD_FOLDER = '/uploads'  # temporary
ALLOWED_EXTENSIONS = {'cue', 'log', 'flac', 'mp3', 'opus', 'wav', 'm4a', 'ogg', 'acc'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY'] = SECRET_KEY
db_name = 'site.db'
# Bug fix: sqlite URLs need three slashes for a relative file path --
# 'sqlite://site.db' is parsed as a host name and fails to open.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_name
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
# Database tables
class User(db.Model):
    # Registered user, moderation flag, and count of submitted entries.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, unique=True, index=True, nullable=False)
    entry_count = db.Column(db.Integer, unique=False, nullable=False, default=0)
    moderator = db.Column(db.Boolean, unique=False, nullable=False, default=False)
    # NOTE(review): self-referential relationship looks like a copy-paste
    # leftover; it probably belongs on Entry (entries authored by a user).
    user_id = db.relationship('User', backref='author', lazy=True)

    def __repr__(self):
        # Bug fix: the previous __repr__ referenced Metadata columns
        # (release_title/release_artist) that User does not define.
        return f"User({self.username}, {self.entry_count})"
class Entry(db.Model):
    # A single user submission referencing a MusicBrainz release.
    id = db.Column(db.Integer, primary_key=True)
    musicbrainz_album_id = db.Column(db.String(36), index=True, nullable=False)  # MBIDs are 36 chars
    audio_format = db.Column(db.String, nullable=False)
    notes = db.Column(db.String, unique=False, nullable=False)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # NOTE(review): foreign key points at entry.id (self-reference); likely
    # intended to reference user.id -- confirm before any migration.
    entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))
    def __repr__(self):
        return f"Entry({self.musicbrainz_album_id}, {self.date_created})"
class Metadata(db.Model):
    # Physical-release metadata attached to an Entry.
    id = db.Column(db.Integer, primary_key=True)
    catalog_number = db.Column(db.String, nullable=False)
    release_artist = db.Column(db.String, nullable=False)
    release_name = db.Column(db.String, nullable=False)
    physical_format = db.Column(db.String, nullable=False)
    entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))

    def __repr__(self):
        # Bug fix: the column is release_name, not release_title, so the
        # old __repr__ raised AttributeError.
        return f"Metadata({self.release_name}, {self.release_artist})"
# Dummy Entry to test templating
# TODO: replace with a real Entry/Metadata query once submissions persist.
posts = [
    {
        'release': 'Myst3ry',
        'artist': 'Ladies Code',
        'catalog': 'L200001886',
        'physicalformat': 'CD',
        'audioformat': 'FLAC',
        'notes': 'The spectograph of this release cuts off abruptly at 20db. This is likely due to how the song was produced or mastered.'
    }
]
# Renders routes from templates
@app.route("/")
def home():
return render_template("search.html", title='Search')
@app.route("/search")
def search():
return render_template("search.html", title='Search')
@app.route("/leaderboard")
def leaderboard():
return render_template("leaderboard.html", title='Leaderboard')
@app.route("/entry")
def entry():
return render_template("entry.html", posts=posts)
@app.route("/login")
def login():
return render_template("login.html", title='Login')
@app.route("/logout")
def logout():
return redirect(url_for('/')) # Returns to home page after logout
@app.shell_context_processor
def make_shell_context():
    """Expose db and the models defined here in `flask shell` sessions."""
    # Bug fix: `Post` is not defined anywhere in this app; expose the
    # models that actually exist.
    return {'db': db, 'User': User, 'Entry': Entry, 'Metadata': Metadata}
@app.route("/submit", methods=['GET', 'POST'])
def submit():
form= SubmissionForm()
if form.validate_on_submit():
# Defines new variables from the form fields
musicbrainz_album_id = request.form['musicbrainz_album_id']
source = request.form['source']
entry = (musicbrainz_album_id, source)
print(str(entry))
# Commits entry to database
db.session.add(entry)
db.session.commit()
flash(f'Submitted your files!')
return redirect(url_for('home'))
else:
print('error')
return render_template("submit.html", title='Submit', form=form)
if __name__ == "__main__": # Lets you see the changes live
app.run(debug=True) | main.py | from datetime import datetime
from datetime import datetime  # restored: lost to dataset-residue corruption
from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from forms import SubmissionForm
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)
SECRET_KEY = 'hrifrgtkghgt'  # TODO: load from environment; never hard-code secrets
UPLOAD_FOLDER = '/uploads'  # temporary
ALLOWED_EXTENSIONS = {'cue', 'log', 'flac', 'mp3', 'opus', 'wav', 'm4a', 'ogg', 'acc'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY'] = SECRET_KEY
db_name = 'site.db'
# Bug fix: sqlite URLs need three slashes for a relative file path --
# 'sqlite://site.db' is parsed as a host name and fails to open.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_name
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
# Database tables
class User(db.Model):
    # Registered user, moderation flag, and count of submitted entries.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, unique=True, index=True, nullable=False)
    entry_count = db.Column(db.Integer, unique=False, nullable=False, default=0)
    moderator = db.Column(db.Boolean, unique=False, nullable=False, default=False)
    # NOTE(review): self-referential relationship looks like a copy-paste
    # leftover; it probably belongs on Entry (entries authored by a user).
    user_id = db.relationship('User', backref='author', lazy=True)

    def __repr__(self):
        # Bug fix: the previous __repr__ referenced Metadata columns
        # (release_title/release_artist) that User does not define.
        return f"User({self.username}, {self.entry_count})"
class Entry(db.Model):
    # A single user submission referencing a MusicBrainz release.
    id = db.Column(db.Integer, primary_key=True)
    musicbrainz_album_id = db.Column(db.String(36), index=True, nullable=False)  # MBIDs are 36 chars
    audio_format = db.Column(db.String, nullable=False)
    notes = db.Column(db.String, unique=False, nullable=False)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # NOTE(review): foreign key points at entry.id (self-reference); likely
    # intended to reference user.id -- confirm before any migration.
    entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))
    def __repr__(self):
        return f"Entry({self.musicbrainz_album_id}, {self.date_created})"
class Metadata(db.Model):
    # Physical-release metadata attached to an Entry.
    id = db.Column(db.Integer, primary_key=True)
    catalog_number = db.Column(db.String, nullable=False)
    release_artist = db.Column(db.String, nullable=False)
    release_name = db.Column(db.String, nullable=False)
    physical_format = db.Column(db.String, nullable=False)
    entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))

    def __repr__(self):
        # Bug fix: the column is release_name, not release_title, so the
        # old __repr__ raised AttributeError.
        return f"Metadata({self.release_name}, {self.release_artist})"
# Dummy Entry to test templating
# TODO: replace with a real Entry/Metadata query once submissions persist.
posts = [
    {
        'release': 'Myst3ry',
        'artist': 'Ladies Code',
        'catalog': 'L200001886',
        'physicalformat': 'CD',
        'audioformat': 'FLAC',
        'notes': 'The spectograph of this release cuts off abruptly at 20db. This is likely due to how the song was produced or mastered.'
    }
]
# Renders routes from templates
@app.route("/")
def home():
return render_template("search.html", title='Search')
@app.route("/search")
def search():
return render_template("search.html", title='Search')
@app.route("/leaderboard")
def leaderboard():
return render_template("leaderboard.html", title='Leaderboard')
@app.route("/entry")
def entry():
return render_template("entry.html", posts=posts)
@app.route("/login")
def login():
return render_template("login.html", title='Login')
@app.route("/logout")
def logout():
return redirect(url_for('/')) # Returns to home page after logout
@app.shell_context_processor
def make_shell_context():
    """Expose db and the models defined here in `flask shell` sessions."""
    # Bug fix: `Post` is not defined anywhere in this app; expose the
    # models that actually exist.
    return {'db': db, 'User': User, 'Entry': Entry, 'Metadata': Metadata}
@app.route("/submit", methods=['GET', 'POST'])
def submit():
form= SubmissionForm()
if form.validate_on_submit():
# Defines new variables from the form fields
musicbrainz_album_id = request.form['musicbrainz_album_id']
source = request.form['source']
entry = (musicbrainz_album_id, source)
print(str(entry))
# Commits entry to database
db.session.add(entry)
db.session.commit()
flash(f'Submitted your files!')
return redirect(url_for('home'))
else:
print('error')
return render_template("submit.html", title='Submit', form=form)
if __name__ == "__main__": # Lets you see the changes live
app.run(debug=True) | 0.399929 | 0.044369 |
import distutils.cmd
import distutils.log
import os
from shutil import rmtree
import pip
from setuptools import find_packages, setup
# parse_requirements/PipSession moved between pip releases; import from the
# location matching the installed pip version.
# NOTE(review): int-parsing pip.__version__ breaks on pre-release versions
# such as "20.1b1" -- confirm pip is pinned before relying on this.
if tuple(map(int, pip.__version__.split("."))) >= (19, 3, 0):
    from pip._internal.network.session import PipSession
    from pip._internal.req import parse_requirements
elif tuple(map(int, pip.__version__.split("."))) >= (10, 0, 0):
    from pip._internal.download import PipSession
    from pip._internal.req import parse_requirements
else:
    from pip.download import PipSession
    from pip.req import parse_requirements
class CleanAllCommand(distutils.cmd.Command):
    """Custom ``setup.py cleanall`` command removing build/test artifacts."""
    description = "remove extra build files"
    user_options = []
    # Repository root: artifact paths below are resolved relative to this file.
    dirname = os.path.dirname(os.path.realpath(__file__))
    def initialize_options(self):
        """Nothing to initialize (required by distutils.cmd.Command)."""
        pass
    def finalize_options(self):
        """Nothing to finalize (required by distutils.cmd.Command)."""
        pass
    def run(self):
        """Remove each known artifact file or directory if present."""
        targets = [
            ".cache",
            ".coverage.py27",
            ".coverage.py36",
            ".tox",
            "coverage-html.py27",
            "coverage-html.py36",
            "consoleme.egg-info",
            "consoleme/__pycache__",
            "test/__pycache__",
        ]
        for t in targets:
            path = os.path.join(self.dirname, t)
            if os.path.isfile(path):
                self.announce(f"removing file: {path}", level=distutils.log.INFO)
                os.remove(path)
            elif os.path.isdir(path):
                self.announce(f"removing directory: {path}", level=distutils.log.INFO)
                rmtree(path)
requirements = parse_requirements("requirements.txt", session=PipSession())
test_requirements = parse_requirements("requirements-test.txt", session=PipSession())
# pip >= 20.1 renamed InstallRequirement.req to .requirement.
if tuple(map(int, pip.__version__.split("."))) >= (20, 1):
    reqs = [str(ir.requirement) for ir in requirements]
    test_reqs = [str(ir.requirement) for ir in test_requirements]
else:
    reqs = [str(ir.req) for ir in requirements]
    test_reqs = [str(ir.req) for ir in test_requirements]

# The original closing parenthesis line was corrupted by dataset residue.
setup(
    name="consoleme",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Consoleme",
    keywords="consoleme",
    url="https://github.com/Netflix/ConsoleMe",
    python_requires=">=3.8",
    install_requires=reqs,
    tests_require=test_reqs,
    setup_requires=["setupmeta"],
    extras_require={"test": ["tox"]},
    packages=find_packages(exclude=("tests",)),
    entry_points={},
    cmdclass={"cleanall": CleanAllCommand},
    include_package_data=True,
    versioning="devcommit",
    zip_safe=False,
)
import distutils.log
import os
from shutil import rmtree
import pip
from setuptools import find_packages, setup
# parse_requirements/PipSession moved between pip releases; import from the
# location matching the installed pip version.
# NOTE(review): int-parsing pip.__version__ breaks on pre-release versions
# such as "20.1b1" -- confirm pip is pinned before relying on this.
if tuple(map(int, pip.__version__.split("."))) >= (19, 3, 0):
    from pip._internal.network.session import PipSession
    from pip._internal.req import parse_requirements
elif tuple(map(int, pip.__version__.split("."))) >= (10, 0, 0):
    from pip._internal.download import PipSession
    from pip._internal.req import parse_requirements
else:
    from pip.download import PipSession
    from pip.req import parse_requirements
class CleanAllCommand(distutils.cmd.Command):
    """Custom ``setup.py cleanall`` command removing build/test artifacts."""
    description = "remove extra build files"
    user_options = []
    # Repository root: artifact paths below are resolved relative to this file.
    dirname = os.path.dirname(os.path.realpath(__file__))
    def initialize_options(self):
        """Nothing to initialize (required by distutils.cmd.Command)."""
        pass
    def finalize_options(self):
        """Nothing to finalize (required by distutils.cmd.Command)."""
        pass
    def run(self):
        """Remove each known artifact file or directory if present."""
        targets = [
            ".cache",
            ".coverage.py27",
            ".coverage.py36",
            ".tox",
            "coverage-html.py27",
            "coverage-html.py36",
            "consoleme.egg-info",
            "consoleme/__pycache__",
            "test/__pycache__",
        ]
        for t in targets:
            path = os.path.join(self.dirname, t)
            if os.path.isfile(path):
                self.announce(f"removing file: {path}", level=distutils.log.INFO)
                os.remove(path)
            elif os.path.isdir(path):
                self.announce(f"removing directory: {path}", level=distutils.log.INFO)
                rmtree(path)
# Parse pinned dependencies from the requirements files.  pip >= 20.1 renamed
# the attribute on parsed requirement objects from ``req`` to ``requirement``,
# so pick the accessor based on the installed pip version.
# NOTE(review): ``int()`` over every dotted component assumes a purely numeric
# pip version string (e.g. "20.1.1"); a dev/rc suffix would raise -- confirm.
requirements = parse_requirements("requirements.txt", session=PipSession())
test_requirements = parse_requirements("requirements-test.txt", session=PipSession())
if tuple(map(int, pip.__version__.split("."))) >= (20, 1):
    reqs = [str(ir.requirement) for ir in requirements]
    test_reqs = [str(ir.requirement) for ir in test_requirements]
else:
    reqs = [str(ir.req) for ir in requirements]
    test_reqs = [str(ir.req) for ir in test_requirements]
setup(
name="consoleme",
author="<NAME>",
author_email="<EMAIL>",
description="Consoleme",
keywords="consoleme",
url="https://github.com/Netflix/ConsoleMe",
python_requires=">=3.8",
install_requires=reqs,
tests_require=test_reqs,
setup_requires=["setupmeta"],
extras_require={"test": ["tox"]},
packages=find_packages(exclude=("tests",)),
entry_points={},
cmdclass={"cleanall": CleanAllCommand},
include_package_data=True,
versioning="devcommit",
zip_safe=False,
) | 0.278061 | 0.15511 |
import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import inputparser
import mutphi
import common
def sort_mutphi(mphi):
    """Return a copy of `mphi` with variants reordered into canonical vid order."""
    ordered_vids = common.sort_vids(mphi.vids)
    # Use list.index so that duplicate vids (if any) resolve exactly the way
    # the original ordering did -- first occurrence wins.
    row_order = []
    for vid in ordered_vids:
        row_order.append(mphi.vids.index(vid))
    # Sanity check: the permutation really maps onto the sorted vid list.
    assert [mphi.vids[i] for i in row_order] == ordered_vids
    reordered_logprobs = np.array([mphi.logprobs[i] for i in row_order])
    return mutphi.Mutphi(
        vids=ordered_vids,
        assays=mphi.assays,
        logprobs=reordered_logprobs,
    )
def impute(ssmfn, params, mphi):
    """Add uniform-probability entries for clustered variants missing from `mphi`.

    Args:
        ssmfn: path to the SSM file carrying per-variant read counts.
        params: parsed params dict; ``params['clusters']`` lists vids per cluster.
        mphi: a ``mutphi.Mutphi`` covering a subset of the clustered variants.

    Returns:
        A ``mutphi.Mutphi`` covering all clustered variants.  Exits the
        process when nothing is missing, so the mutphi file is left as-is.
    """
    clustered = set(V for C in params['clusters'] for V in C)
    missing = list(clustered - set(mphi.vids))
    if len(missing) == 0:
        # Nothing to impute; skip rewriting the mutphi file entirely.
        sys.exit()
    variants = inputparser.load_ssms(ssmfn)
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented equivalent.
    missing_reads = np.array([variants[V]['total_reads'] for V in missing]).astype(float)
    assert np.all(missing_reads >= 1)
    # Assign uniform probability based on total read count.
    missing_logprobs = np.log(1 / missing_reads)
    combined = mutphi.Mutphi(
        vids=list(mphi.vids) + missing,
        assays=mphi.assays,
        logprobs=np.vstack((mphi.logprobs, missing_logprobs)),
    )
    return combined
def score(logprobs):
    """Return the mean negative log-likelihood of `logprobs`, in bits.

    `logprobs` must contain only non-positive values (they are log
    probabilities), which the assertion enforces.
    """
    assert np.all(logprobs <= 0)
    mean_nats = -np.sum(logprobs) / logprobs.size
    # Divide by ln(2) to convert from nats to bits.
    return mean_nats / np.log(2)
def main():
    """Impute missing mutphi entries and rewrite the mutphi file in place."""
    parser = argparse.ArgumentParser(
        description='LOL HI THERE',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    for positional in ('ssm_fn', 'params_fn', 'mutphi_fn'):
        parser.add_argument(positional)
    args = parser.parse_args()

    params = inputparser.load_params(args.params_fn)
    original = mutphi.load_mutphi(args.mutphi_fn)
    imputed = sort_mutphi(impute(args.ssm_fn, params, original))
    # Overwrites the input mutphi file with the imputed, sorted version.
    mutphi.write_mutphi(imputed, args.mutphi_fn)
    # Scores are computed (exercising score()'s sanity assertion) but the
    # comparison printout stays disabled, matching the prior behavior.
    old, new = score(original.logprobs), score(imputed.logprobs)
if __name__ == '__main__':
main() | comparison/impute_missing_mutphis.py | import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import inputparser
import mutphi
import common
def sort_mutphi(mphi):
sorted_vids = common.sort_vids(mphi.vids)
mapping = [mphi.vids.index(V) for V in sorted_vids]
assert sorted_vids == [mphi.vids[idx] for idx in mapping]
sorted_logprobs = np.array([mphi.logprobs[idx] for idx in mapping])
return mutphi.Mutphi(
vids = sorted_vids,
assays = mphi.assays,
logprobs = sorted_logprobs,
)
def impute(ssmfn, params, mphi):
clustered = set([V for C in params['clusters'] for V in C])
mphi_vids = set(mphi.vids)
missing = list(clustered - mphi_vids)
if len(missing) == 0:
sys.exit()
variants = inputparser.load_ssms(ssmfn)
missing_reads = np.array([variants[V]['total_reads'] for V in missing]).astype(np.float)
assert np.all(missing_reads >= 1)
# Assign uniform probability based on total read count.
missing_logprobs = np.log(1 / missing_reads)
combined = mutphi.Mutphi(
vids = list(mphi.vids) + missing,
assays = mphi.assays,
logprobs = np.vstack((mphi.logprobs, missing_logprobs)),
)
return combined
def score(logprobs):
assert np.all(logprobs <= 0)
score = -np.sum(logprobs)
score /= logprobs.size
# Convert to bits.
score /= np.log(2)
return score
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('mutphi_fn')
args = parser.parse_args()
params = inputparser.load_params(args.params_fn)
orig_mphi = mutphi.load_mutphi(args.mutphi_fn)
mphi = impute(args.ssm_fn, params, orig_mphi)
mphi = sort_mutphi(mphi)
mutphi.write_mutphi(mphi, args.mutphi_fn)
old, new = score(orig_mphi.logprobs), score(mphi.logprobs)
#print('score_cmp', old, new, new - old, (new - old) > 0)
if __name__ == '__main__':
main() | 0.251005 | 0.44342 |
import os
import platform
import shutil
from conans import ConanFile, tools
class AndroidtoolchainConan(ConanFile):
    """Conan recipe that builds a standalone Android toolchain (via the NDK's
    make-standalone-toolchain.sh) for cross-compiling from Windows/Linux/OSX."""

    name = "android-toolchain"
    version = "r17b"
    license = "GPL/APACHE2"
    url = "https://github.com/lasote/conan-android-toolchain"
    settings = "os", "arch", "compiler"
    options = {"use_system_python": [True, False], "ndk_path": "ANY"}
    default_options = "use_system_python=True", "ndk_path=False"
    requires = "android-ndk/%s@block/testing" % version
    description = "Recipe for building an Android toolchain for cross compile Android apps from Windows/Linux/OSX"

    @property
    def ndk_path(self):
        """Expanded <ndk_path option>/build/tools path, where the NDK keeps
        make-standalone-toolchain.sh."""
        return os.path.expanduser(os.path.join(str(self.options.ndk_path), "build", "tools"))

    def configure(self):
        """Validate settings and, when a usable local NDK path is supplied,
        drop the packaged android-ndk requirement."""
        if self.options.ndk_path:
            if os.path.exists(self.ndk_path):
                del self.requires["android-ndk"]
            else:
                raise Exception("Invalid specified path to Android NDK: %s" % self.ndk_path)
        if self.settings.os != "Android":
            raise Exception("Only os Android supported")
        if str(self.settings.compiler) not in ("gcc", "clang"):
            raise Exception("Not supported compiler, gcc and clang available")
        if str(self.settings.compiler) == "gcc" and str(self.settings.compiler.version) not in ("4.8", "4.9"):
            raise Exception("Not supported gcc compiler version, 4.8 and 4.9 available")
        if str(self.settings.compiler) == "clang" and str(self.settings.compiler.version) != "6.0":
            raise Exception("Not supported clang compiler version, only 6.0 available")

    @property
    def arch_id_str(self):
        """Architecture name as used in NDK toolchain directory names."""
        return {"mips": "mipsel",
                "mips64": "mips64",
                "armv6": "arm",
                "armv7": "arm",
                "armv7hf": "arm",
                "armv8": "aarch64"}.get(str(self.settings.arch),
                                        str(self.settings.arch))

    @property
    def arch_id_str_compiler(self):
        """Architecture name as used in the compiler executable prefix."""
        return {"x86": "i686",
                "armv6": "arm",
                "armv7": "arm",
                "armv7hf": "arm",
                "armv8": "aarch64",
                "mips64": "mips64"}.get(str(self.settings.arch),
                                        str(self.settings.arch))

    @property
    def android_id_str(self):
        """ABI suffix: 32-bit ARM uses 'androideabi', everything else 'android'."""
        return "androideabi" if str(self.settings.arch) in ["armv6", "armv7"] else "android"

    def build(self):
        """Run make-standalone-toolchain.sh to assemble the toolchain into the
        package folder, then post-process the result."""
        compiler_str = {"clang": "clang", "gcc": ""}.get(str(self.settings.compiler))
        toolchain = "%s-linux-%s-%s%s" % (self.arch_id_str, self.android_id_str, compiler_str, self.settings.compiler.version)
        # Command available in android-ndk package
        # --stl => gnustl, libc++, stlport
        pre_path = (self.ndk_path + "/") if self.options.ndk_path else ""
        stl = {"libstdc++": "gnustl", "libstdc++11": "gnustl", "libc++": "libc++"}.get(str(self.settings.compiler.libcxx))
        command = "%smake-standalone-toolchain.sh --toolchain=%s --platform=android-%s " \
                  "--install-dir=%s --stl=%s" % (pre_path, toolchain, self.settings.os.api_level, self.package_folder, stl)
        self.output.warn(command)
        # BUG FIX: platform.system is a function; the original compared the
        # function object itself ("platform.system != 'Windows'"), which is
        # always true, so the Windows bash branch could never be taken.
        if platform.system() != "Windows":
            self.run(command)
        else:
            tools.run_in_windows_bash(self, command)
        if self.options.use_system_python:
            # Drop the bundled python so the system interpreter is used instead.
            if os.path.exists(os.path.join(self.package_folder, "bin", "python")):
                os.unlink(os.path.join(self.package_folder, "bin", "python"))
        if platform.system() == "Windows":  # Create clang.exe to make CMake happy
            dest_cc_compiler = os.path.join(self.package_folder, "bin", "clang.exe")
            dest_cxx_compiler = os.path.join(self.package_folder, "bin", "clang++.exe")
            src_cc_compiler = os.path.join(self.package_folder, "bin", "clang38.exe")
            src_cxx_compiler = os.path.join(self.package_folder, "bin", "clang38++.exe")
            shutil.copy(src_cc_compiler, dest_cc_compiler)
            shutil.copy(src_cxx_compiler, dest_cxx_compiler)
        if not os.path.exists(os.path.join(self.package_folder, "bin")):
            raise Exception("Invalid toolchain, try a higher api_level or different architecture: %s-%s" % (self.settings.arch, self.settings.os.api_level))

    def package_info(self):
        """Publish compiler paths, flags and the sysroot for consuming packages."""
        prename = "%s-linux-%s-" % (self.arch_id_str_compiler, self.android_id_str)
        if self.settings.compiler == "gcc":
            cc_compiler = prename + "gcc"
            cxx_compiler = prename + "g++"
        else:
            cc_compiler = "clang"
            cxx_compiler = "clang++"
        sysroot = os.path.join(self.package_folder, "sysroot")
        self.env_info.CC = os.path.join(self.package_folder, "bin", cc_compiler)
        self.env_info.CXX = os.path.join(self.package_folder, "bin", cxx_compiler)
        self.env_info.SYSROOT = sysroot
        self.env_info.CXXFLAGS = "-std=c++11 -I%s -I%s" % (os.path.join(self.package_folder, "include", "c++", "4.9.x"), os.path.join(self.package_folder, "include", "c++", "4.9.x", "arm-linux-androideabi", "armv7-a"))
        self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = sysroot
        self.env_info.PATH.extend([os.path.join(self.package_folder, onedir) for onedir in self.cpp_info.bindirs])
        arch = {"armv8": "armv8-a", "armv7": "armv7-a", "x86": "i686"}.get(str(self.settings.arch), self.settings.arch)
        # valid arguments to '-march=' are: armv2 armv2a armv3 armv3m armv4 armv4t armv5 armv5e armv5t armv5te
        # armv6 armv6-m armv6j armv6k armv6s-m armv6t2 armv6z armv6zk armv7 armv7-a armv7-m armv7-r armv7e-m armv7ve
        # armv8-a armv8-a+crc iwmmxt iwmmxt2 native
        arch_flag = "-march=%s" % arch if ("arm" in str(arch)) else ""
        # Common flags to C, CXX and LINKER
        flags = ["-fPIC"]
        if self.settings.compiler == "clang":
            flags.append("--gcc-toolchain=%s" % tools.unix_path(self.package_folder))
            flags.append("-target %s-linux-android" % arch)
            flags.append("-D_GLIBCXX_USE_CXX11_ABI=0")
        else:
            flags.append("-pic")
        if self.settings.arch == "armv7":
            flags.append("-mfloat-abi=softfp -mfpu=vfpv3-d16")
        self.cpp_info.cflags.extend(flags)
        self.cpp_info.cflags.append(arch_flag)
        self.cpp_info.sharedlinkflags.extend(flags)
        self.cpp_info.exelinkflags.extend(flags)
        self.cpp_info.sysroot = sysroot
        if platform.system() == "Windows":
            self.cpp_info.includedirs.append(os.path.join(sysroot, "usr", "include"))
        if platform.system() == "Darwin":
            # Autotools-style host variables for macOS hosts.
            self.env_info.CHOST = prename
            self.env_info.AR = "%sar" % prename
            self.env_info.RANLIB = "%sranlib" % prename
self.env_info.ARFLAGS = "rcs" | android-toolchain/conanfile.py | import os
import platform
import shutil
from conans import ConanFile, tools
class AndroidtoolchainConan(ConanFile):
name = "android-toolchain"
version = "r17b"
license = "GPL/APACHE2"
url = "https://github.com/lasote/conan-android-toolchain"
settings = "os", "arch", "compiler"
options = {"use_system_python": [True, False], "ndk_path": "ANY"}
default_options = "use_system_python=True", "ndk_path=False"
requires = "android-ndk/%s@block/testing" % version
description = "Recipe for building an Android toolchain for cross compile Android apps from Windows/Linux/OSX"
@property
def ndk_path(self):
return os.path.expanduser(os.path.join(str(self.options.ndk_path), "build", "tools"))
def configure(self):
if self.options.ndk_path:
if os.path.exists(self.ndk_path):
del self.requires["android-ndk"]
else:
raise Exception("Invalid specified path to Android NDK: %s" % self.ndk_path)
if self.settings.os != "Android":
raise Exception("Only os Android supported")
if str(self.settings.compiler) not in ("gcc", "clang"):
raise Exception("Not supported compiler, gcc and clang available")
if str(self.settings.compiler) == "gcc" and str(self.settings.compiler.version) not in ("4.8", "4.9"):
raise Exception("Not supported gcc compiler version, 4.8 and 4.9 available")
if str(self.settings.compiler) == "clang" and str(self.settings.compiler.version) != "6.0":
raise Exception("Not supported clang compiler version, only 6.0 available")
@property
def arch_id_str(self):
return {"mips": "mipsel",
"mips64": "mips64",
"armv6": "arm",
"armv7": "arm",
"armv7hf": "arm",
"armv8": "aarch64"}.get(str(self.settings.arch),
str(self.settings.arch))
@property
def arch_id_str_compiler(self):
return {"x86": "i686",
"armv6": "arm",
"armv7": "arm",
"armv7hf": "arm",
"armv8": "aarch64",
"mips64": "mips64"}.get(str(self.settings.arch),
str(self.settings.arch))
@property
def android_id_str(self):
return "androideabi" if str(self.settings.arch) in ["armv6", "armv7"] else "android"
def build(self):
compiler_str = {"clang": "clang", "gcc": ""}.get(str(self.settings.compiler))
toolchain = "%s-linux-%s-%s%s" % (self.arch_id_str, self.android_id_str, compiler_str, self.settings.compiler.version)
# Command available in android-ndk package
# --stl => gnustl, libc++, stlport
pre_path = (self.ndk_path + "/") if self.options.ndk_path else ""
stl = {"libstdc++": "gnustl", "libstdc++11": "gnustl", "libc++": "libc++"}.get(str(self.settings.compiler.libcxx))
command = "%smake-standalone-toolchain.sh --toolchain=%s --platform=android-%s " \
"--install-dir=%s --stl=%s" % (pre_path, toolchain, self.settings.os.api_level, self.package_folder, stl)
self.output.warn(command)
# self.run("make-standalone-toolchain.sh --help")
if platform.system != "Windows":
self.run(command)
else:
tools.run_in_windows_bash(self, command)
if self.options.use_system_python:
if os.path.exists(os.path.join(self.package_folder, "bin", "python")):
os.unlink(os.path.join(self.package_folder, "bin", "python"))
if platform.system() == "Windows": # Create clang.exe to make CMake happy
dest_cc_compiler = os.path.join(self.package_folder, "bin", "clang.exe")
dest_cxx_compiler = os.path.join(self.package_folder, "bin", "clang++.exe")
src_cc_compiler = os.path.join(self.package_folder, "bin", "clang38.exe")
src_cxx_compiler = os.path.join(self.package_folder, "bin", "clang38++.exe")
shutil.copy(src_cc_compiler, dest_cc_compiler)
shutil.copy(src_cxx_compiler, dest_cxx_compiler)
if not os.path.exists(os.path.join(self.package_folder, "bin")):
raise Exception("Invalid toolchain, try a higher api_level or different architecture: %s-%s" % (self.settings.arch, self.settings.os.api_level))
def package_info(self):
prename = "%s-linux-%s-" % (self.arch_id_str_compiler, self.android_id_str)
if self.settings.compiler == "gcc":
cc_compiler = prename + "gcc"
cxx_compiler = prename + "g++"
else:
cc_compiler = "clang"
cxx_compiler = "clang++"
sysroot = os.path.join(self.package_folder, "sysroot")
self.env_info.CC = os.path.join(self.package_folder, "bin", cc_compiler)
self.env_info.CXX = os.path.join(self.package_folder, "bin", cxx_compiler)
self.env_info.SYSROOT = sysroot
self.env_info.CXXFLAGS = "-std=c++11 -I%s -I%s" % (os.path.join(self.package_folder, "include", "c++", "4.9.x"), os.path.join(self.package_folder, "include", "c++", "4.9.x", "arm-linux-androideabi", "armv7-a"))
self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = sysroot
self.env_info.PATH.extend([os.path.join(self.package_folder, onedir) for onedir in self.cpp_info.bindirs])
arch = {"armv8": "armv8-a", "armv7": "armv7-a", "x86": "i686"}.get(str(self.settings.arch), self.settings.arch)
# valid arguments to '-march=' are: armv2 armv2a armv3 armv3m armv4 armv4t armv5 armv5e armv5t armv5te
# armv6 armv6-m armv6j armv6k armv6s-m armv6t2 armv6z armv6zk armv7 armv7-a armv7-m armv7-r armv7e-m armv7ve
# armv8-a armv8-a+crc iwmmxt iwmmxt2 native
arch_flag = "-march=%s" % arch if ("arm" in str(arch)) else ""
# Common flags to C, CXX and LINKER
flags = ["-fPIC"]
if self.settings.compiler == "clang":
flags.append("--gcc-toolchain=%s" % tools.unix_path(self.package_folder))
flags.append("-target %s-linux-android" % arch)
flags.append("-D_GLIBCXX_USE_CXX11_ABI=0")
else:
flags.append("-pic")
if self.settings.arch == "armv7":
flags.append("-mfloat-abi=softfp -mfpu=vfpv3-d16")
self.cpp_info.cflags.extend(flags)
self.cpp_info.cflags.append(arch_flag)
self.cpp_info.sharedlinkflags.extend(flags)
self.cpp_info.exelinkflags.extend(flags)
self.cpp_info.sysroot = sysroot
if platform.system() == "Windows":
self.cpp_info.includedirs.append(os.path.join(sysroot, "usr", "include"))
if platform.system() == "Darwin":
self.env_info.CHOST = prename
self.env_info.AR = "%sar" % prename
self.env_info.RANLIB = "%sranlib" % prename
self.env_info.ARFLAGS = "rcs" | 0.425486 | 0.090534 |
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.signal as sig
from mskernel import util
class Kernel(object, metaclass=ABCMeta):
    """Abstract base class for kernels.

    BUG FIX: the original set ``__metaclass__ = ABCMeta``, a Python 2 idiom
    that is ignored in Python 3, so the abstract methods were never enforced.
    Declaring the metaclass explicitly restores that enforcement; all
    subclasses already implement both abstract methods.
    """

    @abstractmethod
    def eval(self, X1, X2):
        """Evaluate the kernel on data X1 and X2."""
        pass

    @abstractmethod
    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ..."""
        pass
class KHoPoly(Kernel):
    """Homogeneous polynomial kernel: k(x, y) = (x.dot(y))**degree."""

    def __init__(self, degree):
        assert degree > 0
        self.degree = degree

    def eval(self, X1, X2):
        """Gram matrix (n1 x n2) between rows of X1 (n1 x d) and X2 (n2 x d)."""
        return X1.dot(X2.T)**self.degree

    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ... for paired rows of X and Y.

        BUG FIX: the original body referenced undefined names ``X1``/``X2``
        (the parameters are ``X``/``Y``) and raised NameError on every call.
        """
        return np.sum(X*Y, 1)**self.degree

    def __str__(self):
        return 'KHoPoly(d=%d)'%self.degree
class KLinear(Kernel):
    """The plain linear (dot-product) kernel: k(x, y) = x.dot(y)."""

    def eval(self, X1, X2):
        # Gram matrix between rows of X1 (n1 x d) and X2 (n2 x d).
        return X1 @ X2.T

    def pair_eval(self, X, Y):
        # Row-wise dot products of paired samples.
        return (X * Y).sum(axis=1)

    def __str__(self):
        return "KLinear()"
class KGauss(Kernel):
    """Gaussian (RBF) kernel: k(x, y) = exp(-||x - y||^2 / sigma2).

    Note: this implementation divides by sigma2 directly (no factor of 2),
    matching the original convention.
    """

    def __init__(self, sigma2):
        assert sigma2 > 0, 'sigma2 must be > 0. Was %s'%str(sigma2)
        self.sigma2 = sigma2

    def eval(self, X1, X2):
        """
        Evaluate the Gaussian kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X1 : n1 x d numpy array
        X2 : n2 x d numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        n1, d1 = X1.shape
        n2, d2 = X2.shape
        assert d1 == d2, 'Dimensions of the two inputs must be the same'
        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, expanded for all pairs.
        sq1 = np.sum(X1**2, 1)[:, np.newaxis]
        sq2 = np.sum(X2**2, 1)
        D2 = sq1 - 2*X1.dot(X2.T) + sq2
        return np.exp(-D2/self.sigma2)

    def pair_eval(self, X, Y):
        """
        Evaluate k(x1, y1), k(x2, y2), ...

        Parameters
        ----------
        X, Y : n x d numpy array

        Return
        -------
        a numpy array with length n
        """
        n1, d1 = X.shape
        n2, d2 = Y.shape
        assert n1==n2, 'Two inputs must have the same number of instances'
        assert d1==d2, 'Two inputs must have the same dimension'
        sq_dists = np.sum((X-Y)**2, 1)
        return np.exp(-sq_dists/self.sigma2)

    def __str__(self):
        return "KGauss(%.3f)"%self.sigma2
class KTriangle(Kernel):
    """
    A triangular kernel defined on 1D. k(x, y) = B_1((x-y)/width) where B_1 is
    the B-spline function of order 1 (i.e., the triangular/hat function).

    The hat function is evaluated directly as max(1 - |t|, 0) instead of via
    scipy.signal.bspline, which was deprecated in SciPy 1.11 and removed in
    SciPy 1.15; the values are identical.
    """

    def __init__(self, width):
        assert width > 0, 'width must be > 0'
        self.width = width

    @staticmethod
    def _hat(t):
        # Order-1 B-spline (triangle): 1 - |t| on [-1, 1], zero outside.
        return np.maximum(1.0 - np.abs(t), 0.0)

    def eval(self, X1, X2):
        """
        Evaluate the triangular kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X1 : n1 x 1 numpy array
        X2 : n2 x 1 numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        (n1, d1) = X1.shape
        (n2, d2) = X2.shape
        assert d1==1, 'd1 must be 1'
        assert d2==1, 'd2 must be 1'
        diff = (X1-X2.T)/self.width
        return self._hat(diff)

    def pair_eval(self, X, Y):
        """
        Evaluate k(x1, y1), k(x2, y2), ...

        Parameters
        ----------
        X, Y : n x 1 numpy array

        Return
        -------
        a numpy array with length n
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert d1==1, 'd1 must be 1'
        assert d2==1, 'd2 must be 1'
        diff = (X-Y)/self.width
        return self._hat(diff)

    def __str__(self):
        return "KTriangle(w=%.3f)"%self.width
class KIMQ(Kernel):
    """
    The inverse multiquadric (IMQ) kernel studied in
    Measure Sample Quality with Kernels
    <NAME>, <NAME>
    k(x,y) = (c^2 + ||x-y||^2)^b
    where c > 0 and b < 0. Following a theorem in the paper, this kernel is
    convergence-determining only when -1 < b < 0. In the experiments,
    the paper sets b = -1/2 and c = 1.
    """
    def __init__(self, b=-0.5, c=1.0):
        # Parameter constraints from the kernel definition: b < 0 and c > 0.
        if not b < 0:
            raise ValueError('b has to be negative. Was {}'.format(b))
        if not c > 0:
            raise ValueError('c has to be positive. Was {}'.format(c))
        self.b = b
        self.c = c

    def eval(self, X, Y):
        """Evalute the kernel on data X (nx x d) and Y (ny x d); returns an
        nx x ny Gram matrix."""
        b = self.b
        c = self.c
        # Pairwise squared Euclidean distances between rows of X and Y.
        D2 = util.dist2_matrix(X, Y)
        K = (c**2 + D2)**b
        return K

    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...
        """
        assert X.shape[0] == Y.shape[0]
        b = self.b
        c = self.c
        return (c**2 + np.sum((X-Y)**2, 1))**b

    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        D2 = util.dist2_matrix(X, Y)
        # 1d array of length nx
        Xi = X[:, dim]
        # 1d array of length ny
        Yi = Y[:, dim]
        # nx x ny
        dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]
        b = self.b
        c = self.c
        # d/dx_dim (c^2 + ||x - y||^2)^b = 2b (c^2 + D2)^(b-1) (x_dim - y_dim)
        Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
        assert Gdim.shape[0] == X.shape[0]
        assert Gdim.shape[1] == Y.shape[0]
        return Gdim

    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        # Antisymmetric in x_dim vs y_dim, so reuse gradX_Y with a sign flip.
        return -self.gradX_Y(X, Y, dim)

    def gradXY_sum(self, X, Y):
        """
        Compute
        \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        b = self.b
        c = self.c
        D2 = util.dist2_matrix(X, Y)
        # d = input dimension
        d = X.shape[1]
        c2D2 = c**2 + D2
        # Two terms of the mixed second derivative, summed over dimensions.
        T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
        T2 = -2.0*b*d*c2D2**(b-1)
return T1 + T2 | mskernel/kernel.py |
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.signal as sig
from mskernel import util
class Kernel(object):
"""Abstract class for kernels"""
__metaclass__ = ABCMeta
@abstractmethod
def eval(self, X1, X2):
"""Evalute the kernel on data X1 and X2 """
pass
@abstractmethod
def pair_eval(self, X, Y):
"""Evaluate k(x1, y1), k(x2, y2), ..."""
pass
class KHoPoly(Kernel):
"""Homogeneous polynomial kernel of the form
(x.dot(y))**d
"""
def __init__(self, degree):
assert degree > 0
self.degree = degree
def eval(self, X1, X2):
return X1.dot(X2.T)**self.degree
def pair_eval(self, X, Y):
return np.sum(X1*X2, 1)**self.degree
def __str__(self):
return 'KHoPoly(d=%d)'%self.degree
class KLinear(Kernel):
def eval(self, X1, X2):
return X1.dot(X2.T)
def pair_eval(self, X, Y):
return np.sum(X*Y, 1)
def __str__(self):
return "KLinear()"
class KGauss(Kernel):
def __init__(self, sigma2):
assert sigma2 > 0, 'sigma2 must be > 0. Was %s'%str(sigma2)
self.sigma2 = sigma2
def eval(self, X1, X2):
"""
Evaluate the Gaussian kernel on the two 2d numpy arrays.
Parameters
----------
X1 : n1 x d numpy array
X2 : n2 x d numpy array
Return
------
K : a n1 x n2 Gram matrix.
"""
(n1, d1) = X1.shape
(n2, d2) = X2.shape
assert d1==d2, 'Dimensions of the two inputs must be the same'
D2 = np.sum(X1**2, 1)[:, np.newaxis] - 2*X1.dot(X2.T) + np.sum(X2**2, 1)
K = np.exp(-D2/self.sigma2)
return K
def pair_eval(self, X, Y):
"""
Evaluate k(x1, y1), k(x2, y2), ...
Parameters
----------
X, Y : n x d numpy array
Return
-------
a numpy array with length n
"""
(n1, d1) = X.shape
(n2, d2) = Y.shape
assert n1==n2, 'Two inputs must have the same number of instances'
assert d1==d2, 'Two inputs must have the same dimension'
D2 = np.sum( (X-Y)**2, 1)
Kvec = np.exp(-D2/self.sigma2)
return Kvec
def __str__(self):
return "KGauss(%.3f)"%self.sigma2
class KTriangle(Kernel):
"""
A triangular kernel defined on 1D. k(x, y) = B_1((x-y)/width) where B_1 is the
B-spline function of order 1 (i.e., triangular function).
"""
def __init__(self, width):
assert width > 0, 'width must be > 0'
self.width = width
def eval(self, X1, X2):
"""
Evaluate the triangular kernel on the two 2d numpy arrays.
Parameters
----------
X1 : n1 x 1 numpy array
X2 : n2 x 1 numpy array
Return
------
K : a n1 x n2 Gram matrix.
"""
(n1, d1) = X1.shape
(n2, d2) = X2.shape
assert d1==1, 'd1 must be 1'
assert d2==1, 'd2 must be 1'
diff = (X1-X2.T)/self.width
K = sig.bspline( diff , 1)
return K
def pair_eval(self, X, Y):
"""
Evaluate k(x1, y1), k(x2, y2), ...
Parameters
----------
X, Y : n x 1 numpy array
Return
-------
a numpy array with length n
"""
(n1, d1) = X.shape
(n2, d2) = Y.shape
assert d1==1, 'd1 must be 1'
assert d2==1, 'd2 must be 1'
diff = (X-Y)/self.width
Kvec = sig.bspline( diff , 1)
return Kvec
def __str__(self):
return "KTriangle(w=%.3f)"%self.width
class KIMQ(Kernel):
"""
The inverse multiquadric (IMQ) kernel studied in
Measure Sample Quality with Kernels
<NAME>, <NAME>
k(x,y) = (c^2 + ||x-y||^2)^b
where c > 0 and b < 0. Following a theorem in the paper, this kernel is
convergence-determining only when -1 < b < 0. In the experiments,
the paper sets b = -1/2 and c = 1.
"""
def __init__(self, b=-0.5, c=1.0):
if not b < 0:
raise ValueError('b has to be negative. Was {}'.format(b))
if not c > 0:
raise ValueError('c has to be positive. Was {}'.format(c))
self.b = b
self.c = c
def eval(self, X, Y):
"""Evalute the kernel on data X and Y """
b = self.b
c = self.c
D2 = util.dist2_matrix(X, Y)
K = (c**2 + D2)**b
return K
def pair_eval(self, X, Y):
"""Evaluate k(x1, y1), k(x2, y2), ...
"""
assert X.shape[0] == Y.shape[0]
b = self.b
c = self.c
return (c**2 + np.sum((X-Y)**2, 1))**b
def gradX_Y(self, X, Y, dim):
"""
Compute the gradient with respect to the dimension dim of X in k(X, Y).
X: nx x d
Y: ny x d
Return a numpy array of size nx x ny.
"""
D2 = util.dist2_matrix(X, Y)
# 1d array of length nx
Xi = X[:, dim]
# 1d array of length ny
Yi = Y[:, dim]
# nx x ny
dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]
b = self.b
c = self.c
Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
assert Gdim.shape[0] == X.shape[0]
assert Gdim.shape[1] == Y.shape[0]
return Gdim
def gradY_X(self, X, Y, dim):
"""
Compute the gradient with respect to the dimension dim of Y in k(X, Y).
X: nx x d
Y: ny x d
Return a numpy array of size nx x ny.
"""
return -self.gradX_Y(X, Y, dim)
def gradXY_sum(self, X, Y):
"""
Compute
\sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
evaluated at each x_i in X, and y_i in Y.
X: nx x d numpy array.
Y: ny x d numpy array.
Return a nx x ny numpy array of the derivatives.
"""
b = self.b
c = self.c
D2 = util.dist2_matrix(X, Y)
# d = input dimension
d = X.shape[1]
c2D2 = c**2 + D2
T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
T2 = -2.0*b*d*c2D2**(b-1)
return T1 + T2 | 0.816223 | 0.692642 |
import pytest
from tape import Tape
def test_get_content_of_non_empty_tape():
    """The head starts over the first symbol of a non-empty tape."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    assert t.get_content() == 'a'
def test_get_content_of_empty_tape():
    """An empty tape yields the blank symbol."""
    t = Tape('B', ['a', 'b', 'X', 'B'], [])
    assert t.get_content() == 'B'
def test_get_content_of_non_empty_tape_at_start_with_head_moved_to_left():
    """Moving left past the start reads blank and pins the head at position 0."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_left()
    assert t.get_content() == 'B'
    assert t.position == 0
def test_get_content_of_non_empty_tape_with_head_moved_to_right_left():
    """Right then left returns the head to the original cell."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_right()
    t.move_left()
    assert t.get_content() == 'a'
def test_get_content_of_non_empty_tape_with_head_moved_to_right():
    """Moving right reads the second symbol."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_right()
    assert t.get_content() == 'b'
def test_get_content_of_non_empty_tape_at_end_with_head_moved_to_right():
    """Moving right past the written cells reads the blank symbol."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_right()
    t.move_right()
    assert t.get_content() == 'B'
def test_move_head_left():
    """move_head('L') behaves like move_left."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_head('L')
    assert t.get_content() == 'B'
def test_move_head_right():
    """move_head('R') behaves like move_right."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_head('R')
    assert t.get_content() == 'b'
def test_move_head_stay():
    """move_head('S') leaves the head where it is."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_head('S')
    assert t.get_content() == 'a'
def test_move_head_right_left():
    """move_head('R') then move_head('L') returns to the original cell."""
    t = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
    t.move_head('R')
    t.move_head('L')
    assert t.get_content() == 'a'
def test_set_content_empty_tape():
    """Writing to an empty tape makes the symbol readable at the head."""
    t = Tape('B', ['a', 'b', 'X', 'B'], [])
    t.set_content('a')
    assert t.get_content() == 'a'
def test_set_content_empty_tape_left_left_right():
    """Writes after left/left/right moves land at position 1."""
    t = Tape('B', ['a', 'b', 'X', 'B'], [])
    t.move_left()
    t.move_left()
    t.move_right()
    t.set_content('a')
    assert t.get_content() == 'a'
    assert t.position == 1
def test_set_string_empty_tape_left_left_right_a():
    """str() of the tape shows the cells and head position after moves/writes."""
    tape = Tape('B', ['a', 'b', 'X', 'B'], [])
    tape.move_left()
    tape.move_left()
    tape.move_right()
    tape.set_content('a')
assert "(['B', 'a'])@1" == str(tape) | test_tape.py | import pytest
from tape import Tape
def test_get_content_of_non_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
assert tape.get_content() == 'a'
def test_get_content_of_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
assert tape.get_content() == 'B'
def test_get_content_of_non_empty_tape_at_start_with_head_moved_to_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_left()
assert tape.get_content() == 'B'
assert tape.position == 0
def test_get_content_of_non_empty_tape_with_head_moved_to_right_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
tape.move_left()
assert tape.get_content() == 'a'
def test_get_content_of_non_empty_tape_with_head_moved_to_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
assert tape.get_content() == 'b'
def test_get_content_of_non_empty_tape_at_end_with_head_moved_to_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
tape.move_right()
assert tape.get_content() == 'B'
def test_move_head_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('L')
assert tape.get_content() == 'B'
def test_move_head_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('R')
assert tape.get_content() == 'b'
def test_move_head_stay():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('S')
assert tape.get_content() == 'a'
def test_move_head_right_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('R')
tape.move_head('L')
assert tape.get_content() == 'a'
def test_set_content_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.set_content('a')
assert tape.get_content() == 'a'
def test_set_content_empty_tape_left_left_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.move_left()
tape.move_left()
tape.move_right()
tape.set_content('a')
assert tape.get_content() == 'a'
assert tape.position == 1
def test_set_string_empty_tape_left_left_right_a():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.move_left()
tape.move_left()
tape.move_right()
tape.set_content('a')
assert "(['B', 'a'])@1" == str(tape) | 0.59561 | 0.567577 |
# cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209
from school_sdk.client.api import BaseCrawler
class Score(BaseCrawler):
    """Crawler that fetches a student's course scores from the school's
    educational administration system and normalizes them into English-keyed
    records."""

    def __init__(self, user_client) -> None:
        super().__init__(user_client)
        # URL endpoint table from the school configuration.
        self.endpoints: dict = self.school.config['url_endpoints']
        # Raw JSON payload from the server; None until fetched.
        self.raw_score = None
        # Normalized records keyed by course name.
        self.score_dict:dict = {}
        # Normalized records in server response order.
        self.score_list:list = []

    def get_score(self, **kwargs):
        """Return the scores; alias for get_score_dict."""
        return self.get_score_dict(**kwargs)

    def get_score_list(self, **kwargs):
        """Get the scores as a list, fetching and parsing lazily.

        Returns:
            list: one normalized record per course.
        """
        if not self.score_list:
            self.parse(**kwargs)
        return self.score_list

    def get_score_dict(self, **kwargs):
        """Get the scores as a dict, fetching and parsing lazily.

        Returns:
            dict: course name -> normalized record.
        """
        if not self.score_dict:
            self.parse(**kwargs)
        return self.score_dict

    def parse(self, **kwargs):
        """Parse the raw payload into score_list/score_dict, fetching it first
        if it has not been loaded yet."""
        if self.raw_score is None:
            self.load_score(**kwargs)
        self._parse(self.raw_score)

    def load_score(self, **kwargs) -> None:
        """Fetch and cache the raw score payload."""
        self.raw_score = self._get_score(**kwargs)

    def _get_score(self, year: int, term: int = 1, **kwargs):
        """Query the score endpoint of the educational administration system.

        Args:
            year (int): academic year.
            term (int, optional): term number. Defaults to 1.

        Returns:
            json: the decoded JSON response.
        """
        url = self.endpoints['SCORE']['API']
        params = {
            'doType': 'query',
            'gnmkdm': 'N305005',
            'su': self.account
        }
        # Query payload; 'xnm'/'xqm' are the server's year/term fields and the
        # queryModel.* keys page through the result set (500 rows, page 1).
        data = {
            'xnm': year,
            'xqm': self.TERM.get(term, 3),
            '_search': False,
            'nd': self.t,
            'queryModel.showCount': 500,
            'queryModel.currentPage': 1,
            'queryModel.sortName': None,
            'queryModel.sortOrder': 'asc',
            'time': 4,
        }
        res = self.post(url=url, params=params, data=data, **kwargs)
        return res.json()

    def _parse(self, raw: dict):
        # Server field glossary (keys are pinyin abbreviations):
        # kcmc -> course name      # kcxzmc -> course nature  # kcbj -> course flag
        # jsxm -> teacher name     # khfsmc -> assessment method
        # ksxz -> exam nature      # xf -> credits            # cj -> score
        # kkbmmc -> offering department name                  # njdm_id -> cohort/grade code
        """Normalize the raw response into score_list and score_dict.

        Args:
            raw (dict): the raw JSON payload from the server.
        """
        items = raw.get('items')
        for item in items:
            format_item = {
                "course_name": item.get('kcmc'),
                'course_nature': item.get('kcxzmc'),
                'course_target': item.get('kcbj'),
                'teacher': item.get('jsxm'),
                'exam_method': item.get('khfsmc'),
                'exam_nature': item.get('ksxz'),
                'exam_result': item.get('cj'),
                'credit': item.get('xf'),
                'course_group': item.get('kkbmmc'),
                'grade': item.get('njdm_id')
            }
            self.score_list.append(format_item)
self.score_dict.setdefault(item.get('kcmc'), format_item) | school_sdk/client/api/score.py | # cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209
from school_sdk.client.api import BaseCrawler
class Score(BaseCrawler):
    """Crawler for exam/course scores from the academic ("jwxt") system.

    Results are cached on the instance: ``score_list`` (records in server
    order) and ``score_dict`` (keyed by course name). Both are filled lazily
    on first access.
    """

    def __init__(self, user_client) -> None:
        super().__init__(user_client)
        # URL endpoint table comes from the school-level configuration.
        self.endpoints: dict = self.school.config['url_endpoints']
        self.raw_score = None       # raw JSON payload as returned by the server
        self.score_dict: dict = {}  # course name -> formatted record
        self.score_list: list = []  # formatted records in server order

    def get_score(self, **kwargs):
        # Convenience alias: the dict form is the default representation.
        return self.get_score_dict(**kwargs)

    def get_score_list(self, **kwargs):
        """Return the scores as a list, fetching and parsing on first call.

        Returns:
            list: score records
        """
        if not self.score_list:
            self.parse(**kwargs)
        return self.score_list

    def get_score_dict(self, **kwargs):
        """Return the scores keyed by course name, fetching on first call.

        Returns:
            dict: course name -> score record
        """
        if not self.score_dict:
            self.parse(**kwargs)
        return self.score_dict

    def parse(self, **kwargs):
        """Fetch the raw payload if needed, then normalise it."""
        if self.raw_score is None:
            self.load_score(**kwargs)
        self._parse(self.raw_score)

    def load_score(self, **kwargs) -> None:
        """Fetch the raw score JSON from the academic system."""
        self.raw_score = self._get_score(**kwargs)

    def _get_score(self, year: int, term: int = 1, **kwargs):
        """Query the academic system's score endpoint.

        Args:
            year (int): academic year
            term (int, optional): term number. Defaults to 1.

        Returns:
            json: decoded JSON response
        """
        url = self.endpoints['SCORE']['API']
        params = {
            'doType': 'query',
            'gnmkdm': 'N305005',
            'su': self.account
        }
        data = {
            'xnm': year,
            # TERM maps a human term number to the system's internal code;
            # 3 is the fallback code used when the term is unknown.
            'xqm': self.TERM.get(term, 3),
            '_search': False,
            'nd': self.t,
            'queryModel.showCount': 500,
            'queryModel.currentPage': 1,
            'queryModel.sortName': None,
            'queryModel.sortOrder': 'asc',
            'time': 4,
        }
        res = self.post(url=url, params=params, data=data, **kwargs)
        return res.json()

    def _parse(self, raw: dict):
        """Normalise the system's cryptic field names into readable keys.

        Field glossary: kcmc=course name, kcxzmc=course nature, kcbj=course
        mark, jsxm=teacher, khfsmc=assessment method, ksxz=exam nature,
        xf=credit, kkbmmc=offering department, cj=score, njdm_id=grade code.

        Args:
            raw (dict): raw payload from the academic system
        """
        items = raw.get('items')
        for item in items:
            format_item = {
                "course_name": item.get('kcmc'),
                'course_nature': item.get('kcxzmc'),
                'course_target': item.get('kcbj'),
                'teacher': item.get('jsxm'),
                'exam_method': item.get('khfsmc'),
                'exam_nature': item.get('ksxz'),
                'exam_result': item.get('cj'),
                'credit': item.get('xf'),
                'course_group': item.get('kkbmmc'),
                'grade': item.get('njdm_id')
            }
            self.score_list.append(format_item)
            # setdefault keeps the first record if a course name repeats.
            self.score_dict.setdefault(item.get('kcmc'), format_item)
from musurgia.random import Random
class ReadAList(object):
    """Read values from a pool in one of four modes: 'forwards', 'backwards',
    'zickzack' (bounce back and forth between the ends) or 'random'
    (delegated to :class:`musurgia.random.Random`).
    """

    def __init__(self, pool=None, mode='random', seed=None):
        self._pool = None
        self._mode = None
        self._random = None
        self._index = None       # current position within the pool
        self._direction = 1      # +1 forwards, -1 backwards; zickzack flips it
        self._next_index = None  # externally forced index for the next call
        self.pool = pool
        self.mode = mode
        self.seed = seed

    @property
    def pool(self):
        return self._pool

    @pool.setter
    def pool(self, values):
        if values is not None:
            try:
                self._pool = list(values)
            except TypeError:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only a non-iterable value
                # (TypeError from list()) should be wrapped in a one-element
                # pool.
                self._pool = [values]
            self.random.pool = self.pool

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        if value not in ['forwards', 'backwards', 'zickzack', 'random']:
            err = 'mode can only be forwards, backwards, zickzack or random'
            raise ValueError(err)
        self._mode = value

    @property
    def random(self):
        # Created lazily so the random machinery costs nothing when unused.
        if self._random is None:
            self._random = Random()
        return self._random

    @property
    def seed(self):
        return self.random.seed

    @seed.setter
    def seed(self, value):
        self.random.seed = value

    @property
    def next_index(self):
        # Write-only: the forced index is consumed by next() and not
        # readable afterwards.
        err = 'next_index can only be set'
        raise AttributeError(err)

    @next_index.setter
    def next_index(self, value):
        self._next_index = value

    def _set_next_index(self):
        # Advance one step in the direction implied by the mode; zickzack
        # keeps whatever direction _check_index() last chose.
        if self.mode == 'forwards':
            self._direction = 1
        elif self.mode == 'backwards':
            self._direction = -1
        elif self.mode == 'zickzack':
            pass
        self._index += self._direction

    def _check_index(self):
        # Wrap/clamp the cursor; for zickzack, bounce at both ends.
        if self.mode == 'forwards':
            if self._index >= len(self.pool):
                self._index = 0
        elif self.mode == 'backwards':
            if self._index >= len(self.pool):
                self._index = len(self.pool) - 1
            elif self._index < 0:
                self._index = len(self.pool) - 1
        elif self.mode == 'zickzack':
            if self._index == len(self.pool) - 1:
                self._direction = -1
            elif self._index > len(self.pool) - 1:
                self._index = len(self.pool) - 1
                self._direction = -1
            elif self._index == 0:
                self._direction = 1
            elif self._index < 0:
                self._index = 1
                self._direction = 1

    def next(self):
        """Return the next value from the pool according to the mode.

        Raises:
            AttributeError: if no pool has been set.
        """
        if self.pool is None:
            err = 'pool can not be None'
            raise AttributeError(err)
        if self.mode != 'random':
            if self._next_index is None and self._index is None:
                # First call: start at the appropriate end of the pool.
                if self.mode == 'backwards':
                    self._next_index = len(self.pool) - 1
                else:
                    self._next_index = 0
            if self._next_index is None:
                self._set_next_index()
            else:
                # A forced index (first call or next_index setter) wins.
                self._index = self._next_index
                self._next_index = None
            self._check_index()
            return self.pool[self._index]
        else:
            return self.random.next()
class ReadAList(object):
    """Read values from a pool in one of four modes: 'forwards', 'backwards',
    'zickzack' (bounce back and forth between the ends) or 'random'
    (delegated to :class:`musurgia.random.Random`).
    """

    def __init__(self, pool=None, mode='random', seed=None):
        self._pool = None
        self._mode = None
        self._random = None
        self._index = None       # current position within the pool
        self._direction = 1      # +1 forwards, -1 backwards; zickzack flips it
        self._next_index = None  # externally forced index for the next call
        self.pool = pool
        self.mode = mode
        self.seed = seed

    @property
    def pool(self):
        return self._pool

    @pool.setter
    def pool(self, values):
        if values is not None:
            try:
                self._pool = list(values)
            except TypeError:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only a non-iterable value
                # (TypeError from list()) should be wrapped in a one-element
                # pool.
                self._pool = [values]
            self.random.pool = self.pool

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        if value not in ['forwards', 'backwards', 'zickzack', 'random']:
            err = 'mode can only be forwards, backwards, zickzack or random'
            raise ValueError(err)
        self._mode = value

    @property
    def random(self):
        # Created lazily so the random machinery costs nothing when unused.
        if self._random is None:
            self._random = Random()
        return self._random

    @property
    def seed(self):
        return self.random.seed

    @seed.setter
    def seed(self, value):
        self.random.seed = value

    @property
    def next_index(self):
        # Write-only: the forced index is consumed by next() and not
        # readable afterwards.
        err = 'next_index can only be set'
        raise AttributeError(err)

    @next_index.setter
    def next_index(self, value):
        self._next_index = value

    def _set_next_index(self):
        # Advance one step in the direction implied by the mode; zickzack
        # keeps whatever direction _check_index() last chose.
        if self.mode == 'forwards':
            self._direction = 1
        elif self.mode == 'backwards':
            self._direction = -1
        elif self.mode == 'zickzack':
            pass
        self._index += self._direction

    def _check_index(self):
        # Wrap/clamp the cursor; for zickzack, bounce at both ends.
        if self.mode == 'forwards':
            if self._index >= len(self.pool):
                self._index = 0
        elif self.mode == 'backwards':
            if self._index >= len(self.pool):
                self._index = len(self.pool) - 1
            elif self._index < 0:
                self._index = len(self.pool) - 1
        elif self.mode == 'zickzack':
            if self._index == len(self.pool) - 1:
                self._direction = -1
            elif self._index > len(self.pool) - 1:
                self._index = len(self.pool) - 1
                self._direction = -1
            elif self._index == 0:
                self._direction = 1
            elif self._index < 0:
                self._index = 1
                self._direction = 1

    def next(self):
        """Return the next value from the pool according to the mode.

        Raises:
            AttributeError: if no pool has been set.
        """
        if self.pool is None:
            err = 'pool can not be None'
            raise AttributeError(err)
        if self.mode != 'random':
            if self._next_index is None and self._index is None:
                # First call: start at the appropriate end of the pool.
                if self.mode == 'backwards':
                    self._next_index = len(self.pool) - 1
                else:
                    self._next_index = 0
            if self._next_index is None:
                self._set_next_index()
            else:
                # A forced index (first call or next_index setter) wins.
                self._index = self._next_index
                self._next_index = None
            self._check_index()
            return self.pool[self._index]
        else:
            return self.random.next()
import numpy as np
from scipy.special import lpmv, gamma, hyp1f1, legendre
from scipy.special.orthogonal import genlaguerre
from scipy.misc import factorial
_default_rank = 4
class SphericalHarmonics:
    """This class describes a real, antipodally symmetric spherical function by
    its spherical harmonics coefficients. It also contains a set of static
    methods related to the definition and manipulation of spherical harmonics.

    Parameters
    ----------
    coefficients : array-like, shape (R, )
        A 1d array of coefficients representing the function.
    """

    def __init__(self, coefficients):
        self._create_from_coefficients(coefficients)

    def _create_from_coefficients(self, coefficients):
        # Only even ranks occur (antipodal symmetry); find the rank whose
        # basis dimension matches the number of coefficients supplied.
        rank = 2
        while True:
            dim_sh = dimension(rank)
            if len(coefficients) == dim_sh:
                self.rank = rank
                self.coefficients = coefficients
                return
            elif len(coefficients) < dim_sh:
                raise ValueError("Invalid dimension for SH coefficients.")
            rank += 2

    def get_rank(self):
        return self._rank

    def set_rank(self, value):
        # Odd ranks are rejected: the basis only contains even degrees.
        if value % 2 != 0:
            raise ValueError("'rank' only accepts even values.")
        self._rank = value
    rank = property(get_rank, set_rank)

    def get_coefficients(self):
        return self._coefficients

    def set_coefficients(self, value):
        if value.shape[0] != dimension(self.rank):
            raise ValueError("Coefficients shape and rank mismatch.")
        self._coefficients = value
    coefficients = property(get_coefficients, set_coefficients)

    def angular_function(self, theta, phi):
        """Computes the function at angles theta, phi.

        Parameters
        ----------
        theta : array-like
            Polar angles, using the physics convention.
        phi : array-like
            Azimuthal angle, using the physics convention.
        """
        coefs = self.coefficients
        result = 0
        rank = self.rank
        # Real SH basis: negative orders pair with cos(m*phi), positive with
        # sin(m*phi); the sqrt(2) restores normalisation for m != 0 terms.
        for l in range(0, rank+1, 2):
            for m in range(-l, l+1):
                j = index_j(l, m)
                if coefs[j] != 0.0:  # skip zero terms (sparsity shortcut)
                    if m < 0:
                        result += coefs[j] * np.sqrt(2) \
                            * np.sqrt((2*l + 1) * factorial(l + m) \
                            / (4 * np.pi * factorial(l - m))) \
                            * (-1) ** (-m) \
                            * lpmv(-m, l, np.cos(theta)) * np.cos(m * phi)
                    if m == 0:
                        result += coefs[j] \
                            * np.sqrt((2*l + 1) * factorial(l - m) \
                            / (4 * np.pi * factorial(l + m))) \
                            * lpmv(m, l, np.cos(theta))
                    if m > 0:
                        result += coefs[j] * np.sqrt(2) \
                            * np.sqrt((2*l + 1) * factorial(l - m) \
                            / (4 * np.pi * factorial(l + m))) \
                            * lpmv(m, l, np.cos(theta)) * np.sin(m * phi)
        return result
def dimension(rank):
    """Return the dimension of the (even-degree) SH basis truncated at rank.

    Uses integer (floor) division so the result is an ``int``: the previous
    true division returned a ``float`` under Python 3, which breaks callers
    that use the value as an array size or a ``range`` bound. The value is
    unchanged because (rank + 1) * (rank + 2) is always even.
    """
    return (rank + 1) * (rank + 2) // 2
def index_j(l, m):
    "Returns the flattened index j of spherical harmonics."
    if np.abs(m) > l:
        raise NameError('SphericalHarmonics.j: m must lie in [-l, l]')
    # Count the elements of all even degrees strictly below l, then add the
    # offset (l + m) of order m within degree l.
    preceding = sum(2 * k + 1 for k in range(0, l, 2))
    return int(l + m + preceding)
def index_l(j):
    "Returns the degree l of SH associated to index j"
    # Walk even degrees upward until the basis through `degree` covers j.
    degree = 0
    while j > dimension(degree) - 1:
        degree += 2
    return degree
def index_m(j):
    "Returns the order m of SH associated to index j"
    degree = index_l(j)
    # Within degree l the orders run from -l to l; invert the flattening.
    return j + degree + 1 - dimension(degree)
def matrix(theta, phi, rank=_default_rank):
    """Returns the spherical harmonics observation matrix for a given set
    of directions represented by their polar and azimuthal angles.

    Parameters
    ----------
    theta : array-like, shape (K, )
        Polar angles of the direction set.
    phi : array-like, shape (K, )
        Azimuthal angles of the direction set.
    rank : int
        The truncation rank of the SH basis.

    Returns
    -------
    H : array-like, shape (K, R)
        The observation matrix corresponding to the direction set passed as
        input.
    """
    # NOTE(review): dimension() historically used true division and returned
    # a float under Python 3; np.zeros/range below then need an int — confirm.
    dim_sh = dimension(rank)
    sh = SphericalHarmonics(np.zeros(dim_sh))
    N = theta.shape[0]
    H = np.zeros((N, dim_sh))
    # Column j is basis function Y_j evaluated at all K directions: set a
    # one-hot coefficient vector and evaluate it over the direction set.
    for j in range(dim_sh):
        sh.coefficients[:] = 0
        sh.coefficients[j] = 1.0
        H[:, j] = sh.angular_function(theta, phi)
    return H
def L(rank=_default_rank):
    """Returns Laplace-Beltrami regularization matrix.

    Fixes a NameError in the previous version, which allocated ``dim_sh`` but
    then referred to the undefined name ``dimSH``.

    Parameters
    ----------
    rank : int
        The truncation rank of the SH basis.
    """
    # int() guards against dimension() returning a float (true division
    # under Python 3), which np.zeros/range reject.
    dim_sh = int(dimension(rank))
    mat = np.zeros((dim_sh, dim_sh))
    for j in range(dim_sh):
        l = index_l(j)
        # Eigenvalue of the Laplace-Beltrami operator on degree-l harmonics.
        mat[j, j] = - (l * (l + 1))
    return mat
def P(rank=_default_rank):
    """Returns the Funk-Radon operator matrix (diagonal in the SH basis,
    with entry 2*pi*P_l(0) for degree l).

    Parameters
    ----------
    rank : int
        The truncation rank of the SH basis.
    """
    # int() guards against dimension() returning a float (true division
    # under Python 3), which np.zeros/range reject.
    dim_sh = int(dimension(rank))
    mat = np.zeros((dim_sh, dim_sh))
    for j in range(dim_sh):
        l = index_l(j)
        mat[j, j] = 2 * np.pi * legendre(l)(0)
    return mat
import numpy as np
from scipy.special import lpmv, gamma, hyp1f1, legendre
from scipy.special.orthogonal import genlaguerre
from scipy.misc import factorial
_default_rank = 4
class SphericalHarmonics:
    """This class describes a real, antipodally symmetric spherical function by
    its spherical harmonics coefficients. It also contains a set of static
    methods related to the definition and manipulation of spherical harmonics.

    Parameters
    ----------
    coefficients : array-like, shape (R, )
        A 1d array of coefficients representing the function.
    """

    def __init__(self, coefficients):
        self._create_from_coefficients(coefficients)

    def _create_from_coefficients(self, coefficients):
        # Only even ranks occur (antipodal symmetry); find the rank whose
        # basis dimension matches the number of coefficients supplied.
        rank = 2
        while True:
            dim_sh = dimension(rank)
            if len(coefficients) == dim_sh:
                self.rank = rank
                self.coefficients = coefficients
                return
            elif len(coefficients) < dim_sh:
                raise ValueError("Invalid dimension for SH coefficients.")
            rank += 2

    def get_rank(self):
        return self._rank

    def set_rank(self, value):
        # Odd ranks are rejected: the basis only contains even degrees.
        if value % 2 != 0:
            raise ValueError("'rank' only accepts even values.")
        self._rank = value
    rank = property(get_rank, set_rank)

    def get_coefficients(self):
        return self._coefficients

    def set_coefficients(self, value):
        if value.shape[0] != dimension(self.rank):
            raise ValueError("Coefficients shape and rank mismatch.")
        self._coefficients = value
    coefficients = property(get_coefficients, set_coefficients)

    def angular_function(self, theta, phi):
        """Computes the function at angles theta, phi.

        Parameters
        ----------
        theta : array-like
            Polar angles, using the physics convention.
        phi : array-like
            Azimuthal angle, using the physics convention.
        """
        coefs = self.coefficients
        result = 0
        rank = self.rank
        # Real SH basis: negative orders pair with cos(m*phi), positive with
        # sin(m*phi); the sqrt(2) restores normalisation for m != 0 terms.
        for l in range(0, rank+1, 2):
            for m in range(-l, l+1):
                j = index_j(l, m)
                if coefs[j] != 0.0:  # skip zero terms (sparsity shortcut)
                    if m < 0:
                        result += coefs[j] * np.sqrt(2) \
                            * np.sqrt((2*l + 1) * factorial(l + m) \
                            / (4 * np.pi * factorial(l - m))) \
                            * (-1) ** (-m) \
                            * lpmv(-m, l, np.cos(theta)) * np.cos(m * phi)
                    if m == 0:
                        result += coefs[j] \
                            * np.sqrt((2*l + 1) * factorial(l - m) \
                            / (4 * np.pi * factorial(l + m))) \
                            * lpmv(m, l, np.cos(theta))
                    if m > 0:
                        result += coefs[j] * np.sqrt(2) \
                            * np.sqrt((2*l + 1) * factorial(l - m) \
                            / (4 * np.pi * factorial(l + m))) \
                            * lpmv(m, l, np.cos(theta)) * np.sin(m * phi)
        return result
def dimension(rank):
    """Return the dimension of the (even-degree) SH basis truncated at rank.

    Uses integer (floor) division so the result is an ``int``: the previous
    true division returned a ``float`` under Python 3, which breaks callers
    that use the value as an array size or a ``range`` bound. The value is
    unchanged because (rank + 1) * (rank + 2) is always even.
    """
    return (rank + 1) * (rank + 2) // 2
def index_j(l, m):
    "Returns the flattened index j of spherical harmonics."
    if np.abs(m) > l:
        raise NameError('SphericalHarmonics.j: m must lie in [-l, l]')
    # Count the elements of all even degrees strictly below l, then add the
    # offset (l + m) of order m within degree l.
    preceding = sum(2 * k + 1 for k in range(0, l, 2))
    return int(l + m + preceding)
def index_l(j):
    "Returns the degree l of SH associated to index j"
    # Walk even degrees upward until the basis through `degree` covers j.
    degree = 0
    while j > dimension(degree) - 1:
        degree += 2
    return degree
def index_m(j):
    "Returns the order m of SH associated to index j"
    degree = index_l(j)
    # Within degree l the orders run from -l to l; invert the flattening.
    return j + degree + 1 - dimension(degree)
def matrix(theta, phi, rank=_default_rank):
    """Returns the spherical harmonics observation matrix for a given set
    of directions represented by their polar and azimuthal angles.

    Parameters
    ----------
    theta : array-like, shape (K, )
        Polar angles of the direction set.
    phi : array-like, shape (K, )
        Azimuthal angles of the direction set.
    rank : int
        The truncation rank of the SH basis.

    Returns
    -------
    H : array-like, shape (K, R)
        The observation matrix corresponding to the direction set passed as
        input.
    """
    # NOTE(review): dimension() historically used true division and returned
    # a float under Python 3; np.zeros/range below then need an int — confirm.
    dim_sh = dimension(rank)
    sh = SphericalHarmonics(np.zeros(dim_sh))
    N = theta.shape[0]
    H = np.zeros((N, dim_sh))
    # Column j is basis function Y_j evaluated at all K directions: set a
    # one-hot coefficient vector and evaluate it over the direction set.
    for j in range(dim_sh):
        sh.coefficients[:] = 0
        sh.coefficients[j] = 1.0
        H[:, j] = sh.angular_function(theta, phi)
    return H
def L(rank=_default_rank):
    """Returns Laplace-Beltrami regularization matrix.

    Fixes a NameError in the previous version, which allocated ``dim_sh`` but
    then referred to the undefined name ``dimSH``.

    Parameters
    ----------
    rank : int
        The truncation rank of the SH basis.
    """
    # int() guards against dimension() returning a float (true division
    # under Python 3), which np.zeros/range reject.
    dim_sh = int(dimension(rank))
    mat = np.zeros((dim_sh, dim_sh))
    for j in range(dim_sh):
        l = index_l(j)
        # Eigenvalue of the Laplace-Beltrami operator on degree-l harmonics.
        mat[j, j] = - (l * (l + 1))
    return mat
def P(rank=_default_rank):
    """Returns the Funk-Radon operator matrix (diagonal in the SH basis,
    with entry 2*pi*P_l(0) for degree l).

    Parameters
    ----------
    rank : int
        The truncation rank of the SH basis.
    """
    # int() guards against dimension() returning a float (true division
    # under Python 3), which np.zeros/range reject.
    dim_sh = int(dimension(rank))
    mat = np.zeros((dim_sh, dim_sh))
    for j in range(dim_sh):
        l = index_l(j)
        mat[j, j] = 2 * np.pi * legendre(l)(0)
    return mat
import collections
import re
from copy import copy
from decimal import Decimal
from beancount.core.data import Custom, Transaction
from beancount.core.amount import Amount, add, sub, mul, div
from beancount.core import account, getters, realization
# Plugin entry points picked up by beancount's loader.
__plugins__ = ['balexpr']

# Error record in the shape beancount reports: (source metadata, message, entry).
BalExprError = collections.namedtuple('BalExprError', 'source message entry')
def compute_stack(stack):
    """Fold a [value, op, value, op, value, ...] stack of '+'/'-' operations
    left-to-right; the running total is accumulated in stack[0] and returned."""
    for op_pos in range(1, len(stack), 2):
        operand = stack[op_pos + 1]
        if stack[op_pos] == '+':
            stack[0] = add(stack[0], operand)
        elif stack[op_pos] == '-':
            stack[0] = sub(stack[0], operand)
    return stack[0]
def push_amount_into_stack(stack, amount):
    """Push `amount`, eagerly applying a pending '*' or '/' operator so that
    multiplicative operators bind tighter than '+'/'-'. """
    if stack:
        pending = stack[-1]
        if pending == '*':
            stack.pop()
            stack[-1] = mul(stack[-1], amount.number)
            return
        if pending == '/':
            stack.pop()
            stack[-1] = div(stack[-1], amount.number)
            return
    stack.append(amount)
def get_balance(account, currency, real_root):
    """Return the balance of `account` plus all of its sub-accounts,
    expressed in units of `currency`."""
    node = realization.get(real_root, account)
    subtree = realization.compute_balance(node, leaf_only=False)
    return subtree.get_currency_units(currency)
def calcuate(expr, currency, real_root):
    """Evaluate a balance expression over the realization tree.

    (Name keeps the historical misspelling for API compatibility.)

    Grammar: account names and numeric literals combined with + - * / and
    parentheses. '*' and '/' are applied eagerly as operands arrive (so they
    bind tighter); '+'/'-' are folded left-to-right by compute_stack().

    Args:
        expr (str): expression, e.g. "Assets:Cash + 2 * Assets:Bank".
        currency (str): currency whose units are summed per account.
        real_root: realization tree holding the running balances.

    Returns:
        (amount, None) on success, (None, error message) on parse error.
    """
    stack = []
    paren = []      # positions in `stack` where each open '(' was pushed
    balances = {}   # memoized per-account balances within this expression
    pos = 0
    while pos < len(expr):
        ch = expr[pos]
        if str.isalpha(ch):
            # Account name: starts with a letter, continues with
            # letters/digits/':' separators.
            start = pos
            while pos < len(expr) and (str.isalnum(expr[pos]) or expr[pos] == ':'):
                pos += 1
            account = expr[start:pos]
            if account in balances:
                amount = balances[account]
            else:
                amount = get_balance(account, currency, real_root)
                balances[account] = amount
            push_amount_into_stack(stack, amount)
        elif str.isnumeric(ch):
            # Numeric literal, parsed as a Decimal in the target currency.
            start = pos
            while pos < len(expr) and (str.isnumeric(expr[pos]) or expr[pos] == '.'):
                pos += 1
            push_amount_into_stack(stack, Amount(Decimal(expr[start:pos]), currency))
        elif ch in ['+', '-', '*', '/']:
            stack.append(ch)
            pos += 1
        elif ch == '(':
            paren.append(len(stack))
            stack.append(ch)
            pos += 1
        elif ch == ')':
            # Reduce everything since the matching '(' and push the result.
            result = compute_stack(stack[paren[-1] + 1:])
            stack = stack[:paren[-1]]
            push_amount_into_stack(stack, result)
            paren.pop()
            pos += 1
        elif ch in [' ', '\t', '\r', '\n']:
            pos += 1
        else:
            return None, 'Unknown char \'{}\''.format(ch)
    if paren:
        return None, 'Unclosed paren detected'
    return compute_stack(stack), None
def is_balexpr_entry(entry):
    """True when `entry` is a `custom "balexpr"` directive."""
    if not isinstance(entry, Custom):
        return False
    return entry.type == 'balexpr'
def get_expression_from_entry(entry):
    """Return the first custom value: the balance expression string."""
    first_value = entry.values[0]
    return first_value.value
def get_expected_amount_from_entry(entry):
    """Return the second custom value: the expected amount."""
    second_value = entry.values[1]
    return second_value.value
def get_accounts_from_entry(entry):
    """Return every account name referenced in the entry's expression.

    Returns a list: the previous version returned a single-use ``map``
    iterator, which is always truthy and can only be consumed once, so the
    caller's "no accounts" check could never fire. The regex is now a raw
    string to avoid invalid-escape warnings on ``\\w``.
    """
    expression = get_expression_from_entry(entry)
    return [match[0] for match in
            re.findall(r'((Assets|Liabilities|Expenses|Equity)(:\w+)+)',
                       expression)]
def balexpr(entries, options_map):
    """Beancount plugin: verify `custom "balexpr" "<expr>" <amount>` directives.

    Replays the entries in order, maintaining running balances for every
    account referenced by some balexpr directive, and checks each directive's
    expression value against its expected amount (tolerance 0.005). Each
    directive is checked before same-day transactions are applied.

    Args:
        entries: the full entry stream from the loader.
        options_map: beancount options (unused).

    Returns:
        (entries, errors): the unmodified entries and a list of BalExprError.
    """
    errors = []
    real_root = realization.RealAccount('')
    balexpr_entries = [
        entry
        for entry in entries
        if is_balexpr_entry(entry)]

    # Only realize accounts some expression actually references (or their
    # sub-accounts), keeping the realization tree small.
    asserted_accounts = {
        account_
        for entry in balexpr_entries
        for account_ in get_accounts_from_entry(entry)}
    asserted_match_list = [
        account.parent_matcher(account_)
        for account_ in asserted_accounts]
    for account_ in getters.get_accounts(entries):
        if (account_ in asserted_accounts or
                any(match(account_) for match in asserted_match_list)):
            realization.get_or_create(real_root, account_)

    open_close_map = getters.get_account_open_close(entries)

    current_checking_balexpr_entry = 0
    for entry in entries:
        if current_checking_balexpr_entry >= len(balexpr_entries):
            break
        while current_checking_balexpr_entry < len(balexpr_entries) and balexpr_entries[current_checking_balexpr_entry].date == entry.date:
            checking_entry = balexpr_entries[current_checking_balexpr_entry]
            current_checking_balexpr_entry += 1

            # Fix: materialize as a list. The previous map object was always
            # truthy, so the "no account" error below could never trigger.
            accounts = list(get_accounts_from_entry(checking_entry))
            if not accounts:
                errors.append(BalExprError(
                    checking_entry.meta,
                    'No account found in the expression',
                    checking_entry))
                continue

            currency = get_expected_amount_from_entry(checking_entry).currency
            error_found_in_currencies = False
            for account_ in accounts:
                try:
                    # Renamed from `open` to avoid shadowing the builtin.
                    open_entry, _ = open_close_map[account_]
                except KeyError:
                    errors.append(BalExprError(
                        checking_entry.meta,
                        'Invalid reference to unknown account \'{}\''.format(account_),
                        checking_entry))
                    error_found_in_currencies = True
                    break
                # NOTE(review): open_entry.currencies may be None when the
                # Open directive lists no currencies; `in None` would raise —
                # confirm against real inputs.
                if currency not in open_entry.currencies:
                    errors.append(BalExprError(
                        checking_entry.meta,
                        'Currencies are inconsistent',
                        checking_entry))
                    error_found_in_currencies = True
                    break
            if error_found_in_currencies:
                continue

            expression = get_expression_from_entry(checking_entry)
            expected_amount = get_expected_amount_from_entry(checking_entry)
            real_amount, error_msg = calcuate(expression, currency, real_root)
            if error_msg:
                errors.append(BalExprError(checking_entry.meta, error_msg, checking_entry))
                continue

            diff_amount = sub(real_amount, expected_amount)
            if abs(diff_amount.number) > 0.005:
                errors.append(BalExprError(
                    checking_entry.meta,
                    "BalExpr failed: expected {} != accumulated {} ({} {})".format(
                        expected_amount,
                        real_amount,
                        abs(diff_amount.number),
                        ('too much'
                         if diff_amount.number > 0
                         else 'too little')),
                    checking_entry))

        # Apply this entry's postings to the running balances.
        if isinstance(entry, Transaction):
            for posting in entry.postings:
                real_account = realization.get(real_root, posting.account)
                if real_account is not None:
                    real_account.balance.add_position(posting)

    return entries, errors
import collections
import re
from copy import copy
from decimal import Decimal
from beancount.core.data import Custom, Transaction
from beancount.core.amount import Amount, add, sub, mul, div
from beancount.core import account, getters, realization
# Plugin entry points picked up by beancount's loader.
__plugins__ = ['balexpr']

# Error record in the shape beancount reports: (source metadata, message, entry).
BalExprError = collections.namedtuple('BalExprError', 'source message entry')
def compute_stack(stack):
    """Fold a [value, op, value, op, value, ...] stack of '+'/'-' operations
    left-to-right; the running total is accumulated in stack[0] and returned."""
    for op_pos in range(1, len(stack), 2):
        operand = stack[op_pos + 1]
        if stack[op_pos] == '+':
            stack[0] = add(stack[0], operand)
        elif stack[op_pos] == '-':
            stack[0] = sub(stack[0], operand)
    return stack[0]
def push_amount_into_stack(stack, amount):
    """Push `amount`, eagerly applying a pending '*' or '/' operator so that
    multiplicative operators bind tighter than '+'/'-'. """
    if stack:
        pending = stack[-1]
        if pending == '*':
            stack.pop()
            stack[-1] = mul(stack[-1], amount.number)
            return
        if pending == '/':
            stack.pop()
            stack[-1] = div(stack[-1], amount.number)
            return
    stack.append(amount)
def get_balance(account, currency, real_root):
    """Return the balance of `account` plus all of its sub-accounts,
    expressed in units of `currency`."""
    node = realization.get(real_root, account)
    subtree = realization.compute_balance(node, leaf_only=False)
    return subtree.get_currency_units(currency)
def calcuate(expr, currency, real_root):
    """Evaluate a balance expression over the realization tree.

    (Name keeps the historical misspelling for API compatibility.)

    Grammar: account names and numeric literals combined with + - * / and
    parentheses. '*' and '/' are applied eagerly as operands arrive (so they
    bind tighter); '+'/'-' are folded left-to-right by compute_stack().

    Args:
        expr (str): expression, e.g. "Assets:Cash + 2 * Assets:Bank".
        currency (str): currency whose units are summed per account.
        real_root: realization tree holding the running balances.

    Returns:
        (amount, None) on success, (None, error message) on parse error.
    """
    stack = []
    paren = []      # positions in `stack` where each open '(' was pushed
    balances = {}   # memoized per-account balances within this expression
    pos = 0
    while pos < len(expr):
        ch = expr[pos]
        if str.isalpha(ch):
            # Account name: starts with a letter, continues with
            # letters/digits/':' separators.
            start = pos
            while pos < len(expr) and (str.isalnum(expr[pos]) or expr[pos] == ':'):
                pos += 1
            account = expr[start:pos]
            if account in balances:
                amount = balances[account]
            else:
                amount = get_balance(account, currency, real_root)
                balances[account] = amount
            push_amount_into_stack(stack, amount)
        elif str.isnumeric(ch):
            # Numeric literal, parsed as a Decimal in the target currency.
            start = pos
            while pos < len(expr) and (str.isnumeric(expr[pos]) or expr[pos] == '.'):
                pos += 1
            push_amount_into_stack(stack, Amount(Decimal(expr[start:pos]), currency))
        elif ch in ['+', '-', '*', '/']:
            stack.append(ch)
            pos += 1
        elif ch == '(':
            paren.append(len(stack))
            stack.append(ch)
            pos += 1
        elif ch == ')':
            # Reduce everything since the matching '(' and push the result.
            result = compute_stack(stack[paren[-1] + 1:])
            stack = stack[:paren[-1]]
            push_amount_into_stack(stack, result)
            paren.pop()
            pos += 1
        elif ch in [' ', '\t', '\r', '\n']:
            pos += 1
        else:
            return None, 'Unknown char \'{}\''.format(ch)
    if paren:
        return None, 'Unclosed paren detected'
    return compute_stack(stack), None
def is_balexpr_entry(entry):
    """True when `entry` is a `custom "balexpr"` directive."""
    if not isinstance(entry, Custom):
        return False
    return entry.type == 'balexpr'
def get_expression_from_entry(entry):
    """Return the first custom value: the balance expression string."""
    first_value = entry.values[0]
    return first_value.value
def get_expected_amount_from_entry(entry):
    """Return the second custom value: the expected amount."""
    second_value = entry.values[1]
    return second_value.value
def get_accounts_from_entry(entry):
    """Return every account name referenced in the entry's expression.

    Returns a list: the previous version returned a single-use ``map``
    iterator, which is always truthy and can only be consumed once, so the
    caller's "no accounts" check could never fire. The regex is now a raw
    string to avoid invalid-escape warnings on ``\\w``.
    """
    expression = get_expression_from_entry(entry)
    return [match[0] for match in
            re.findall(r'((Assets|Liabilities|Expenses|Equity)(:\w+)+)',
                       expression)]
def balexpr(entries, options_map):
    """Beancount plugin: verify `custom "balexpr" "<expr>" <amount>` directives.

    Replays the entries in order, maintaining running balances for every
    account referenced by some balexpr directive, and checks each directive's
    expression value against its expected amount (tolerance 0.005). Each
    directive is checked before same-day transactions are applied.

    Args:
        entries: the full entry stream from the loader.
        options_map: beancount options (unused).

    Returns:
        (entries, errors): the unmodified entries and a list of BalExprError.
    """
    errors = []
    real_root = realization.RealAccount('')
    balexpr_entries = [
        entry
        for entry in entries
        if is_balexpr_entry(entry)]

    # Only realize accounts some expression actually references (or their
    # sub-accounts), keeping the realization tree small.
    asserted_accounts = {
        account_
        for entry in balexpr_entries
        for account_ in get_accounts_from_entry(entry)}
    asserted_match_list = [
        account.parent_matcher(account_)
        for account_ in asserted_accounts]
    for account_ in getters.get_accounts(entries):
        if (account_ in asserted_accounts or
                any(match(account_) for match in asserted_match_list)):
            realization.get_or_create(real_root, account_)

    open_close_map = getters.get_account_open_close(entries)

    current_checking_balexpr_entry = 0
    for entry in entries:
        if current_checking_balexpr_entry >= len(balexpr_entries):
            break
        while current_checking_balexpr_entry < len(balexpr_entries) and balexpr_entries[current_checking_balexpr_entry].date == entry.date:
            checking_entry = balexpr_entries[current_checking_balexpr_entry]
            current_checking_balexpr_entry += 1

            # Fix: materialize as a list. The previous map object was always
            # truthy, so the "no account" error below could never trigger.
            accounts = list(get_accounts_from_entry(checking_entry))
            if not accounts:
                errors.append(BalExprError(
                    checking_entry.meta,
                    'No account found in the expression',
                    checking_entry))
                continue

            currency = get_expected_amount_from_entry(checking_entry).currency
            error_found_in_currencies = False
            for account_ in accounts:
                try:
                    # Renamed from `open` to avoid shadowing the builtin.
                    open_entry, _ = open_close_map[account_]
                except KeyError:
                    errors.append(BalExprError(
                        checking_entry.meta,
                        'Invalid reference to unknown account \'{}\''.format(account_),
                        checking_entry))
                    error_found_in_currencies = True
                    break
                # NOTE(review): open_entry.currencies may be None when the
                # Open directive lists no currencies; `in None` would raise —
                # confirm against real inputs.
                if currency not in open_entry.currencies:
                    errors.append(BalExprError(
                        checking_entry.meta,
                        'Currencies are inconsistent',
                        checking_entry))
                    error_found_in_currencies = True
                    break
            if error_found_in_currencies:
                continue

            expression = get_expression_from_entry(checking_entry)
            expected_amount = get_expected_amount_from_entry(checking_entry)
            real_amount, error_msg = calcuate(expression, currency, real_root)
            if error_msg:
                errors.append(BalExprError(checking_entry.meta, error_msg, checking_entry))
                continue

            diff_amount = sub(real_amount, expected_amount)
            if abs(diff_amount.number) > 0.005:
                errors.append(BalExprError(
                    checking_entry.meta,
                    "BalExpr failed: expected {} != accumulated {} ({} {})".format(
                        expected_amount,
                        real_amount,
                        abs(diff_amount.number),
                        ('too much'
                         if diff_amount.number > 0
                         else 'too little')),
                    checking_entry))

        # Apply this entry's postings to the running balances.
        if isinstance(entry, Transaction):
            for posting in entry.postings:
                real_account = realization.get(real_root, posting.account)
                if real_account is not None:
                    real_account.balance.add_position(posting)

    return entries, errors
# see scripts/percentiletest.py for an example
from typing import Tuple, Mapping, Callable, Optional, Any, cast
from typing_extensions import TypedDict
import numpy as np
from . import accel
from . import tune
from .abc import AbstractContext, AbstractCommandQueue
_TuningDict = TypedDict('_TuningDict', {'size': int, 'wgsy': int})
class Percentile5Template:
    """Kernel for calculating percentiles of a 2D array of data.

    5 percentiles [0,100,25,75,50] are calculated per row (along columns,
    independently per row). The lower percentile element, rather than a
    linear interpolation is chosen.

    WARNING: assumes all values are positive.

    Parameters
    ----------
    context
        Context for which kernels will be compiled
    max_columns
        Maximum number of columns
    is_amplitude
        If true, the inputs are scalar amplitudes; if false, they are complex
        numbers and the answers are computed on the absolute values
    tuning
        Kernel tuning parameters; if omitted, will autotune. The possible
        parameters are

        - size: number of workitems per workgroup along each row
        - wgsy: number of workitems per workgroup along each column
    """

    # Bump to invalidate previously stored autotuning results.
    autotune_version = 8

    def __init__(self, context: AbstractContext, max_columns: int,
                 is_amplitude: bool = True, tuning: Optional[_TuningDict] = None) -> None:
        self.context = context
        self.max_columns = max_columns
        self.is_amplitude = is_amplitude
        if tuning is None:
            tuning = self.autotune(context, max_columns, is_amplitude)
        self.size = tuning['size']
        self.wgsy = tuning['wgsy']
        # Values handled per workitem along a row: size * vt >= max_columns.
        self.vt = accel.divup(max_columns, tuning['size'])
        self.program = accel.build(context, "percentile.mako", {
            'size': self.size,
            'wgsy': self.wgsy,
            'vt': self.vt,
            'is_amplitude': self.is_amplitude
        })

    @classmethod
    @tune.autotuner(test={'size': 64, 'wgsy': 4})
    def autotune(cls, context: AbstractContext, max_columns: int,
                 is_amplitude: bool) -> _TuningDict:
        """Benchmark candidate workgroup shapes and return the fastest."""
        queue = context.create_tuning_command_queue()
        in_shape = (4096, max_columns)
        # Fixed seed keeps tuning measurements reproducible.
        rs = np.random.RandomState(seed=1)
        if is_amplitude:
            host_data: np.ndarray = rs.uniform(size=in_shape).astype(np.float32)
        else:
            host_data = rs.standard_normal(in_shape) + 1j * rs.standard_normal(in_shape)
            host_data = host_data.astype(np.complex64)

        def generate(size: int, wgsy: int) -> Callable[[int], float]:
            # Raising skips this (size, wgsy) combination during the sweep.
            if size * wgsy < 32 or size * wgsy > 1024:
                raise RuntimeError('work group size is unnecessarily large or small, skipping')
            if max_columns > size * 256:
                raise RuntimeError('too many columns')
            fn = cls(context, max_columns, is_amplitude, {
                'size': size, 'wgsy': wgsy}).instantiate(queue, in_shape)
            inp = fn.slots['src'].allocate(fn.allocator)
            fn.slots['dest'].allocate(fn.allocator)
            inp.set(queue, host_data)
            return tune.make_measure(queue, fn)

        return cast(_TuningDict, tune.autotune(generate,
                                               size=[8, 16, 32, 64, 128, 256, 512, 1024],
                                               wgsy=[1, 2, 4, 8, 16, 32]))

    def instantiate(self, command_queue: AbstractCommandQueue,
                    shape: Tuple[int, int],
                    column_range: Optional[Tuple[int, int]] = None,
                    allocator: Optional[accel.AbstractAllocator] = None) -> 'Percentile5':
        """Create a concrete operation bound to a queue and a data shape."""
        return Percentile5(self, command_queue, shape, column_range, allocator)
class Percentile5(accel.Operation):
    """Concrete instance of :class:`PercentileTemplate`.

    .. warning::

        Assumes all values are positive when `template.is_amplitude` is `True`.

    .. rubric:: Slots

    **src**
        Input type float32 or complex64.
        Shape is number of rows by number of columns, where 5 percentiles
        are computed along the columns, per row.
    **dest**
        Output type float32.
        Shape is (5, number of rows of input)

    Parameters
    ----------
    template
        Operation template
    command_queue
        Command queue for the operation
    shape
        Shape of the source data
    column_range:
        Half-open interval of columns that will be processed. If not
        specified, all columns are processed.
    allocator
        Allocator used to allocate unbound slots
    """

    def __init__(self, template: Percentile5Template, command_queue: AbstractCommandQueue,
                 shape: Tuple[int, int], column_range: Optional[Tuple[int, int]],
                 allocator: Optional[accel.AbstractAllocator] = None) -> None:
        super().__init__(command_queue, allocator)
        if column_range is None:
            column_range = (0, shape[1])
        # Validate the half-open [start, stop) column interval.
        if column_range[1] <= column_range[0]:
            raise ValueError('column range is empty')
        if column_range[0] < 0 or column_range[1] > shape[1]:
            raise IndexError('column range is out of range')
        if column_range[1] - column_range[0] > template.max_columns:
            raise ValueError('columns exceeds max_columns')
        self.template = template
        self.kernel = template.program.get_kernel("percentile5_float")
        self.shape = shape
        self.column_range = column_range
        src_type = np.float32 if self.template.is_amplitude else np.complex64
        # Rows are padded to a multiple of the workgroup height (wgsy).
        row_dim = accel.Dimension(shape[0], self.template.wgsy)
        col_dim = accel.Dimension(shape[1])
        self.slots['src'] = accel.IOSlot((row_dim, col_dim), src_type)
        self.slots['dest'] = accel.IOSlot((5, row_dim), np.float32)

    def _run(self) -> None:
        src = self.buffer('src')
        dest = self.buffer('dest')
        rows_padded = accel.roundup(src.shape[0], self.template.wgsy)
        # Kernel arguments: buffers, padded strides, then the column window
        # as (start, length).
        self.command_queue.enqueue_kernel(
            self.kernel,
            [
                src.buffer, dest.buffer,
                np.int32(src.padded_shape[1]),
                np.int32(dest.padded_shape[1]),
                np.int32(self.column_range[0]),
                np.int32(self.column_range[1] - self.column_range[0])
            ],
            global_size=(self.template.size, rows_padded),
            local_size=(self.template.size, self.template.wgsy))

    def parameters(self) -> Mapping[str, Any]:
        # Configuration key used to identify this operation instance.
        return {
            'max_columns': self.template.max_columns,
            'is_amplitude': self.template.is_amplitude,
            'shape': self.slots['src'].shape,  # type: ignore
            'column_range': self.column_range
        }
from typing import Tuple, Mapping, Callable, Optional, Any, cast
from typing_extensions import TypedDict
import numpy as np
from . import accel
from . import tune
from .abc import AbstractContext, AbstractCommandQueue
_TuningDict = TypedDict('_TuningDict', {'size': int, 'wgsy': int})
class Percentile5Template:
    """Kernel for calculating percentiles of a 2D array of data.

    5 percentiles [0,100,25,75,50] are calculated per row (along columns, independently per row).
    The lower percentile element, rather than a linear interpolation is chosen.
    WARNING: assumes all values are positive.

    Parameters
    ----------
    context
        Context for which kernels will be compiled
    max_columns
        Maximum number of columns
    is_amplitude
        If true, the inputs are scalar amplitudes; if false, they are complex
        numbers and the answers are computed on the absolute values
    tuning
        Kernel tuning parameters; if omitted, will autotune. The possible
        parameters are

        - size: number of workitems per workgroup along each row
        - wgsy: number of workitems per workgroup along each column
    """

    # Version tag consumed by the autotuning cache machinery.
    autotune_version = 8

    def __init__(self, context: AbstractContext, max_columns: int,
                 is_amplitude: bool = True, tuning: Optional[_TuningDict] = None) -> None:
        self.context = context
        self.max_columns = max_columns
        self.is_amplitude = is_amplitude
        if tuning is None:
            tuning = self.autotune(context, max_columns, is_amplitude)
        self.size = tuning['size']
        self.wgsy = tuning['wgsy']
        # Columns handled per workitem, so that `size` workitems span
        # max_columns (divup = ceiling division in the accel module).
        self.vt = accel.divup(max_columns, tuning['size'])
        # Compile the kernel from the mako template with the chosen tuning
        # parameters baked in.
        self.program = accel.build(context, "percentile.mako", {
            'size': self.size,
            'wgsy': self.wgsy,
            'vt': self.vt,
            'is_amplitude': self.is_amplitude
        })

    @classmethod
    @tune.autotuner(test={'size': 64, 'wgsy': 4})
    def autotune(cls, context: AbstractContext, max_columns: int,
                 is_amplitude: bool) -> _TuningDict:
        """Benchmark candidate (size, wgsy) pairs and return the best tuning."""
        queue = context.create_tuning_command_queue()
        in_shape = (4096, max_columns)
        # Fixed seed so that timing runs use reproducible input data.
        rs = np.random.RandomState(seed=1)
        if is_amplitude:
            host_data: np.ndarray = rs.uniform(size=in_shape).astype(np.float32)
        else:
            host_data = rs.standard_normal(in_shape) + 1j * rs.standard_normal(in_shape)
            host_data = host_data.astype(np.complex64)

        def generate(size: int, wgsy: int) -> Callable[[int], float]:
            # Reject configurations with unreasonable total workgroup sizes.
            if size * wgsy < 32 or size * wgsy > 1024:
                raise RuntimeError('work group size is unnecessarily large or small, skipping')
            if max_columns > size * 256:
                raise RuntimeError('too many columns')
            # Build a trial operation with this candidate tuning and return a
            # measurement callable for the autotuner.
            fn = cls(context, max_columns, is_amplitude, {
                'size': size, 'wgsy': wgsy}).instantiate(queue, in_shape)
            inp = fn.slots['src'].allocate(fn.allocator)
            fn.slots['dest'].allocate(fn.allocator)
            inp.set(queue, host_data)
            return tune.make_measure(queue, fn)

        return cast(_TuningDict, tune.autotune(generate,
                    size=[8, 16, 32, 64, 128, 256, 512, 1024],
                    wgsy=[1, 2, 4, 8, 16, 32]))

    def instantiate(self, command_queue: AbstractCommandQueue,
                    shape: Tuple[int, int],
                    column_range: Optional[Tuple[int, int]] = None,
                    allocator: Optional[accel.AbstractAllocator] = None) -> 'Percentile5':
        """Create a :class:`Percentile5` operation bound to this template."""
        return Percentile5(self, command_queue, shape, column_range, allocator)
class Percentile5(accel.Operation):
    """Concrete instance of :class:`Percentile5Template`.

    .. warning::

        Assumes all values are positive when `template.is_amplitude` is `True`.

    .. rubric:: Slots

    **src**
        Input type float32 or complex64.
        Shape is number of rows by number of columns, where 5 percentiles
        are computed along the columns, per row.

    **dest**
        Output type float32.
        Shape is (5, number of rows of input)

    Parameters
    ----------
    template
        Operation template
    command_queue
        Command queue for the operation
    shape
        Shape of the source data
    column_range
        Half-open interval of columns that will be processed. If not
        specified, all columns are processed.
    allocator
        Allocator used to allocate unbound slots

    Raises
    ------
    ValueError
        If `column_range` is empty, or wider than `template.max_columns`
    IndexError
        If `column_range` falls outside the columns of `shape`
    """

    def __init__(self, template: Percentile5Template, command_queue: AbstractCommandQueue,
                 shape: Tuple[int, int], column_range: Optional[Tuple[int, int]],
                 allocator: Optional[accel.AbstractAllocator] = None) -> None:
        super().__init__(command_queue, allocator)
        if column_range is None:
            column_range = (0, shape[1])
        # Validate the half-open interval before building any slots.
        if column_range[1] <= column_range[0]:
            raise ValueError('column range is empty')
        if column_range[0] < 0 or column_range[1] > shape[1]:
            raise IndexError('column range is out of range')
        if column_range[1] - column_range[0] > template.max_columns:
            raise ValueError('columns exceeds max_columns')
        self.template = template
        self.kernel = template.program.get_kernel("percentile5_float")
        self.shape = shape
        self.column_range = column_range
        src_type = np.float32 if self.template.is_amplitude else np.complex64
        # Rows are padded to a multiple of the workgroup height (wgsy) so the
        # 2D kernel launch in _run covers the data exactly.
        row_dim = accel.Dimension(shape[0], self.template.wgsy)
        col_dim = accel.Dimension(shape[1])
        self.slots['src'] = accel.IOSlot((row_dim, col_dim), src_type)
        self.slots['dest'] = accel.IOSlot((5, row_dim), np.float32)

    def _run(self) -> None:
        """Enqueue the percentile kernel on the command queue."""
        src = self.buffer('src')
        dest = self.buffer('dest')
        rows_padded = accel.roundup(src.shape[0], self.template.wgsy)
        self.command_queue.enqueue_kernel(
            self.kernel,
            [
                src.buffer, dest.buffer,
                np.int32(src.padded_shape[1]),
                np.int32(dest.padded_shape[1]),
                np.int32(self.column_range[0]),
                np.int32(self.column_range[1] - self.column_range[0])
            ],
            global_size=(self.template.size, rows_padded),
            local_size=(self.template.size, self.template.wgsy))

    def parameters(self) -> Mapping[str, Any]:
        """Return the values needed to reconstruct an equivalent operation."""
        return {
            'max_columns': self.template.max_columns,
            'is_amplitude': self.template.is_amplitude,
            'shape': self.slots['src'].shape,  # type: ignore
            'column_range': self.column_range
        }