hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a023c7989fcec81b1dff14361d4088348f987e7
| 10,473
|
py
|
Python
|
release/scripts/startup/bl_ui/properties_material_gpencil.py
|
PrototypeNM1/blender
|
4e4c5afdd830678fd99416119345059e87ee3425
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/startup/bl_ui/properties_material_gpencil.py
|
PrototypeNM1/blender
|
4e4c5afdd830678fd99416119345059e87ee3425
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/startup/bl_ui/properties_material_gpencil.py
|
PrototypeNM1/blender
|
4e4c5afdd830678fd99416119345059e87ee3425
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
from bl_ui.utils import PresetPanel
from bl_ui.properties_grease_pencil_common import (
GreasePencilMaterialsPanel,
)
class GPENCIL_MT_color_context_menu(Menu):
    """Context menu for the grease-pencil material slot list."""
    bl_label = "Layer"

    def draw(self, _context):
        layout = self.layout

        # Visibility management.
        layout.operator("gpencil.color_reveal", icon='RESTRICT_VIEW_OFF', text="Show All")
        layout.operator("gpencil.color_hide", icon='RESTRICT_VIEW_ON', text="Hide Others").unselected = True

        layout.separator()

        # Lock management.  Label fixed: was "UnLock All" (typo).
        layout.operator("gpencil.color_lock_all", icon='LOCKED', text="Lock All")
        layout.operator("gpencil.color_unlock_all", icon='UNLOCKED', text="Unlock All")
        layout.operator("gpencil.stroke_lock_color", text="Lock Unselected")
        layout.operator("gpencil.lock_layer", text="Lock Unused")

        layout.separator()

        layout.operator("object.material_slot_remove_unused")
class GPENCIL_UL_matslots(UIList):
    """Material-slot list entry renderer for grease-pencil materials."""

    def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, _index):
        material = item.material
        # Only grease-pencil materials get a custom row; anything else draws nothing.
        if material is None or material.grease_pencil is None:
            return
        gp_settings = material.grease_pencil

        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            # Grey out the whole entry while the material is locked.
            if gp_settings.lock:
                layout.active = False

            name_row = layout.row(align=True)
            name_row.enabled = not gp_settings.lock
            name_row.prop(material, "name", text="", emboss=False, icon_value=icon)

            toggle_row = layout.row(align=True)
            toggle_row.prop(gp_settings, "lock", text="", emboss=False)
            toggle_row.prop(gp_settings, "hide", text="", emboss=False)
            ghost_icon = 'ONIONSKIN_OFF' if gp_settings.ghost else 'ONIONSKIN_ON'
            toggle_row.prop(gp_settings, "ghost", text="", icon=ghost_icon, emboss=False)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
class GPMaterialButtonsPanel:
    """Mix-in shared by the grease-pencil material property panels."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"

    @classmethod
    def poll(cls, context):
        # Panels are visible only when the active material has
        # grease-pencil settings attached.
        material = context.material
        return material and material.grease_pencil
class MATERIAL_PT_gpencil_slots(GreasePencilMaterialsPanel, Panel):
    """Headerless panel listing the grease-pencil material slots."""
    bl_label = "Grease Pencil Material Slots"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        # Visible for grease-pencil materials, and for grease-pencil
        # objects even before any material is assigned.
        obj = context.object
        material = context.material
        return (material and material.grease_pencil) or (obj and obj.type == 'GPENCIL')
# Used as parent for "Stroke" and "Fill" panels
class MATERIAL_PT_gpencil_surface(GPMaterialButtonsPanel, Panel):
    """Parent panel that groups the "Stroke" and "Fill" sub-panels."""
    bl_label = "Surface"

    def draw_header_preset(self, _context):
        # Material preset selector shown in the panel header.
        MATERIAL_PT_gpencil_material_presets.draw_panel_header(self.layout)

    def draw(self, _context):
        # The parent panel only configures the layout; the actual
        # settings live in the child panels.
        self.layout.use_property_split = True
class MATERIAL_PT_gpencil_strokecolor(GPMaterialButtonsPanel, Panel):
    """Stroke (line) settings of the active grease-pencil material."""
    bl_label = "Stroke"
    bl_parent_id = 'MATERIAL_PT_gpencil_surface'

    def draw_header(self, context):
        material = context.material
        if material is None or material.grease_pencil is None:
            return
        gp_settings = material.grease_pencil
        self.layout.enabled = not gp_settings.lock
        self.layout.prop(gp_settings, "show_stroke", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        material = context.material
        if material is None or material.grease_pencil is None:
            return
        gp_settings = material.grease_pencil

        col = layout.column()
        col.enabled = not gp_settings.lock

        col.prop(gp_settings, "mode")
        col.prop(gp_settings, "stroke_style", text="Style")

        if gp_settings.stroke_style == 'TEXTURE':
            row = col.row()
            row.enabled = not gp_settings.lock
            col = row.column(align=True)
            col.template_ID(gp_settings, "stroke_image", open="image.open")
            if gp_settings.mode == 'LINE':
                col.prop(gp_settings, "pixel_size", text="UV Factor")
            col.prop(gp_settings, "use_stroke_pattern", text="Use As Stencil Mask")
            if gp_settings.use_stroke_pattern is False:
                col.prop(gp_settings, "use_stroke_texture_mix", text="Mix Color")
                if gp_settings.use_stroke_texture_mix is True:
                    col.prop(gp_settings, "mix_stroke_factor", text="Factor")

        # Flat color only applies to solid strokes, or to textured strokes
        # used as a stencil / mixed with a color.
        if gp_settings.stroke_style == 'SOLID' or gp_settings.use_stroke_pattern or gp_settings.use_stroke_texture_mix:
            col.prop(gp_settings, "color", text="Color")

        if gp_settings.mode in {'DOTS', 'BOX'}:
            col.prop(gp_settings, "alignment_mode")

        if gp_settings.mode == 'LINE' and gp_settings.stroke_style != 'TEXTURE':
            col.prop(gp_settings, "use_overlap_strokes")
class MATERIAL_PT_gpencil_fillcolor(GPMaterialButtonsPanel, Panel):
    # Fill (interior) settings of the active grease-pencil material.
    bl_label = "Fill"
    bl_parent_id = 'MATERIAL_PT_gpencil_surface'

    def draw_header(self, context):
        """Draw the 'show_fill' toggle in the panel header."""
        ma = context.material
        # NOTE(review): unlike MATERIAL_PT_gpencil_strokecolor.draw_header,
        # there is no None-guard here; GPMaterialButtonsPanel.poll() should
        # guarantee ma and ma.grease_pencil are set — confirm.
        gpcolor = ma.grease_pencil
        self.layout.enabled = not gpcolor.lock
        self.layout.prop(gpcolor, "show_fill", text="")

    def draw(self, context):
        """Draw the fill style options; visible rows depend on fill_style."""
        layout = self.layout
        layout.use_property_split = True
        ma = context.material
        gpcolor = ma.grease_pencil

        # color settings
        col = layout.column()
        # Everything below is greyed out while the material is locked.
        col.enabled = not gpcolor.lock
        col.prop(gpcolor, "fill_style", text="Style")

        if gpcolor.fill_style == 'GRADIENT':
            col.prop(gpcolor, "gradient_type")

        if gpcolor.fill_style != 'TEXTURE':
            col.prop(gpcolor, "fill_color", text="Color")

        if gpcolor.fill_style in {'GRADIENT', 'CHECKER'}:
            col.prop(gpcolor, "mix_color", text="Secondary Color")

        if gpcolor.fill_style == 'GRADIENT':
            col.prop(gpcolor, "mix_factor", text="Mix Factor", slider=True)

        # Pattern placement controls shared by gradient and checker fills.
        if gpcolor.fill_style in {'GRADIENT', 'CHECKER'}:
            col.prop(gpcolor, "flip", text="Flip Colors")
            col.prop(gpcolor, "pattern_shift", text="Location")
            col.prop(gpcolor, "pattern_scale", text="Scale")
            # Radial gradients expose a radius; every other non-solid
            # combination exposes an angle instead.
            if gpcolor.gradient_type == 'RADIAL' and gpcolor.fill_style not in {'SOLID', 'CHECKER'}:
                col.prop(gpcolor, "pattern_radius", text="Radius")
            else:
                if gpcolor.fill_style != 'SOLID':
                    col.prop(gpcolor, "pattern_angle", text="Angle")
            if gpcolor.fill_style == 'CHECKER':
                col.prop(gpcolor, "pattern_gridsize", text="Box Size")

        # Texture
        # Shown for texture fills, and for solid fills that mix in a texture.
        if gpcolor.fill_style == 'TEXTURE' or (gpcolor.use_fill_texture_mix is True and gpcolor.fill_style == 'SOLID'):
            col.template_ID(gpcolor, "fill_image", open="image.open")
            if gpcolor.fill_style == 'TEXTURE':
                col.prop(gpcolor, "use_fill_pattern", text="Use As Stencil Mask")
                if gpcolor.use_fill_pattern is True:
                    col.prop(gpcolor, "fill_color", text="Color")
            col.prop(gpcolor, "texture_offset", text="Offset")
            col.prop(gpcolor, "texture_scale", text="Scale")
            col.prop(gpcolor, "texture_angle")
            col.prop(gpcolor, "texture_opacity")
            col.prop(gpcolor, "texture_clamp", text="Clip Image")
            if gpcolor.use_fill_pattern is False:
                col.prop(gpcolor, "use_fill_texture_mix", text="Mix With Color")
                if gpcolor.use_fill_texture_mix is True:
                    col.prop(gpcolor, "fill_color", text="Mix Color")
                    col.prop(gpcolor, "mix_factor", text="Mix Factor", slider=True)
class MATERIAL_PT_gpencil_preview(GPMaterialButtonsPanel, Panel):
    """Preview render of the active grease-pencil material."""
    bl_label = "Preview"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        material = context.material
        layout = self.layout
        layout.label(text=material.name)
        layout.template_preview(material)
class MATERIAL_PT_gpencil_custom_props(GPMaterialButtonsPanel, PropertyPanel, Panel):
    # Standard "Custom Properties" panel; all drawing behavior is
    # inherited from rna_prop_ui.PropertyPanel.
    COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    # RNA path PropertyPanel uses to locate the ID that owns the properties.
    _context_path = "object.active_material"
    _property_type = bpy.types.Material
class MATERIAL_PT_gpencil_options(GPMaterialButtonsPanel, Panel):
    """Miscellaneous material options (currently only the pass index)."""
    bl_label = "Options"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        # Unlike the Stroke/Fill panels, this row is not greyed out
        # when the material is locked.
        layout.prop(context.material.grease_pencil, "pass_index")
class MATERIAL_PT_gpencil_material_presets(PresetPanel, Panel):
    """Material settings"""
    bl_label = "Material Presets"
    # Directory (under the presets path) where material presets are stored.
    preset_subdir = "gpencil_material"
    # Operators used by PresetPanel to apply and add presets.
    preset_operator = "script.execute_preset"
    preset_add_operator = "scene.gpencil_material_preset_add"
# Classes to register. MATERIAL_PT_gpencil_surface appears before the
# sub-panels that reference it via bl_parent_id.
classes = (
    GPENCIL_UL_matslots,
    GPENCIL_MT_color_context_menu,
    MATERIAL_PT_gpencil_slots,
    MATERIAL_PT_gpencil_preview,
    MATERIAL_PT_gpencil_material_presets,
    MATERIAL_PT_gpencil_surface,
    MATERIAL_PT_gpencil_strokecolor,
    MATERIAL_PT_gpencil_fillcolor,
    MATERIAL_PT_gpencil_options,
    MATERIAL_PT_gpencil_custom_props,
)

if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| 35.381757
| 119
| 0.639454
|
4a023dead2ccb832ec2579e4d7e726aba35d92cb
| 2,580
|
py
|
Python
|
docs/examples/add-to-your-application/python/app/data_filtering.py
|
lafrech/oso
|
f89ae9d95678b68697c6dbfc736ac14ee89b09b8
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/add-to-your-application/python/app/data_filtering.py
|
lafrech/oso
|
f89ae9d95678b68697c6dbfc736ac14ee89b09b8
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/add-to-your-application/python/app/data_filtering.py
|
lafrech/oso
|
f89ae9d95678b68697c6dbfc736ac14ee89b09b8
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from .routes import serialize, app
from . import models
from oso import Oso
from polar.data.adapter.sqlalchemy_adapter import SqlAlchemyAdapter
from sqlalchemy import Column, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
# This example uses a separate Oso instance so that I can re-register classes
# with data filtering query builders but use the same `main.polar` policy.
oso = Oso()

# In-memory SQLite database holding the example data.
engine = create_engine("sqlite://")
Session = sessionmaker(bind=engine)
Base = declarative_base(bind=engine)


class Repository(Base):
    # Minimal ORM model used throughout the data-filtering example.
    __tablename__ = "repo"

    # The repository name doubles as the primary key.
    name = Column(String(128), primary_key=True)
    is_public = Column(Boolean)


Base.metadata.create_all()
# docs: begin-data-filtering
# This is an example implementation for the SQLAlchemy ORM, but you can
# use any ORM with this API.
def get_repositories(filters):
    """Translate Oso data-filtering constraints into a SQLAlchemy query.

    :param filters: iterable of Oso filter objects with ``field``, ``kind``
        and ``value`` attributes.
    :returns: a SQLAlchemy ``Query`` over ``Repository`` matching all filters.
    :raises NotImplementedError: for constraint kinds other than Eq/In.
    """
    query = Session().query(Repository)
    # NOTE: the loop variable was renamed from `filter`, which shadowed
    # the `filter` builtin.
    for constraint in filters:
        value = constraint.value
        if constraint.field is None:
            # If the field is None, this filter is comparing against
            # the repository object, so we construct a query that makes sure
            # the primary key (name) matches.
            value = value.name
            field = Repository.name
        else:
            # Otherwise, we get the field to compare against.
            field = getattr(Repository, constraint.field)

        # Build SQLAlchemy query based on filters.
        if constraint.kind == "Eq":
            query = query.filter(field == value)
        elif constraint.kind == "In":
            query = query.filter(field.in_(value))
        else:
            # See full guide to handle other constraint types.
            raise NotImplementedError("unsupported constraint type")
    return query
# Register the application classes referenced by the policy.
oso.register_class(models.User)
oso.register_class(
    Repository,
    fields={
        # Tell Oso the types of fields you will use in your policy.
        "is_public": bool
    },
)
# The adapter lets Oso build SQLAlchemy queries from authorization results.
oso.set_data_filtering_adapter(SqlAlchemyAdapter(Session()))
oso.load_files([Path(__file__).parent / "main.polar"])
# docs: end-data-filtering
class User:
    """Stand-in for a user session in this example application."""

    @staticmethod
    def get_current_user():
        # Hard-coded "current user": an admin on the "gmail" repository.
        return models.User(
            roles=[{"name": "admin", "repository": Repository(name="gmail")}]
        )
# docs: begin-list-route
@app.route("/repos")
def repo_list():
    """List the repositories the current user is authorized to read."""
    current_user = User.get_current_user()
    readable = oso.authorized_resources(current_user, "read", Repository)
    return serialize(readable)
# docs: end-list-route
| 26.597938
| 88
| 0.683333
|
4a023e4212dc4278ab34881fe728d3c70af72b9a
| 35,204
|
py
|
Python
|
mesonbuild/dependencies/misc.py
|
nostream/meson
|
d5a6ab31bfbcec9838805c598be75abb036aff87
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/dependencies/misc.py
|
nostream/meson
|
d5a6ab31bfbcec9838805c598be75abb036aff87
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/dependencies/misc.py
|
nostream/meson
|
d5a6ab31bfbcec9838805c598be75abb036aff87
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import glob
import os
import re
import shlex
import shutil
import sysconfig
from .. import mlog
from .. import mesonlib
from ..mesonlib import Popen_safe, extract_as_list
from ..environment import detect_cpu_family
from .base import DependencyException, DependencyMethods
from .base import ExternalDependency, ExternalProgram, ExtraFrameworkDependency, PkgConfigDependency
# On windows 3 directory layouts are supported:
# * The default layout (versioned) installed:
# - $BOOST_ROOT/include/boost-x_x/boost/*.hpp
# - $BOOST_ROOT/lib/*.lib
# * The non-default layout (system) installed:
# - $BOOST_ROOT/include/boost/*.hpp
# - $BOOST_ROOT/lib/*.lib
# * The pre-built binaries from sf.net:
# - $BOOST_ROOT/boost/*.hpp
# - $BOOST_ROOT/lib<arch>-<compiler>/*.lib where arch=32/64 and compiler=msvc-14.1
#
# Library names supported:
# - libboost_<module>-<compiler>-mt-gd-x_x.lib (static)
# - boost_<module>-<compiler>-mt-gd-x_x.lib|.dll (shared)
# - libboost_<module>.lib (static)
# - boost_<module>.lib|.dll (shared)
# where compiler is vc141 for example.
#
# NOTE: -gd means runtime and build time debugging is on
# -mt means threading=multi
#
# The `modules` argument accept library names. This is because every module that
# has libraries to link against also has multiple options regarding how to
# link. See for example:
# * http://www.boost.org/doc/libs/1_65_1/libs/test/doc/html/boost_test/usage_variants.html
# * http://www.boost.org/doc/libs/1_65_1/doc/html/stacktrace/configuration_and_build.html
# * http://www.boost.org/doc/libs/1_65_1/libs/math/doc/html/math_toolkit/main_tr1.html
class BoostDependency(ExternalDependency):
    """Detects a Boost installation (headers and optional module libraries)."""

    def __init__(self, environment, kwargs):
        super().__init__('boost', environment, 'cpp', kwargs)
        # These modules ship only static libraries.
        self.need_static_link = ['boost_exception', 'boost_test_exec_monitor']
        self.is_debug = environment.cmd_line_options.buildtype.startswith('debug')
        threading = kwargs.get("threading", "multi")
        self.is_multithreading = threading == "multi"

        self.requested_modules = self.get_requested(kwargs)
        invalid_modules = [c for c in self.requested_modules if 'boost_' + c not in BOOST_LIBS]
        if invalid_modules:
            mlog.warning('Invalid Boost modules: ' + ', '.join(invalid_modules))
            self.log_fail()
            return

        self.boost_root = None
        self.boost_roots = []
        self.incdir = None
        self.libdir = None

        # Environment variables take precedence over auto-detection.
        if 'BOOST_ROOT' in os.environ:
            self.boost_root = os.environ['BOOST_ROOT']
            self.boost_roots = [self.boost_root]
            if not os.path.isabs(self.boost_root):
                raise DependencyException('BOOST_ROOT must be an absolute path.')
        if 'BOOST_INCLUDEDIR' in os.environ:
            self.incdir = os.environ['BOOST_INCLUDEDIR']
        if 'BOOST_LIBRARYDIR' in os.environ:
            self.libdir = os.environ['BOOST_LIBRARYDIR']

        if self.boost_root is None:
            if mesonlib.is_windows():
                self.boost_roots = self.detect_win_roots()
            else:
                self.boost_roots = self.detect_nix_roots()

        if self.boost_root is None and not self.boost_roots:
            self.log_fail()
            return

        if self.incdir is None:
            if mesonlib.is_windows():
                self.incdir = self.detect_win_incdir()
            else:
                self.incdir = self.detect_nix_incdir()

        if self.incdir is None:
            self.log_fail()
            return

        mlog.debug('Boost library root dir is', mlog.bold(self.boost_root))
        mlog.debug('Boost include directory is', mlog.bold(self.incdir))

        # Maps module name (e.g. 'boost_thread') -> library (file)name.
        self.lib_modules = {}
        self.detect_version()
        if self.is_found:
            self.detect_lib_modules()
            mlog.debug('Boost library directory is', mlog.bold(self.libdir))
            self.validate_requested()
            self.log_success()
        else:
            self.log_fail()

    def log_fail(self):
        module_str = ', '.join(self.requested_modules)
        mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))

    def log_success(self):
        module_str = ', '.join(self.requested_modules)
        if self.boost_root:
            info = self.version + ', ' + self.boost_root
        else:
            info = self.version
        mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'), info)

    def detect_nix_roots(self):
        # Parent of every default include dir is a candidate root.
        return [os.path.abspath(os.path.join(x, '..'))
                for x in self.compiler.get_default_include_dirs()]

    def detect_win_roots(self):
        res = []
        # Where boost documentation says it should be
        globtext = 'C:\\Program Files\\boost\\boost_*'
        files = glob.glob(globtext)
        res.extend(files)

        # Where boost built from source actually installs it
        if os.path.isdir('C:\\Boost'):
            res.append('C:\\Boost')

        # Where boost prebuilt binaries are
        globtext = 'C:\\local\\boost_*'
        files = glob.glob(globtext)
        res.extend(files)
        return res

    def detect_nix_incdir(self):
        for root in self.boost_roots:
            incdir = os.path.join(root, 'include', 'boost')
            if os.path.isdir(incdir):
                return os.path.join(root, 'include')
        return None

    # FIXME: Should pick a version that matches the requested version
    # Returns the folder that contains the boost folder.
    def detect_win_incdir(self):
        for root in self.boost_roots:
            # Layout=versioned: include/boost-x_x/boost/*.hpp
            globtext = os.path.join(root, 'include', 'boost-*')
            incdirs = glob.glob(globtext)
            if len(incdirs) > 0:
                return incdirs[0]
            # Layout=system: include/boost/*.hpp
            incboostdir = os.path.join(root, 'include', 'boost')
            if os.path.isdir(incboostdir):
                return os.path.join(root, 'include')
            # sf.net pre-built binaries: boost/*.hpp directly under the root.
            incboostdir = os.path.join(root, 'boost')
            if os.path.isdir(incboostdir):
                return root
        return None

    def get_compile_args(self):
        args = []
        include_dir = self.incdir

        # Use "-isystem" when including boost headers instead of "-I"
        # to avoid compiler warnings/failures when "-Werror" is used

        # Careful not to use "-isystem" on default include dirs as it
        # breaks some of the headers for certain gcc versions

        # For example, doing g++ -isystem /usr/include on a simple
        # "int main()" source results in the error:
        # "/usr/include/c++/6.3.1/cstdlib:75:25: fatal error: stdlib.h: No such file or directory"

        # See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
        # and http://stackoverflow.com/questions/37218953/isystem-on-a-system-include-directory-causes-errors
        # for more details

        if include_dir and include_dir not in self.compiler.get_default_include_dirs():
            args.append("".join(self.compiler.get_include_args(include_dir, True)))
        return args

    def get_requested(self, kwargs):
        candidates = extract_as_list(kwargs, 'modules')
        for c in candidates:
            if not isinstance(c, str):
                raise DependencyException('Boost module argument is not a string.')
        return candidates

    def validate_requested(self):
        for m in self.requested_modules:
            if 'boost_' + m not in self.lib_modules:
                msg = 'Requested Boost library {!r} not found'
                raise DependencyException(msg.format(m))

    def detect_version(self):
        """Parse BOOST_LIB_VERSION out of boost/version.hpp; sets is_found."""
        try:
            ifile = open(os.path.join(self.incdir, 'boost', 'version.hpp'))
        except FileNotFoundError:
            return
        except TypeError:
            # self.incdir may be None when detection failed earlier.
            return
        with ifile:
            for line in ifile:
                if line.startswith("#define") and 'BOOST_LIB_VERSION' in line:
                    ver = line.split()[-1]
                    ver = ver[1:-1]  # strip surrounding quotes
                    self.version = ver.replace('_', '.')
                    self.is_found = True
                    return

    def detect_lib_modules(self):
        if mesonlib.is_windows():
            return self.detect_lib_modules_win()
        return self.detect_lib_modules_nix()

    def detect_lib_modules_win(self):
        """Populate self.lib_modules from .lib files found on Windows."""
        arch = detect_cpu_family(self.env.coredata.compilers)
        compiler_ts = self.env.detect_cpp_compiler(self.want_cross).get_toolset_version().split('.')
        compiler = 'vc{}{}'.format(compiler_ts[0], compiler_ts[1])
        if not self.libdir:
            # The libdirs in the distributed binaries
            if arch == 'x86':
                gl = 'lib32*'
            elif arch == 'x86_64':
                gl = 'lib64*'
            else:
                # Does anyone do Boost cross-compiling to other archs on Windows?
                gl = None
            if self.boost_root:
                roots = [self.boost_root]
            else:
                roots = self.boost_roots
            for root in roots:
                # The default libdir when building
                libdir = os.path.join(root, 'lib')
                if os.path.isdir(libdir):
                    self.libdir = libdir
                    break
                if gl:
                    tmp = glob.glob(os.path.join(root, gl))
                    if len(tmp) > 0:
                        # FIXME: Should pick the correct version
                        self.libdir = tmp[0]
                        break

        if not self.libdir:
            return

        for name in self.need_static_link:
            # Versioned static library: libboost_x-vcNNN[-mt][-gd]-x_x.lib
            libname = "lib{}".format(name) + '-' + compiler
            if self.is_multithreading:
                libname = libname + '-mt'
            if self.is_debug:
                libname = libname + '-gd'
            libname = libname + "-{}.lib".format(self.version.replace('.', '_'))
            if os.path.isfile(os.path.join(self.libdir, libname)):
                modname = libname.split('-', 1)[0][3:]
                self.lib_modules[modname] = libname
            else:
                libname = "lib{}.lib".format(name)
                if os.path.isfile(os.path.join(self.libdir, libname)):
                    # BUGFIX: was `self.lib_modules[name[3:]] = libname`.
                    # `name` has no 'lib' prefix here ('boost_exception'),
                    # so stripping three characters produced 'st_exception'
                    # and validate_requested() could never match the module.
                    self.lib_modules[name] = libname

        # globber1 applies to a layout=system installation
        # globber2 applies to a layout=versioned installation
        globber1 = 'libboost_*' if self.static else 'boost_*'
        globber2 = globber1 + '-' + compiler
        if self.is_multithreading:
            globber2 = globber2 + '-mt'
        if self.is_debug:
            globber2 = globber2 + '-gd'
        globber2 = globber2 + '-{}'.format(self.version.replace('.', '_'))
        globber2_matches = glob.glob(os.path.join(self.libdir, globber2 + '.lib'))
        for entry in globber2_matches:
            (_, fname) = os.path.split(entry)
            parts = fname.split('-', 1)
            if len(parts) > 1:
                modname = parts[0]
            else:
                # BUGFIX: the original called .split() on the *list*
                # (`modname.split('.', 1)[0]`), raising AttributeError;
                # split the single element to drop the extension instead.
                modname = parts[0].split('.', 1)[0]
            if self.static:
                modname = modname[3:]  # drop 'lib' prefix
            self.lib_modules[modname] = fname
        if len(globber2_matches) == 0:
            # Fall back to layout=system library names.
            for entry in glob.glob(os.path.join(self.libdir, globber1 + '.lib')):
                (_, fname) = os.path.split(entry)
                modname = fname.split('.', 1)[0]
                if self.static:
                    modname = modname[3:]
                self.lib_modules[modname] = fname

    def detect_lib_modules_nix(self):
        """Populate self.lib_modules from shared/static libs on Unix-likes."""
        if self.static:
            libsuffix = 'a'
        elif mesonlib.is_osx() and not self.want_cross:
            libsuffix = 'dylib'
        else:
            libsuffix = 'so'

        globber = 'libboost_*.{}'.format(libsuffix)
        if self.libdir:
            libdirs = [self.libdir]
        elif self.boost_root is None:
            libdirs = mesonlib.get_library_dirs()
        else:
            libdirs = [os.path.join(self.boost_root, 'lib')]
        for libdir in libdirs:
            for name in self.need_static_link:
                libname = 'lib{}.a'.format(name)
                if os.path.isfile(os.path.join(libdir, libname)):
                    self.lib_modules[name] = libname
            for entry in glob.glob(os.path.join(libdir, globber)):
                lib = os.path.basename(entry)
                name = lib.split('.')[0][3:]
                # I'm not 100% sure what to do here. Some distros
                # have modules such as thread only as -mt versions.
                # On debian all packages are built threading=multi
                # but not suffixed with -mt.
                # FIXME: implement detect_lib_modules_{debian, redhat, ...}
                if self.is_multithreading and mesonlib.is_debianlike():
                    self.lib_modules[name] = lib
                elif self.is_multithreading and entry.endswith('-mt.{}'.format(libsuffix)):
                    self.lib_modules[name] = lib
                elif not entry.endswith('-mt.{}'.format(libsuffix)):
                    self.lib_modules[name] = lib

    def get_win_link_args(self):
        args = []
        # TODO: should this check self.libdir?
        if self.libdir:
            args.append('-L' + self.libdir)
        for lib in self.requested_modules:
            args.append(self.lib_modules['boost_' + lib])
        return args

    def get_link_args(self):
        if mesonlib.is_windows():
            return self.get_win_link_args()
        args = []
        if self.boost_root:
            args.append('-L' + os.path.join(self.boost_root, 'lib'))
        elif self.libdir:
            args.append('-L' + self.libdir)
        for lib in self.requested_modules:
            # The compiler's library detector is the most reliable so use that first.
            boost_lib = 'boost_' + lib
            default_detect = self.compiler.find_library(boost_lib, self.env, [])
            if default_detect is not None:
                args += default_detect
            elif boost_lib in self.lib_modules:
                linkcmd = '-l' + boost_lib
                args.append(linkcmd)
        return args

    def get_sources(self):
        return []

    def need_threads(self):
        # boost::thread requires linking the platform thread library.
        return 'thread' in self.requested_modules
class MPIDependency(ExternalDependency):
    """Detects an MPI implementation via pkg-config, compiler wrappers, or MS-MPI."""

    def __init__(self, environment, kwargs):
        language = kwargs.get('language', 'c')
        super().__init__('mpi', environment, language, kwargs)
        required = kwargs.pop('required', True)
        # The nested pkg-config lookups must not abort or log on their own.
        kwargs['required'] = False
        kwargs['silent'] = True
        self.is_found = False

        # NOTE: Only OpenMPI supplies a pkg-config file at the moment.
        if language == 'c':
            env_vars = ['MPICC']
            pkgconfig_files = ['ompi-c']
            default_wrappers = ['mpicc']
        elif language == 'cpp':
            env_vars = ['MPICXX']
            pkgconfig_files = ['ompi-cxx']
            default_wrappers = ['mpic++', 'mpicxx', 'mpiCC']
        elif language == 'fortran':
            env_vars = ['MPIFC', 'MPIF90', 'MPIF77']
            pkgconfig_files = ['ompi-fort']
            default_wrappers = ['mpifort', 'mpif90', 'mpif77']
        else:
            raise DependencyException('Language {} is not supported with MPI.'.format(language))

        for pkg in pkgconfig_files:
            try:
                pkgdep = PkgConfigDependency(pkg, environment, kwargs)
                if pkgdep.found():
                    self.compile_args = pkgdep.get_compile_args()
                    self.link_args = pkgdep.get_link_args()
                    self.version = pkgdep.get_version()
                    self.is_found = True
                    break
            except Exception:
                pass

        if not self.is_found:
            # Prefer environment.
            for var in env_vars:
                if var in os.environ:
                    wrappers = [os.environ[var]]
                    break
            else:
                # Or search for default wrappers.
                wrappers = default_wrappers

            for prog in wrappers:
                result = self._try_openmpi_wrapper(prog)
                if result is not None:
                    self.is_found = True
                    self.version = result[0]
                    self.compile_args = self._filter_compile_args(result[1])
                    self.link_args = self._filter_link_args(result[2])
                    break
                result = self._try_other_wrapper(prog)
                if result is not None:
                    self.is_found = True
                    self.version = result[0]
                    self.compile_args = self._filter_compile_args(result[1])
                    self.link_args = self._filter_link_args(result[2])
                    break

        if not self.is_found and mesonlib.is_windows():
            result = self._try_msmpi()
            if result is not None:
                self.is_found = True
                self.version, self.compile_args, self.link_args = result

        if self.is_found:
            mlog.log('Dependency', mlog.bold(self.name), 'for', self.language, 'found:', mlog.green('YES'), self.version)
        else:
            mlog.log('Dependency', mlog.bold(self.name), 'for', self.language, 'found:', mlog.red('NO'))
            if required:
                raise DependencyException('MPI dependency {!r} not found'.format(self.name))

    def _filter_compile_args(self, args):
        """
        MPI wrappers return a bunch of garbage args.
        Drop -O2 and everything that is not needed.
        """
        result = []
        multi_args = ('-I', )
        if self.language == 'fortran':
            fc = self.env.coredata.compilers['fortran']
            multi_args += fc.get_module_incdir_args()

        include_next = False
        for f in args:
            if f.startswith(('-D', '-f') + multi_args) or f == '-pthread' \
                    or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
                result.append(f)
                if f in multi_args:
                    # Path is a separate argument.
                    include_next = True
            elif include_next:
                include_next = False
                result.append(f)
        return result

    def _filter_link_args(self, args):
        """
        MPI wrappers return a bunch of garbage args.
        Drop -O2 and everything that is not needed.
        """
        result = []
        include_next = False
        for f in args:
            if f.startswith(('-L', '-l', '-Xlinker')) or f == '-pthread' \
                    or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
                result.append(f)
                if f in ('-L', '-Xlinker'):
                    include_next = True
            elif include_next:
                include_next = False
                result.append(f)
        return result

    def _try_openmpi_wrapper(self, prog):
        """Query an OpenMPI-style wrapper; returns (version, cargs, libs) or None."""
        prog = ExternalProgram(prog, silent=True)
        if prog.found():
            cmd = prog.get_command() + ['--showme:compile']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            cargs = shlex.split(o)

            cmd = prog.get_command() + ['--showme:link']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            libs = shlex.split(o)

            cmd = prog.get_command() + ['--showme:version']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            # BUGFIX: the pattern was the non-raw string '\d+.\d+.\d+' with
            # unescaped dots (matching any character); use a raw string with
            # escaped dots so only real x.y.z versions match.
            version = re.search(r'\d+\.\d+\.\d+', o)
            if version:
                version = version.group(0)
            else:
                version = 'none'

            return version, cargs, libs

    def _try_other_wrapper(self, prog):
        """Query a generic (MPICH-style) wrapper via -show; version is unknown."""
        prog = ExternalProgram(prog, silent=True)
        if prog.found():
            cmd = prog.get_command() + ['-show']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            args = shlex.split(o)

            version = 'none'

            return version, args, args

    def _try_msmpi(self):
        """Detect MS-MPI from its MSMPI_* environment variables (Windows)."""
        if self.language == 'cpp':
            # MS-MPI does not support the C++ version of MPI, only the standard C API.
            return
        if 'MSMPI_INC' not in os.environ:
            return
        incdir = os.environ['MSMPI_INC']
        arch = detect_cpu_family(self.env.coredata.compilers)
        if arch == 'x86':
            if 'MSMPI_LIB32' not in os.environ:
                return
            libdir = os.environ['MSMPI_LIB32']
            post = 'x86'
        elif arch == 'x86_64':
            if 'MSMPI_LIB64' not in os.environ:
                return
            libdir = os.environ['MSMPI_LIB64']
            post = 'x64'
        else:
            return
        if self.language == 'fortran':
            return ('none',
                    ['-I' + incdir, '-I' + os.path.join(incdir, post)],
                    [os.path.join(libdir, 'msmpi.lib'), os.path.join(libdir, 'msmpifec.lib')])
        else:
            return ('none',
                    ['-I' + incdir, '-I' + os.path.join(incdir, post)],
                    [os.path.join(libdir, 'msmpi.lib')])
class ThreadDependency(ExternalDependency):
    """Trivial dependency representing the platform thread library."""

    def __init__(self, environment, kwargs):
        super().__init__('threads', environment, None, {})
        self.name = 'threads'
        # Threads are assumed to always be available; the compiler
        # supplies the actual flags.
        self.is_found = True
        mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))

    def need_threads(self):
        """This dependency exists precisely to request thread flags."""
        return True

    def get_version(self):
        # There is no meaningful version for the thread library.
        return 'unknown'
class Python3Dependency(ExternalDependency):
    """Dependency object for Python 3 development files (headers + libpython).

    Detection order: pkg-config first; then, on Windows, sysconfig
    introspection of the running interpreter; then, on macOS, the system
    Python framework.
    """

    def __init__(self, environment, kwargs):
        super().__init__('python3', environment, None, kwargs)
        self.name = 'python3'
        # We can only be sure that it is Python 3 at this point
        self.version = '3'
        self.pkgdep = None
        if DependencyMethods.PKGCONFIG in self.methods:
            try:
                self.pkgdep = PkgConfigDependency('python3', environment, kwargs)
                if self.pkgdep.found():
                    self.compile_args = self.pkgdep.get_compile_args()
                    self.link_args = self.pkgdep.get_link_args()
                    self.version = self.pkgdep.get_version()
                    self.is_found = True
                    return
                else:
                    self.pkgdep = None
            except Exception:
                # pkg-config itself may be missing entirely; fall through to
                # the platform-specific methods below.
                pass
        if not self.is_found:
            if mesonlib.is_windows() and DependencyMethods.SYSCONFIG in self.methods:
                self._find_libpy3_windows(environment)
            elif mesonlib.is_osx() and DependencyMethods.EXTRAFRAMEWORK in self.methods:
                # In OSX the Python 3 framework does not have a version
                # number in its name.
                fw = ExtraFrameworkDependency('python', False, None, self.env,
                                              self.language, kwargs)
                if fw.found():
                    self.compile_args = fw.get_compile_args()
                    self.link_args = fw.get_link_args()
                    self.is_found = True
        if self.is_found:
            mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
        else:
            mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))

    def _find_libpy3_windows(self, env):
        '''
        Find python3 libraries on Windows and also verify that the arch matches
        what we are building for.
        '''
        pyarch = sysconfig.get_platform()
        arch = detect_cpu_family(env.coredata.compilers)
        if arch == 'x86':
            arch = '32'
        elif arch == 'x86_64':
            arch = '64'
        else:
            # We can't cross-compile Python 3 dependencies on Windows yet
            mlog.log('Unknown architecture {!r} for'.format(arch),
                     mlog.bold(self.name))
            self.is_found = False
            return
        # Pyarch ends in '32' or '64'
        if arch != pyarch[-2:]:
            mlog.log('Need', mlog.bold(self.name),
                     'for {}-bit, but found {}-bit'.format(arch, pyarch[-2:]))
            self.is_found = False
            return
        inc = sysconfig.get_path('include')
        platinc = sysconfig.get_path('platinclude')
        self.compile_args = ['-I' + inc]
        # platinclude may equal include; avoid a duplicate -I flag.
        if inc != platinc:
            self.compile_args.append('-I' + platinc)
        # Nothing exposes this directly that I could find
        basedir = sysconfig.get_config_var('base')
        vernum = sysconfig.get_config_var('py_version_nodot')
        self.link_args = ['-L{}/libs'.format(basedir),
                          '-lpython{}'.format(vernum)]
        self.version = sysconfig.get_config_var('py_version_short')
        self.is_found = True

    def get_methods(self):
        # sysconfig probing only makes sense on Windows, the framework only on
        # macOS; pkg-config works everywhere.
        if mesonlib.is_windows():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
        elif mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG]

    def get_pkgconfig_variable(self, variable_name):
        # Delegate to the underlying pkg-config dependency when one was found.
        if self.pkgdep:
            return self.pkgdep.get_pkgconfig_variable(variable_name)
        else:
            return super().get_pkgconfig_variable(variable_name)
class PcapDependency(ExternalDependency):
    """Dependency object for libpcap.

    Detection order: pkg-config first, then the pcap-config tool.
    """

    def __init__(self, environment, kwargs):
        super().__init__('pcap', environment, None, kwargs)
        if DependencyMethods.PKGCONFIG in self.methods:
            try:
                # Probe quietly; a failure here just moves us on to pcap-config.
                kwargs['required'] = False
                pcdep = PkgConfigDependency('pcap', environment, kwargs)
                if pcdep.found():
                    self.type_name = 'pkgconfig'
                    self.is_found = True
                    self.compile_args = pcdep.get_compile_args()
                    self.link_args = pcdep.get_link_args()
                    self.version = pcdep.get_version()
                    return
            except Exception as e:
                mlog.debug('Pcap not found via pkgconfig. Trying next, error was:', str(e))
        if DependencyMethods.PCAPCONFIG in self.methods:
            pcapconf = shutil.which('pcap-config')
            if pcapconf:
                stdo = Popen_safe(['pcap-config', '--cflags'])[1]
                self.compile_args = stdo.strip().split()
                stdo = Popen_safe(['pcap-config', '--libs'])[1]
                self.link_args = stdo.strip().split()
                # The version is obtained from the library itself via a
                # compiler probe (see get_pcap_lib_version below).
                self.version = self.get_pcap_lib_version()
                self.is_found = True
                mlog.log('Dependency', mlog.bold('pcap'), 'found:',
                         mlog.green('YES'), '(%s)' % pcapconf)
                return
            mlog.debug('Could not find pcap-config binary, trying next.')

    def get_methods(self):
        if mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.PCAPCONFIG, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG, DependencyMethods.PCAPCONFIG]

    def get_pcap_lib_version(self):
        # Compile-and-run probe that evaluates pcap_lib_version() from <pcap.h>.
        return self.compiler.get_return_value('pcap_lib_version', 'string',
                                              '#include <pcap.h>', self.env, [], [self])
class CupsDependency(ExternalDependency):
    """Dependency object for CUPS.

    Detection order: pkg-config, then the cups-config tool, then (on macOS)
    the CUPS framework.
    """

    def __init__(self, environment, kwargs):
        super().__init__('cups', environment, None, kwargs)
        if DependencyMethods.PKGCONFIG in self.methods:
            try:
                # Probe quietly; a failure here just moves us on to cups-config.
                kwargs['required'] = False
                pcdep = PkgConfigDependency('cups', environment, kwargs)
                if pcdep.found():
                    self.type_name = 'pkgconfig'
                    self.is_found = True
                    self.compile_args = pcdep.get_compile_args()
                    self.link_args = pcdep.get_link_args()
                    self.version = pcdep.get_version()
                    return
            except Exception as e:
                mlog.debug('cups not found via pkgconfig. Trying next, error was:', str(e))
        if DependencyMethods.CUPSCONFIG in self.methods:
            cupsconf = shutil.which('cups-config')
            if cupsconf:
                stdo = Popen_safe(['cups-config', '--cflags'])[1]
                self.compile_args = stdo.strip().split()
                stdo = Popen_safe(['cups-config', '--libs'])[1]
                self.link_args = stdo.strip().split()
                stdo = Popen_safe(['cups-config', '--version'])[1]
                # FIX: the version must be a plain string; the previous
                # .strip().split() stored a list, inconsistent with every
                # other dependency class (compare LibWmfDependency).
                self.version = stdo.strip()
                self.is_found = True
                mlog.log('Dependency', mlog.bold('cups'), 'found:',
                         mlog.green('YES'), '(%s)' % cupsconf)
                return
            mlog.debug('Could not find cups-config binary, trying next.')
        if DependencyMethods.EXTRAFRAMEWORK in self.methods:
            if mesonlib.is_osx():
                fwdep = ExtraFrameworkDependency('cups', False, None, self.env,
                                                 self.language, kwargs)
                if fwdep.found():
                    self.is_found = True
                    self.compile_args = fwdep.get_compile_args()
                    self.link_args = fwdep.get_link_args()
                    self.version = fwdep.get_version()
                    return
        mlog.log('Dependency', mlog.bold('cups'), 'found:', mlog.red('NO'))

    def get_methods(self):
        if mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.CUPSCONFIG, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG, DependencyMethods.CUPSCONFIG]
class LibWmfDependency(ExternalDependency):
    """Dependency object for libwmf, probed via pkg-config or libwmf-config."""

    def __init__(self, environment, kwargs):
        super().__init__('libwmf', environment, None, kwargs)
        if DependencyMethods.PKGCONFIG in self.methods:
            try:
                # Probe quietly; a failure just moves us on to libwmf-config.
                kwargs['required'] = False
                pkg_dep = PkgConfigDependency('libwmf', environment, kwargs)
                if pkg_dep.found():
                    self.type_name = 'pkgconfig'
                    self.compile_args = pkg_dep.get_compile_args()
                    self.link_args = pkg_dep.get_link_args()
                    self.version = pkg_dep.get_version()
                    self.is_found = True
                    return
            except Exception as e:
                mlog.debug('LibWmf not found via pkgconfig. Trying next, error was:', str(e))
        if DependencyMethods.LIBWMFCONFIG in self.methods:
            config_tool = shutil.which('libwmf-config')
            if not config_tool:
                mlog.debug('Could not find libwmf-config binary, trying next.')
                return
            # Query compile flags, link flags and version from the tool.
            self.compile_args = Popen_safe(['libwmf-config', '--cflags'])[1].strip().split()
            self.link_args = Popen_safe(['libwmf-config', '--libs'])[1].strip().split()
            self.version = Popen_safe(['libwmf-config', '--version'])[1].strip()
            self.is_found = True
            mlog.log('Dependency', mlog.bold('libwmf'), 'found:',
                     mlog.green('YES'), '(%s)' % config_tool)

    def get_methods(self):
        if mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.LIBWMFCONFIG, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG, DependencyMethods.LIBWMFCONFIG]
# Generated with boost_names.py
# NOTE: the generator emitted many duplicate entries (boost_chrono twice and
# the boost_math_* family repeated seven times). Only membership matters to
# consumers of this list, so it is de-duplicated here with original order
# preserved.
BOOST_LIBS = [
    'boost_atomic',
    'boost_chrono',
    'boost_container',
    'boost_context',
    'boost_coroutine',
    'boost_date_time',
    'boost_exception',
    'boost_fiber',
    'boost_filesystem',
    'boost_graph',
    'boost_iostreams',
    'boost_locale',
    'boost_log',
    'boost_log_setup',
    'boost_math_tr1',
    'boost_math_tr1f',
    'boost_math_tr1l',
    'boost_math_c99',
    'boost_math_c99f',
    'boost_math_c99l',
    'boost_mpi',
    'boost_program_options',
    'boost_python',
    'boost_python3',
    'boost_numpy',
    'boost_numpy3',
    'boost_random',
    'boost_regex',
    'boost_serialization',
    'boost_wserialization',
    'boost_signals',
    'boost_stacktrace_noop',
    'boost_stacktrace_backtrace',
    'boost_stacktrace_addr2line',
    'boost_stacktrace_basic',
    'boost_stacktrace_windbg',
    'boost_stacktrace_windbg_cached',
    'boost_system',
    'boost_prg_exec_monitor',
    'boost_test_exec_monitor',
    'boost_unit_test_framework',
    'boost_thread',
    'boost_timer',
    'boost_type_erasure',
    'boost_wave',
]
| 39.422172
| 121
| 0.565049
|
4a023e72f4555f5a218ddf45262c30e7294f6672
| 14,267
|
py
|
Python
|
plugins/modules/oci_opsi_database_configuration_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_database_configuration_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_database_configuration_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_database_configuration_facts
short_description: Fetches details about one or multiple DatabaseConfiguration resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple DatabaseConfiguration resources in Oracle Cloud Infrastructure
- Gets a list of database insight configurations based on the query parameters specified. Either compartmentId or databaseInsightId query parameter must be
specified.
When both compartmentId and compartmentIdInSubtree are specified, a list of database insight configurations in that compartment and in all sub-
compartments will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
enterprise_manager_bridge_id:
description:
- Unique Enterprise Manager bridge identifier
type: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- Database configuration list sort options. If `fields` parameter is selected, the `sortBy` parameter must be one of the fields specified.
type: str
choices:
- "databaseName"
- "databaseDisplayName"
- "databaseType"
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List database_configurations
oci_opsi_database_configuration_facts:
# optional
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
enterprise_manager_bridge_id: "ocid1.enterprisemanagerbridge.oc1..xxxxxxEXAMPLExxxxxx"
id: [ "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" ]
database_id: [ "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx" ]
exadata_insight_id: [ "ocid1.exadatainsight.oc1..xxxxxxEXAMPLExxxxxx" ]
cdb_name: [ "cdb_name_example" ]
database_type: [ "ADW-S" ]
sort_order: ASC
sort_by: databaseName
host_name: [ "host_name_example" ]
defined_tag_equals: [ "defined_tag_equals_example" ]
freeform_tag_equals: [ "freeform_tag_equals_example" ]
defined_tag_exists: [ "defined_tag_exists_example" ]
freeform_tag_exists: [ "freeform_tag_exists_example" ]
compartment_id_in_subtree: true
"""
RETURN = """
database_configurations:
description:
- List of DatabaseConfiguration resources
returned: on success
type: complex
contains:
database_insight_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the database insight resource.
returned: on success
type: str
sample: "ocid1.databaseinsight.oc1..xxxxxxEXAMPLExxxxxx"
entity_source:
description:
- Source of the database entity.
returned: on success
type: str
sample: AUTONOMOUS_DATABASE
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
database_name:
description:
- The database name. The database name is unique within the tenancy.
returned: on success
type: str
sample: database_name_example
database_display_name:
description:
- The user-friendly name for the database. The name does not have to be unique.
returned: on success
type: str
sample: database_display_name_example
database_type:
description:
- Operations Insights internal representation of the database type.
returned: on success
type: str
sample: database_type_example
database_version:
description:
- The version of the database.
returned: on success
type: str
sample: database_version_example
cdb_name:
description:
- Name of the CDB.Only applies to PDB.
returned: on success
type: str
sample: cdb_name_example
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
processor_count:
description:
- Processor count. This is the OCPU count for Autonomous Database and CPU core count for other database types.
returned: on success
type: int
sample: 56
sample: [{
"database_insight_id": "ocid1.databaseinsight.oc1..xxxxxxEXAMPLExxxxxx",
"entity_source": "AUTONOMOUS_DATABASE",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"database_name": "database_name_example",
"database_display_name": "database_display_name_example",
"database_type": "database_type_example",
"database_version": "database_version_example",
"cdb_name": "cdb_name_example",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"freeform_tags": {'Department': 'Finance'},
"processor_count": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.opsi import OperationsInsightsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DatabaseConfigurationFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: list"""

    def get_required_params_for_list(self):
        # Every query parameter of list_database_configurations is optional.
        return []

    def list_resources(self):
        """Call list_database_configurations with whichever optional module
        parameters the user actually supplied."""
        opt_param_names = [
            "compartment_id",
            "enterprise_manager_bridge_id",
            "id",
            "database_id",
            "exadata_insight_id",
            "cdb_name",
            "database_type",
            "sort_order",
            "sort_by",
            "host_name",
            "defined_tag_equals",
            "freeform_tag_equals",
            "defined_tag_exists",
            "freeform_tag_exists",
            "compartment_id_in_subtree",
        ]
        # Drop parameters the user left unset so SDK defaults apply.
        kwargs = {
            name: self.module.params[name]
            for name in opt_param_names
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_database_configurations, **kwargs
        )
# Pull in any hand-written customisation class of this name, if one exists in
# the collection; otherwise get_custom_class returns a no-op base.
DatabaseConfigurationFactsHelperCustom = get_custom_class(
    "DatabaseConfigurationFactsHelperCustom"
)


class ResourceFactsHelper(
    DatabaseConfigurationFactsHelperCustom, DatabaseConfigurationFactsHelperGen
):
    # MRO places customisations before the generated implementation, so
    # custom overrides win without editing generated code.
    pass
def main():
    """Module entry point: build the argument spec, run the facts helper and
    exit with the collected database configurations."""
    # Start from the common OCI arg spec (auth, region, etc.) and add the
    # parameters specific to this facts module.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            compartment_id=dict(type="str"),
            enterprise_manager_bridge_id=dict(type="str"),
            id=dict(type="list", elements="str"),
            database_id=dict(type="list", elements="str"),
            exadata_insight_id=dict(type="list", elements="str"),
            cdb_name=dict(type="list", elements="str"),
            database_type=dict(
                type="list",
                elements="str",
                choices=[
                    "ADW-S",
                    "ATP-S",
                    "ADW-D",
                    "ATP-D",
                    "EXTERNAL-PDB",
                    "EXTERNAL-NONCDB",
                ],
            ),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(
                type="str",
                choices=["databaseName", "databaseDisplayName", "databaseType"],
            ),
            host_name=dict(type="list", elements="str"),
            defined_tag_equals=dict(type="list", elements="str"),
            freeform_tag_equals=dict(type="list", elements="str"),
            defined_tag_exists=dict(type="list", elements="str"),
            freeform_tag_exists=dict(type="list", elements="str"),
            compartment_id_in_subtree=dict(type="bool"),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # Fail early when the OCI SDK import at module load time did not succeed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="database_configuration",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )

    result = []

    # This facts module only supports the "list" operation.
    if resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()

    module.exit_json(database_configurations=result)
if __name__ == "__main__":
main()
| 38.874659
| 159
| 0.626691
|
4a024028c1193698f2edf0c6b1279a0fd8b0f65c
| 1,252
|
py
|
Python
|
github_trending_bot/spiders/GithubTrendingRepo.py
|
Pavneet-Sing/github_trending_bot
|
76eb2a98078509521ffac266e01b1997f475d045
|
[
"MIT"
] | null | null | null |
github_trending_bot/spiders/GithubTrendingRepo.py
|
Pavneet-Sing/github_trending_bot
|
76eb2a98078509521ffac266e01b1997f475d045
|
[
"MIT"
] | null | null | null |
github_trending_bot/spiders/GithubTrendingRepo.py
|
Pavneet-Sing/github_trending_bot
|
76eb2a98078509521ffac266e01b1997f475d045
|
[
"MIT"
] | 2
|
2021-07-05T05:02:06.000Z
|
2021-12-16T08:47:29.000Z
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
class GithubtrendingrepoSpider(scrapy.Spider):
    """Spider that fetches the GitHub trending page and prints links found on it.

    Fixes over the original:
    - ``allowed_domains`` must contain bare domain names; the previous value
      'github.com/trending/' included a path, which Scrapy's offsite
      middleware does not accept and would cause requests to be filtered.
    - ``start_urls`` had a stray trailing double slash.
    """

    name = 'GithubTrendingRepo'
    # Domains only -- no scheme, no path.
    allowed_domains = ['github.com']
    start_urls = ['https://github.com/trending']

    def parse(self, response):
        # Print the page title via both CSS and XPath selectors (demo of the
        # two equivalent selector APIs).
        title_text = response.css('title::text')
        print(title_text.get())
        title_text = response.xpath('//title[1]/text()')
        print(title_text.get())
        # Collect every anchor href with both selector flavours.
        css_links = response.css('a::attr(href)').getall()
        xpath_links = response.xpath('//a/@href').getall()
        print(len(css_links))
        print(len(xpath_links))
        for (link, xlink) in zip(css_links, xpath_links):
            print('{} {} '.format(link, xlink))
        # Extract absolute https links while skipping shop/social-media hosts.
        trending_links = LxmlLinkExtractor(allow=r'^https://[a-z.]+/[a-z.]+$', deny_domains=['shop.github.com', 'youtube.com', 'twitter.com'], unique=True).extract_links(response)
        for link in trending_links:
            print("%s : %s " % (link.url, link.text))
| 40.387097
| 177
| 0.652556
|
4a02403359f9362c8a53f41a51b7890857e59388
| 1,219
|
py
|
Python
|
MainPalindrome.py
|
PBHotdog/palindromecheck
|
2bc96a7383ca13d3a8c801cfad515f4ef4613383
|
[
"BSD-2-Clause"
] | null | null | null |
MainPalindrome.py
|
PBHotdog/palindromecheck
|
2bc96a7383ca13d3a8c801cfad515f4ef4613383
|
[
"BSD-2-Clause"
] | null | null | null |
MainPalindrome.py
|
PBHotdog/palindromecheck
|
2bc96a7383ca13d3a8c801cfad515f4ef4613383
|
[
"BSD-2-Clause"
] | null | null | null |
# Start of the top of the program, hello!
'''
check_palindrome checks to see if the word, inverted, is equal to the word regularly
[::-1] is using a slice to check the inverted word
'''
def check_palindrome(user_input):
    """Print a verdict and return True iff *user_input* reads the same reversed."""
    word = str(user_input)
    if word != word[::-1]:
        print("Turns out (" + user_input + ") isn't a palindrome. Try again!")
        return False
    print("Congrats! (" + user_input + ") is indeed a palindrome!")
    return True
'''
MAIN STARTS HERE:
Starts off with a while function this loops the program if the word isn't a palindrome
After that, a print function is asking user for input
At the start of the if statement, checks to see if the user input contains only alphabetical characters
if so goes along the else statement
'''
# Main loop: prompt until the user enters a word that is a palindrome.
is_running = True
while is_running is True:
    print("Hello, please enter a word you would like to check")
    user_input = input()
    # Only accept purely alphabetic input before checking.
    if user_input.isalpha():
        print("Ok! Your word (" + user_input + ") is in fact a word! Let's check it now")
        # A successful palindrome check ends the loop; otherwise prompt again.
        if check_palindrome(user_input) is True:
            is_running = False
        else:
            is_running = True
    else:
        print("Hey! You gotta input a word!")
| 30.475
| 103
| 0.664479
|
4a02414efa3101b2e66b993df5178062ffbc827a
| 540
|
py
|
Python
|
podcasts/migrations/0005_auto_20180121_1541.py
|
GrumpyAny/podcasti.si
|
d446e9e91791886a16b5a33d8f8ec7b2b32ba6ed
|
[
"MIT"
] | 5
|
2018-01-22T07:12:15.000Z
|
2020-12-05T06:06:32.000Z
|
podcasts/migrations/0005_auto_20180121_1541.py
|
GrumpyAny/podcasti.si
|
d446e9e91791886a16b5a33d8f8ec7b2b32ba6ed
|
[
"MIT"
] | 288
|
2018-01-22T09:22:27.000Z
|
2022-03-30T09:13:39.000Z
|
podcasts/migrations/0005_auto_20180121_1541.py
|
GrumpyAny/podcasti.si
|
d446e9e91791886a16b5a33d8f8ec7b2b32ba6ed
|
[
"MIT"
] | 4
|
2018-01-22T08:34:20.000Z
|
2021-01-06T11:07:30.000Z
|
# Generated by Django 2.0.1 on 2018-01-21 15:41
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    # Replaces the per-episode foreign key on Stats with a free-form JSON
    # payload column.

    dependencies = [
        ('podcasts', '0004_stats'),
    ]

    operations = [
        # Drop the old relational link to an episode...
        migrations.RemoveField(
            model_name='stats',
            name='episode',
        ),
        # ...and store the stats as schemaless JSON instead (empty dict default).
        migrations.AddField(
            model_name='stats',
            name='payload',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
    ]
| 22.5
| 79
| 0.594444
|
4a0241604c8b02f60fc79a957ec8495bc8c9618f
| 1,154
|
py
|
Python
|
examples/hansen2011.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | 1
|
2021-12-15T13:22:48.000Z
|
2021-12-15T13:22:48.000Z
|
examples/hansen2011.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | 2
|
2021-11-09T23:43:48.000Z
|
2021-11-13T20:41:12.000Z
|
examples/hansen2011.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | null | null | null |
from mdptools import MarkovDecisionProcess as MDP
from mdptools.set_methods import conflicting_transitions
from helpers import at_root, display_graph, display_dot
from mdptools.utils import highlight
# %%
# Build the four component MDPs from the Hansen 2011 example.
# Each transition tuple is (action, source_state[, target distribution]).
m1 = MDP(
    [
        ("a", "s0", {"s1": 0.2, "s2": 0.8}),
        ("b", "s0", {"s2": 0.7, "s3": 0.3}),
        ("tau", "s1"),
        ("x", "s2"),
        ("y", "s2"),
        ("z", "s2"),
        ("x", "s3"),
        ("z", "s3"),
    ],
    name="M1",
)
print(m1, "\n")

m2 = MDP([("x", "r0", "r1"), ("y", "r1", "r0"), ("z", "r1")], name="M2")
print(m2, "\n")

m3 = MDP([("c", "w0", "w1"), ("y", "w0"), ("tau", "w1")], name="M3")
print(m3, "\n")

m4 = MDP([("z", "v0", "v1"), ("y", "v0"), ("z", "v1")], name="M4")
print(m4, "\n")

# Parallel composition of all four processes.
m = MDP(m1, m2, m3, m4)
print(m, "\n")

# %%
# Render the individual MDPs and the composed system as Graphviz files.
display_graph(m1, m2, m3, m4, file_path="out/graphs/hansen2011_mdps.gv")
display_dot(
    m.to_graph(
        at_root("out/graphs/hansen2011_combined.gv"),
        set_method=conflicting_transitions,
        highlight=True,
    )
)

# %%
# Export PRISM model files for the first component and the composition.
print(m1.to_prism(at_root("out/prism/generated.prism")), "\n")
print(m.to_prism(at_root("out/prism/testing.prism")), "\n")

# %%
| 23.08
| 72
| 0.516464
|
4a0241841244e33389f5f0af1aa071880cee0e2f
| 412
|
py
|
Python
|
venv/Scripts/pip-script.py
|
hardik0899/MyTextEditor
|
ab1b1816215cb3e7fc5ca517f4eaeaa38f8d3220
|
[
"Apache-2.0"
] | 2
|
2019-04-07T08:03:00.000Z
|
2021-04-09T18:30:32.000Z
|
venv/Scripts/pip-script.py
|
hardik0899/MyTextEditor
|
ab1b1816215cb3e7fc5ca517f4eaeaa38f8d3220
|
[
"Apache-2.0"
] | 1
|
2020-07-01T16:29:03.000Z
|
2020-07-01T16:29:03.000Z
|
venv/Scripts/pip-script.py
|
hardik0899/MyTextEditor
|
ab1b1816215cb3e7fc5ca517f4eaeaa38f8d3220
|
[
"Apache-2.0"
] | null | null | null |
#!C:\Users\Nanak\PycharmProjects\MyEditor\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Pin the distribution that setuptools resolves the console entry point from.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| 31.692308
| 69
| 0.669903
|
4a024261d6d358d938507731019e6f7ded40f61f
| 745
|
py
|
Python
|
src/urls.py
|
rcaballero218/turtlenotes-project
|
506971227ccfcaac7b6520e20ff8f50f1e9e08de
|
[
"Unlicense"
] | null | null | null |
src/urls.py
|
rcaballero218/turtlenotes-project
|
506971227ccfcaac7b6520e20ff8f50f1e9e08de
|
[
"Unlicense"
] | null | null | null |
src/urls.py
|
rcaballero218/turtlenotes-project
|
506971227ccfcaac7b6520e20ff8f50f1e9e08de
|
[
"Unlicense"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
# URL routing table.
# NOTE(review): string view arguments ('app.views.name') were deprecated in
# Django 1.8 and removed in 1.10 -- confirm the project's Django version
# before upgrading.
urlpatterns = [
    url(r'^$', 'newsletter.views.home', name='home'),
    url(r'^contact/$', 'newsletter.views.contact', name='contact'),
    url(r'^about/$', 'src.views.about', name='about'),
    url(r'^notes/$', 'src.views.notes', name='notes'),
    url(r'^upload/$', 'src.views.upload', name='upload'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/', include('registration.backends.default.urls')),
]

if settings.DEBUG:
    # In development, let Django itself serve static and uploaded media files.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 33.863636
| 79
| 0.716779
|
4a0242ad2800f367e79fcd9e66a2bfb07eabb6ae
| 48,921
|
py
|
Python
|
ScriptResearch/entoforms-read-only/addons/object_entoform.py
|
kellpossible/VoxelEditor
|
5ce37e419b8b98f31d4c78ffcb14415256f475e9
|
[
"MIT"
] | 3
|
2017-07-15T14:51:07.000Z
|
2018-07-13T22:42:34.000Z
|
ScriptResearch/entoforms-read-only/addons/object_entoform.py
|
kellpossible/VoxelEditor
|
5ce37e419b8b98f31d4c78ffcb14415256f475e9
|
[
"MIT"
] | null | null | null |
ScriptResearch/entoforms-read-only/addons/object_entoform.py
|
kellpossible/VoxelEditor
|
5ce37e419b8b98f31d4c78ffcb14415256f475e9
|
[
"MIT"
] | null | null | null |
# Entoform.py Copyright (C) 2011, Dolf Veenvliet
#
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Entoform",
"author": "Dolf Veenvliet",
"version": 1,
"blender": (2, 5, 6),
"api": 31847,
"location": "object > Entoform ",
"description": "Build an entoform",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Object"}
"""
Usage:
Launch from Object menu
Additional links:
Author Site: http://www.macouno.com
e-mail: dolf {at} macouno {dot} com
"""
import bpy, mathutils, math, cProfile, colorsys, datetime, time
from mathutils import geometry
from bpy.props import StringProperty, IntProperty, BoolProperty
from macouno import mesh_extras, misc, colour, select_faces, falloff_curve, liberty
# Make it as a class
class Entoform():
# Initialise the class
def __init__(self, context, dnaString, subdivide, keepgroups, finish, run):
    """Grow an entoform mesh from *dnaString*.

    :param context: Blender context the operator runs in.
    :param dnaString: seed string driving DNA generation.
    :param subdivide: subsurf levels to apply (0/False skips subdivision).
    :param keepgroups: passed through to setup(); presumably controls whether
        vertex groups are retained -- TODO confirm against setup().
    :param finish: when true, run the finishing pass after growth.
    :param run: when false, construct without doing any work (early exit).
    """
    if not run:
        return
    # Start by setting up some default vars and such (in sepparate function because it's a bit much)
    self.setup(context, dnaString, keepgroups)
    # GO make the DNA strings
    self.createDNA()
    # Make the base group
    baseGroups = self.makeBaseGroup()
    # Execute each DNA string against the base group at full weight.
    for string in self.dna['strings']:
        self.executeDNA(string, baseGroups, 1.0)
    # Make sure we're shaded smoothly
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.shade_smooth()
    # Add self shadow (after the first subdivision)!
    bpy.ops.object.mode_set(mode='VERTEX_PAINT')
    bpy.ops.paint.self_shadow(contrast=3.0, method='EDG', normalize=True)
    # Subsurf the first time if required
    if subdivide:
        # Split the edges!
        '''
        bpy.ops.object.modifier_add(type='EDGE_SPLIT')
        mod = self.ob.modifiers[0]
        mod.use_edge_angle = False
        mod.use_edge_sharp = True
        bpy.ops.object.modifier_apply(apply_as='DATA', modifier="EdgeSplit")
        '''
        # Apply a subsurf modifier at the requested level, then bake it in.
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.modifier_add(type='SUBSURF')
        mod = self.ob.modifiers[0]
        mod.levels = subdivide
        bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Subsurf")
    bpy.ops.object.mode_set(mode='EDIT')
    if finish:
        self.finish(context)
    self.reset(context)
# Go grow something!
def executeDNA(self, string, baseGroups, baseWeight):
'''
if string['number'] >= 1:
#if string['number'] in [0,1,3]:
return
elif string['number'] == 5 or string['number'] == 6:
return
'''
# Redraw hack to see what is happening
# bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
newGroups, formmatrix, growmatrices = self.makeAffectedGroups(string, baseGroups)
groupLen = len(newGroups)\
pad = str(' ').rjust(string['level'], ' ')
idText = 'limb '+misc.nr4(string['number'])+' '+string['name'].ljust(10, ' ')
print(pad,idText)
# only if we made a group with something in it do we continue
if not groupLen:
print(' - No group!')
else:
# Loop through all the groups
for i, group in enumerate(newGroups):
# The step number to print out
stepText = misc.nr4(i+1)+' of '+misc.nr4(groupLen)
# We need a check matrix only if we're not on the head or body
if string['name'] == 'head' or string['name'] == 'body' or True:
try:
del(self.ob['formmatrix'])
except:
pass
# If not... then just try to get rid of it
else:
self.ob['formmatrix'] = formmatrix
# Body gets a set matrix (so it grows nice and straight)
if string['name'] == 'head':
growmatrix = mathutils.Matrix(((1.0,0.0,0.0),(0.0,0.0,1.0),(0.0,-1.0,0.0))).transposed()
# Head gets a set matrix (so it grows nice and straight)
elif string['name'] == 'body':
growmatrix = mathutils.Matrix(((-1.0,0.0,0.0),(0.0,0.0,1.0),(0.0,1.0,0.0))).transposed()
# In all other cases the matrix can be dealt with by the grow addon
else:
growmatrix = growmatrices[i]
self.ob['growmatrix'] = growmatrix
# Select a group
select_faces.none()
select_faces.in_group(group)
# No need to continue if we have no selected faces
if not mesh_extras.contains_selected_item(self.me.faces):
print(pad,'skip ',stepText,'no selection',string['action']['name'])
else:
a = string['action']
if a['type'] == 'grow':
# Check for mirroring
right = mathutils.Vector((1.0,0.0,0.0))
check = mathutils.Vector(growmatrix[2])
# If we're aiming left we "invert" the rotation
if right.dot(check) < 0.0:
rot = mathutils.Vector((-a['rotation'][0],a['rotation'][1],-a['rotation'][2]))
else:
rot = a['rotation']
# Add relative intensity here (half the original + half the weight)
weight = baseWeight * self.getWeight(groupLen, a['scalin'])
trans = a['translation']
#trans = self.applyIntensity(a['translation'], weight, 'float')
#rot = self.applyIntensity(rot, weight, 'inc')
if a['type'] == 'grow' and trans == 0.0:
print(pad,'skip ',stepText,'too short',trans,'from',a['translation'])
else:
print(pad,'step ',stepText,a['name'])
#print(self.applyIntensity(a['push'], weight, 'float'))
bpy.ops.object.mode_set(mode='EDIT')
if a['type'] == 'bump':
bpy.ops.mesh.bump(
type=a['bumptype'],
scale=a['bumpscale'],
steps=True,
)
else:
bpy.ops.mesh.grow(
translation=trans,
rotation=rot,
rotation_falloff=a['rotation_falloff'],
scale=a['scale'],
scale_falloff=a['scale_falloff'],
retain=True,
steps=True,
debug=False,
)
bpy.ops.object.mode_set(mode='OBJECT')
select_faces.none()
select_faces.in_group(group)
self.applyGrowthColor(a)
if a['type'] == 'grow':
self.applyGrowthCrease(a)
# Remove new stuff from all but the current group
self.cleanGroup(group)
# Keep track of how much steps we've taken
self.dnaStep += 1
# If there's a sub
if len(string['strings']):
for s in string['strings']:
#print('going sub', string['name'], s['name'])
self.executeDNA(s, [group], weight)
def createDNA(self):
# Make the color palette
if self.options['palettes']:
self.options['basecolor'] = self.choose('select', 'palette', 'base color')
colour.setBaseColor(self.options['basecolor'])
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
# Make the head
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the head\n")
# Start with all directions
self.options['local_directions'] = self.options['directions']
selection = self.getSelection('head')
action = self.makeAction(selection, 'head')
string = {'name': 'head', 'action':action, 'selection':selection, 'strings':[], 'level':1,'number':self.stringCount}
self.dna['strings'].append(string)
self.stringCount += 1
# Make eyes on the head!
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for eyes\n")
selection = self.getSelection('eyes')
action = self.makeAction(selection, 'eyes')
string = {'name': 'eyes', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][0]['strings'].append(string)
self.stringCount += 1
# Mirror the action in case it's left or right
if selection['type'] == 'direction' and (selection['vector'] == mathutils.Vector((1.0,0.0,0.0)) or selection['vector'] == mathutils.Vector((-1.0,0.0,0.0))):
string = self.mirrorDNA(action, selection, 2)
self.dna['strings'][0]['strings'].append(string)
# SUB HEAD!
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for sub head\n")
selection = self.getSelection('sub head')
action = self.makeAction(selection, 'bump')
string = {'name': 'sub head', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][0]['strings'].append(string)
self.stringCount += 1
# Mirror the action in case it's left or right
if selection['type'] == 'direction' and (selection['vector'] == mathutils.Vector((1.0,0.0,0.0)) or selection['vector'] == mathutils.Vector((-1.0,0.0,0.0))):
string = self.mirrorDNA(action, selection, 2)
self.dna['strings'][0]['strings'].append(string)
# Make the body
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the body\n")
self.options['local_directions'] = self.options['directions']
selection = self.getSelection('body')
action = self.makeAction(selection, 'body')
string = {'name': 'body', 'action':action, 'selection':selection, 'strings':[], 'level':1,'number':self.stringCount}
self.dna['strings'].append(string)
self.stringCount += 1
# Make a tail!
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the tail\n")
selection = self.getSelection('tail')
action = self.makeAction(selection, 'tail')
string = {'name':'tail', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][1]['strings'].append(string)
self.stringCount += 1
# Make some legs (well hopefully)
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the legs\n")
selection = self.getSelection('legs')
action = self.makeAction(selection, 'legs')
#action['translation'] *= 2
string = {'name':'left legs', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][1]['strings'].append(string)
self.stringCount += 1
# Mirror the legs
string = self.mirrorDNA(action, selection, 3)
self.dna['strings'][1]['strings'].append(string)
# Lower legs
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the lower legs\n")
selection = self.getSelection('lowerlegs')
action = self.makeAction(selection, 'lower legs')
#action['translation'] *= 2
string = {'name':'lower legs', 'action':action, 'selection':selection, 'strings':[], 'level':3,'number':self.stringCount}
self.dna['strings'][1]['strings'][1]['strings'].append(string)
self.stringCount += 1
string = self.mirrorDNA(action, selection, 3)
self.dna['strings'][1]['strings'][2]['strings'].append(string)
# SUB body!
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print((self.stringCount+1),"Making nr",(self.stringCount+1),"DNA string for the tail\n")
selection = self.getSelection('sub body')
action = self.makeAction(selection, 'bump')
string = {'name':'sub body', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][1]['strings'].append(string)
self.stringCount += 1
# Mirror the action in case it's left or right
if selection['type'] == 'direction' and (selection['vector'] == mathutils.Vector((1.0,0.0,0.0)) or selection['vector'] == mathutils.Vector((-1.0,0.0,0.0))):
string = self.mirrorDNA(action, selection, 2)
self.dna['strings'][1]['strings'].append(string)
print("\n - - - DONE MAKING DNA - - - LETS GO GROW SOMETHING - - -\n")
# Take a dna's action and selection and mirror the vector
def mirrorDNA(self, action, selection, level):
a = action.copy()
s = selection.copy()
s['vector'] = mathutils.Vector(s['vector']).copy()
s['vector'][0] = -s['vector'][0]
self.cleanDirections(s['vector'])
str = {'name':'mirrored', 'action': action, 'selection': s, 'strings': [], 'level':level,'number':self.stringCount}
self.stringCount += 1
return str
	# Make an action for the dna string
	def makeAction(self, selection, style='shape'):
		"""Draw an action (grow or bump settings) from the DNA stream.

		selection -- the selection dict this action will be applied to
		style -- 'eyes', 'bump', 'head'/'body'/'tail' or a generic limb name

		NOTE: every self.choose call consumes the DNA stream, so the call
		order in each branch must never change or all later draws shift.
		"""
		if style == 'eyes':
			# Eyes are a fixed black bump with white joints
			action = {
				'name':style,
				'type': 'bump',
				'bumptype': 'BUM',
				'bumpscale': 0.5,
				'vertexcolor': (0.0,0.0,0.0),
				'jointcolor': (1.0,1.0,1.0),
				'colorstyle': 'hard',
				'crease': self.choose('float', 'crease', 'crease'),
				'sub': False,
				}

		elif style == 'bump' or selection['type'] == 'loops':
			# Bumps draw their type, scale and colours from the stream
			action = {
				'name':style,
				'type': 'bump',
				'bumptype': self.choose('select','bumptypes','bump type'),
				'bumpscale': self.choose('float','bumpscale','bump factor'),
				'vertexcolor': self.choose('select','palette','vertex color'),
				'jointcolor': self.choose('select','palette','joint color'),
				'colorstyle': self.choose('select','colorstyles','color style'),
				'crease': self.choose('float', 'crease', 'crease'),
				'sub': False,
				}

		else:
			axis = 'all'

			print('\n style = ',style,'\n')

			# Head, body and tail only rotate around X so they grow straight
			if style == 'head' or style == 'body' or style == 'tail':
				axis = 'x'

			action = {
				'name':style,
				'type': 'grow',
				'translation': self.choose('float', 'translate', 'translation'),
				'rotation': self.makeRotationVector(axis),
				'rotation_falloff': self.choose('select', 'falloffs', 'rotation falloff'),
				'scale': self.choose('float', 'scale', 'scale'),
				'scale_falloff': self.choose('select', 'falloffs', 'scale falloff'),
				'vertexcolor': self.choose('select','palette', 'vertex color'),
				'jointcolor': self.choose('select','palette','joint color'),
				'colorstyle': self.choose('select','colorstyles','color style'),
				'crease': self.choose('float', 'crease', 'crease'),
				'scalin': 'preset',
				'sub': False,
				}

		return action
# Set the intensity for a vector
def applyIntensity(self, vector, intensity, mode):
if mode == 'float':
return vector * intensity
if mode == 'int':
return math.ceil(vector * intensity)
vector = mathutils.Vector((vector[0], vector[1], vector[2]))
if mode == 'inc':
vector *= intensity
else:
for i, v in enumerate(vector):
if v > 1.0:
vector[i] = ((v-1.0) * intensity) + 1.0
elif v < 1.0:
vector[i] = 1.0 - ((1.0 - v) * intensity)
return vector
	# Apply a vertex colour to a vertex group
	def applyGrowthColor(self, a):
		"""Colour the faces of the last growth step.

		a -- the action dict; 'vertexcolor' fills the limb, 'jointcolor'
		paints the outermost ring, and 'colorstyle' picks 'soft' (per vert)
		or 'hard' (per face, with creased bump boundary edges).
		"""

		# Just apply the vertex colour to all the verts if it applies... easy!
		if self.options['palettes']:

			vec = list(a['vertexcolor'])
			selFaces = []

			# Paint every selected face with the limb colour
			for f in self.ob.data.faces:
				if f.select:
					selFaces.append(f)

					if a['colorstyle'] == 'soft':
						for v in f.vertices:
							self.applyColorToVert(v, vec)
					else:
						self.applyColorToFace(f.index, vec)

			# Shrink the selection to the outermost ring for the joint colour
			select_faces.outermost()

			vec = list(a['jointcolor'])

			selVerts = []
			outFaces = []

			for f in self.ob.data.faces:
				if f.select:
					if a['colorstyle'] == 'soft':
						for v in f.vertices:
							self.applyColorToVert(v, vec)
					else:
						selVerts.extend(f.vertices)
						outFaces.append(f)
						self.applyColorToFace(f.index, vec)

			# Lets make some sharp edges
			if a['type'] == 'bump' and a['colorstyle'] == 'hard':

				# Check every edge
				for e in self.ob.data.edges:
					v0 = e.vertices[0]
					v1 = e.vertices[1]

					# If both verts in the edge are selected... this could be sharp
					if v0 in selVerts and v1 in selVerts:

						ond = 0
						snd = 0

						# See how many faces this edge is part of
						for f in outFaces:
							if v0 in f.vertices and v1 in f.vertices:
								ond += 1

						for f in selFaces:
							if not f in outFaces:
								if v0 in f.vertices and v1 in f.vertices:
									snd += 1

						# If the edge is only part of one seleced face it's on the outside
						if ond == 1: # and snd == 1:
							e.crease = 1.0

						'''
						sharp = 0
						pole = 1
						for ec in self.ob.data.edges:
							if not ec == e:
								ecVerts = ec.vertices
								if v0 in ecVerts or v1 in ecVerts:
									pole += 1
									if ec.use_edge_sharp:
										sharp += 1

						if pole == 4 and sharp < 2:
							e.use_edge_sharp = True
						'''

			# Set the selection of faces back to the original
			for f in selFaces:
				f.select = True
	def applyGrowthCrease(self, a):
		"""Crease the boundary edges of every ring of the last grow step.

		a -- action dict; a['crease'] is the crease value to apply.
		Walks inward one ring per recorded grow step and creases each
		ring's "long" edges (edges with exactly one selected vert).
		"""

		# LETS LOOK AT CREASES!
		vec = a['crease']

		# Now we want to find out how many steps we made
		# (written by the grow operator as a custom property)
		steps = self.ob['growsteps']
		if steps:

			# Loop through all the steps
			for i in range(int(steps)):

				select_faces.outermost(True)

				# Find all the selected vertices
				selFaces = mesh_extras.get_selected_faces()
				selVerts = []
				for f in selFaces:
					selVerts.extend(f.vertices)

				# Loop through all edges
				for e in self.me.edges:

					eVerts = e.vertices # NOTE(review): unused local

					# If an edge has only 1 selected vert... it's "long" and on the outside of the selection
					intersection = [v for v in e.vertices if v in selVerts]

					if len(intersection) == 1 and e.crease < 1.0:
						e.crease = vec
# Apply a color to a face for a harsh transition
def applyColorToFace(self, fInd, vCol):
# Get the faces
face = bpy.context.active_object.data.faces[fInd]
vColFace = self.me.vertex_colors.active.data[fInd]
vColFace.color1 = vCol
vColFace.color2 = vCol
vColFace.color3 = vCol
if len(face.vertices) == 4:
vColFace.color4 = vCol
# Apply a vertex colour to a vert
def applyColorToVert(self, vInd, vCol):
# Get the faces
for f in bpy.context.active_object.data.faces:
if vInd in f.vertices:
vColFace = self.me.vertex_colors.active.data[f.index]
for r, v in enumerate(f.vertices):
if v == vInd:
if not r:
vColFace.color1 = vCol
elif r == 1:
vColFace.color2 = vCol
elif r == 2:
vColFace.color3 = vCol
elif r == 3:
vColFace.color4 = vCol
break
	# Make a section type for the dna string
	def getSelection(self, type='none'):
		"""Draw a selection spec for a limb from the DNA stream.

		type -- limb name ('head', 'body', 'tail', 'legs', 'lowerlegs',
		'eyes' or anything else for a free choice). Note the parameter
		shadows the builtin "type" (kept for interface compatibility).

		NOTE: self.choose calls consume the DNA stream; their order per
		branch is fixed.
		"""
		selection = {
			'type': 'direction',
			'area': 'area',
			'vector': mathutils.Vector(),
			'divergence': math.radians(90),
			'method': 'generated'
			}

		## HEAD AND BODY
		if type == 'head' or type == 'body' or type == 'tail' or type == 'legs' or type == 'lowerlegs':

			# Fixed growth directions for the main limbs
			if type == 'head':
				selection['vector'] = mathutils.Vector((0.0,-1.0,0.0))

			elif type == 'body' or type == 'tail':
				selection['vector'] = mathutils.Vector((0.0,1.0,0.0))

			elif type == 'legs':
				selection['vector'] = mathutils.Vector((1.0,0.0,0.0))
				selection['area'] = 'faces'

			elif type == 'lowerlegs':
				selection['vector'] = self.choose('select', 'local_directions', 'selection direction')
				selection['type'] = 'joint'

			if type == 'tail' or type == 'legs':
				selection['divergence'] = self.choose('float', 'divergence', 'directional divergence')

			# Remove the opposite!
			self.cleanDirections(selection['vector'])

			selection['method'] = 'forced'

		elif type == 'eyes':
			selection['type'] = self.choose('select', 'selectioneyes', 'selection type')
			selection['area'] = 'faces'
			selection['method'] = 'limited'

		# Now we just pick what's nice
		else:
			selection['type'] = self.choose('select', 'selectiontypes', 'selection type')

			if selection['type'] == 'all':
				selection['area'] = 'faces'

			elif selection['type'] == 'direction':
				selection['vector'] = self.choose('select', 'local_directions', 'selection direction')
				self.cleanDirections(selection['vector'])
				selection['area'] = self.choose('select', 'areatypes', 'area for selection')
				selection['divergence'] = self.choose('float', 'divergence', 'directional divergence')

			elif selection['type'] == 'liberal':
				selection['area'] = 'faces'

			elif selection['type'] == 'checkered':
				selection['area'] = 'faces'

			elif selection['type'] == 'loops':
				selection['frequency'] = self.choose('select', 'frequencies', 'loop frequencies')

			elif selection['type'] == 'tip':
				selection['area'] = self.choose('select', 'areatypes', 'area for selection')

			elif selection['type'] == 'joint':
				selection['vector'] = self.choose('select', 'local_directions', 'selection direction')
				self.cleanDirections(selection['vector'])
				selection['divergence'] = self.choose('float', 'divergence', 'directional divergence')

		# Per-face selections are capped to a limited number of faces
		if selection['area'] == 'faces':
			selection['limit'] = self.choose('int', 'limit', 'selection limit')

		selection['formmatrix'] = ''
		selection['growmatrices'] = []

		return selection
# Make a rotation vector
def makeRotationVector(self, axis='all'):
# For the body head and tail we only rotate up and down
if axis == 'x':
return mathutils.Vector((self.choose('float', 'rotation', 'X rotation'),0.0,0.0))
vector = mathutils.Vector((
self.choose('float', 'rotation', 'X rotation'),
self.choose('float', 'rotation', 'Y rotation'),
self.choose('float', 'rotation', 'Z rotation')
))
return vector
	# Remove the items in the current group from all others
	def cleanGroup(self, group):
		"""Make group the sole owner of its shrunken vert selection.

		Selects the group's verts, shrinks the selection by one ring, and
		removes the remaining verts from every other group made this run.
		The op sequence (mode switches, select-mode toggles) is required
		by Blender's operator context and must stay in this order.
		"""
		bpy.ops.object.mode_set(mode='EDIT')
		self.ob.vertex_groups.active_index = group.index

		# Make sure the entire group is selected
		bpy.ops.mesh.select_all(action='DESELECT')
		self.ob.vertex_groups.active_index = group.index
		bpy.ops.object.vertex_group_select()

		# Set editing to vert mode before selecting less
		bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(True, False, False)")
		bpy.ops.mesh.select_less()

		# Set editing back to face mode
		bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(False, False, True)")

		# Take the selected verts out of every other generated group
		for g in self.newGroups:
			if g.index != group.index:
				self.ob.vertex_groups.active_index = g.index
				bpy.ops.object.vertex_group_remove_from(all=False)

		bpy.ops.object.mode_set(mode='OBJECT')
	# Make all the faces that are affected selected and return them as a list
	def makeAffectedGroups(self, string, baseGroups):
		"""Select the faces this DNA string affects and group them.

		string -- DNA dict; string['selection'] decides how faces are picked
		baseGroups -- groups limiting where the selection may come from

		Returns (newGroups, formmatrix, growmatrices) as produced by
		addToNewGroups for the chosen faces.
		"""
		selection = string['selection']
		newGroups = []
		formmatrix = mathutils.Matrix()
		growmatrices = []

		# Deselect all faces to start clean!
		select_faces.none()

		# Select everything in the base groups
		for g in baseGroups:
			select_faces.in_group(g,True)

		#print('in_group',len(mesh_extras.get_selected_faces()))

		# If nothing is selected there's nothing to do
		if mesh_extras.contains_selected_item(self.me.faces):

			# Select the faces at the tip in a certain direction
			if selection['type'] == 'joint' or selection['type'] == 'tip':

				select_faces.innermost()

				if mesh_extras.contains_selected_item(self.me.faces):

					if selection['type'] == 'joint':
						select_faces.connected(True)

						selCnt = len(mesh_extras.get_selected_faces())
						nuCnt = selCnt
						div = selection['divergence']

						# If the nr of faces selected isn't diminished... we select less!
						# (narrow the directional cone until the selection shrinks)
						while selCnt and selCnt == nuCnt and div > 0.1:
							select_faces.by_direction(selection['vector'],div)
							div = div * 0.75
							selFaces = mesh_extras.get_selected_faces()
							nuCnt = len(selFaces)

							# Check for opposing normals.. .cause they should not be there!
							for f1 in selFaces:
								if f1.select:
									f1No = f1.normal
									for f2 in selFaces:
										if f2.select and not f1 is f2:
											f2No = f2.normal
											ang = f2No.angle(f1No)
											if ang > math.radians(120):
												f1.select = False
												break

							selFaces = mesh_extras.get_selected_faces()
							nuCnt = len(selFaces)

						# Narrowing never shrank the selection: give up on it
						if nuCnt == selCnt:
							select_faces.none()

					# If we have selected faces... we can add em to a new group
					newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)

			# Select by pi (fake random)
			elif selection['type'] == 'liberal':

				select_faces.liberal(self.dnaString)

				# If we have selected faces... we can add em to a new group
				newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)

			# Select all loops in the group
			elif selection['type'] == 'loops':

				select_faces.connected()
				self.deselectUnGrouped()

				step = 0

				# As long as something is selected, we can continue
				while mesh_extras.contains_selected_item(self.ob.data.faces):

					select_faces.connected()
					self.deselectGrouped(baseGroups)

					# Skip selection just in case
					if not step % selection['frequency']:

						# If we have selected faces... we can add em to a new group
						newGroups, formmatrix, grw = self.addToNewGroups(string, newGroups, growmatrices)
						growmatrices.extend(grw)

					step += 1
					print(step)

			# Select by direction
			elif selection['type'] == 'direction':

				select_faces.by_direction(selection['vector'],selection['divergence'])

				newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)

			# All!
			else:
				newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)

		return newGroups, formmatrix, growmatrices
# Deselect all the faces that are not in a group
def deselectUnGrouped(self):
# Get the faces (and go into object mode)
faces = mesh_extras.get_selected_faces()
if len(faces):
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for g in vert.groups:
if g.weight:
found = True
if not found:
inGroup = False
if not inGroup:
f.select = False
# Deselect all faces that are already grouped, but not in the baseGroups
def deselectGrouped(self, baseGroups):
# Get the faces (and go into object mode)
faces = mesh_extras.get_selected_faces()
if len(faces):
# First lets make sure the faces are in the current base groups
for g in baseGroups:
# Check all selected faces
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for vg in vert.groups:
if vg.group == g.index:
found = True
if not found:
inGroup = False
if not inGroup:
f.select = False
faces = mesh_extras.get_selected_faces()
if len(faces):
for g in self.newGroups:
if not g in baseGroups:
# Check all selected faces
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for vg in vert.groups:
if vg.group == g.index:
found = True
if not found:
inGroup = False
if inGroup:
f.select = False
# Adding the current selection to a new group
def addToNewGroups(self, string, newGroups, growmatrices=[]):
selection = string['selection']
self.doubleCheckSelection(selection)
faces = mesh_extras.get_selected_faces()
formmatrix = mathutils.Matrix()
growmatrices = []
if len(faces):
verts = []
inds = []
for f in faces:
for v in f.vertices:
if not v in inds:
inds.append(v)
verts.append(self.me.vertices[v])
# NOW WE GO MAKE THE GROUPS
if len(verts):
weights = self.makeWeights(verts)
formmatrix = mesh_extras.get_selection_matrix(faces)
# If we do this per area, we want the entire area to be part of one group
if selection['area'] == 'area':
growmatrices.append(formmatrix)
newGroup = self.ob.vertex_groups.new(string['name']+'.'+selection['type'])
newGroups.append(newGroup)
self.newGroups.append(newGroup)
for v in verts:
newGroup.add([v.index], 1.0, 'REPLACE')
# If we have it per face, we need sepparate weights and groups
elif selection['area'] == 'faces':
if len(faces):
for i, f in enumerate(faces):
growmatrices.append(mesh_extras.get_selection_matrix([f]))
newGroup = self.ob.vertex_groups.new(string['name']+'.'+selection['type']+'.'+misc.nr4(i))
newGroups.append(newGroup)
self.newGroups.append(newGroup)
vertList = f.vertices
for i,v in enumerate(verts):
ind = v.index
if ind in vertList:
newGroup.add([v.index], weights[i], 'REPLACE')
return newGroups, formmatrix, growmatrices
# make the base group that we're working with
def makeBaseGroup(self):
newGroup = self.ob.vertex_groups.new('base')
self.ob.vertex_groups.active_index = newGroup.index
baseGroupList = [newGroup]
self.newGroups.append(newGroup)
vList = [v.index for v in self.ob.data.vertices]
newGroup.add(vList, 1.0, 'REPLACE')
return baseGroupList
	# Just some nice checks to do with selections
	def doubleCheckSelection(self, selection):
		"""Sanity-check the current face selection before grouping.

		Caps per-face selections at the chosen limit, then — for
		directional area selections — reduces what's left to a single
		connected island. The limit must run first since it changes
		the selection the island check sees.
		"""

		# Make sure there's never more than 12 faces we grow out of
		if selection['area'] == 'faces':
			select_faces.limit(selection['limit'], self.dnaString)

		# If we still have something selected, then we need to check for Islands (only one continuous island should be selected)
		if selection['type'] == 'direction' and selection['area'] == 'area' and mesh_extras.contains_selected_item(self.me.faces):
			self.checkForIslands(selection['vector'])
# Make sure only one "island" is selected
def checkForIslands(self, vector):
faces = mesh_extras.get_selected_faces()
# Find the face furthest along the vector
max = 0.0
closestFace = 0
closestVerts = 0
for i,f in enumerate(faces):
dist = vector.dot(f.center)
if dist > max or not i:
max = dist
closestFace = f
closestVerts = f.vertices
# Find the faces connected to this one!
connectedFaces = [closestFace]
connectedVerts = list(closestVerts)
foundNew = True
# As long as we can find connected faces we continue
while foundNew:
foundNew = False
for f in faces:
addThis = False
# If we haven't done this one yet
if not f in connectedFaces:
intersection = [v for v in f.vertices if v in connectedVerts]
if len(intersection):
addThis = True
if addThis:
foundNew = True
connectedFaces.append(f)
connectedVerts.extend(f.vertices)
# Deselect disconnected faces
for f in faces:
if not f in connectedFaces:
f.select = False
# Make relative weights for the verts
def makeWeights(self, verts):
cen = mathutils.Vector()
for v in verts:
cen += v.co
cen *= (1.0/len(verts))
# Find the minimum and maximum distance from the centre
min = 0.0
max = 0.0
distances = []
for i, v in enumerate(verts):
dist = (v.co - cen).length
distances.append(dist)
if not i or dist < min:
min = dist
if not i or dist > max:
max = dist
max = max - min
if max > 0.0:
factor = (1.0 / max)
else:
factor = 1.0
# Make the weights
weights = []
for i, v in enumerate(verts):
weight = (max - (distances[i] - min)) * factor
weights.append(weight)
return weights
	# Get the weight of the current selection
	def getWeight(self, groupLen, scalin):
		"""Work out the intensity for the next grow step.

		groupLen -- number of sibling groups grown from this string
		scalin -- 'edge' scales by the shortest selected edge; any other
		value leaves that factor at 1.0
		"""
		weight = 1.0

		# If we're applying the weight based on the edge, we find the shortest edge
		if scalin == 'edge':
			short = 0.0
			check = 0

			# Edge select state is only reliable in object mode
			bpy.ops.object.mode_set(mode='OBJECT')

			# Find the shortest edge
			for e in self.ob.data.edges:
				if e.select == True:
					v0 = self.ob.data.vertices[e.vertices[0]]
					v1 = self.ob.data.vertices[e.vertices[1]]

					ed = v1.co - v0.co
					leng = ed.length

					if leng < short or not check:
						short = leng
						check = 1

			weight *= short

		# If we're doing multiple groups, we find out the distance from the centre of the group
		if groupLen > 1:
			bpy.ops.object.mode_set(mode='EDIT')
			groupId = self.ob.vertex_groups.active_index
			verts = mesh_extras.get_selected_vertices()
			vLen = len(verts)

			if vLen:
				w = 0.0
				for v in verts:
					for g in v.groups:
						if g.group == groupId:
							w += g.weight

				# Scale by the average group weight of the selected verts
				w *= (1.0/vLen)
				weight *= w

		return weight
# Remove a specific direction from the dict and rebuild it
def cleanDirections(self, direction):
directions = self.options['local_directions']
# We actually remove the negated direction (can't grow backwards!)
direction = mathutils.Vector((-direction[0],-direction[1],-direction[2]))
key = False
# See if the direction is still in the dict at all, and find it's key
for k in directions.keys():
angle = direction.angle(mathutils.Vector(directions[k]))
if angle == 0.0:
key = k
# If the direction is not there, we just return the original list... fine
if key is False:
return
# Make a new fresh dict (a-z) with the remaining directions
newDirections = {}
letter = 97
for k in directions.keys():
if not k == key:
newDirections[chr(letter)] = directions[k]
letter+=1
self.options['local_directions'] = newDirections
return
	# Get the palette!
	def getPalette(self):
		"""Load a colour palette from the scene into self.options.

		On success fills self.options['palette'] with letter-keyed swatch
		colours and stores the palette credits on self; on any failure
		sets self.options['palettes'] = False so colouring is skipped.
		"""
		try:
			self.options['palettes'] = bpy.context.scene['palettes']
			palette = self.choose('select', 'palettes', 'palette')

			print(palette['title'])

			self.paletteAuthor = palette['author']
			self.paletteTitle = palette['title']
			self.paletteId = palette['id']
			self.paletteHexes = palette['hexes']

			# Key the swatches a, b, c... so choose() can pick one
			letter = 97
			self.options['palette'] = {}

			for swatch in palette['swatches']:
				print('swatch', float(swatch[0]),float(swatch[1]),float(swatch[2]))
				self.options['palette'][chr(letter)] = [float(swatch[0]),float(swatch[1]),float(swatch[2])]
				letter += 1

		# Deliberate catch-all: any missing scene data disables palettes
		except:
			self.options['palettes'] = False
			print('no palette available')
	# Go choose something
	def choose(self, type, val, desk):
		"""Draw the next value of kind `type` for option `val`.

		desk is only used in the printed log line. Options stored directly
		in self.options come from a single stream ('palette' deliberately
		uses the secondary one); options with primary/secondary ranges sum
		a draw from each stream. Every call advances the stream state, so
		call order throughout the class is significant.
		"""
		if val in self.options.keys():
			if val == 'palette':
				result = self.secondary.Choose(type,self.options[val],desk)
			else:
				result = self.primary.Choose(type,self.options[val],desk)

		elif val in self.options['primary'].keys():
			# Read the stream positions before choosing (Choose advances them)
			pl = self.primary.key[self.primary.pos]
			p = self.primary.Choose(type,self.options['primary'][val])
			sl = self.secondary.key[self.secondary.pos]
			s = self.secondary.Choose(type,self.options['secondary'][val])
			result = p+s

			print(' ',pl,sl,desk.ljust(22, ' '),'=',round(p,2),'+',round(s,2),'=',round(result,2))

		else:
			print('ERROR Unable to choose',val,desk)
			result = False

		return result
	# Start with some setup
	def setup(self, context, dnaString, keepgroups):
		"""Initialise all build state: DNA streams, option ranges, vertex
		colours and bookkeeping on the active object.

		context -- Blender context (active object is the build target)
		dnaString -- seed string; split on the first space into two parts
		keepgroups -- keep the generated vertex groups after building
		"""

		print("\n\n->-> Starting Entorform <-<-\n")
		print(' - DNA string',dnaString,"\n")

		# Get the active object
		self.ob = context.active_object
		self.me = self.ob.data

		self.dnaString = dnaString

		# Split the dna string into two parts if possible
		prt = dnaString.partition(' ')

		if not prt[2]:
			# No space: both streams read the same string
			self.dnaParts = {
				'primary': dnaString,
				'secondary': dnaString
				}
		else:
			sec = ''
			for i, p in enumerate(prt):
				if i > 1:
					sec = sec + p

			self.dnaParts = {
				'primary': prt[0],
				'secondary': sec
				}

		# NOTE(review): the primary stream is fed the *secondary* part and
		# vice versa — possibly deliberate, but looks swapped; confirm
		# against the intended DNA semantics before changing
		self.primary = liberty.liberty('string', self.dnaParts['secondary'])
		self.secondary = liberty.liberty('string', self.dnaParts['primary'])

		self.options = {}
		self.options['basecolor'] = [0.0,0.0,0.0]
		self.options['bool'] = {'a': True,'b': False}

		# Primary ranges set the base value; secondary ranges add variation
		self.options['primary'] = {
			'translate': {'min': 2.0, 'max': 3.0},
			'scale': {'min': 0.4, 'max': 0.7},
			'crease': {'min': 0.4, 'max': 0.7},
			'bumpscale': {'min': 0.4, 'max': 0.7},
			'rotation': {'min': math.radians(-60.0), 'max': math.radians(60.0)},
			'divergence': {'min': math.radians(45),'max': math.radians(75)},
			'limit': {'min': 4, 'max': 6},
			}

		self.options['secondary'] = {
			'translate': {'min': -0.5, 'max': 1.5},
			'scale': {'min': -0.3, 'max': 0.3},
			'crease': {'min': -0.3, 'max': 0.3},
			'bumpscale': {'min': -0.35, 'max': 0.3},
			'rotation': {'min': math.radians(-60.0), 'max': math.radians(60.0)},
			'divergence': {'min': math.radians(-15),'max': math.radians(15)},
			'limit': {'min': -2, 'max': 2},
			}

		self.options['falloffs'] = {'a': 'LIN', 'b': 'INC', 'c': 'DEC', 'd': 'SWO', 'e': 'SPI', 'f': 'BUM', 'g': 'SWE'}

		self.options['bumptypes'] = {'a': 'BUM', 'b': 'SPI', 'c': 'DIM', 'd': 'PIM'}

		self.options['selectiontypes'] = {'a': 'direction', 'b': 'liberal', 'c': 'joint', 'd': 'all', 'e': 'checkered', 'f': 'loops'} # tip = disabled
		self.options['selectioneyes'] = {'a': 'direction', 'b': 'liberal', 'c': 'joint', 'd': 'all', 'e': 'checkered'}

		self.options['directions'] = {
			'a': mathutils.Vector((1.0,0.0,0.0)), #top
			'b': mathutils.Vector((-1.0,0.0,0.0)), #bottom
			'c': mathutils.Vector((0.0,1.0,0.0)), #front
			'd': mathutils.Vector((0.0,-1.0,0.0)), #rear
			'e': mathutils.Vector((0.0,0.0,1.0)), #right
			'f': mathutils.Vector((0.0,0.0,-1.0)), #left
			}

		self.options['areatypes'] = {'a': 'area','b': 'faces'}
		self.options['frequencies'] = {'a': 1, 'b': 2}
		self.options['colorstyles'] = {'a': 'hard','b': 'soft'}

		self.getPalette()

		# Set the editing to face mode only
		#bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(False, False, True)")

		self.startTime = time.time()

		self.dnaPos = 0
		self.dnaStep = 1

		self.dna = {'name':'base','strings': []}

		self.palette = []

		self.keepgroups = keepgroups

		# Total number of strings
		self.stringCount = 0

		# Level of deepness
		self.LOD = 2

		# If the grow function made a matrix previously, we can remove it now
		try:
			del(self.ob['growmatrix'])
		except:
			pass

		# Get the vertex colours
		if not self.ob.data.vertex_colors.active:
			self.ob.data.vertex_colors.new()

		# Reset all corner colours to black (tris have no color4)
		for f in self.ob.data.vertex_colors.active.data:
			try:
				f.color1 = f.color2 = f.color3 = f.color4 = (0.0,0.0,0.0)
			except:
				f.color1 = f.color2 = f.color3 = (0.0,0.0,0.0)

		self.vCols = self.ob.data.vertex_colors.active.data

		# Save the dna string in a property if we want!
		self.ob['dnastring'] = dnaString

		# Convert the string to a list
		self.origDNA = dnaString

		self.newGroups = []

		# Change Selection mode to face selection
		self.lastSelectioMode = bpy.context.tool_settings.mesh_select_mode[:]
		if bpy.context.tool_settings.mesh_select_mode != (False, False, True):
			bpy.context.tool_settings.mesh_select_mode = (False, False, True)
# Set some variables before finishing
    def finish(self, context):
        """Apply final touches after the build: smooth shading, normalize the
        object's scale, position the floor, and fill in the HUD/text objects
        (entoform number, dna string, date, timing, palette, geometry stats).

        NOTE(review): relies on optional scene objects ('floor', 'text-*',
        'swatches'); most lookups are guarded, but 'text-form' is not.
        """
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.shade_smooth()
        #self.setFloor()
        #self.setDefaultView()
        #bpy.ops.wm.redraw_timer(type='DRAW', iterations=1)
        # Temporarily rescale the object for camera view stuff
        bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN')
        bpy.ops.wm.redraw_timer(type='DRAW', iterations=1)
        # Lets scale the object
        dimensions = self.ob.dimensions
        # NOTE(review): 'max' shadows the builtin; here it tracks the largest dimension.
        max = 0.0
        for i, d in enumerate(dimensions):
            if (not i) or d > max:
                max = d
        if max != 0.0:
            # Normalize the longest side to 15 units.
            ratio = 15 / max
            self.ob.scale *= ratio
            #bpy.ops.object.scale_apply()
            bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
        # Lets put the floor in the correct location
        if 'floor' in bpy.data.objects:
            # 'max' is reused here to track the lowest vertex z coordinate.
            for i, v in enumerate(self.ob.data.vertices):
                loc = v.co[2]
                if (not i) or loc < max:
                    max = loc
            bpy.data.objects['floor'].location[2] = max
        # Entoform number: taken from a digits-only piece of the blend file name
        filePath = bpy.data.filepath.split('\\')
        fileName = filePath[len(filePath)-1]
        numbers = fileName.split('.')
        for n in numbers:
            if n.isdigit():
                # NOTE(review): unguarded lookup - KeyError if 'text-form' is missing.
                bpy.data.objects['text-form'].data.body = 'Entoform '+n
        # Dna string
        if 'text-dna' in bpy.data.objects:
            bpy.data.objects['text-dna'].data.body = self.origDNA
        # Datetime
        if 'text-date' in bpy.data.objects:
            now = datetime.datetime.today()
            dateString = str(now.day)+' '+misc.int_to_roman(now.month)+' '+str(now.year)+' '+str(now.hour)+':'+str(now.minute)+':'+str(now.second)
            bpy.data.objects['text-date'].data.body = dateString
        # execution time
        if 'text-maketime' in bpy.data.objects:
            bpy.data.objects['text-maketime'].data.body = str(round(time.time() - self.startTime))+'s'
        if self.options['palettes']:
            # Palette
            if 'text-paletter' in bpy.data.objects:
                bpy.data.objects['text-paletter'].data.body = self.paletteAuthor
                bpy.data.objects['text-palettid'].data.body = self.paletteId
                bpy.data.objects['text-palette'].data.body = self.paletteTitle
            # Record the palette on the object as custom properties.
            self.ob['paletter'] = self.paletteAuthor
            self.ob['paletteId'] = self.paletteId
            self.ob['palette'] = self.paletteTitle
            #paletteQuery = "INSERT INTO ff_palettes(id, theme_id, name, creator, colour_1, colour_2, colour_3, colour_4, colour_5) VALUES (NULL,'"+self.paletteId+"','"+self.paletteTitle+"','"+self.paletteAuthor+"'"
            #swatches
            if 'swatches' in bpy.data.objects:
                paletteOb = bpy.data.objects['swatches']
            else:
                paletteOb = None
            for j, k in enumerate(self.options['palette'].keys()):
                # NOTE(review): 'hex' shadows the builtin; it is the hex string for swatch j.
                hex = self.paletteHexes[j]
                #paletteQuery = paletteQuery+",'"+hex+"'"
                swatch = self.options['palette'][k]
                col = 'colour_'+str(j+1)
                self.ob[col] = hex #colour.rgb_to_hex(swatch)
                if paletteOb:
                    for i, f in enumerate(paletteOb.data.vertex_colors.active.data):
                        if i == j:
                            # Quads expose four colour slots, triangles only three.
                            try:
                                f.color1 = f.color2 = f.color3 = f.color4 = swatch
                            except:
                                f.color1 = f.color2 = f.color3 = swatch
            #paletteQuery = paletteQuery+")"
            #self.ob['paletteQuery'] = paletteQuery
        '''
        INSERT INTO `admin_entoforms`.`ff_palettes` (`id`, `theme_id`, `name`, `creator`, `colour_1`, `colour_2`, `colour_3`, `colour_4`, `colour_5`) VALUES (NULL, '1373430', 'giblythe1', 'jakestolte', '3d3d3f', 'bf8c2f', 'bcbfbf', 'f2f2f2', 'f2dfba');
        '''
        # Geometry
        if 'text-faces' in bpy.data.objects:
            bpy.data.objects['text-faces'].data.body = str(len(self.ob.data.faces))
        if 'text-edges' in bpy.data.objects:
            bpy.data.objects['text-edges'].data.body = str(len(self.ob.data.edges))
        if 'text-verts' in bpy.data.objects:
            bpy.data.objects['text-verts'].data.body = str(len(self.ob.data.vertices))
        # Frame number
        fr = bpy.context.scene.frame_current
        if 'text-frame' in bpy.data.objects:
            bpy.data.objects['text-frame'].data.body = str(fr)
        # it means fr % 360
        # while fr > 360:
        #     fr -= 360
        fr = fr % 360
        if 'text-angle' in bpy.data.objects:
            bpy.data.objects['text-angle'].data.body = str(fr)
# Reset everything at the very end
def reset(self, context):
print("\n Cleanup\n")
if not self.keepgroups:
for g in self.newGroups:
self.ob.vertex_groups.active_index = g.index
bpy.ops.object.vertex_group_remove()
# Return selection mode to previous value
bpy.context.tool_settings.mesh_select_mode[:] = self.lastSelectioMode
print("->-> Finished Entorform <-<-\n")
# Scale the selection (always relative to the normal)
# val = (0.5, 0.5, 0.5)
def scale(self, val):
bpy.ops.transform.resize(value=val, constraint_axis=(False, False, False), constraint_orientation='GLOBAL', mirror=False, proportional=bpy.context.tool_settings.proportional_edit, proportional_edit_falloff=bpy.context.tool_settings.proportional_edit_falloff, proportional_size=1, snap=bpy.context.tool_settings.use_snap, snap_target=bpy.context.tool_settings.snap_target, snap_point=(0, 0, 0), snap_align=False, snap_normal=(0, 0, 0), release_confirm=False)
# Mark this point in time and print if we want... see how long it takes
def mark(self,desc):
if self.debug:
now = time.time()
jump = now - self.markTime
self.markTime = now
print(desc.rjust(10, ' '),jump)
class Entoform_init(bpy.types.Operator):
    '''Build an Entoform'''
    bl_idname = 'object.entoform'
    bl_label = 'Entoform'
    bl_options = {'REGISTER', 'UNDO'}

    # Default value shared with the DNA string property below.
    d=''
    # Operator properties (Blender 2.5x-style class-attribute declarations).
    dnaString = StringProperty(name="DNA", description="DNA string to define your shape", default=d, maxlen=100)
    subdivide = IntProperty(name='Subdivide', default=0, min=0, max=10, soft_min=0, soft_max=100)
    keepgroups = BoolProperty(name='Keep groups', description='Do not remove the added vertex groups', default=True)
    finish = BoolProperty(name='Finish', description='Do some final touches', default=True)
    run = BoolProperty(name='Execute', description='Go and actually do this', default=False)

    @classmethod
    def poll(cls, context):
        # The operator is only available when a mesh object is active.
        obj = context.active_object
        return (obj and obj.type == 'MESH')

    def execute(self, context):
        # Building the entoform happens as a side effect of constructing Entoform.
        ENTOFORM = Entoform(context, self.dnaString, self.subdivide, self.keepgroups, self.finish, self.run)
        return {'FINISHED'}
def menu_func(self, context):
    """Menu entry hook: adds the Entoform operator to the menu it is appended to."""
    layout = self.layout
    layout.operator(Entoform_init.bl_idname, text="Entoform")
def register():
    """Register every class in this module and extend the Object menu."""
    bpy.utils.register_module(__name__)
    bpy.types.VIEW3D_MT_object.append(menu_func)
def unregister():
    """Unregister this module's classes and remove the Object menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.VIEW3D_MT_object.remove(menu_func)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
| 29.452739
| 460
| 0.598128
|
4a0242f7830a95375ee89d99185a219d87ab4b01
| 615
|
py
|
Python
|
OA/image/migrations/0001_initial.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
OA/image/migrations/0001_initial.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
OA/image/migrations/0001_initial.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.12 on 2020-09-22 06:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Images`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                # Uploaded files are stored under MEDIA_ROOT/images.
                ('image', models.ImageField(upload_to='images')),
                # NOTE(review): 'sign' presumably flags reviewed/approved images -- confirm with callers.
                ('sign', models.BooleanField(default=False)),
            ],
        ),
    ]
| 25.625
| 114
| 0.569106
|
4a024335182116f840c27c8e518ebf7578e7577e
| 465
|
py
|
Python
|
authlib/jose/rfc7516/__init__.py
|
minddistrict/authlib
|
7bfd5590cc365803633c56e784b43494589abff2
|
[
"BSD-3-Clause"
] | 3,172
|
2017-11-11T05:54:14.000Z
|
2022-03-31T23:59:59.000Z
|
authlib/jose/rfc7516/__init__.py
|
minddistrict/authlib
|
7bfd5590cc365803633c56e784b43494589abff2
|
[
"BSD-3-Clause"
] | 397
|
2017-11-11T02:49:06.000Z
|
2022-03-31T21:02:37.000Z
|
authlib/jose/rfc7516/__init__.py
|
minddistrict/authlib
|
7bfd5590cc365803633c56e784b43494589abff2
|
[
"BSD-3-Clause"
] | 387
|
2017-11-18T08:59:56.000Z
|
2022-03-15T18:37:37.000Z
|
"""
authlib.jose.rfc7516
~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of
JSON Web Encryption (JWE).
https://tools.ietf.org/html/rfc7516
"""
from .jwe import JsonWebEncryption
from .models import JWEAlgorithm, JWEAlgorithmWithTagAwareKeyAgreement, JWEEncAlgorithm, JWEZipAlgorithm
# Public API of the JWE (RFC 7516) subpackage.
__all__ = [
    'JsonWebEncryption',
    'JWEAlgorithm', 'JWEAlgorithmWithTagAwareKeyAgreement', 'JWEEncAlgorithm', 'JWEZipAlgorithm'
]
| 24.473684
| 104
| 0.72043
|
4a02435e80add7291637473c683a7b0282955d82
| 888
|
py
|
Python
|
sandbox/extract-with-median-count.py
|
maarten1983/khmer
|
417aaa57f0659685c01887a6910de1c08d0a73e5
|
[
"BSD-3-Clause"
] | 1
|
2019-11-02T15:12:44.000Z
|
2019-11-02T15:12:44.000Z
|
sandbox/extract-with-median-count.py
|
ibest/khmer
|
fbc307abd64363b329745709846d77444ce0c025
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/extract-with-median-count.py
|
ibest/khmer
|
fbc307abd64363b329745709846d77444ce0c025
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python2
import screed
import sys
import argparse
def main():
    """Filter sequence records by their median k-mer count.

    Each record name is expected to end in a ``kmed=N`` tag; records with
    ``min_count <= N < max_count`` are written to stdout as FASTA.

    Fix: the original used Python 2 ``print >>`` / ``print`` statements,
    which are a SyntaxError under Python 3; the replacements below behave
    identically on both Python 2 and Python 3.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('min_count', type=int)
    parser.add_argument('max_count', type=int)
    parser.add_argument('filenames', nargs='+')
    args = parser.parse_args()

    min_count = args.min_count
    max_count = args.max_count

    for filename in args.filenames:
        for n, record in enumerate(screed.open(filename, parse_description=False)):
            if n % 10000 == 0:
                # Progress indicator on stderr (same output as 'print >>sys.stderr, ...').
                sys.stderr.write('... %s %s\n' % (filename, n))

            kmed = record.name.split()[-1]
            assert kmed.startswith('kmed'), record.name
            kmed = int(kmed.split('=')[1])

            if min_count <= kmed < max_count:
                print('>%s\n%s' % (record.name, record.sequence))
# Script entry point.
if __name__ == '__main__':
    main()
| 27.75
| 83
| 0.600225
|
4a02440f55e2f35bef4da0ecebfb1bb0748401dc
| 12,684
|
py
|
Python
|
fairseq/criterions/label_smoothed_cross_entropy.py
|
mcao610/off-policy-RL-for-text-gen
|
cbc6b2172f7bcf14adf97cdce4aa71cc2902a441
|
[
"MIT"
] | null | null | null |
fairseq/criterions/label_smoothed_cross_entropy.py
|
mcao610/off-policy-RL-for-text-gen
|
cbc6b2172f7bcf14adf97cdce4aa71cc2902a441
|
[
"MIT"
] | null | null | null |
fairseq/criterions/label_smoothed_cross_entropy.py
|
mcao610/off-policy-RL-for-text-gen
|
cbc6b2172f7bcf14adf97cdce4aa71cc2902a441
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True,
                            probs_old=None, probs_mle=None, config=None, sample=None):
    """Label-smoothed NLL loss with optional off-policy importance weighting.

    Args:
        lprobs: log-probabilities, [batch_size * seq_length, vocab_size].
        target: gold indices, [batch_size * seq_length] or [..., 1].
        epsilon: label-smoothing factor.
        ignore_index: padding index to mask out. Required -- the unpadded
            branch was never implemented.
        reduce: if True, sum losses over all positions.
        probs_old: probabilities from a slightly stale policy (the behaviour
            policy, "pi" in the paper), or None.
        probs_mle: probabilities from a frozen MLE model used to build the
            suffix return, or None.
        config: model args; only read when both probs_* are given (uses
            gamma, suffix_num, reward_type, q_baseline, trunc_min, iw_min).
        sample: batch dict; only sample['target'].shape[0] (batch size) is used.

    Returns:
        (loss, nll_loss): the smoothed, possibly re-weighted loss and the
        plain NLL loss; both summed when ``reduce`` is True.

    Fixes vs. the original:
      * removed the unused ``from fairseq_cli.train import ...`` (it made the
        function fail outside a fairseq training run and was never used);
      * the ``ignore_index is None`` branch referenced the undefined name
        ``nll_loss_old`` (NameError) before its NotImplementedError -- it now
        raises directly;
      * the probs_old/probs_mle checks mixed ``or``/``and``, which could call
        ``.masked_fill_`` on a plain float when only one model was supplied;
        both checks now require *both* models;
      * an unknown ``config.reward_type`` now raises instead of hitting an
        undefined local.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)    # [B*T, 1]
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)    # [B*T, 1]
    batch_size = sample['target'].shape[0]             # sample['target']: [B, T]

    # Importance sampling is only active when both auxiliary models are given.
    have_weights = probs_old is not None and probs_mle is not None
    if have_weights:
        # Probability the behaviour policy / MLE model assigned to the target token.
        weight_theta_hat = probs_old.gather(dim=-1, index=target)  # [B*T, 1]
        weight_mle = probs_mle.gather(dim=-1, index=target)        # [B*T, 1]
    else:
        weight_theta_hat = 1.0

    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.)
        smooth_loss.masked_fill_(pad_mask, 0.)
        if have_weights:
            # Neutral importance weight on padding positions.
            weight_theta_hat.masked_fill_(pad_mask, 1.0)
    else:
        raise NotImplementedError(
            "label_smoothed_nll_loss requires ignore_index")

    with torch.no_grad():
        if have_weights:
            assert config.suffix_num > 0

            def _shift_left(weight_fn, kk, padding_type='ones'):
                """Shift per-token weights left by kk steps, padding on the right.

                Example for kk = 2 over tokens [w0 .. w9]:
                [w2, w3, ..., w9, pad, pad]
                """
                assert kk >= 0, "kk should always be positive!!!"
                if kk == 0:
                    return weight_fn.clone()
                shifted = weight_fn.view(batch_size, -1)  # [B, T]
                if padding_type == 'ones':
                    right_padding = torch.ones(batch_size, kk).to(shifted.device)
                elif padding_type == 'zeros':
                    right_padding = torch.zeros(batch_size, kk).to(shifted.device)
                else:
                    raise Exception("Unknown padding type!!!")
                return torch.cat([shifted, right_padding], dim=1)[:, kk:].reshape(-1, 1)

            def _reward(weight_fn, kk):
                """Reward of the token kk steps ahead, per config.reward_type."""
                if config.reward_type == 'sump':
                    reward = _shift_left(weight_fn, kk, padding_type='zeros')
                elif config.reward_type == 'logp':
                    reward = torch.log(_shift_left(weight_fn, kk, padding_type='ones')
                                       + 1e-10) - config.q_baseline
                else:
                    raise NotImplementedError(config.reward_type)
                return torch.clamp(reward, min=config.trunc_min)  # warning

            def _calculate_return(weight_fn, steps):
                """Discounted sum of rewards over the next ``steps`` tokens.

                Args:
                    weight_fn: torch.Tensor [batch_size * seq_length, 1]
                    steps: number of future steps to accumulate.
                """
                gamma = config.gamma
                seq_length = weight_fn.view(batch_size, -1).shape[1]
                if seq_length <= steps:
                    # Short sequence: use as many future steps as exist.
                    steps = seq_length - 1
                return_values = _reward(weight_fn, 0)
                for kk in range(1, steps + 1):
                    return_values += (gamma ** kk) * _reward(weight_fn, kk)
                return return_values

            b1_ = torch.clamp(weight_theta_hat, min=config.iw_min, max=1.0)  # truncated IW
            b2_ = _calculate_return(weight_mle, config.suffix_num)           # suffix return
        else:
            b1_ = 1.0
            b2_ = 1.0

    nll_loss_new = (b1_ * b2_) * nll_loss
    # Could also adjust the smooth loss accordingly; no big impact in practice.
    if reduce:
        nll_loss = nll_loss.sum()
        nll_loss_new = nll_loss_new.sum()
        smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1. - epsilon) * nll_loss_new + eps_i * smooth_loss
    return loss, nll_loss
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Label-smoothed cross entropy that re-weights the loss using two extra
    models imported as globals from fairseq_cli.train: ``model_old`` (a stale
    copy of the policy) and ``model_mle`` (a frozen MLE model)."""

    def __init__(self, task, sentence_avg, label_smoothing):
        super().__init__(task)
        # Normalize by sentence count instead of token count when set.
        self.sentence_avg = sentence_avg
        # Label-smoothing epsilon.
        self.eps = label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
                            help='epsilon for label smoothing, 0 means no label smoothing')
        # fmt: on

    def forward(self, model, sample, reduce=True):
        from fairseq_cli.train import model_old, model_mle
        # NOTE(review): the string below is not a real docstring because the
        # import above precedes it; it is a harmless bare-string statement.
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # ===================================================================
        # Run the auxiliary models without gradients; they only supply
        # probabilities for importance weighting.
        model_old.eval()
        model_mle.eval()
        with torch.no_grad():
            net_output_old = model_old(**sample['net_input'])
            net_output_mle = model_mle(**sample['net_input'])
        # ===================================================================
        net_output = model(**sample['net_input'])
        if model.args.use_is_obj:  # value: 1
            # Importance-sampling objective: use both auxiliary outputs.
            loss, nll_loss = self.compute_loss(
                model, net_output, sample, reduce=reduce,
                output_old=net_output_old, output_mle=net_output_mle,
            )
        else:
            # Plain label-smoothed cross entropy (no re-weighting).
            loss, nll_loss = self.compute_loss(
                model, net_output, sample, reduce=reduce,
                output_old=None, output_mle=net_output_mle,
            )
        sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': loss.data,
            'nll_loss': nll_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True, output_old=None, output_mle=None):
        from fairseq_cli.train import model_old, model_mle
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        with torch.no_grad():
            # Only build the auxiliary probability tensors when the stale-policy
            # output is supplied; otherwise both are disabled together.
            if output_old is not None:
                probs_old = model_old.get_normalized_probs(output_old, log_probs=False)
                probs_old = probs_old.view(-1, lprobs.size(-1))
                probs_mle = model_mle.get_normalized_probs(output_mle, log_probs=False)
                probs_mle = probs_mle.view(-1, lprobs.size(-1))
            else:
                probs_old = None
                probs_mle = None
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce,
            probs_old=probs_old, probs_mle=probs_mle, config=model.args, sample=sample,
        )
        return loss, nll_loss

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        nll_loss_sum = sum(log.get('nll_loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Losses are reported in bits (divide by log 2).
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('nll_loss', nll_loss_sum / ntokens / math.log(2), ntokens, round=3)
        metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| 46.632353
| 127
| 0.554636
|
4a02446a019683903c53916a7ce7690861e27406
| 6,976
|
py
|
Python
|
lib/matplotlib/tests/test_simplification.py
|
stefanv/matplotlib
|
2c6c3695ef989ec86b7c43740462ef992685022f
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-08-09T14:14:42.000Z
|
2016-08-09T14:14:42.000Z
|
lib/matplotlib/tests/test_simplification.py
|
stefanv/matplotlib
|
2c6c3695ef989ec86b7c43740462ef992685022f
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_simplification.py
|
stefanv/matplotlib
|
2c6c3695ef989ec86b7c43740462ef992685022f
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
from matplotlib import patches, path, transforms
from nose.tools import raises
import cStringIO
nan = np.nan
Path = path.Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'])
def test_clipping():
    # Plot a sine wave, then clip the view to a thin y-range; the simplified
    # path must still match the 'clipping' baseline image.
    t = np.arange(0.0, 2.0, 0.01)
    s = np.sin(2*pi*t)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(t, s, linewidth=1.0)
    ax.set_ylim((-0.20, -0.28))
    ax.set_xticks([])
    ax.set_yticks([])
@image_comparison(baseline_images=['overflow'], tol=1e-2)
def test_overflow():
    # A huge x value (2e5) exercises the path code's overflow handling when
    # the view is clipped to a small x-range.
    x = np.array([1.0,2.0,3.0,2.0e5])
    y = np.arange(len(x))

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x,y)
    ax.set_xlim(xmin=2,xmax=6)
    ax.set_xticks([])
    ax.set_yticks([])
@image_comparison(baseline_images=['clipping_diamond'])
def test_diamond():
    # A diamond whose corners lie outside the clipped view; checks the
    # clipper against the 'clipping_diamond' baseline.
    x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
    y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y)
    ax.set_xlim(xmin=-0.6, xmax=0.6)
    ax.set_ylim(ymin=-0.6, ymax=0.6)
    ax.set_xticks([])
    ax.set_yticks([])
@cleanup
def test_noise():
    # Seeded random noise must simplify down to a fixed number of segments.
    # NOTE(review): Python 2 syntax ('print len(...)') -- this file predates
    # the matplotlib Python 3 port.
    np.random.seed(0)
    x = np.random.uniform(size=(5000,)) * 50

    fig = plt.figure()
    ax = fig.add_subplot(111)
    p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
    ax.set_xticks([])
    ax.set_yticks([])

    # Simplify against an 800x600 canvas and count the surviving segments.
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = list(path.iter_segments(simplify=(800, 600)))

    print len(simplified)
    assert len(simplified) == 3884
@cleanup
def test_sine_plus_noise():
    # A noisy sine wave should simplify to a fixed segment count.
    np.random.seed(0)
    x = np.sin(np.linspace(0, np.pi * 2.0, 1000)) + np.random.uniform(size=(1000,)) * 0.01

    fig = plt.figure()
    ax = fig.add_subplot(111)
    p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
    ax.set_xticks([])
    ax.set_yticks([])

    # Simplify against an 800x600 canvas and count the surviving segments.
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = list(path.iter_segments(simplify=(800, 600)))

    print len(simplified)
    assert len(simplified) == 876
@image_comparison(baseline_images=['simplify_curve'])
def test_simplify_curve():
    # A curved path containing a nan control point; rendering must match the
    # 'simplify_curve' baseline.
    pp1 = patches.PathPatch(
        Path([(0, 0), (1, 0), (1, 1), (nan, 1), (0, 0), (2, 0), (2, 2), (0, 0)],
             [Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
        fc="none")

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.add_patch(pp1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim((0, 2))
    ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'])
def test_hatch():
    # Hatching must survive simplification when zoomed far into the patch.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.add_patch(Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
    ax.set_xlim((0.45, 0.55))
    ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'])
def test_fft_peaks():
    # Sharp FFT spikes must not be simplified away; 65536 samples collapse
    # to exactly 20 segments.
    fig = plt.figure()
    t = arange(65536)
    ax = fig.add_subplot(111)
    p1 = ax.plot(abs(fft(sin(2*pi*.01*t)*blackman(len(t)))))
    ax.set_xticks([])
    ax.set_yticks([])

    # Simplify against an 800x600 canvas and count the surviving segments.
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = list(path.iter_segments(simplify=(800, 600)))

    print len(simplified)
    assert len(simplified) == 20
@cleanup
def test_start_with_moveto():
    # Should be entirely clipped away to a single MOVETO
    # The blob below is a base64-encoded array of little-endian int32 vertex
    # coordinates, all of which lie outside the clip rectangle.
    # NOTE(review): Python 2 idioms -- str.decode('base64') and integer '/'
    # division -- this test predates the Python 3 port.
    data = """
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""

    verts = np.fromstring(data.decode('base64'), dtype='<i4')
    verts = verts.reshape((len(verts) / 2, 2))
    path = Path(verts)
    # NOTE(review): IdentityTransform is passed as a class, not an instance --
    # presumably accepted by iter_segments here; confirm against the API.
    segs = path.iter_segments(transforms.IdentityTransform, clip=(0.0, 0.0, 100.0, 100.0))
    segs = list(segs)
    assert len(segs) == 1
    assert segs[0][1] == Path.MOVETO
@cleanup
@raises(OverflowError)
def test_throw_rendering_complexity_exceeded():
    """Rendering an unsimplified 200k-point path must raise OverflowError.

    Fixes vs. the original:
      * ``except e:`` referenced the undefined name ``e`` (a NameError that
        masked the real exception);
      * ``rcParams['path.simplify']`` was only restored in the ``else`` branch,
        so the expected OverflowError leaked the changed global into every
        subsequent test.  A ``finally`` block now always restores it while
        still letting the OverflowError propagate to @raises.
    """
    rcParams['path.simplify'] = False
    xx = np.arange(200000)
    yy = np.random.rand(200000)
    yy[1000] = np.nan
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(xx, yy)
    try:
        fig.savefig(cStringIO.StringIO())
    finally:
        rcParams['path.simplify'] = True
@image_comparison(baseline_images=['clipper_edge'])
def test_clipper():
    # A sawtooth viewed through an x-window past its data; checks edge
    # behaviour of the clipper against the 'clipper_edge' baseline.
    dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
    fig = plt.figure(figsize=(2, 1))
    fig.subplots_adjust(left = 0, bottom = 0, wspace = 0, hspace = 0)

    ax = fig.add_axes((0, 0, 1.0, 1.0), ylim = (0, 5), autoscale_on = False)
    ax.plot(dat)
    ax.xaxis.set_major_locator(plt.MultipleLocator(1))
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_locator(plt.MultipleLocator(1))
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'])
def test_para_equal_perp():
    # Long runs of identical points (128 repeats) must not confuse the
    # parallel/perpendicular simplification logic.
    x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
    y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x + 1, y + 1)
    ax.plot(x + 1, y + 1, 'ro')
# Run this test module directly under nose.
if __name__=='__main__':
    import nose
    nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| 32
| 122
| 0.673308
|
4a0244a22d6a301d1db6de1d2e15b3f1f36be1d4
| 1,341
|
py
|
Python
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 592
|
2019-03-05T13:39:57.000Z
|
2022-03-31T14:52:58.000Z
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 91
|
2019-04-05T20:45:26.000Z
|
2021-12-24T02:10:50.000Z
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 84
|
2019-03-31T03:55:56.000Z
|
2022-01-03T13:33:44.000Z
|
"""renamed email to login in user table
Revision ID: 27a721daab2e
Revises: 9d370f33f1a0
Create Date: 2020-12-04 16:14:19.390278
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "27a721daab2e"        # this migration
down_revision = "9d370f33f1a0"   # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Rename ``user.email`` to ``user.login`` (type and nullability unchanged)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "user",
        "email",
        new_column_name="login",
        existing_type=sa.String(256),
        existing_nullable=False,
    )
    # The auto-generated drop/recreate variant was replaced by the in-place
    # rename above; kept here for reference.
    # op.drop_index("email", table_name="user")
    # op.create_unique_constraint("user_login_uniq", "user", ["login"])
    # op.drop_column("user", "email")
    # ### end Alembic commands ###
def downgrade():
    """Rename ``user.login`` back to ``user.email`` (inverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "user",
        "login",
        new_column_name="email",
        existing_type=sa.String(256),
        existing_nullable=False,
    )
    # The auto-generated add/drop variant was replaced by the in-place
    # rename above; kept here for reference.
    # op.add_column("user", sa.Column("email", mysql.VARCHAR(length=256), nullable=False))
    # op.drop_constraint("user_login_uniq", "user", type_="unique")
    # op.create_index("email", "user", ["email"], unique=True)
    # op.drop_column("user", "login")
    # ### end Alembic commands ###
| 27.9375
| 90
| 0.651752
|
4a0246373b96f3200d7456d87614e2d9c271b45b
| 583
|
py
|
Python
|
profiles_api/views.py
|
paulofs321/profiles-rest-api
|
24cf74ae97c56b8349048408876dfdddee417bc4
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
paulofs321/profiles-rest-api
|
24cf74ae97c56b8349048408876dfdddee417bc4
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
paulofs321/profiles-rest-api
|
24cf74ae97c56b8349048408876dfdddee417bc4
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
class HelloApiView(APIView):
    """Test API View"""

    def get(self, request, format=None):
        """Returns a list of APIView features"""
        features = [
            "Uses HTTP methods as function (get, post, patch, put, delete)",
            "Is similar to a traditional Django View",
            "Gives you the most control over your application logic",
            "Is mapped manually to URLs",
        ]
        payload = {'message': 'Hello!', 'an_apiview': features}
        return Response(payload)
| 32.388889
| 76
| 0.634648
|
4a0246be39d7f4902006e7ac51644303e8f90fba
| 13,298
|
py
|
Python
|
ml3d/tf/pipelines/semantic_segmentation.py
|
amirshal/Open3D-ML
|
5549444280ca02f3881cdf06f15c1ed06efb7389
|
[
"MIT"
] | null | null | null |
ml3d/tf/pipelines/semantic_segmentation.py
|
amirshal/Open3D-ML
|
5549444280ca02f3881cdf06f15c1ed06efb7389
|
[
"MIT"
] | null | null | null |
ml3d/tf/pipelines/semantic_segmentation.py
|
amirshal/Open3D-ML
|
5549444280ca02f3881cdf06f15c1ed06efb7389
|
[
"MIT"
] | null | null | null |
import numpy as np
import logging
import sys
import warnings
from tqdm import tqdm
from datetime import datetime
from os.path import exists, join, isfile, dirname, abspath
from pathlib import Path
import tensorflow as tf
from .base_pipeline import BasePipeline
from ..modules.losses import SemSegLoss
from ..modules.metrics import SemSegMetric
from ..dataloaders import TFDataloader
from ...utils import make_dir, LogRecord, PIPELINE, get_runid, code2md
# Use the project's LogRecord factory so records carry the extra context
# expected by the format string below.
logging.setLogRecordFactory(LogRecord)
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)s - %(asctime)s - %(module)s - %(message)s',
)
log = logging.getLogger(__name__)
class SemanticSegmentation(BasePipeline):
    def __init__(
            self,
            model,
            dataset=None,
            name='SemanticSegmentation',
            batch_size=4,
            val_batch_size=4,
            test_batch_size=3,
            max_epoch=100,  # maximum epoch during training
            learning_rate=1e-2,  # initial learning rate
            lr_decays=0.95,
            save_ckpt_freq=20,
            adam_lr=1e-2,
            scheduler_gamma=0.95,
            momentum=0.98,
            main_log_dir='./logs/',
            device='gpu',
            split='train',
            train_sum_dir='train_log',
            **kwargs):
        """Create a semantic-segmentation pipeline.

        All configuration is forwarded unchanged to BasePipeline, which
        stores it (accessible later as self.cfg).
        """
        super().__init__(model=model,
                         dataset=dataset,
                         name=name,
                         batch_size=batch_size,
                         val_batch_size=val_batch_size,
                         test_batch_size=test_batch_size,
                         max_epoch=max_epoch,
                         learning_rate=learning_rate,
                         lr_decays=lr_decays,
                         save_ckpt_freq=save_ckpt_freq,
                         adam_lr=adam_lr,
                         scheduler_gamma=scheduler_gamma,
                         momentum=momentum,
                         main_log_dir=main_log_dir,
                         device=device,
                         split=split,
                         train_sum_dir=train_sum_dir,
                         **kwargs)
def run_inference(self, data):
cfg = self.cfg
model = self.model
# model.eval()
log.info("running inference")
model.inference_begin(data)
while True:
inputs = model.inference_preprocess()
results = model(inputs, training=False)
if model.inference_end(results):
break
return model.inference_result
def run_test(self):
model = self.model
dataset = self.dataset
cfg = self.cfg
self.load_ckpt(model.cfg.ckpt_path)
timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
log_file_path = join(cfg.logs_dir, 'log_test_' + timestamp + '.txt')
log.info("Logging in file : {}".format(log_file_path))
log.addHandler(logging.FileHandler(log_file_path))
log.info("Started testing")
Metric = SemSegMetric(self, model, dataset)
Loss = SemSegLoss(self, model, dataset)
accs = []
ious = []
test_split = dataset.get_split('test')
for idx in tqdm(range(len(test_split)), desc='test'):
attr = test_split.get_attr(idx)
data = test_split.get_data(idx)
results = self.run_inference(data)
scores, labels = Loss.filter_valid_label(results['predict_scores'],
data['label'])
acc = Metric.acc(scores.numpy(), labels.numpy())
iou = Metric.iou(scores.numpy(), labels.numpy())
accs.append(acc)
ious.append(iou)
dataset.save_test_result(results, attr)
accs = np.nanmean(np.array(accs), axis=0)
ious = np.nanmean(np.array(ious), axis=0)
log.info("Per class Accuracy : {}".format(accs[:-1]))
log.info("Per class IOUs : {}".format(ious[:-1]))
log.info("Overall Accuracy : {:.3f}".format(accs[-1]))
log.info("Overall IOU : {:.3f}".format(ious[-1]))
def run_train(self):
model = self.model
dataset = self.dataset
cfg = self.cfg
log.info(model)
timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
log_file_path = join(cfg.logs_dir, 'log_train_' + timestamp + '.txt')
log.info("Logging in file : {}".format(log_file_path))
log.addHandler(logging.FileHandler(log_file_path))
Loss = SemSegLoss(self, model, dataset)
Metric = SemSegMetric(self, model, dataset)
train_split = TFDataloader(dataset=dataset.get_split('training'),
model=model,
use_cache=dataset.cfg.use_cache,
steps_per_epoch=dataset.cfg.get(
'steps_per_epoch_train', None))
train_loader, len_train = train_split.get_loader(cfg.batch_size)
valid_split = TFDataloader(dataset=dataset.get_split('validation'),
model=model,
use_cache=dataset.cfg.use_cache,
steps_per_epoch=dataset.cfg.get(
'steps_per_epoch_valid', None))
valid_loader, len_val = valid_split.get_loader(cfg.val_batch_size)
dataset_name = dataset.name if dataset is not None else ''
tensorboard_dir = join(
self.cfg.train_sum_dir,
model.__class__.__name__ + '_' + dataset_name + '_tf')
runid = get_runid(tensorboard_dir)
self.tensorboard_dir = join(self.cfg.train_sum_dir,
runid + '_' + Path(tensorboard_dir).name)
writer = tf.summary.create_file_writer(self.tensorboard_dir)
self.save_config(writer)
log.info("Writing summary in {}.".format(self.tensorboard_dir))
self.optimizer = model.get_optimizer(cfg)
is_resume = model.cfg.get('is_resume', True)
self.load_ckpt(model.cfg.ckpt_path, is_resume=is_resume)
for epoch in range(0, cfg.max_epoch + 1):
log.info("=== EPOCH {}/{} ===".format(epoch, cfg.max_epoch))
# --------------------- training
self.accs = []
self.ious = []
self.losses = []
step = 0
for idx, inputs in enumerate(
tqdm(train_loader, total=len_train, desc='training')):
with tf.GradientTape(persistent=True) as tape:
results = model(inputs, training=True)
loss, gt_labels, predict_scores = model.get_loss(
Loss, results, inputs)
if len(predict_scores.shape) < 2:
continue
if predict_scores.shape[0] == 0:
continue
# params for deformable convolutions.
scaled_params = []
params = []
for val in model.trainable_weights:
if 'deform' in val.name:
scaled_params.append(val)
else:
params.append(val)
grads = tape.gradient(loss, params)
scaled_grads = tape.gradient(loss, scaled_params)
for i in range(len(scaled_grads)):
scaled_grads[i] *= 0.1
norm = cfg.get('grad_clip_norm', 100.0)
grads = [tf.clip_by_norm(g, norm) for g in grads]
scaled_grads = [tf.clip_by_norm(g, norm) for g in scaled_grads]
self.optimizer.apply_gradients(zip(grads, params))
if len(scaled_grads) > 0:
self.optimizer.apply_gradients(
zip(scaled_grads, scaled_params))
acc = Metric.acc(predict_scores, gt_labels)
iou = Metric.iou(predict_scores, gt_labels)
self.losses.append(loss.numpy())
self.accs.append(acc)
self.ious.append(iou)
step = step + 1
# --------------------- validation
self.valid_accs = []
self.valid_ious = []
self.valid_losses = []
step = 0
for idx, inputs in enumerate(
tqdm(valid_loader, total=len_val, desc='validation')):
with tf.GradientTape() as tape:
results = model(inputs, training=False)
loss, gt_labels, predict_scores = model.get_loss(
Loss, results, inputs)
if len(predict_scores.shape) < 2:
continue
acc = Metric.acc(predict_scores, gt_labels)
iou = Metric.iou(predict_scores, gt_labels)
self.valid_losses.append(loss.numpy())
self.valid_accs.append(acc)
self.valid_ious.append(iou)
step = step + 1
self.save_logs(writer, epoch)
if epoch % cfg.save_ckpt_freq == 0:
self.save_ckpt(epoch)
def save_logs(self, writer, epoch):
with warnings.catch_warnings(): # ignore Mean of empty slice.
warnings.simplefilter('ignore', category=RuntimeWarning)
accs = np.nanmean(np.array(self.accs), axis=0)
ious = np.nanmean(np.array(self.ious), axis=0)
valid_accs = np.nanmean(np.array(self.valid_accs), axis=0)
valid_ious = np.nanmean(np.array(self.valid_ious), axis=0)
loss_dict = {
'Training loss': np.mean(self.losses),
'Validation loss': np.mean(self.valid_losses)
}
acc_dicts = [{
'Training accuracy': acc,
'Validation accuracy': val_acc
} for acc, val_acc in zip(accs, valid_accs)]
iou_dicts = [{
'Training IoU': iou,
'Validation IoU': val_iou
} for iou, val_iou in zip(ious, valid_ious)]
log.info(f"loss train: {loss_dict['Training loss']:.3f} "
f" eval: {loss_dict['Validation loss']:.3f}")
log.info(f"acc train: {acc_dicts[-1]['Training accuracy']:.3f} "
f" eval: {acc_dicts[-1]['Validation accuracy']:.3f}")
log.info(f"iou train: {iou_dicts[-1]['Training IoU']:.3f} "
f" eval: {iou_dicts[-1]['Validation IoU']:.3f}")
# send results to tensorboard
with writer.as_default():
for key, val in loss_dict.items():
tf.summary.scalar(key, val, epoch)
for key, val in acc_dicts[-1].items():
tf.summary.scalar("{}/ Overall".format(key), val, epoch)
for key, val in iou_dicts[-1].items():
tf.summary.scalar("{}/ Overall".format(key), val, epoch)
# print(acc_dicts[-1])
def load_ckpt(self, ckpt_path=None, is_resume=True):
train_ckpt_dir = join(self.cfg.logs_dir, 'checkpoint')
make_dir(train_ckpt_dir)
if hasattr(self, 'optimizer'):
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=self.optimizer,
net=self.model)
else:
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=self.model)
self.manager = tf.train.CheckpointManager(self.ckpt,
train_ckpt_dir,
max_to_keep=100)
if ckpt_path is not None:
self.ckpt.restore(ckpt_path).expect_partial()
log.info("Restored from {}".format(ckpt_path))
else:
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint and is_resume:
log.info("Restored from {}".format(
self.manager.latest_checkpoint))
else:
log.info("Initializing from scratch.")
def save_ckpt(self, epoch):
save_path = self.manager.save()
log.info("Saved checkpoint at: {}".format(save_path))
def save_config(self, writer):
'''
Save experiment configuration with tensorboard summary
'''
with writer.as_default():
with tf.name_scope("Description"):
tf.summary.text("Open3D-ML", self.cfg_tb['readme'], step=0)
tf.summary.text("Command line", self.cfg_tb['cmd_line'], step=0)
with tf.name_scope("Configuration"):
tf.summary.text('Dataset',
code2md(self.cfg_tb['dataset'],
language='json'),
step=0)
tf.summary.text('Model',
code2md(self.cfg_tb['model'], language='json'),
step=0)
tf.summary.text('Pipeline',
code2md(self.cfg_tb['pipeline'],
language='json'),
step=0)
# Expose this pipeline through the registry under the "tf" backend.
PIPELINE._register_module(SemanticSegmentation, "tf")
| 37.994286
| 80
| 0.530005
|
4a0246f73d7744cc380bcb74720e378a56a1d9d6
| 3,794
|
py
|
Python
|
scripts/gv_record.py
|
DavidSlayback/gym-gridverse
|
b8916576a92489c030fb2c2060598c7f473f19f2
|
[
"MIT"
] | 6
|
2020-10-15T14:45:11.000Z
|
2021-02-20T12:11:32.000Z
|
scripts/gv_record.py
|
DavidSlayback/gym-gridverse
|
b8916576a92489c030fb2c2060598c7f473f19f2
|
[
"MIT"
] | 5
|
2021-01-20T15:34:29.000Z
|
2022-03-18T18:52:53.000Z
|
scripts/gv_record.py
|
DavidSlayback/gym-gridverse
|
b8916576a92489c030fb2c2060598c7f473f19f2
|
[
"MIT"
] | 2
|
2021-01-11T18:48:49.000Z
|
2021-01-26T15:45:38.000Z
|
#!/usr/bin/env python
from __future__ import annotations
import argparse
import itertools as itt
import sys
from typing import Tuple
import imageio
import numpy.random as rnd
from gym_gridverse.envs.inner_env import InnerEnv
from gym_gridverse.envs.yaml.factory import factory_env_from_yaml
from gym_gridverse.observation import Observation
from gym_gridverse.recording import Data, DataBuilder, generate_images, record
from gym_gridverse.state import State
def main():
    """Roll out one random-policy episode in the configured environment
    and record the requested state and/or observation renderings.
    """
    args = get_args()

    env = factory_env_from_yaml(args.yaml)
    env.set_seed(args.seed)
    rnd.seed(args.seed)

    state_data, observation_data = make_data(env, args.discount)

    def save(data, template):
        # Render every frame of the episode, then hand the frames to the
        # writer for the selected output mode.
        record(
            args.mode,
            list(generate_images(data)),
            filename=template,
            filenames=map(template.format, itt.count()),
            loop=args.gif_loop,
            duration=args.gif_duration,
            fps=args.gif_fps,
        )

    if args.state:
        save(state_data, args.state)
    if args.observation:
        save(observation_data, args.observation)
def make_data(
    env: InnerEnv, discount: float
) -> Tuple[Data[State], Data[Observation]]:
    """Run a single random-policy episode in ``env`` and return the
    collected (state data, observation data) pair.
    """
    states: DataBuilder[State] = DataBuilder(discount)
    observations: DataBuilder[Observation] = DataBuilder(discount)

    # Seed both builders with the initial state/observation.
    env.reset()
    states.append0(env.state)
    observations.append0(env.observation)

    done = False
    while not done:
        action = rnd.choice(env.action_space.actions)
        reward, done = env.step(action)
        states.append(env.state, action, reward)
        observations.append(env.observation, action, reward)

    return states.build(), observations.build()
def get_args():
    """Parse command-line arguments.

    Exits the process early for ``--imageio-help``; raises ValueError
    when neither ``--state`` nor ``--observation`` is given.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('mode', choices=['images', 'gif', 'mp4'])
    parser.add_argument('yaml', help='env YAML file')
    parser.add_argument('--seed', type=int, default=None, help='env seed')
    parser.add_argument(
        '--gif-loop', type=int, default=0, help='gif loop count'
    )
    parser.add_argument(
        '--gif-duration', type=float, default=None, help='gif duration'
    )
    parser.add_argument('--gif-fps', type=float, default=2.0, help='gif fps')
    parser.add_argument(
        '--discount', type=float, default=1.0, help='discount factor'
    )
    parser.add_argument(
        '--max-steps', type=int, default=100, help='maximum number of steps'
    )
    parser.add_argument('--state', default=None, help='state filename')
    parser.add_argument(
        '--observation', default=None, help='observation filename'
    )

    # Sentinel distinguishes a bare `--imageio-help` from one with a value.
    sentinel = object()
    parser.add_argument(
        '--imageio-help',
        nargs='?',
        const=sentinel,
        help='run imageio.help(name) and exit',
    )

    args = parser.parse_args()

    if args.imageio_help is not None:
        name = None if args.imageio_help is sentinel else args.imageio_help
        imageio.help(name)
        sys.exit(0)

    if args.state is None and args.observation is None:
        raise ValueError(
            'you must give at least --state or --observation (or both)'
        )

    return args
# Script entry point.
if __name__ == '__main__':
    main()
| 27.492754
| 78
| 0.651292
|
4a024702fdea77636ffb295948dc88c5fa1aab32
| 27
|
py
|
Python
|
doctorbot/fb_doctor_chatbot/__init__.py
|
zuxfoucault/DoctorBot_demo
|
82e24078da4d2e6caba728b959812401109e014d
|
[
"MIT"
] | 1
|
2020-09-24T07:26:14.000Z
|
2020-09-24T07:26:14.000Z
|
doctorbot/fb_doctor_chatbot/__init__.py
|
lintzuhsiang/Doctorbot
|
6be98bbf380d14bb789d30a137ded3b51b3f31fd
|
[
"MIT"
] | null | null | null |
doctorbot/fb_doctor_chatbot/__init__.py
|
lintzuhsiang/Doctorbot
|
6be98bbf380d14bb789d30a137ded3b51b3f31fd
|
[
"MIT"
] | null | null | null |
# from .views import Doctor
| 27
| 27
| 0.777778
|
4a0247040ed819a7251a7fa9adc3b61848d3e704
| 9,531
|
py
|
Python
|
sdk/python/pulumi_azure_native/kusto/v20200614/get_event_grid_data_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kusto/v20200614/get_event_grid_data_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kusto/v20200614/get_event_grid_data_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetEventGridDataConnectionResult',
'AwaitableGetEventGridDataConnectionResult',
'get_event_grid_data_connection',
]
@pulumi.output_type
class GetEventGridDataConnectionResult:
    """
    Class representing an Event Grid data connection.
    """
    # NOTE: generated by the Pulumi SDK Generator -- keep edits mechanical.
    def __init__(__self__, blob_storage_event_type=None, consumer_group=None, data_format=None, event_hub_resource_id=None, id=None, ignore_first_record=None, kind=None, location=None, mapping_rule_name=None, name=None, storage_account_resource_id=None, table_name=None, type=None):
        # Each argument is type-checked defensively (only when truthy) and
        # stored through pulumi.set so the @pulumi.output_type machinery
        # can expose it via the matching @property below.
        if blob_storage_event_type and not isinstance(blob_storage_event_type, str):
            raise TypeError("Expected argument 'blob_storage_event_type' to be a str")
        pulumi.set(__self__, "blob_storage_event_type", blob_storage_event_type)
        if consumer_group and not isinstance(consumer_group, str):
            raise TypeError("Expected argument 'consumer_group' to be a str")
        pulumi.set(__self__, "consumer_group", consumer_group)
        if data_format and not isinstance(data_format, str):
            raise TypeError("Expected argument 'data_format' to be a str")
        pulumi.set(__self__, "data_format", data_format)
        if event_hub_resource_id and not isinstance(event_hub_resource_id, str):
            raise TypeError("Expected argument 'event_hub_resource_id' to be a str")
        pulumi.set(__self__, "event_hub_resource_id", event_hub_resource_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ignore_first_record and not isinstance(ignore_first_record, bool):
            raise TypeError("Expected argument 'ignore_first_record' to be a bool")
        pulumi.set(__self__, "ignore_first_record", ignore_first_record)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if mapping_rule_name and not isinstance(mapping_rule_name, str):
            raise TypeError("Expected argument 'mapping_rule_name' to be a str")
        pulumi.set(__self__, "mapping_rule_name", mapping_rule_name)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if storage_account_resource_id and not isinstance(storage_account_resource_id, str):
            raise TypeError("Expected argument 'storage_account_resource_id' to be a str")
        pulumi.set(__self__, "storage_account_resource_id", storage_account_resource_id)
        if table_name and not isinstance(table_name, str):
            raise TypeError("Expected argument 'table_name' to be a str")
        pulumi.set(__self__, "table_name", table_name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="blobStorageEventType")
    def blob_storage_event_type(self) -> Optional[str]:
        """
        The name of blob storage event type to process.
        """
        return pulumi.get(self, "blob_storage_event_type")

    @property
    @pulumi.getter(name="consumerGroup")
    def consumer_group(self) -> str:
        """
        The event hub consumer group.
        """
        return pulumi.get(self, "consumer_group")

    @property
    @pulumi.getter(name="dataFormat")
    def data_format(self) -> Optional[str]:
        """
        The data format of the message. Optionally the data format can be added to each message.
        """
        return pulumi.get(self, "data_format")

    @property
    @pulumi.getter(name="eventHubResourceId")
    def event_hub_resource_id(self) -> str:
        """
        The resource ID where the event grid is configured to send events.
        """
        return pulumi.get(self, "event_hub_resource_id")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ignoreFirstRecord")
    def ignore_first_record(self) -> Optional[bool]:
        """
        A Boolean value that, if set to true, indicates that ingestion should ignore the first record of every file
        """
        return pulumi.get(self, "ignore_first_record")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Kind of the endpoint for the data connection
        Expected value is 'EventGrid'.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="mappingRuleName")
    def mapping_rule_name(self) -> Optional[str]:
        """
        The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
        """
        return pulumi.get(self, "mapping_rule_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="storageAccountResourceId")
    def storage_account_resource_id(self) -> str:
        """
        The resource ID of the storage account where the data resides.
        """
        return pulumi.get(self, "storage_account_resource_id")

    @property
    @pulumi.getter(name="tableName")
    def table_name(self) -> Optional[str]:
        """
        The table where the data should be ingested. Optionally the table information can be added to each message.
        """
        return pulumi.get(self, "table_name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetEventGridDataConnectionResult(GetEventGridDataConnectionResult):
    """Awaitable wrapper around ``GetEventGridDataConnectionResult``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which
        # is what allows the object to be awaited; awaiting resolves
        # immediately with a plain copy of the result.
        if False:
            yield self
        return GetEventGridDataConnectionResult(
            blob_storage_event_type=self.blob_storage_event_type,
            consumer_group=self.consumer_group,
            data_format=self.data_format,
            event_hub_resource_id=self.event_hub_resource_id,
            id=self.id,
            ignore_first_record=self.ignore_first_record,
            kind=self.kind,
            location=self.location,
            mapping_rule_name=self.mapping_rule_name,
            name=self.name,
            storage_account_resource_id=self.storage_account_resource_id,
            table_name=self.table_name,
            type=self.type)
def get_event_grid_data_connection(cluster_name: Optional[str] = None,
                                   data_connection_name: Optional[str] = None,
                                   database_name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventGridDataConnectionResult:
    """
    Class representing an Event Grid data connection.


    :param str cluster_name: The name of the Kusto cluster.
    :param str data_connection_name: The name of the data connection.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['dataConnectionName'] = data_connection_name
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    # Fill in default invoke options and pin the SDK version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the fixed provider token for this API
    # version (kusto/v20200614).
    __ret__ = pulumi.runtime.invoke('azure-native:kusto/v20200614:getEventGridDataConnection', __args__, opts=opts, typ=GetEventGridDataConnectionResult).value

    return AwaitableGetEventGridDataConnectionResult(
        blob_storage_event_type=__ret__.blob_storage_event_type,
        consumer_group=__ret__.consumer_group,
        data_format=__ret__.data_format,
        event_hub_resource_id=__ret__.event_hub_resource_id,
        id=__ret__.id,
        ignore_first_record=__ret__.ignore_first_record,
        kind=__ret__.kind,
        location=__ret__.location,
        mapping_rule_name=__ret__.mapping_rule_name,
        name=__ret__.name,
        storage_account_resource_id=__ret__.storage_account_resource_id,
        table_name=__ret__.table_name,
        type=__ret__.type)
| 41.620087
| 282
| 0.673382
|
4a024790c2c28f3a23745cd643c6e7acd943483c
| 459
|
py
|
Python
|
test/test_kmer.py
|
vladsaveliev/sequana
|
f6ee7fa7fb47ec179ceedf24684ba861a244656d
|
[
"BSD-3-Clause"
] | 138
|
2016-07-13T06:24:45.000Z
|
2022-03-28T13:12:03.000Z
|
test/test_kmer.py
|
vladsaveliev/sequana
|
f6ee7fa7fb47ec179ceedf24684ba861a244656d
|
[
"BSD-3-Clause"
] | 655
|
2016-03-10T17:33:40.000Z
|
2022-03-30T16:10:45.000Z
|
test/test_kmer.py
|
vladsaveliev/sequana
|
f6ee7fa7fb47ec179ceedf24684ba861a244656d
|
[
"BSD-3-Clause"
] | 39
|
2016-11-04T11:40:58.000Z
|
2022-03-15T08:12:29.000Z
|
from sequana.kmer import build_kmer, get_kmer
def test_build_kmer():
    # All 3-mers over the alphabet {G, C}: exactly 2**3 distinct strings.
    kmers = build_kmer(length=3, letters='GC')
    assert len(kmers) == 8
    for expected in ('CCG', 'CCC', 'CGC', 'CGG',
                     'GCC', 'GCG', 'GGC', 'GGG'):
        assert expected in kmers
def test_get_kmer():
    # Sliding 4-mers over an 8-base sequence -> 5 windows, left to right.
    expected = ['ACGT', 'CGTA', 'GTAA', 'TAAA', 'AAAA']
    assert list(get_kmer('ACGTAAAA', k=4)) == expected
| 20.863636
| 58
| 0.6122
|
4a024931a0297faac188611bfb81ef4c09bd8552
| 1,499
|
py
|
Python
|
alipay/aop/api/domain/AlipayMarketingToolFengdieEditorQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayMarketingToolFengdieEditorQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayMarketingToolFengdieEditorQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingToolFengdieEditorQueryModel(object):
    """Request model for the Fengdie (H5 page builder) editor query API."""

    def __init__(self):
        # Backing fields for the two request parameters.
        self._activity_id = None
        self._redirect_url = None

    @property
    def activity_id(self):
        return self._activity_id

    @activity_id.setter
    def activity_id(self, value):
        self._activity_id = value

    @property
    def redirect_url(self):
        return self._redirect_url

    @redirect_url.setter
    def redirect_url(self, value):
        self._redirect_url = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for key in ('activity_id', 'redirect_url'):
            value = getattr(self, key)
            if not value:
                continue
            # Nested models serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayMarketingToolFengdieEditorQueryModel()
        if 'activity_id' in d:
            model.activity_id = d['activity_id']
        if 'redirect_url' in d:
            model.redirect_url = d['redirect_url']
        return model
| 26.767857
| 75
| 0.618412
|
4a024a5672ab0077d07c276976d32a9abd16c90f
| 244
|
py
|
Python
|
data_acquisition/cf_app_utils/auth/__init__.py
|
butla/PyDAS
|
39df5abbe9563b58da7caaa191b89852fb122ab7
|
[
"MIT"
] | 13
|
2016-06-29T13:35:05.000Z
|
2021-05-25T09:47:31.000Z
|
data_acquisition/cf_app_utils/auth/__init__.py
|
butla/PyDAS
|
39df5abbe9563b58da7caaa191b89852fb122ab7
|
[
"MIT"
] | 1
|
2016-07-11T23:11:33.000Z
|
2016-07-11T23:11:33.000Z
|
data_acquisition/cf_app_utils/auth/__init__.py
|
butla/PyDAS
|
39df5abbe9563b58da7caaa191b89852fb122ab7
|
[
"MIT"
] | 3
|
2017-10-17T15:54:25.000Z
|
2022-03-24T01:11:37.000Z
|
"""
Authorization utilities for microservices using JWT tokens.
"""
from .org_check import (UserOrgAccessChecker, NoOrgAccessError, PermissionServiceError,
USER_MANAGEMENT_PATH)
from .utils import get_uaa_key, UaaError
| 30.5
| 87
| 0.75
|
4a024b554d5ace777070ea8d76918b9fb2005d88
| 17,721
|
py
|
Python
|
test/python/ibmq/test_ibmqjob_states.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
test/python/ibmq/test_ibmqjob_states.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
test/python/ibmq/test_ibmqjob_states.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring,broad-except
"""IBMQJob states test-suite."""
import unittest
import time
from contextlib import suppress
from qiskit.providers.jobstatus import JobStatus
from qiskit.providers.ibmq.ibmqjob import IBMQJobPreQobj, IBMQJob, API_FINAL_STATES
from qiskit.providers.ibmq.api import ApiError
from qiskit.providers import JobError, JobTimeoutError
from ..common import JobTestCase
from .._mockutils import new_fake_qobj, FakeBackend
class TestIBMQJobStates(JobTestCase):
"""
Test ibmqjob module.
"""
def setUp(self):
self._current_api = None
self._current_qjob = None
def test_unrecognized_status(self):
job = self.run_with_api(UnknownStatusAPI())
with self.assertRaises(JobError):
self.wait_for_initialization(job)
def test_validating_job(self):
job = self.run_with_api(ValidatingAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.VALIDATING)
def test_error_while_creating_job(self):
job = self.run_with_api(ErrorWhileCreatingAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.ERROR)
def test_error_while_validating_job(self):
job = self.run_with_api(ErrorWhileValidatingAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.VALIDATING)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.ERROR)
def test_status_flow_for_non_queued_job(self):
job = self.run_with_api(NonQueuedAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.RUNNING)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.DONE)
def test_status_flow_for_queued_job(self):
job = self.run_with_api(QueuedAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.QUEUED)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.RUNNING)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.DONE)
def test_status_flow_for_cancellable_job(self):
job = self.run_with_api(CancellableAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.RUNNING)
can_cancel = job.cancel()
self.assertTrue(can_cancel)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.CANCELLED)
def test_status_flow_for_non_cancellable_job(self):
job = self.run_with_api(NonCancellableAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.RUNNING)
can_cancel = job.cancel()
self.assertFalse(can_cancel)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.RUNNING)
def test_status_flow_for_errored_cancellation(self):
job = self.run_with_api(ErroredCancellationAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.RUNNING)
can_cancel = job.cancel()
self.assertFalse(can_cancel)
self.assertEqual(job.status(), JobStatus.RUNNING)
def test_status_flow_for_unable_to_run_valid_qobj(self):
"""Contrary to other tests, this one is expected to fail even for a
non-job-related issue. If the API fails while sending a job, we don't
get an id so we can not query for the job status."""
job = self.run_with_api(UnavailableRunAPI())
with self.assertRaises(JobError):
self.wait_for_initialization(job)
with self.assertRaises(JobError):
job.status()
def test_api_throws_temporarily_but_job_is_finished(self):
job = self.run_with_api(ThrowingNonJobRelatedErrorAPI(errors_before_success=2))
# First time we query the server...
with self.assertRaises(JobError):
# The error happens inside wait_for_initialization, the first time
# it calls to status() after INITIALIZING.
self.wait_for_initialization(job)
# Also an explicit second time...
with self.assertRaises(JobError):
job.status()
# Now the API gets fixed and doesn't throw anymore.
self.assertEqual(job.status(), JobStatus.DONE)
def test_status_flow_for_unable_to_run_invalid_qobj(self):
job = self.run_with_api(RejectingJobAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.ERROR)
def test_error_while_running_job(self):
job = self.run_with_api(ErrorWhileRunningAPI())
self.wait_for_initialization(job)
self.assertEqual(job.status(), JobStatus.RUNNING)
self._current_api.progress()
self.assertEqual(job.status(), JobStatus.ERROR)
self.assertEqual(job.error_message(), 'Error running job')
def test_cancelled_result(self):
job = self.run_with_api(CancellableAPI())
self.wait_for_initialization(job)
job.cancel()
self._current_api.progress()
with self.assertRaises(JobError):
_ = job.result()
self.assertEqual(job.status(), JobStatus.CANCELLED)
def test_errored_result(self):
job = self.run_with_api(ThrowingGetJobAPI())
self.wait_for_initialization(job)
with self.assertRaises(JobError):
job.result()
def test_completed_result(self):
job = self.run_with_api(NonQueuedAPI())
self.wait_for_initialization(job)
self._current_api.progress()
self.assertEqual(job.result().success, True)
self.assertEqual(job.status(), JobStatus.DONE)
def test_block_on_result_waiting_until_completed(self):
from concurrent import futures
job = self.run_with_api(NonQueuedAPI())
with futures.ThreadPoolExecutor() as executor:
executor.submit(_auto_progress_api, self._current_api)
result = job.result()
self.assertEqual(result.success, True)
self.assertEqual(job.status(), JobStatus.DONE)
def test_block_on_result_waiting_until_cancelled(self):
from concurrent.futures import ThreadPoolExecutor
job = self.run_with_api(CancellableAPI())
with ThreadPoolExecutor() as executor:
executor.submit(_auto_progress_api, self._current_api)
with self.assertRaises(JobError):
job.result()
self.assertEqual(job.status(), JobStatus.CANCELLED)
def test_block_on_result_waiting_until_exception(self):
from concurrent.futures import ThreadPoolExecutor
job = self.run_with_api(ThrowingAPI())
with ThreadPoolExecutor() as executor:
executor.submit(_auto_progress_api, self._current_api)
with self.assertRaises(JobError):
job.result()
def test_never_complete_result_with_timeout(self):
job = self.run_with_api(NonQueuedAPI())
self.wait_for_initialization(job)
with self.assertRaises(JobTimeoutError):
job.result(timeout=0.2)
def test_cancel_while_initializing_fails(self):
job = self.run_with_api(CancellableAPI())
can_cancel = job.cancel()
self.assertFalse(can_cancel)
self.assertEqual(job.status(), JobStatus.INITIALIZING)
def test_only_final_states_cause_datailed_request(self):
from unittest import mock
# The state ERROR_CREATING_JOB is only handled when running the job,
# and not while checking the status, so it is not tested.
all_state_apis = {'COMPLETED': NonQueuedAPI,
'CANCELLED': CancellableAPI,
'ERROR_VALIDATING_JOB': ErrorWhileValidatingAPI,
'ERROR_RUNNING_JOB': ErrorWhileRunningAPI}
for status, api in all_state_apis.items():
with self.subTest(status=status):
job = self.run_with_api(api())
self.wait_for_initialization(job)
with suppress(BaseFakeAPI.NoMoreStatesError):
self._current_api.progress()
with mock.patch.object(self._current_api, 'get_job',
wraps=self._current_api.get_job):
job.status()
if status in API_FINAL_STATES:
self.assertTrue(self._current_api.get_job.called)
else:
self.assertFalse(self._current_api.get_job.called)
# TODO: Once qobj results come by default from all the simulator
# backends, move to integration tests in test_result.py
def test_qobj_result(self):
    """Qobj-format responses are parsed into a Result with per-experiment counts."""
    job = self.run_with_api(QObjResultAPI(), job_class=IBMQJob)
    self.wait_for_initialization(job)
    self._current_api.progress()
    result = job.result()
    self.assertEqual(result.success, True)
    # Hex keys in the fake payload ('0x0'...'0x3') map to bit strings.
    self.assertEqual(result.get_counts('Bell state'),
                     {'00': 480, '11': 490, '01': 20, '10': 34})
    self.assertEqual(result.get_counts('Bell state XY'),
                     {'00': 29, '11': 15, '01': 510, '10': 480})
    self.assertEqual(len(result.results), 2)
def run_with_api(self, api, job_class=IBMQJobPreQobj):
    """Creates a new ``IBMQJobPreQobj`` instance running with the provided API
    object.

    The created api and job are also stored in ``self._current_api`` and
    ``self._current_qjob`` so individual tests can drive/inspect them.
    """
    backend = FakeBackend()
    self._current_api = api
    self._current_qjob = job_class(backend, None, api, False, qobj=new_fake_qobj())
    self._current_qjob.submit()
    return self._current_qjob
def _auto_progress_api(api, interval=0.2):
    """Progress a `BaseFakeAPI` instance every `interval` seconds until reaching
    the final state.
    """
    # NoMoreStatesError signals the final canned state; swallow it and return.
    with suppress(BaseFakeAPI.NoMoreStatesError):
        while True:
            time.sleep(interval)
            api.progress()
class BaseFakeAPI():
    """Base class for faking the IBM-Q API.

    Subclasses override ``_job_status`` with the sequence of responses the
    fake walks through (one step per ``progress()`` call) and ``_can_cancel``
    to advertise cancellation support.
    """

    class NoMoreStatesError(Exception):
        """Raised when it is not possible to progress more."""

    # Canned per-state responses; subclasses provide the actual sequence.
    _job_status = []
    # Whether this fake pretends to support job cancellation.
    _can_cancel = False

    def __init__(self):
        self._state = 0
        if self._can_cancel:
            self.config = {'hub': 'test-hub',
                           'group': 'test-group',
                           'project': 'test-project'}
        else:
            self.config = {'hub': None, 'group': None, 'project': None}

    def get_job(self, job_id):
        """Return the full canned response for the current state."""
        if job_id:
            return self._job_status[self._state]
        return {'status': 'Error', 'error': 'Job ID not specified'}

    def get_status_job(self, job_id):
        """Return only the status-related fields of the full response."""
        wanted = ('status', 'error', 'infoQueue')
        full = self.get_job(job_id)
        return {field: value for field, value in full.items()
                if field in wanted}

    def run_job(self, *_args, **_kwargs):
        """Pretend to submit a job, returning a fixed id after a short delay."""
        time.sleep(0.2)
        return {'id': 'TEST_ID'}

    def cancel_job(self, job_id, *_args, **_kwargs):
        """Pretend to cancel: empty dict on success, error payload otherwise."""
        if not job_id:
            return {'status': 'Error', 'error': 'Job ID not specified'}
        if self._can_cancel:
            return {}
        return {'error': 'testing fake API can not cancel'}

    def progress(self):
        """Advance to the next canned state; raise when already at the last."""
        if self._state == len(self._job_status) - 1:
            raise self.NoMoreStatesError()
        self._state += 1
class UnknownStatusAPI(BaseFakeAPI):
    """Class for emulating an API with unknown status codes."""

    # Single state whose status string is not a recognized API code.
    _job_status = [
        {'status': 'UNKNOWN'}
    ]
class ValidatingAPI(BaseFakeAPI):
    """Class for emulating an API with job validation."""

    # VALIDATING, then transitions to RUNNING on progress().
    _job_status = [
        {'status': 'VALIDATING'},
        {'status': 'RUNNING'}
    ]
class ErrorWhileValidatingAPI(BaseFakeAPI):
    """Class for emulating an API processing an invalid job."""

    # Validation starts, then ends in the validation-error final state.
    _job_status = [
        {'status': 'VALIDATING'},
        {'status': 'ERROR_VALIDATING_JOB'}
    ]
class NonQueuedAPI(BaseFakeAPI):
    """Class for emulating a successfully-completed non-queued API."""

    # Straight RUNNING -> COMPLETED with an empty result payload.
    _job_status = [
        {'status': 'RUNNING'},
        {'status': 'COMPLETED', 'qasms': []}
    ]
class ErrorWhileCreatingAPI(BaseFakeAPI):
    """Class emulating an API processing a job that errors while creating
    the job.
    """

    # Immediately in the creation-error state; no progression possible.
    _job_status = [
        {'status': 'ERROR_CREATING_JOB'}
    ]
class ErrorWhileRunningAPI(BaseFakeAPI):
    """Class emulating an API processing a job that errors while running."""

    # Runs once, then finishes in the runtime-error state with a message.
    _job_status = [
        {'status': 'RUNNING'},
        {'status': 'ERROR_RUNNING_JOB', 'error': 'Error running job'}
    ]
class QueuedAPI(BaseFakeAPI):
    """Class for emulating a successfully-completed queued API."""

    # First state reports a queue position via 'infoQueue', then runs.
    _job_status = [
        {'status': 'RUNNING', 'infoQueue': {'status': 'PENDING_IN_QUEUE'}},
        {'status': 'RUNNING'},
        {'status': 'COMPLETED'}
    ]
class RejectingJobAPI(BaseFakeAPI):
    """Class for emulating an API unable of initializing."""

    def run_job(self, *_args, **_kwargs):
        # Submission itself is rejected with an error payload.
        return {'error': 'invalid qobj'}
class UnavailableRunAPI(BaseFakeAPI):
    """Class for emulating an API throwing before even initializing."""

    def run_job(self, *_args, **_kwargs):
        # Simulate an unreachable service: delay, then raise.
        time.sleep(0.2)
        raise ApiError()
class ThrowingAPI(BaseFakeAPI):
    """Class for emulating an API throwing in the middle of execution."""

    _job_status = [
        {'status': 'RUNNING'}
    ]

    def get_job(self, job_id):
        # Every status/detail lookup fails once the job has been submitted.
        raise ApiError()
class ThrowingNonJobRelatedErrorAPI(BaseFakeAPI):
    """Class for emulating a scenario where the job is done but the API
    fails some times for non job-related errors.
    """

    _job_status = [
        {'status': 'COMPLETED'}
    ]

    def __init__(self, errors_before_success=2):
        super().__init__()
        # Number of transient ApiErrors still to raise before succeeding.
        self._number_of_exceptions_to_throw = errors_before_success

    def get_job(self, job_id):
        # Fail transiently until the countdown hits zero, then delegate.
        if self._number_of_exceptions_to_throw != 0:
            self._number_of_exceptions_to_throw -= 1
            raise ApiError()
        return super().get_job(job_id)
class ThrowingGetJobAPI(BaseFakeAPI):
    """Class for emulating an API throwing in the middle of execution. But not in
    get_status_job(), just in get_job().
    """

    _job_status = [
        {'status': 'COMPLETED'}
    ]

    def get_status_job(self, job_id):
        # Bypass get_job() (which throws) and serve the canned state directly.
        return self._job_status[self._state]

    def get_job(self, job_id):
        # Detailed lookups always fail.
        raise ApiError('Unexpected error')
class CancellableAPI(BaseFakeAPI):
    """Class for emulating an API with cancellation."""

    # Runs once, then ends in the CANCELLED final state.
    _job_status = [
        {'status': 'RUNNING'},
        {'status': 'CANCELLED'}
    ]
    _can_cancel = True
class NonCancellableAPI(BaseFakeAPI):
    """Class for emulating an API without cancellation running a long job."""

    # Stays RUNNING for all three states; _can_cancel keeps its False default.
    _job_status = [
        {'status': 'RUNNING'},
        {'status': 'RUNNING'},
        {'status': 'RUNNING'}
    ]
class ErroredCancellationAPI(BaseFakeAPI):
    """Class for emulating an API with cancellation but throwing while
    trying.
    """

    _job_status = [
        {'status': 'RUNNING'},
        {'status': 'RUNNING'},
        {'status': 'RUNNING'}
    ]
    _can_cancel = True

    def cancel_job(self, job_id, *_args, **_kwargs):
        # Cancellation is advertised but always reports an error payload.
        return {'status': 'Error', 'error': 'test-error-while-cancelling'}
# TODO: Remove once qobj results come by default from all the simulator
# backends.
class QObjResultAPI(BaseFakeAPI):
    """Class for emulating a successfully-completed non-queued API."""

    # Final state carries a full Qobj-format result ('qObjectResult') with
    # two experiments; counts use hex bitstring keys ('0x0'..'0x3').
    _job_status = [
        {'status': 'RUNNING'},
        {
            'status': 'COMPLETED',
            'qObjectResult': {
                'backend_name': 'ibmqx2',
                'backend_version': '1.1.1',
                'job_id': 'XC1323XG2',
                'qobj_id': 'Experiment1',
                'success': True,
                'status': 'COMPLETED',
                'results': [
                    {
                        'header': {
                            'name': 'Bell state',
                            'memory_slots': 2,
                            'creg_sizes': [['c', 2]],
                            'clbit_labels': [['c', 0], ['c', 1]],
                            'qubit_labels': [['q', 0], ['q', 1]]
                        },
                        'shots': 1024,
                        'status': 'DONE',
                        'success': True,
                        'data': {
                            'counts': {
                                '0x0': 480, '0x3': 490, '0x1': 20, '0x2': 34
                            }
                        }
                    },
                    {
                        'header': {
                            'name': 'Bell state XY',
                            'memory_slots': 2,
                            'creg_sizes': [['c', 2]],
                            'clbit_labels': [['c', 0], ['c', 1]],
                            'qubit_labels': [['q', 0], ['q', 1]]
                        },
                        'shots': 1024,
                        'status': 'DONE',
                        'success': True,
                        'data': {
                            'counts': {
                                '0x0': 29, '0x3': 15, '0x1': 510, '0x2': 480
                            }
                        }
                    }
                ]
            }
        }
    ]
if __name__ == '__main__':
    # Run the fake-API job-state tests with per-test output.
    unittest.main(verbosity=2)
| 31.758065
| 87
| 0.605045
|
4a024bbbc4c50f790f57c3fecebda41a1eeb1ec6
| 3,312
|
py
|
Python
|
OAuthBrowser/wait.py
|
Saadmairaj/OAuthBrowser
|
4de8ae83e8ad43f9c6333798d4046a0461a413fe
|
[
"MIT"
] | 2
|
2020-07-01T15:53:16.000Z
|
2021-07-11T10:25:28.000Z
|
OAuthBrowser/wait.py
|
Saadmairaj/OAuthBrowser
|
4de8ae83e8ad43f9c6333798d4046a0461a413fe
|
[
"MIT"
] | null | null | null |
OAuthBrowser/wait.py
|
Saadmairaj/OAuthBrowser
|
4de8ae83e8ad43f9c6333798d4046a0461a413fe
|
[
"MIT"
] | null | null | null |
import time
import subprocess
from OAuthBrowser.util import threaded
from urllib.parse import urlparse, parse_qs
class Wait:
    """
    Wait class for a web browser: each method blocks until some browser
    event occurs, polling the current URL every ``pause_time`` seconds.
    All public methods run in a background thread via ``@threaded``.
    """

    def __init__(self, browser, pause_time=0.7):
        """Waits until an event occurs.

        Arguments:
            browser -- the browser class instance to poll
                (must expose ``get_current_url()`` and ``browser``).

        Keyword Arguments:
            pause_time {float} -- time interval to check for event. (default: {0.7})
        """
        super().__init__()
        self.browser = browser
        self.pause_time = pause_time

    @threaded
    def until_url_netloc_changes(self):
        "Waits until the URL's netloc differs from its value at call time."
        url = urlparse(self.browser.get_current_url())
        while url.netloc == urlparse(self.browser.get_current_url()).netloc:
            time.sleep(self.pause_time)

    @threaded
    def until_url_changes_times(self, count):
        "Waits till given number of times the url changes."
        url = urlparse(self.browser.get_current_url())
        while count > 0:
            if url != urlparse(self.browser.get_current_url()):
                # URL changed: remember the new one and count the change.
                url = urlparse(self.browser.get_current_url())
                count -= 1
            time.sleep(self.pause_time)

    @threaded
    def until_url_match(self, url):
        "Waits till given url matches with the current url."
        # Bug fix: the current URL was captured once before the loop and
        # never refreshed, so the wait could only ever finish if the URLs
        # already matched at call time. Re-poll the browser each iteration.
        target = urlparse(url)
        while target != urlparse(self.browser.get_current_url()):
            time.sleep(self.pause_time)

    @threaded
    def until_timeout(self, seconds):
        "Waits for given seconds."
        time.sleep(seconds)

    @threaded
    def until_closed(self):
        "Waits until the window is closed."
        # AppleScript asking System Events whether the browser process is
        # still running (macOS only; relies on `osascript`).
        cmd = """on is_running(appName)
tell application "System Events" to (name of processes) contains appName
end is_running
set isRunning to is_running("%s")""" % (self.browser.browser)
        while True:
            p = subprocess.Popen(["osascript", "-"],
                                 stderr=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 universal_newlines=True, )
            stdout, stderr = p.communicate(cmd)
            # osascript prints "false" once the process is gone.
            if "false" in stdout:
                break
            time.sleep(self.pause_time)
        return stdout, stderr, p.returncode

    @threaded
    def until_inactive_timeout(self, seconds):
        "Waits on each url for given seconds till timer is 0. \
If the url changes before timer goes 0 then timer will reset."
        timer = seconds
        current_url = urlparse(self.browser.get_current_url())
        while timer > 0:
            if current_url != urlparse(self.browser.get_current_url()):
                # Activity detected: restart the countdown on the new URL.
                current_url = urlparse(self.browser.get_current_url())
                timer = seconds
            else:
                timer -= 1
            time.sleep(1)

    @threaded
    def until_present_query(self, item):
        "Waits till the given query parameter appears in the current URL."
        while True:
            time.sleep(self.pause_time)
            cur_url = urlparse(self.browser.get_current_url())
            query = parse_qs(cur_url.query)
            if item in query.keys():
                break
| 34.5
| 92
| 0.581824
|
4a024e49c4223cddffa4da5dd14580d206619204
| 654
|
py
|
Python
|
exam_preparation/Lists.py
|
bozhikovstanislav/Python-Fundamentals
|
072fd2c8bc962d20d4c526947349fdeae0bc94a5
|
[
"MIT"
] | null | null | null |
exam_preparation/Lists.py
|
bozhikovstanislav/Python-Fundamentals
|
072fd2c8bc962d20d4c526947349fdeae0bc94a5
|
[
"MIT"
] | null | null | null |
exam_preparation/Lists.py
|
bozhikovstanislav/Python-Fundamentals
|
072fd2c8bc962d20d4c526947349fdeae0bc94a5
|
[
"MIT"
] | null | null | null |
def inc_2(el):
    """Return ``el + 2`` for even values; odd values pass through unchanged."""
    return el + 2 if el % 2 == 0 else el
def inc_3(el):
    """Return ``el + 3`` for odd values; even values pass through unchanged."""
    return el if el % 2 == 0 else el + 3
data = input()
# Read space-separated integers per line until the sentinel 'stop playing'.
while not data == 'stop playing':
    nums_list = list(map(int, data.split()))
    if len(nums_list) == len(set(nums_list)):
        # All values distinct: bump evens by 2, print comma-joined.
        nums_list = sorted(list(map(inc_2, nums_list)))
        print(f'Unique list: {",".join(list(map(str, nums_list)))}')
    else:
        # Duplicates present: bump odds by 3, print colon-joined.
        nums_list = sorted(list(map(inc_3, nums_list)))
        print(f'Non-unique list: {":".join(list(map(str, nums_list)))}')
    # Average of the adjusted list, two decimal places.
    result = sum(nums_list) / len(nums_list)
    print(f'Output: {result:.2f}')
    data = input()
| 25.153846
| 72
| 0.571865
|
4a024ebbe619af9dafb3caad3f7cdf3399de5839
| 952
|
py
|
Python
|
lib/galaxy/model/migrate/versions/0151_add_worker_process.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/model/migrate/versions/0151_add_worker_process.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | 2
|
2017-05-18T16:12:55.000Z
|
2022-03-08T12:08:43.000Z
|
lib/galaxy/model/migrate/versions/0151_add_worker_process.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Add table for worker processes
"""
import logging
from sqlalchemy import (
Column,
DateTime,
Integer,
MetaData,
String,
Table,
UniqueConstraint,
)
from galaxy.model.migrate.versions.util import (
create_table,
drop_table,
)
from galaxy.model.orm.now import now
log = logging.getLogger(__name__)
metadata = MetaData()

# Table registering Galaxy worker processes; the (server_name, hostname)
# pair is unique, so each process owns at most one row, refreshed via
# update_time on every write.
WorkerProcess_table = Table(
    "worker_process",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("server_name", String(255), index=True),
    Column("hostname", String(255)),
    Column("update_time", DateTime, default=now, onupdate=now),
    UniqueConstraint("server_name", "hostname"),
)
def upgrade(migrate_engine):
    """Create the worker_process table on the given engine."""
    print(__doc__)
    metadata.bind = migrate_engine
    # Load the existing schema so the create can run against current state.
    metadata.reflect()
    create_table(WorkerProcess_table)
def downgrade(migrate_engine):
    """Drop the worker_process table, reversing upgrade()."""
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_table(WorkerProcess_table)
| 18.666667
| 63
| 0.702731
|
4a024f01a2490ccdf81095b62de9c04f0efa803e
| 25,562
|
py
|
Python
|
scripts/old_scripts/STEP7_ESSAY_1_Descriptive_Statistics.py
|
nz44/phd_dissertation
|
cad5220db27e2e4230f2be2b63140a0e2fc877cc
|
[
"Apache-2.0"
] | null | null | null |
scripts/old_scripts/STEP7_ESSAY_1_Descriptive_Statistics.py
|
nz44/phd_dissertation
|
cad5220db27e2e4230f2be2b63140a0e2fc877cc
|
[
"Apache-2.0"
] | null | null | null |
scripts/old_scripts/STEP7_ESSAY_1_Descriptive_Statistics.py
|
nz44/phd_dissertation
|
cad5220db27e2e4230f2be2b63140a0e2fc877cc
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################################################
# Generate Descriptive Stats Latex Tables
###########################################################################################################

# Maps internal variable/statistic names to the LaTeX labels used in tables.
var_latex_map = {
    'const': 'Constant',
    'score': 'Rating',
    'DeMeanedscore': 'Demeaned Rating',
    'reviews': 'Reviews',
    'ZSCOREreviews': 'Z Score Reviews',
    'DeMeanedZSCOREreviews': 'Demeaned Z Score Reviews',
    'minInstallsTop': '\makecell[l]{High Level \\\ Minimum Installs}',
    'DeMeanedminInstallsTop': '\makecell[l]{Demeaned High Level \\\ Minimum Installs}',
    'minInstallsMiddle': '\makecell[l]{Medium Level \\\ Minimum Installs}',
    'DeMeanedminInstallsMiddle': '\makecell[l]{Demeaned Medium Level \\\ Minimum Installs}',
    'minInstallsBottom': '\makecell[l]{Low Level \\\ Minimum Installs}',
    'DeMeanedminInstallsBottom': '\makecell[l]{Demeaned Low Level \\\ Minimum Installs}',
    'niche_app': 'Niche',
    'genreIdGame': 'Hedonic',
    'contentRatingAdult': 'Age Restrictive',
    'DaysSinceReleased': 'Released',
    'paidTrue': 'Paid',
    'offersIAPTrue': 'Offers IAP',
    'containsAdsTrue': 'Contains ads',
    'price': 'Price',
    'F stat': 'F statistic',
    'P-value': 'P Value',
    'rsquared': 'R Squared',
    'nobs': '\makecell[l]{number of \\\ observations}',
    '_cov_type': 'Covariance Type'}

# LaTeX variable names mapped to their prose definitions (variable
# definition table produced by key_var_definition()).
var_definition = {
    'Rating_{i,t}': '\makecell[l]{Weighted average (from 1 to 5) of cumulative consumer ratings of app $i$}',
    'Demeaned Rating_{i,t}': '\makecell[l]{Time demean $Rating_{i,t}$ by subtracting \\\ the mean of 7 consecutive monthly periods}',
    'Reviews_{i,t}': '\makecell[l]{Number of cumulative consumer reviews \\\ for the app $i$ between its release and period $t$}',
    'Z Score Reviews_{i,t}': 'Normalize number of reviews for App $i$ in period $t$ using Z-Score',
    'Demeaned Z Score Reviews_{i,t}': '\makecell[l]{Time demeaned z-score reviews \\\ by subtracting the mean from 7 consecutive periods}',
    '\makecell[l]{High Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the minimum cumulative installs of the app $i$ in \\\ period $t$ is above 10,000,000, otherwise 0.}',
    '\makecell[l]{Demeaned High Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Time demean High Level Minimum $Installs_{i,t}$ \\\ by subtracting the mean from 7 consecutive periods}',
    '\makecell[l]{Medium Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the minimum cumulative installs of the app $i$ \\\ in period $t$ is between 10,000 and 10,000,000, otherwise 0.}',
    '\makecell[l]{Demeaned Medium Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Time demean Medium Level Minimum $Installs_{i,t}$ \\\ by subtracting the mean from 7 consecutive periods}',
    '\makecell[l]{Low Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the minimum cumulative installs of the app $i$ \\\ in period $t$ is below 10,000, otherwise 0.}',
    '\makecell[l]{Demeaned Low Level \\\ Minimum Installs_{i,t}}': '\makecell[l]{Time demean Low Level Minimum $Installs_{i,t}$ \\\ by subtracting the mean from 7 consecutive periods}',
    'Niche_{i}': '\makecell[l]{Time invariant dummy variable which \\\ equals to 1 if App $i$ is niche, otherwise 0}',
    'Hedonic_{i}': '\makecell[l]{Time invariant dummy variable which \\\ equals to 1 if App $i$ is in the category GAME, otherwise 0}',
    'Age Restrictive_{i}': '\makecell[l]{Time invariant dummy variable which \\\ equals to 1 if App $i$ contains mature (17+) \\\ or adult (18+) content, otherwise 0}',
    'Released_{i}': '\makecell[l]{The number of days \\\ since App $i$ was released}',
    'Paid_{i,t}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the App $i$ is paid in period $t$}',
    'Offers IAP_{i,t}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the App $i$ offers within app purchases (IAP)}',
    'Contains ads_{i,t}': '\makecell[l]{Dummy variable, which equals to 1 if \\\ the App $i$ contains advertisement in period $t$}',
    'Price_{i,t}': 'Price of App $i$ in period $t$'}

# Display order (0 = first) for rows of the descriptive-statistics table.
descriptive_stats_table_row_order = {
    'niche_app': 0,
    'price': 1,
    'paidTrue': 2,
    'offersIAPTrue': 3,
    'containsAdsTrue': 4,
    'genreIdGame': 5,
    'contentRatingAdult': 6,
    'DaysSinceReleased': 7,
    'minInstallsTop': 8,
    'DeMeanedminInstallsTop': 9,
    'minInstallsMiddle': 10,
    'DeMeanedminInstallsMiddle': 11,
    'minInstallsBottom': 12,
    'DeMeanedminInstallsBottom': 13,
    'score': 14,
    'DeMeanedscore': 15,
    'reviews': 16,
    'ZSCOREreviews': 17,
    'DeMeanedZSCOREreviews': 18,
}

# Maps pandas aggregate column names to their LaTeX column headers.
descriptive_stats_table_column_map = {
    'mean': 'Mean',
    'median': 'Median',
    'std': '\makecell[l]{Standard \\\ Deviation}',
    'min': 'Min',
    'max': 'Max',
    'count': '\makecell[l]{Total \\\ Observations}',
    '0_Count': '\makecell[l]{False \\\ Observations}',
    '1_Count': '\makecell[l]{True \\\ Observations}',
}

# Variables stored per-panel as '<name>_<panel>' columns.
time_variant_vars = ['score', 'reviews', ]
def correlation_matrix(self, time_variant_vars, time_invariant_vars, the_panel):
    """
    This is for the purpose of checking multicollinearity between independent variables.
    Prints the pairwise correlation matrix of the selected variables, then
    returns a fresh reg_preparation instance carrying the current state.
    """
    df = self.select_vars(time_variant_vars_list=time_variant_vars,
                          time_invariant_vars_list=time_invariant_vars,
                          the_panel=the_panel)
    df_corr = df.corr()
    print(df_corr)
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def bar_chart_a_dummy_against_dummy_or_cat(self, df, dummy1, dummy2):
    """Save a stacked bar chart of dummy1 split by dummy2, annotating each
    segment with its within-bar percentage."""
    fig, ax = plt.subplots()
    df2 = df.groupby([dummy1, dummy2]).size()
    ax = df2.unstack().plot.bar(stacked=True, ax=ax)
    # Accumulate total heights of the two bars; the code assumes bar
    # patches sit at x == -0.25 (dummy1 = 0) and x == 0.75 (dummy1 = 1).
    total_1 = 0
    total_2 = 0
    for p in ax.patches:
        if p.xy[0] == -0.25:
            total_1 += p.get_height()
        elif p.xy[0] == 0.75:
            total_2 += p.get_height()
    for p in ax.patches:
        if p.xy[0] == -0.25:
            percentage = '{:.1f}%'.format(100 * p.get_height() / total_1)
        elif p.xy[0] == 0.75:
            percentage = '{:.1f}%'.format(100 * p.get_height() / total_2)
        # NOTE(review): if a patch sits at any other x position,
        # 'percentage' is stale (or unbound on the first patch) — confirm
        # the bar positions are always exactly -0.25 / 0.75.
        x = p.get_x() + p.get_width() + 0.02
        y = p.get_y() + p.get_height() / 2
        ax.annotate(percentage, (x, y))
    filename = self.initial_panel + '_' + dummy1 + '_' + dummy2 + '.png'
    fig.savefig(reg_preparation.descriptive_stats_graphs / filename,
                facecolor='white',
                dpi=300)
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def kde_plot_by_dummy(self, df, dummy1, continuous1):
    """Save a kernel-density plot of continuous1, one curve per level of dummy1."""
    fig, ax = plt.subplots()
    ax = sns.kdeplot(data=df, x=continuous1, hue=dummy1,
                     fill=True, common_norm=False,
                     # palette="crest", remove palette because the color contrast is too low
                     alpha=.4, linewidth=0, ax=ax)
    ax.set_title(self.initial_panel + ' Dataset' + ' ' + dummy1 + ' against ' + continuous1)
    filename = self.initial_panel + '_' + dummy1 + '_' + continuous1 + '.png'
    fig.savefig(reg_preparation.descriptive_stats_graphs / filename,
                facecolor='white',
                dpi=300)
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def ONEDummy_relationship_to_keyvars(self, NicheDummy, the_panel):
    """
    NicheDummy is one of the NicheDummies for different subsamples
    """
    # ----------------- select relationship with key variables -----------------------------
    key_vars = ['score',
                'ratings',
                'reviews',
                'minInstalls',
                'minInstallsTop',
                'minInstallsMiddle',
                'minInstallsBottom',
                'CategoricalminInstalls',
                'price',
                'paidTrue',
                'containsAdsTrue',
                'offersIAPTrue']
    kvars = [i + '_' + the_panel for i in key_vars]
    time_invariants_vars = [
        'genreIdGame',
        'contentRatingAdult',
        'DaysSinceReleased']
    kvars.extend(time_invariants_vars)
    nichedummies = [i + 'NicheDummy' for i in self.ssnames]
    kvars.extend(nichedummies)
    # we are comparing niche dummies (under different samples) against all other dummies
    compare_against1 = ['genreIdGame',
                        'contentRatingAdult',
                        'paidTrue_' + the_panel,
                        'offersIAPTrue_' + the_panel,
                        'containsAdsTrue_' + the_panel,
                        'CategoricalminInstalls_' + the_panel]
    compare_against2 = ['score_' + the_panel,
                        'ratings_' + the_panel,
                        'reviews_' + the_panel]
    # --------------- LOOPING THROUGH EACH SUBSAMPLE ---------------------------------
    # NOTE(review): kvars, compare_against1 and compare_against2 are built
    # but never used below — the subsample loop appears unfinished.
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def key_var_definition(self):
    """Write the variable-definition table (reg_preparation.var_definition)
    to '<initial_panel>_variable_definition.tex'."""
    df = pd.Series(reg_preparation.var_definition).to_frame().reset_index()
    df.columns = ['Variable', 'Definition']
    df.set_index('Variable', inplace=True)
    # -------------- convert to latex --------------------------------------------------
    filename = self.initial_panel + '_variable_definition.tex'
    df2 = df.to_latex(buf=reg_preparation.descriptive_stats_tables / filename,
                      multirow=True,
                      multicolumn=True,
                      caption=('Descriptive Statistics of Key Variables'),
                      position='h!',
                      label='table:1',
                      na_rep='',
                      escape=False)
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def add_sum_row(self, df):
    """Return df with an extra row labelled 'sum' holding the column totals."""
    totals = df.sum(axis=0).to_frame().T
    totals.index = ['sum']
    return pd.concat([df, totals], join="inner")
def add_sum_col(self, df):
    """Attach a 'sum' column of row totals to df (in place) and return it."""
    df['sum'] = df.sum(axis=1)
    return df
def descriptive_stats_for_single_panel(self,
                                       continuous_vars,
                                       dummy_vars,
                                       cat_vars,
                                       time_invar_dum,
                                       time_invar_con,
                                       the_panel,
                                       add_sum_row_col=True):
    """
    Build summary-statistic tables for one panel period and store them in
    ``self.descriptive_stats_tables``.

    This must be run after self.create_new_dummies_from_cat_var to get updated self.df.
    Panel-varying variables are stored as '<var>_<the_panel>' columns; the
    time_invar_* lists name columns without a panel suffix.
    """
    # ----- Select Vars --------------------------------------------------------------
    con_vars = [i + '_' + the_panel for i in continuous_vars]
    con_vars.extend(time_invar_con)
    dum_vars = [i + '_' + the_panel for i in dummy_vars]
    dum_vars.extend(time_invar_dum)
    cat_vars = [i + '_' + the_panel for i in cat_vars]
    con_and_dum_vars = copy.deepcopy(con_vars)
    con_and_dum_vars.extend(dum_vars)
    # ----- Select DFs ---------------------------------------------------------------
    new_df = self.df.copy(deep=True)
    con_vars_df = new_df[con_vars]
    dum_vars_df = new_df[dum_vars]
    cat_vars_df = new_df[cat_vars]
    con_and_dum_df = new_df[con_and_dum_vars]
    # ----- Continuous Variables Summary Stats ---------------------------------------
    con_vars_sum_stats = con_vars_df.agg(['mean', 'std', 'min', 'median', 'max', 'count'], axis=0)
    # ----- Dummy Variables Count ----------------------------------------------------
    # Per-dummy level counts via a zero-filled helper column + groupby.
    dum_stats_dfs = []
    for i in dum_vars:
        dum_vars_df['Count'+i] = 0
        df = dum_vars_df[[i, 'Count'+i]].groupby(i).count()
        dum_stats_dfs.append(df)
    dum_vars_sum_stats = functools.reduce(lambda a, b: a.join(b, how='inner'), dum_stats_dfs)
    if add_sum_row_col is True:
        dum_vars_sum_stats = self.add_sum_row(dum_vars_sum_stats)
    # ----- Continuous and Dummy Variables Together ----------------------------------
    con_and_dum_vars_stats = con_and_dum_df.agg(['mean', 'std', 'min', 'median', 'max', 'count'], axis=0)
    con_and_dum_vars_stats = con_and_dum_vars_stats.T
    con_and_dum_vars_stats['count'] = con_and_dum_vars_stats['count'].astype(int)
    # NOTE(review): the dummy counts are recomputed here with a 'Count_'
    # prefix, overwriting dum_vars_sum_stats built above — confirm the
    # duplication is intended.
    dum_stats_dfs = []
    for i in dum_vars:
        dum_vars_df['Count_' + i] = 0
        df = dum_vars_df[[i, 'Count_' + i]].groupby(i).count()
        dum_stats_dfs.append(df)
    dum_vars_sum_stats = functools.reduce(lambda a, b: a.join(b, how='inner'), dum_stats_dfs)
    # Strip the 'Count_' prefix from columns; tag index levels as '<v>_Count'.
    for i in dum_vars_sum_stats.columns:
        dum_vars_sum_stats.rename(columns={i: i.lstrip('Count').lstrip('_')}, inplace=True)
    for i in dum_vars_sum_stats.index:
        dum_vars_sum_stats.rename(index={i: str(i) + '_Count'}, inplace=True)
    dum_vars_sum_stats = dum_vars_sum_stats.T
    cd_sum_stats = con_and_dum_vars_stats.join(dum_vars_sum_stats, how='left')
    # ---- Categorical Variables Count -----------------------------------------------
    cat_stats_dict = dict.fromkeys(cat_vars)
    for i in cat_vars:
        cat_vars_df['Count'+i] = 0
        df = cat_vars_df[[i, 'Count'+i]].groupby(i).count()
        if 'minInstalls' in i:
            # Ordered install tiers: keep the natural category order.
            df.sort_index(inplace=True)
        else:
            df.sort_values(by='Count'+i, ascending=False, inplace=True)
        if add_sum_row_col is True:
            df = self.add_sum_row(df)
        cat_stats_dict[i] = df
    # ----- Dummy by Dummy and Dummy by Category --------------------------------------
    dummy_cat_dfs = []
    for i in dum_vars:
        sub_dummy_cat_dfs = []
        for j in cat_vars:
            df = new_df[[i, j]]
            df2 = pd.crosstab(df[i], df[j])
            df2.columns = [str(c) + '_' + the_panel for c in df2.columns]
            sub_dummy_cat_dfs.append(df2)
        df3 = functools.reduce(lambda a, b: a.join(b, how='inner'), sub_dummy_cat_dfs)
        df3.index = [i + '_' + str(j) for j in df3.index]
        dummy_cat_dfs.append(df3)
    dummy_cat_cocat_df = functools.reduce(lambda a, b: pd.concat([a,b], join='inner'), dummy_cat_dfs)
    if add_sum_row_col is True:
        dummy_cat_cocat_df = self.add_sum_col(dummy_cat_cocat_df)
    # ----- Categorical Var by Categorical Var ----------------------------------------
    # Assumes exactly two categorical variables are of interest here.
    i, j = cat_vars[0], cat_vars[1]
    df = new_df[[i, j]]
    cc_df = pd.crosstab(df[i], df[j])
    cc_df.columns = [str(c) + '_' + the_panel for c in cc_df.columns]
    cc_df.index = [str(c) + '_' + the_panel for c in cc_df.index]
    if add_sum_row_col is True:
        cc_df = self.add_sum_row(cc_df)
        cc_df = self.add_sum_col(cc_df)
    # ----- Continuous Variables by Dummy ---------------------------------------------
    continuous_by_dummies = dict.fromkeys(con_vars)
    for i in con_vars:
        sub_groupby_dfs = []
        for j in dum_vars:
            df = new_df[[i, j]]
            agg_func_math = {i:['mean', 'std', 'min', 'median', 'max', 'count']}
            df2 = df.groupby([j]).agg(agg_func_math, axis=0)
            df2.columns = ['mean', 'std', 'min', 'median', 'max', 'count']
            sub_groupby_dfs.append(df2)
        df3 = functools.reduce(lambda a, b: pd.concat([a, b], join='inner'), sub_groupby_dfs)
        # NOTE(review): 'j' here is the last dummy from the inner loop, so
        # every stacked row is prefixed with that one name — confirm intended.
        df3.index = [j + '_' + str(z) for z in df3.index]
        continuous_by_dummies[i] = df3
    # ----- Continuous Variables by Categorical ---------------------------------------
    groupby_cat_dfs = dict.fromkeys(cat_vars)
    for i in cat_vars:
        sub_groupby_dfs = []
        for j in con_vars:
            df = new_df[[i, j]]
            agg_func_math = {j:['mean', 'std', 'min', 'median', 'max', 'count']}
            df2 = df.groupby([i]).agg(agg_func_math, axis=0)
            sub_groupby_dfs.append(df2)
        df3 = functools.reduce(lambda a, b: a.join(b, how='inner'), sub_groupby_dfs)
        df3.index = [str(c) + '_' + the_panel for c in df3.index]
        groupby_cat_dfs[i] = df3
    # ----- Update Instance Attributes ------------------------------------------------
    self.descriptive_stats_tables = {'continuous_vars_stats': con_vars_sum_stats,
                                     'dummy_vars_stats': dum_vars_sum_stats,
                                     'continuous_and_dummy_vars_stats': cd_sum_stats,
                                     'categorical_vars_count': cat_stats_dict,
                                     'crosstab_dummy_categorical_vars': dummy_cat_cocat_df,
                                     'crosstab_two_categorical_vars': cc_df,
                                     'continuous_vars_by_dummies': continuous_by_dummies,
                                     'continuous_vars_by_categorical': groupby_cat_dfs}
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
def customize_and_output_descriptive_stats_pandas_to_latex(self, the_panel):
    """
    :param df_dict: self.descriptive_stats_tables, the output of self.descriptive_stats_for_single_panel()
    since 'continuous_and_dummy_vars_stats' already included all the variables of interest to show their summary stats
    so I will not select more variables.
    :return: a fresh reg_preparation instance; writes the formatted table
             to '<initial_panel>_descriptive_stats_for_<the_panel>.tex'.
    """
    df2 = self.descriptive_stats_tables['continuous_and_dummy_vars_stats'].copy(deep=True)
    # -------------- round -------------------------------------------------------------
    for i in df2.columns:
        if i not in ['1_Count', '0_Count', 'count']:
            df2[i] = df2[i].astype(float).round(decimals=2)
    # for i in df2.columns:
    #     if i in ['DaysSinceReleased', 'reviews_'+the_panel]:
    #         df2[i] = df2[i].apply(lambda x: int(x) if not math.isnan(x) else x)
    # df2 = df2.T
    # -------------- adjust the order of rows and columns to display --------------------
    def set_row_order(x, the_panel):
        # Strip the panel suffix, then look up the display position.
        # NOTE(review): str.rstrip strips a SET of characters, not a suffix,
        # so variable names ending in characters of the_panel could be
        # over-trimmed — confirm with actual panel/variable names.
        if the_panel in x:
            x = x.rstrip(the_panel).rstrip('_')
        for k in reg_preparation.descriptive_stats_table_row_order.keys():
            if k == x:
                return reg_preparation.descriptive_stats_table_row_order[k]
    df2 = df2.reset_index()
    df2.rename(columns={'index': 'Variable'}, inplace=True)
    df2['row_order'] = df2['Variable'].apply(lambda x: set_row_order(x, the_panel))
    df2.sort_values(by='row_order', inplace=True)
    df2.set_index('Variable', inplace=True)
    df2.drop(['row_order'], axis=1, inplace=True)
    df2 = df2[['mean', 'std', 'min', 'median', 'max', '1_Count', '0_Count', 'count']]
    # -------------- change row and column names ---------------------------------------
    for i in df2.columns:
        for j in reg_preparation.descriptive_stats_table_column_map.keys():
            if j == i:
                df2.rename(columns={i: reg_preparation.descriptive_stats_table_column_map[j]}, inplace=True)
    def set_row_names(x, the_panel):
        # Same suffix-stripping caveat as set_row_order above.
        if the_panel in x:
            x = x.rstrip(the_panel).rstrip('_')
        for j in reg_preparation.var_latex_map.keys():
            if j == x:
                return reg_preparation.var_latex_map[j]
    df2 = df2.reset_index()
    df2['Variable'] = df2['Variable'].apply(lambda x: set_row_names(x, the_panel))
    df2 = df2.set_index('Variable')
    # -------------- convert to latex --------------------------------------------------
    filename = self.initial_panel + '_descriptive_stats_for_' + the_panel + '.tex'
    df3 = df2.to_latex(buf=reg_preparation.descriptive_stats_tables / filename,
                       multirow=True,
                       multicolumn=True,
                       caption=('Descriptive Statistics of Key Variables'),
                       position='h!',
                       label='table:2',
                       na_rep='',
                       escape=False)
    # Rebuild the instance (fluent style used throughout this class).
    return reg_preparation(initial_panel=self.initial_panel,
                           all_panels=self.all_panels,
                           tcn=self.tcn,
                           subsample_names=self.ssnames,
                           df=self.df,
                           text_label_df=self.text_label_df,
                           combined_df=self.cdf,
                           broad_niche_cutoff=self.broad_niche_cutoff,
                           nicheDummy_labels=self.nicheDummy_labels,
                           long_cdf=self.long_cdf,
                           individual_dummies_df=self.i_dummies_df)
| 55.812227
| 230
| 0.514592
|
4a024f308ea8d0349f56675a715bd7b6c269ca59
| 1,629
|
py
|
Python
|
tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_get_instance_async.py
|
BenRKarl/gapic-generator-python
|
e4f92bd988a5b955ede88a9a10163010aae825f1
|
[
"Apache-2.0"
] | 86
|
2018-09-28T11:46:15.000Z
|
2022-03-27T19:25:09.000Z
|
tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_get_instance_async.py
|
BenRKarl/gapic-generator-python
|
e4f92bd988a5b955ede88a9a10163010aae825f1
|
[
"Apache-2.0"
] | 1,054
|
2018-04-19T18:35:05.000Z
|
2022-03-30T14:12:38.000Z
|
tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_get_instance_async.py
|
BenRKarl/gapic-generator-python
|
e4f92bd988a5b955ede88a9a10163010aae825f1
|
[
"Apache-2.0"
] | 47
|
2018-04-26T22:08:56.000Z
|
2022-03-22T22:18:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-redis
# [START redis_generated_redis_v1_CloudRedis_GetInstance_async]
from google.cloud import redis_v1
async def sample_get_instance():
    """Snippet for get_instance"""
    # Instantiate the asynchronous Cloud Redis client.
    client = redis_v1.CloudRedisAsyncClient()

    # Assemble the fully-qualified instance resource name.
    project = "my-project-id"
    location = "us-central1"
    instance = "instance_value"
    request = redis_v1.GetInstanceRequest(
        name=f"projects/{project}/locations/{location}/instances/{instance}",
    )

    # Issue the RPC and print the resulting Instance.
    response = await client.get_instance(request=request)
    print(response)
# [END redis_generated_redis_v1_CloudRedis_GetInstance_async]
| 30.735849
| 85
| 0.744015
|
4a024f76a8afec5024fb2106e3967f9bd0e73229
| 3,694
|
py
|
Python
|
tests/markup/test_tag_forcing.py
|
ReimarBauer/flatland
|
aeb081f5000a315e08dfed6f2bb9ad166e74f4b1
|
[
"MIT"
] | 7
|
2018-10-29T21:17:26.000Z
|
2020-03-12T21:32:26.000Z
|
tests/markup/test_tag_forcing.py
|
ReimarBauer/flatland
|
aeb081f5000a315e08dfed6f2bb9ad166e74f4b1
|
[
"MIT"
] | 35
|
2018-10-29T21:37:30.000Z
|
2021-05-13T07:57:52.000Z
|
tests/markup/test_tag_forcing.py
|
ReimarBauer/flatland
|
aeb081f5000a315e08dfed6f2bb9ad166e74f4b1
|
[
"MIT"
] | 6
|
2018-10-29T21:38:40.000Z
|
2020-05-25T08:51:13.000Z
|
from flatland import String
from tests.markup._util import desired_output
# Shared schema: one String element named u'element' defaulting to u'val'.
schema = String.named(u'element').using(default=u'val').from_defaults
# NOTE(review): in each trio below the docstrings are functional data, not
# documentation — the @desired_output function's docstring appears to hold the
# expected rendered markup, the .genshi variant's docstring the Genshi template
# input, and the .markup variant builds the equivalent tag programmatically.
# Consumed by desired_output in tests.markup._util — confirm there.
### value
# auto-value: emit the element's value as the tag's "value" attribute.
@desired_output(u'html', schema)
def value_bound():
    """<div value="val"></div>"""
@value_bound.genshi
def test_value_bound_genshi():
    """<div form:bind="form" form:auto-value="on" />"""
@value_bound.markup
def test_value_bound_markup(gen, el):
    return gen.tag(u'div', el, auto_value=True)
# Unbound (no element): auto-value must emit nothing.
@desired_output(u'html', None)
def value_unbound():
    """<div></div>"""
@value_unbound.genshi
def test_value_unbound_genshi():
    """<div form:auto-value="on" />"""
@value_unbound.markup
def test_value_unbound_markup(gen, el):
    return gen.tag(u'div', auto_value=True)
### name
# auto-name: emit the element's name as the tag's "name" attribute.
@desired_output(u'html', schema)
def name_bound():
    """<div name="element"></div>"""
@name_bound.genshi
def test_name_bound_genshi():
    """<div form:bind="form" form:auto-name="on" />"""
@name_bound.markup
def test_name_bound_markup(gen, el):
    return gen.tag(u'div', el, auto_name=True)
# Unbound: auto-name must emit nothing.
@desired_output(u'html', None)
def name_unbound():
    """<div></div>"""
@name_unbound.genshi
def test_name_unbound_genshi():
    """<div form:auto-name="on" />"""
@name_unbound.markup
def test_name_unbound_markup(gen, el):
    return gen.tag(u'div', auto_name=True)
### domid
# auto-domid: emit a DOM id derived from the element name ("f_" prefix).
@desired_output(u'html', schema)
def domid_bound():
    """<div id="f_element"></div>"""
@domid_bound.genshi
def test_domid_bound_genshi():
    """<div form:bind="form" form:auto-domid="on" />"""
@domid_bound.markup
def test_domid_bound_markup(gen, el):
    return gen.tag(u'div', el, auto_domid=True)
# Unbound: auto-domid must emit nothing.
@desired_output(u'html', None)
def domid_unbound():
    """<div></div>"""
@domid_unbound.genshi
def test_domid_unbound_genshi():
    """<div form:auto-domid="on" />"""
@domid_unbound.markup
def test_domid_unbound_markup(gen, el):
    return gen.tag(u'div', auto_domid=True)
### for
# auto-for: emit a "for" attribute referencing the element's DOM id.
@desired_output(u'html', schema)
def for_bound():
    """<div for="f_element"></div>"""
@for_bound.genshi
def test_for_bound_genshi():
    """<div form:bind="form" form:auto-for="on" />"""
@for_bound.markup
def test_for_bound_markup(gen, el):
    return gen.tag(u'div', el, auto_for=True)
# Unbound: auto-for must emit nothing.
@desired_output(u'html', None)
def for_unbound():
    """<div></div>"""
@for_unbound.genshi
def test_for_unbound_genshi():
    """<div form:auto-for="on" />"""
@for_unbound.markup
def test_for_unbound_markup(gen, el):
    return gen.tag(u'div', auto_for=True)
### tabindex
# auto-tabindex: emit the tabindex previously set on the generator; unlike the
# cases above this works both bound and unbound.
@desired_output(u'html', schema)
def tabindex_bound():
    """<div tabindex="1"></div>"""
@tabindex_bound.genshi
def test_tabindex_bound_genshi():
    """
    <form:set tabindex="1"/>
    <div form:bind="form" form:auto-tabindex="on" />
    """
@tabindex_bound.markup
def test_tabindex_bound_markup(gen, el):
    gen.set(tabindex=1)
    return gen.tag(u'div', el, auto_tabindex=True)
@desired_output(u'html', None)
def tabindex_unbound():
    """<div tabindex="1"></div>"""
@tabindex_unbound.genshi
def test_tabindex_unbound_genshi():
    """
    <form:set tabindex="1"/>
    <div form:auto-tabindex="on" />
    """
@tabindex_unbound.markup
def test_tabindex_unbound_markup(gen, el):
    gen.set(tabindex=1)
    return gen.tag(u'div', auto_tabindex=True)
### combo
# Combining forced attributes: unbound auto-domid contributes nothing, so only
# the tabindex appears.
@desired_output(u'html', schema)
def combo_unbound():
    """<div tabindex="1"></div>"""
@combo_unbound.genshi
def test_combo_unbound_genshi():
    """
    <form:set tabindex="1"/>
    <div form:auto-tabindex="on" form:auto-domid="on" />
    """
@combo_unbound.markup
def test_combo_unbound_markup(gen, el):
    gen.set(tabindex=1)
    return gen.tag(u'div', auto_tabindex=True, auto_domid=True)
| 18.47
| 69
| 0.673795
|
4a025194923fc605313b1b0db0a8366b2bea3740
| 5,514
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
coinwebfactory/aiascoin
|
c8741cad5264a2d4c0bbca7813c4f4ad390915ae
|
[
"MIT"
] | 1
|
2021-02-26T00:23:51.000Z
|
2021-02-26T00:23:51.000Z
|
contrib/seeds/makeseeds.py
|
coinwebfactory/aiascoin
|
c8741cad5264a2d4c0bbca7813c4f4ad390915ae
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
coinwebfactory/aiascoin
|
c8741cad5264a2d4c0bbca7813c4f4ad390915ae
|
[
"MIT"
] | 4
|
2018-07-30T04:58:29.000Z
|
2020-05-26T17:54:59.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/AiasCore:2.2.(0|1|99)/)$")

def parseline(line):
    """Parse one line of seeder output into a node-stats dict.

    Returns a dict with keys net/ip/port/ipnum/uptime/lastsuccess/version/
    agent/service/blocks/sortkey, or None when the line is malformed or the
    node is flagged bad.
    """
    sline = line.split()
    # A usable line carries at least 12 whitespace-separated fields: fields
    # 0-10 are address/stats, field 11 (and possibly 12) the quoted agent.
    # Bug fix: the old minimum of 11 let through agent-less lines, which
    # then crashed on sline[11] below.
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.
    # Bug fix: sline[1] is a string, so the old "sline[1] == 0" compared
    # str to int, was always False, and never skipped anything.
    if sline[1] == "0":
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent. The agent is quoted; an agent containing a space
    # is split into two tokens by split() above and must be re-joined.
    # Bug fix: the old test "len(sline) > 11" routed 12-token lines into the
    # two-token branch, raising IndexError on sline[12].
    if len(sline) > 12:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Group entries by their sort key (IP number / address string); any key
    # seen more than once is dropped entirely.
    grouped = collections.defaultdict(list)
    for entry in ips:
        grouped[entry['sortkey']].append(entry)
    return [entries[0] for entries in grouped.values() if len(entries) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Cap IPv4 results per ASN and overall.

    IPv4 entries are limited to max_per_asn per autonomous system (resolved
    via Team Cymru's origin.asn.cymru.com DNS TXT service) and max_total in
    total; IPv6 and onion entries are appended unfiltered.
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets to build the Cymru lookup name, then parse
            # the ASN out of the first TXT answer ("ASN | prefix | ...").
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed SystemExit
            # and KeyboardInterrupt; only lookup/parse failures should skip.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump lines from stdin, filter them, and print one
    host:port seed address per line on stdout."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries without a valid address (parseline returned None).
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1 (NODE_NETWORK).
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent (spaces normalized to dashes
    # before matching against PATTERN_AGENT).
    ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            # IPv6 addresses need brackets around the host part.
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.05814
| 186
| 0.566921
|
4a02532273734ab06cc6d3e0c68064319dffad7b
| 15,119
|
py
|
Python
|
nipype/external/cloghandler.py
|
demianw/nipype
|
52d64c30d96ecd94f1833156e28dce32c4f05ebe
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/external/cloghandler.py
|
demianw/nipype
|
52d64c30d96ecd94f1833156e28dce32c4f05ebe
|
[
"BSD-3-Clause"
] | 2
|
2017-10-05T21:08:38.000Z
|
2018-10-09T23:01:23.000Z
|
nipype/external/cloghandler.py
|
effigies/nipype
|
18fe222557cf3b9627e06b2a66fba589feaca581
|
[
"Apache-2.0"
] | 1
|
2016-10-11T19:18:53.000Z
|
2016-10-11T19:18:53.000Z
|
# Copyright 2008 Lowell Alleman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the python standard log handler 'RotateFileHandler', the primary
difference being that this handler will continue to write to the same file if
the file cannot be rotated for some reason, whereas the RotatingFileHandler will
strictly adhere to the maximum file size. Unfortunately, if you are using the
RotatingFileHandler on Windows, you will find that once an attempted rotation
fails, all subsequent log messages are dropped. The other major advantage of
this module is that multiple processes can safely write to a single log file.
To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?
Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you may
also run into a temporary situation where the log file must be opened and closed
for each log message.) This can have potentially performance implications. In my
testing, performance was more than adequate, but if you need a high-volume or
low-latency solution, I suggest you look elsewhere.
This module currently only support the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing, patches are welcome.
See the README file for an example usage of this module.
"""
from builtins import range
__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $"
__author__ = "Lowell Alleman"
__all__ = [
"ConcurrentRotatingFileHandler",
]
import os
import sys
from random import randint
from logging import Handler
from logging.handlers import BaseRotatingHandler
try:
import codecs
except ImportError:
codecs = None
# Question/TODO: Should we have a fallback mode if we can't load portalocker /
# we should still be better off than with the standard RotattingFileHandler
# class, right? We do some rename checking... that should prevent some file
# clobbering that the builtin class allows.
# sibling module than handles all the ugly platform-specific details of file locking
from .portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
# A client can set this to true to automatically convert relative paths to
# absolute paths (which will also hide the absolute path warnings)
FORCE_ABSOLUTE_PATH = False
class ConcurrentRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file to the
    next when the current file reaches a certain size. Multiple processes can
    write to the log file concurrently, but this may mean that the file will
    exceed the given size.
    """

    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, debug=True, supress_abs_warn=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.

        On Windows, it is not possible to rename a file that is currently opened
        by another process. This means that it is not possible to rotate the
        log files if multiple processes is using the same log file. In this
        case, the current log file will continue to grow until the rotation can
        be completed successfully. In order for rotation to be possible, all of
        the other processes need to close the file first. A mechanism, called
        "degraded" mode, has been created for this scenario. In degraded mode,
        the log file is closed after each log message is written. So once all
        processes have entered degraded mode, the next rotate log attempt should
        be successful and then normal logging can be resumed.

        This log handler assumes that all concurrent processes logging to a
        single file will are using only this class, and that the exact same
        parameters are provided to each instance of this class. If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior is
        expected. The same is true if this class is used by one application, but
        the RotatingFileHandler is used by another.

        NOTE: You should always provide 'filename' as an absolute path, since
        this class will need to re-open the file during rotation. If your
        application call os.chdir() then subsequent log files could be created
        in the wrong directory.
        """
        # The question of absolute paths: I'm not sure what the 'right thing' is
        # to do here. RotatingFileHander simply ignores this possibility. I was
        # going call os.path.abspath(), but that potentially limits uses. For
        # example, on Linux (any posix system?) you can rename a directory of a
        # running app, and the app wouldn't notice as long as it only opens new
        # files using relative paths. But since that's not a "normal" thing to
        # do, and having an app call os.chdir() is a much more likely scenario
        # that should be supported. For the moment, we are just going to warn
        # the user if they provide a relative path and do some other voodoo
        # logic that you'll just have to review for yourself.

        # if the given filename contains no path, we make an absolute path
        if not os.path.isabs(filename):
            if FORCE_ABSOLUTE_PATH or \
               not os.path.split(filename)[0]:
                filename = os.path.abspath(filename)
            elif not supress_abs_warn:
                from warnings import warn
                warn("The given 'filename' should be an absolute path. If your "
                     "application calls os.chdir(), your logs may get messed up. "
                     "Use 'supress_abs_warn=True' to hide this message.")
        try:
            BaseRotatingHandler.__init__(self, filename, mode, encoding)
        except TypeError:  # Due to a different logging release without encoding support (Python 2.4.1 and earlier?)
            BaseRotatingHandler.__init__(self, filename, mode)
            self.encoding = encoding

        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.)
        if filename.endswith(".log"):
            lock_file = filename[:-4]
        else:
            lock_file = filename
        self.stream_lock = open(lock_file + ".lock", "w")

        # For debug mode, swap out the "_degrade()" method with a more a verbose one.
        if debug:
            self._degrade = self._degrade_debug

    def _openFile(self, mode):
        # (Re)open the base log file, honoring the configured encoding if any.
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, mode, self.encoding)
        else:
            self.stream = open(self.baseFilename, mode)

    def acquire(self):
        """ Acquire thread and file locks. Also re-opening log file when running
        in 'degraded' mode. """
        # handle thread lock
        Handler.acquire(self)
        lock(self.stream_lock, LOCK_EX)
        if self.stream.closed:
            self._openFile(self.mode)

    def release(self):
        """ Release file and thread locks. Flush stream and take care of closing
        stream in 'degraded' mode. """
        try:
            if not self.stream.closed:
                self.stream.flush()
                if self._rotateFailed:
                    self.stream.close()
        except IOError:
            if self._rotateFailed:
                self.stream.close()
        finally:
            try:
                unlock(self.stream_lock)
            finally:
                # release thread lock
                Handler.release(self)

    def close(self):
        """
        Closes the log stream and the lock-file handle.
        """
        if not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        # Bug fix: the lock-file handle opened in __init__ was never closed,
        # leaking one file descriptor per handler instance.
        if not self.stream_lock.closed:
            self.stream_lock.close()
        Handler.close(self)

    def flush(self):
        """ flush(): Do nothing.

        Since a flush is issued in release(), we don't do it here. To do a flush
        here, it would be necessary to re-lock everything, and it is just easier
        and cleaner to do it all in release(), rather than requiring two lock
        ops per handle() call.

        Doing a flush() here would also introduces a window of opportunity for
        another process to write to the log file in between calling
        stream.write() and stream.flush(), which seems like a bad thing. """
        pass

    def _degrade(self, degrade, msg, *args):
        """ Set degrade mode or not. Ignore msg. """
        self._rotateFailed = degrade
        del msg, args  # avoid pychecker warnings

    def _degrade_debug(self, degrade, msg, *args):
        """ A more colorful version of _degade(). (This is enabled by passing
        "debug=True" at initialization).
        """
        if degrade:
            if not self._rotateFailed:
                sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = True
        else:
            if self._rotateFailed:
                sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = False

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.backupCount <= 0:
            # Don't keep any backups, just overwrite the existing backup file
            # Locking doesn't much matter here; since we are overwriting it anyway
            self.stream.close()
            self._openFile("w")
            return
        self.stream.close()
        try:
            # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
            tmpname = None
            while not tmpname or os.path.exists(tmpname):
                tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0, 99999999))
            try:
                # Do a rename test to determine if we can successfully rename the log file
                os.rename(self.baseFilename, tmpname)
            except (IOError, OSError):
                exc_value = sys.exc_info()[1]
                self._degrade(True, "rename failed. File in use? "
                              "exception=%s", exc_value)
                return
            # Q: Is there some way to protect this code from a KeboardInterupt?
            # This isn't necessarily a data loss issue, but it certainly would
            # break the rotation process during my stress testing.

            # There is currently no mechanism in place to handle the situation
            # where one of these log files cannot be renamed. (Example, user
            # opens "logfile.3" in notepad)
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    # print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(tmpname, dfn)
            # print "%s -> %s" % (self.baseFilename, dfn)
            self._degrade(False, "Rotation completed")
        finally:
            self._openFile(self.mode)

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        For those that are keeping track. This differs from the standard
        library's RotatingLogHandler class. Because there is no promise to keep
        the file size under maxBytes we ignore the length of the current record.
        """
        del record  # avoid pychecker warnings
        if self._shouldRollover():
            # if some other process already did the rollover we might
            # checked log.1, so we reopen the stream and check again on
            # the right log file
            self.stream.close()
            self._openFile(self.mode)
            return self._shouldRollover()
        return False

    def _shouldRollover(self):
        if self.maxBytes > 0:  # are we rolling over?
            try:
                self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            except IOError:
                return True
            if self.stream.tell() >= self.maxBytes:
                return True
            else:
                self._degrade(False, "Rotation done or not needed at this time")
        return False
# Publish this class to the "logging.handlers" module so that it can be use
# from a logging config file via logging.config.fileConfig().
import logging.handlers
logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
| 45.131343
| 117
| 0.647265
|
4a0253873ad879e7bff11e8d0436c8011b6b20ea
| 24
|
py
|
Python
|
starphish/defaults.py
|
agraubert/starphish-api
|
168406caad185702526c66f7359de182d3c2864b
|
[
"BSD-3-Clause"
] | null | null | null |
starphish/defaults.py
|
agraubert/starphish-api
|
168406caad185702526c66f7359de182d3c2864b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T17:41:19.000Z
|
2021-04-20T17:41:19.000Z
|
starphish/defaults.py
|
agraubert/starphish-api
|
168406caad185702526c66f7359de182d3c2864b
|
[
"BSD-3-Clause"
] | null | null | null |
# Maximum accepted payload size, in bytes.
# NOTE(review): presumably consumed as a web-framework request-size limit
# (e.g. Flask's MAX_CONTENT_LENGTH config) — confirm against the app setup.
MAX_CONTENT_LENGTH=4096
| 12
| 23
| 0.916667
|
4a0254cf9dfa41af18c36c0e66531b69bc4bc0a5
| 42,163
|
py
|
Python
|
code/old_models/variational_inference_DP_topic_ngram_mix_indep_Poisson_distinct_final.py
|
tkc-morita/variational_inference_DP_mix_HDP_topic_ngram
|
95d6c8ab2956501fc82b416bf423ee57fe77c73f
|
[
"MIT"
] | 4
|
2021-03-27T18:28:23.000Z
|
2022-01-10T23:32:29.000Z
|
code/old_models/variational_inference_DP_topic_ngram_mix_indep_Poisson_distinct_final.py
|
stannam/variational_inference_DP_mix_HDP_topic_ngram
|
95d6c8ab2956501fc82b416bf423ee57fe77c73f
|
[
"MIT"
] | null | null | null |
code/old_models/variational_inference_DP_topic_ngram_mix_indep_Poisson_distinct_final.py
|
stannam/variational_inference_DP_mix_HDP_topic_ngram
|
95d6c8ab2956501fc82b416bf423ee57fe77c73f
|
[
"MIT"
] | 1
|
2022-01-10T23:45:54.000Z
|
2022-01-10T23:45:54.000Z
|
# coding: utf-8
import numpy as np
import scipy.special as sps
import scipy.misc as spm
import itertools,sys,datetime,os
from logging import getLogger,FileHandler,DEBUG,Formatter
import pandas as pd
# import warnings
logger = getLogger(__name__)

def update_log_handler(log_path):
    """Reset the module logger to write to VI_DP_ngram.log under log_path.

    Any previously attached handlers are removed first, so repeated calls
    leave exactly one file handler on the logger.
    """
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
    new_handler = FileHandler(filename=os.path.join(log_path, 'VI_DP_ngram.log'))  # Define the handler.
    new_handler.setLevel(DEBUG)
    new_handler.setFormatter(Formatter('%(asctime)s - %(levelname)s - %(message)s'))  # Define the log format.
    logger.setLevel(DEBUG)
    logger.addHandler(new_handler)  # Register the handler for the logger.
    logger.info("Logger (re)set up.")
class VariationalInference(object):
def __init__(
self,
num_sublex,
customers,
n,
T_base,
concent_priors,
dirichlet_concentration,
result_path,
inventory_size=None # without START or END
):
update_log_handler(result_path)
logger.info('DP mixture of words.')
self.n=n
logger.info('The base distribution is INDEPENDENT %i-gram with DP backoff with Poisson length and distinct word-final ngram backoff.' % n)
logger.info('Log files prior to 2017/10/20 have incorrectly state the base distribution is SHARED.')
logger.info('Results prior to 2017/10/20 have wrong columns on Poisson variational parameters: shape and rate are flipped.')
logger.info('Long vowels and geminates are now (from 03/16/2017) treated as independent segments.')
logger.info('Script last updated at %s'
% datetime.datetime.fromtimestamp(
os.stat(sys.argv[0]).st_mtime
).strftime('%Y-%m-%d-%H:%M:%S')
)
if inventory_size is None:
num_symbols = len(set(reduce(lambda x,y: x+y, customers)))
else:
num_symbols = inventory_size
logger.info('# of symbols: %i' % num_symbols)
dirichlet_base_counts = dirichlet_concentration * np.ones(num_symbols)
self.customers = [Word(word, n, id, num_symbols) for id,word in enumerate(customers)]
num_customers = len(customers)
logger.info('# of words: %i' % num_customers)
last_segments = []
non_last_segments = []
for word in self.customers:
last_segments.append(word.ngrams[-1])
non_last_segments += word.ngrams[:-1]
lengths = np.array([len(word) for word in customers])
mean_length = np.mean(lengths)
self.varpar_assignment = np.random.dirichlet(np.ones(num_sublex), num_customers) # phi in Blei and Jordan.
self.varpar_concent =VarParConcent(concent_priors)
self.varpar_stick = np.random.gamma(1,#self.concent_priors[0],
20,#self.concent_priors[1],
size=(num_sublex-1,2)
) # gamma in Blei and Jordan.
self.sum_stick = np.sum(self.varpar_stick, axis=-1)
self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:, np.newaxis]
self.varpar_concent.add_dp(self)
self.num_clusters = num_sublex
logger.info('(max) # of tables for sublexicalization: %i' % num_sublex)
self.hdp_ngram = HDPNgram(
T_base,
n,
concent_priors,
dirichlet_base_counts,
num_sublex,
num_symbols,
last_segments,
non_last_segments,
self
)
self.poisson_length = Poisson_length((mean_length,1), lengths, num_sublex, self)
self.hdp_ngram.set_varpar_assignment()
self._update_word_likelihood()
logger.info('# of tables: %i' % T_base)
logger.info('Gamma priors on concent_priorsration: (%f,%f)'
% (concent_priors[0],concent_priors[1]**-1)
)
logger.info('Base count of top level Dirichlet: %s' % str(dirichlet_base_counts))
self.result_path = result_path
logger.info('Initialization complete.')
def train(self, max_iters, min_increase):
logger.info("Main loop started.")
logger.info("Max iteration is %i." % max_iters)
logger.info("Will be terminated if variational bound is only improved by <=%f." % min_increase)
converged=False
iter_id=0
self.current_var_bound = self.get_var_bound()
while iter_id<max_iters:
iter_id+=1
self.update_varpars()
logger.info("Variational parameters updated (Iteration ID: %i)." % iter_id)
new_var_bound = self.get_var_bound()
improvement = new_var_bound-self.current_var_bound
logger.info("Current var_bound is %0.12f (%+0.12f)." % (new_var_bound,improvement))
if np.isnan(new_var_bound):
raise Exception("nan detected.")
if improvement<0:
logger.error("variational bound decreased. Something wrong.")
raise Exception("variational bound decreased. Something wrong.")
elif improvement<=min_increase:
converged = True
break
else:
self.current_var_bound=new_var_bound
if converged:
logger.info('Converged after %i iterations.' % iter_id)
else:
logger.error('Failed to converge after max iterations.')
logger.info('Final variational bound is %f.' % self.current_var_bound)
def get_log_posterior_pred(self, test_data):
pass
# log_pred_prob_list=[]
# log_stick_weights=(
# np.append(
# np.log(self.varpar_stick[:,0])
# -
# np.log(self.sum_stick)
# ,
# 0
# )
# +
# np.append(
# 0,
# np.cumsum(
# np.log(self.varpar_stick[:,1])
# -
# np.log(self.sum_stick)
# )
# )
# )
# [sublex.set_log_posterior_expectation() for seblex in self.sublex_ngrams]
# for string in test_data:
# test_word = Word(string, self.n, -1)
# log_pred_prob=spm.logsumexp(
# log_stick_weights
# +
# np.array(
# [sublex.get_log_posterior_pred_per_word(test_word)
# for sublex in self.sublex_ngrams
# ]
# )
# )
# log_pred_prob_list.append(log_pred_prob)
# return log_pred_prob_list
	def save_results(self, decoder):
		"""Serialize all fitted variational parameters under self.result_path.

		Writes:
		- variational_parameters.h5: HDF5 store with per-context stick and
		  assignment tables for both HDP n-gram trees ("last" = word-final
		  segments, "non_last" = word-medial), the top-level Dirichlet atoms,
		  the sublexicon-level assignment/stick/concentration tables, and the
		  Poisson length parameters.
		- SubLexica_assignment.csv: per-word sublexicon responsibilities.
		- symbol_coding.csv: integer code -> symbol table.

		decoder: dict mapping integer symbol codes to symbol strings.
		NOTE(review): the last_tree and non_last_tree serialization loops are
		verbatim duplicates up to the tree attribute and the 'last'/'non_last'
		path label -- a strong candidate for extraction into a helper.
		"""
		# NOTE(review): unused local; the commented-out alternative suggests it
		# once produced utf-8 column headers.
		inventory=[('symbol_code_%i' % code) for code in decoder.keys()]#[symbol.encode('utf-8') for symbol in decoder.values()]
		# Shared HDP ngram
		with pd.HDFStore(
				os.path.join(self.result_path,'variational_parameters.h5')
				) as hdf5_store:
			df_concent = pd.DataFrame(columns = ['shape', 'rate', 'DP_name'])
			# Word-final ("last") tree: one concentration row per
			# (context length, sublexicon), then per-context tables.
			for context_length,level in self.hdp_ngram.last_tree.iteritems():
				vpc = self.hdp_ngram.varpar_concents[context_length]
				df_concent_sub = pd.DataFrame(
						vpc.rate[:,np.newaxis]
						,
						columns=['rate']
						)
				df_concent_sub['shape'] = vpc.shape
				df_concent_sub['DP_name'] = [('%igram_%i' % (context_length+1,sublex_id))
												for sublex_id
												in xrange(self.num_clusters)
											]
				df_concent = df_concent.append(df_concent_sub, ignore_index=True)
				for context,rst in level.iteritems():
					coded_context = '_'.join(map(str,context))#.encode('utf-8')
					if context_length != self.n-1:
						# Non-leaf DPs carry explicit table-to-cluster
						# responsibilities; flatten the 4-D array into long
						# format with index columns reconstructed by
						# repeat/tile over the original shape.
						children_contexts = pd.Series(['_'.join([str(code) for code in child_dp.context])
														for child_dp in rst.children])
						df_assignment = pd.DataFrame(
								rst.varpar_assignment.flatten()[:,np.newaxis]
								,
								columns=["p"]
								)
						df_assignment['children_DP_context']=children_contexts.iloc[
								np.repeat(
									np.arange(rst.varpar_assignment.shape[0])
									,
									np.prod(
										rst.varpar_assignment.shape[1:]
										)
									)
								].reset_index(drop=True)
						df_assignment['sublex_id'] = np.tile(
								np.repeat(
									np.arange(rst.varpar_assignment.shape[1])
									,
									np.prod(rst.varpar_assignment.shape[2:])
									)
								,
								rst.varpar_assignment.shape[0]
								)
						df_assignment['children_cluster_id']=np.tile(
								np.repeat(
									np.arange(rst.varpar_assignment.shape[2])
									,
									rst.varpar_assignment.shape[3]
									)
								,
								np.prod(rst.varpar_assignment.shape[:2])
								)
						df_assignment['cluster_id']=np.tile(
								np.arange(rst.varpar_assignment.shape[3])
								,
								np.prod(rst.varpar_assignment.shape[:-1])
								)
						hdf5_store.put(
							("sublex/last/_%igram/context_%s/assignment"
								% (context_length+1,coded_context))
							,
							df_assignment
							,
							# encoding="utf-8"
							)
					num_sublex = rst.varpar_stick.shape[0]
					num_clusters = rst.varpar_stick.shape[1]
					df_stick=pd.DataFrame(rst.varpar_stick.reshape(
								num_sublex*num_clusters
								,
								2
								),
								columns=('beta_par1','beta_par2')
								)
					df_stick['sublex_id'] = np.repeat(np.arange(num_sublex), num_clusters)
					df_stick['cluster_id'] = np.tile(np.arange(num_clusters), num_sublex)
					hdf5_store.put(
						("sublex/last/_%igram/context_%s/stick"
							% (context_length+1,coded_context))
						,
						df_stick
						,
						# encoding="utf-8"
						)
			# Word-medial ("non_last") tree: identical serialization.
			for context_length,level in self.hdp_ngram.non_last_tree.iteritems():
				vpc = self.hdp_ngram.varpar_concents[context_length]
				df_concent_sub = pd.DataFrame(
						vpc.rate[:,np.newaxis]
						,
						columns=['rate']
						)
				df_concent_sub['shape'] = vpc.shape
				df_concent_sub['DP_name'] = [('%igram_%i' % (context_length+1,sublex_id))
												for sublex_id
												in xrange(self.num_clusters)
											]
				df_concent = df_concent.append(df_concent_sub, ignore_index=True)
				for context,rst in level.iteritems():
					coded_context = '_'.join(map(str,context))#.encode('utf-8')
					if context_length != self.n-1:
						children_contexts = pd.Series(['_'.join([str(code) for code in child_dp.context])
														for child_dp in rst.children])
						df_assignment = pd.DataFrame(
								rst.varpar_assignment.flatten()[:,np.newaxis]
								,
								columns=["p"]
								)
						df_assignment['children_DP_context']=children_contexts.iloc[
								np.repeat(
									np.arange(rst.varpar_assignment.shape[0])
									,
									np.prod(
										rst.varpar_assignment.shape[1:]
										)
									)
								].reset_index(drop=True)
						df_assignment['sublex_id'] = np.tile(
								np.repeat(
									np.arange(rst.varpar_assignment.shape[1])
									,
									np.prod(rst.varpar_assignment.shape[2:])
									)
								,
								rst.varpar_assignment.shape[0]
								)
						df_assignment['children_cluster_id']=np.tile(
								np.repeat(
									np.arange(rst.varpar_assignment.shape[2])
									,
									rst.varpar_assignment.shape[3]
									)
								,
								np.prod(rst.varpar_assignment.shape[:2])
								)
						df_assignment['cluster_id']=np.tile(
								np.arange(rst.varpar_assignment.shape[3])
								,
								np.prod(rst.varpar_assignment.shape[:-1])
								)
						hdf5_store.put(
							("sublex/non_last/_%igram/context_%s/assignment"
								% (context_length+1,coded_context))
							,
							df_assignment
							,
							# encoding="utf-8"
							)
					num_sublex = rst.varpar_stick.shape[0]
					num_clusters = rst.varpar_stick.shape[1]
					df_stick=pd.DataFrame(rst.varpar_stick.reshape(
								num_sublex*num_clusters
								,
								2
								),
								columns=('beta_par1','beta_par2')
								)
					df_stick['sublex_id'] = np.repeat(np.arange(num_sublex), num_clusters)
					df_stick['cluster_id'] = np.tile(np.arange(num_clusters), num_sublex)
					hdf5_store.put(
						("sublex/non_last/_%igram/context_%s/stick"
							% (context_length+1,coded_context))
						,
						df_stick
						,
						# encoding="utf-8"
						)
			# Top-level Dirichlet atoms (empty-context DP_top) of the last tree.
			df_atom = pd.DataFrame(
						self.hdp_ngram.last_tree[0][()].varpar_atom.flatten()[:,np.newaxis],
						columns=['dirichlet_par']
						)
			num_sublex,num_clusters,num_symbols = self.hdp_ngram.last_tree[0][()].varpar_atom.shape
			df_atom['sublex_id']=np.repeat(
								np.arange(num_sublex)
								,
								num_clusters*num_symbols
								)
			df_atom['cluster_id'] = np.tile(
								np.repeat(
									np.arange(num_clusters),
									num_symbols
									)
								,
								num_sublex
								)
			df_atom['value']=pd.Series(
								np.tile(
									np.arange(num_symbols)
									,
									num_sublex*num_clusters
									)
								)
			hdf5_store.put(
				'sublex/last/_1gram/context_/atom'
				,
				df_atom
				)
			# Top-level Dirichlet atoms of the non_last tree.
			df_atom = pd.DataFrame(
						self.hdp_ngram.non_last_tree[0][()].varpar_atom.flatten()[:,np.newaxis],
						columns=['dirichlet_par']
						)
			num_sublex,num_clusters,num_symbols = self.hdp_ngram.non_last_tree[0][()].varpar_atom.shape
			df_atom['sublex_id']=np.repeat(
								np.arange(num_sublex)
								,
								num_clusters*num_symbols
								)
			df_atom['cluster_id'] = np.tile(
								np.repeat(
									np.arange(num_clusters),
									num_symbols
									)
								,
								num_sublex
								)
			df_atom['value']=pd.Series(
								np.tile(
									np.arange(num_symbols)
									,
									num_sublex*num_clusters
									)
								)
			hdf5_store.put(
				'sublex/non_last/_1gram/context_/atom'
				,
				df_atom
				)
			# Sublex
			# Word-level sublexicon responsibilities, also exported as CSV.
			df_assignment_sl = pd.DataFrame(
					self.varpar_assignment
					,
					columns=[("sublex_%i" % table_id)
								for table_id in range(self.num_clusters)
							]
					)
			df_assignment_sl['most_probable_sublexicon']=df_assignment_sl.idxmax(axis=1)
			df_assignment_sl['customer_id']=df_assignment_sl.index
			df_assignment_sl.to_csv(os.path.join(self.result_path, "SubLexica_assignment.csv"), index=False, encoding='utf-8')
			hdf5_store.put(
				"sublex/assignment",
				df_assignment_sl,
				# encoding='utf-8'
				)
			df_stick_sl = pd.DataFrame(self.varpar_stick, columns=('beta_par1','beta_par2'))
			df_stick_sl['cluster_id']=df_stick_sl.index
			hdf5_store.put(
				"sublex/stick",
				df_stick_sl,
				)
			# Append the word-level concentration row, then store all rows.
			df_concent = df_concent.append(
							pd.DataFrame(
								[[
								self.varpar_concent.rate,
								self.varpar_concent.shape,
								'word_sublexicalization'
								]]
								,
								columns=['rate', 'shape', 'DP_name']
								)
							)
			hdf5_store.put(
				"sublex/concentration",
				df_concent,
				# encoding='utf-8'
				)
			# Per-sublexicon Gamma parameters of the Poisson length model.
			df_length = pd.DataFrame(self.poisson_length.varpar_length, columns=['shape','rate'])
			df_length['sublex_id'] = df_length.index
			hdf5_store.put(
				"sublex/length",
				df_length,
				)
			# Code -> symbol lookup table for downstream decoding.
			pd.DataFrame(decoder.items(),
					columns=('code','symbol')
					).to_csv(
						os.path.join(
							self.result_path,
							"symbol_coding.csv"
							),
						encoding='utf-8'
						,
						index=False
						)
	def _update_varpar_stick(self):
		"""Update q(V), the Beta posteriors of the sublexicon stick weights.

		Standard stick-breaking updates (Blei & Jordan 2006): the first Beta
		parameter is 1 + the expected count of words in each cluster; the
		second is E[alpha] + the expected count of words in all LATER clusters
		(the reversed slice + cumsum + reverse computes the tail sums).
		Also refreshes the cached sum_stick and E_log_stick.
		"""
		self.varpar_stick[...,0] = np.sum(self.varpar_assignment[...,:-1], axis=0)+1
		self.varpar_stick[...,1] = np.cumsum(
								np.sum(
									self.varpar_assignment[:,:0:-1],
									axis=0
								)
								)[::-1]+self.varpar_concent.mean
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:, np.newaxis]
	def _update_varpar_assignment(self):
		"""Update q(Z), the per-word sublexicon responsibilities.

		log q(z=k) proportional to E[log V_k] + sum_{j<k} E[log(1-V_j)]
		+ E[log p(word | sublex k)]; normalized with logsumexp for stability.
		The two np.append calls pad the K-1 stick terms to length K (the last
		cluster takes the remaining stick mass).
		"""
		log_varpar_assignment = (
			np.append(
				self.E_log_stick[:,0],
				0
				)[np.newaxis,:]
			+np.append(
				0,
				np.cumsum(
					self.E_log_stick[:,1]
					)
				)[np.newaxis,:]
			+
			self.word_likelihood
			)
		self.varpar_assignment=np.exp(log_varpar_assignment-spm.logsumexp(log_varpar_assignment, axis=-1)[:,np.newaxis])
def _update_word_likelihood(self):
self.word_likelihood = (
np.array(
[
word.get_E_log_likelihoods() # Output an array of length num_sublex
for word in self.customers
]
)
+
self.poisson_length.get_log_like()
)
	def update_varpars(self):
		"""One full round of coordinate-ascent updates.

		Order matters: concentration and stick parameters are refreshed first,
		then the n-gram tree and length model, whose likelihoods feed the
		per-word sublexicon assignment update at the end.
		"""
		self.varpar_concent.update()
		self._update_varpar_stick()
		self.hdp_ngram.update_varpars()
		self.poisson_length.update_varpars()
		self._update_word_likelihood()
		self._update_varpar_assignment()
def get_var_bound(self):
"""
Calculate the KL divergence bound based on the current variational parameters.
We ignore the constant terms.
"""
return (
self.varpar_concent.get_var_bound()
+
self.hdp_ngram.get_var_bound()
+
self.poisson_length.get_var_bound()
+
self.get_sum_E_log_p_varpars()
-
self.get_E_log_q_varpar()
)
	def get_sum_E_log_p_varpars(self):
		"""Expected log prior/likelihood terms for the sublexicon-level DP.

		Sums E[log p(V, alpha)], E[log p(Z | V)] (via tail counts of the
		responsibilities, mirroring _update_varpar_stick), and
		E[log p(X | Z, eta)] (responsibility-weighted word likelihoods).
		Constant terms are dropped.
		"""
		return (
			(self.varpar_concent.mean-1)*np.sum(self.E_log_stick[:,1]) # E[alpha-1]*E[log (1-V)]
			+
			np.sum(
				self.E_log_stick[:,1]*np.cumsum(
										np.sum(
											self.varpar_assignment[...,:0:-1]
											,
											axis=0
											)
										)[::-1]
				+
				self.E_log_stick[:,0]*np.sum(self.varpar_assignment[:,:-1], axis=0)
				) # E[log p(Z | V)]
			+
			np.sum(
				self.word_likelihood # num_words x num_sublex
				*
				self.varpar_assignment
				) # E[log p(X | Z,eta)]
			) # E[log p(V, alpha)]
	def get_E_log_q_varpar(self):
		"""Entropy-side terms E[log q(V)] + E[log q(Z)] (constants dropped).

		np.ma.log masks log(0) so zero responsibilities contribute zero
		entropy instead of NaN.
		"""
		return (
			# E[log q(V)] below
			np.sum(self.E_log_stick[:,0]*(self.varpar_stick[:,0]-1))
			+
			np.sum(self.E_log_stick[:,1]*(self.varpar_stick[:,1]-1))
			-
			np.sum(
				sps.gammaln(self.varpar_stick),
				)
			+
			np.sum(sps.gammaln(self.sum_stick))
			+
			np.sum(
				self.varpar_assignment*np.ma.log(self.varpar_assignment)
				) # E[log q(Z)]
			)
class HDPNgram(object):
	"""Hierarchical-DP n-gram model shared across sublexica.

	Builds two parallel trees of DPs: ``last_tree`` over word-final segments
	and ``non_last_tree`` over the remaining segments. Level 0 holds a single
	DP_top with the empty context; level n-1 holds one DP_bottom per observed
	full context; intermediate levels hold one DP per context suffix. Each
	level shares one VarParConcent_sublex concentration posterior.
	"""
	def __init__(
			self,
			num_clusters,
			n,
			concent_priors,
			dirichlet_base_counts,
			num_sublex,
			num_symbols,
			last_segments,
			non_last,
			wrapper):
		self.wrapper=wrapper
		self.n = n
		# Distinct full (length n-1) contexts actually observed in the data.
		last_contexts = set([ngram.context
							for ngram in last_segments
							])
		non_last_contexts = set([ngram.context
							for ngram in non_last
							])
		self.last_tree = {k:{} for k in xrange(n)}
		self.non_last_tree = {k:{} for k in xrange(n)}
		self.varpar_concents = [VarParConcent_sublex(concent_priors, num_sublex) for context_length in xrange(n)]
		# Roots: one DP_top (empty context) per tree.
		self.last_tree[0][()]=DP_top(
								num_clusters,
								0,
								(),
								self.varpar_concents[0],
								dirichlet_base_counts,
								num_sublex,
								num_symbols,
								self
								)
		self.non_last_tree[0][()]=DP_top(
								num_clusters,
								0,
								(),
								self.varpar_concents[0],
								dirichlet_base_counts,
								num_sublex,
								num_symbols,
								self
								)
		# Intermediate levels: one DP per distinct context suffix; each is a
		# customer of the DP for its one-symbol-shorter suffix.
		for context_length, vpc in enumerate(self.varpar_concents[1:-1], start=1):
			for context in set(fc[n-1-context_length:] for fc in last_contexts):
				self.last_tree[context_length][context]\
					= DP(
						num_clusters,
						self.last_tree[context_length-1][context[1:]],
						context_length,
						context,
						vpc,
						num_sublex,
						self
						)
			for context in set(fc[n-1-context_length:] for fc in non_last_contexts):
				self.non_last_tree[context_length][context]\
					= DP(
						num_clusters,
						self.non_last_tree[context_length-1][context[1:]],
						context_length,
						context,
						vpc,
						num_sublex,
						self
						)
		# Bottom level: one leaf DP per observed full context.
		for context in last_contexts:
			self.last_tree[self.n-1][context]\
				= DP_bottom(
					num_clusters,
					self.last_tree[n-2][context[1:]],
					n-1,
					context,
					self.varpar_concents[-1],
					num_sublex,
					num_symbols,
					self
					)
		for context in non_last_contexts:
			self.non_last_tree[self.n-1][context]\
				= DP_bottom(
					num_clusters,
					self.non_last_tree[n-2][context[1:]],
					n-1,
					context,
					self.varpar_concents[-1],
					num_sublex,
					num_symbols,
					self
					)
		# Seat every observed n-gram at the leaf DP of its context.
		[ngram.enter_a_restaurant(self.last_tree[self.n-1][ngram.context])
			for ngram in last_segments
			]
		[ngram.enter_a_restaurant(self.non_last_tree[self.n-1][ngram.context])
			for ngram in non_last
			]
	def set_varpar_assignment(self):
		"""Initialize assignment parameters of every DP in both trees.

		Deferred from __init__ because non-leaf DPs only know their number of
		children once the whole tree is wired up.
		"""
		[restaurant.set_varpar_assignment()
			for level in self.last_tree.itervalues()
			for restaurant in level.itervalues()
			]
		[restaurant.set_varpar_assignment()
			for level in self.non_last_tree.itervalues()
			for restaurant in level.itervalues()
			]
	def update_varpars(self):
		"""Update concentrations, then every DP in both trees (top-down order)."""
		[vpc.update() for vpc in self.varpar_concents]
		[restaurant.update_varpars()
			for level in self.last_tree.itervalues()
			for restaurant in level.itervalues()
			]
		[restaurant.update_varpars()
			for level in self.non_last_tree.itervalues()
			for restaurant in level.itervalues()
			]
	def get_var_bound(self):
		"""Sum of per-DP bound contributions plus the concentration terms.

		NOTE(review): reversed(...values()) relies on the int-keyed level
		dicts iterating 0..n-1 (true in CPython 2 for these small-int keys) so
		leaves refresh their bottom-up messages before parents read them --
		confirm if this is ever ported to Python 3.
		"""
		return (
			np.sum([restaurant.get_var_bound()
					for level in reversed(self.non_last_tree.values())
					for restaurant in level.itervalues()
				])
			+
			np.sum([restaurant.get_var_bound()
					for level in reversed(self.last_tree.values())
					for restaurant in level.itervalues()
				])
			+
			np.sum([vpc.get_var_bound() for vpc in self.varpar_concents])
			)
	def get_word_assignment_weights(self, word_ids):
		"""Wrapper's word-to-sublexicon responsibilities for the given word ids."""
		return self.wrapper.varpar_assignment[word_ids,:]
class VarParConcent(object):
	"""Variational Gamma posterior q(alpha) = Gamma(shape, rate) over the
	concentration parameter shared by one or more stick-breaking DPs.

	priors: (prior shape, prior rate) of the Gamma prior on alpha.
	"""
	def __init__(self, priors):
		self.prior_inv_scale = priors[1]  # prior rate (inverse scale)
		self.dps = []
		self.shape = priors[0]
		# Random initial rate; replaced on the first update().
		self.rate = np.random.gamma(1, 20)

	def add_dp(self, dp):
		"""Register a DP governed by this concentration.

		Each stick contributes one Beta(1, alpha) factor, so the posterior
		shape grows by the DP's number of sticks.
		"""
		self.dps.append(dp)
		self.shape += dp.varpar_stick.shape[0]
		self.mean = self.shape / self.rate

	def update(self):
		"""Refresh the rate from the registered DPs' E[log(1 - V)] terms."""
		sum_E_log_one_minus_v = np.sum([dp.E_log_stick[:, 1] for dp in self.dps])
		self.rate = self.prior_inv_scale - sum_E_log_one_minus_v
		self.mean = self.shape / self.rate

	def get_var_bound(self):
		"""Bound contribution of q(alpha) (constant terms dropped)."""
		return -(self.shape * np.log(self.rate) + self.prior_inv_scale * self.mean)
class VarParConcent_sublex(object):
	"""Per-sublexicon variant of VarParConcent: one Gamma posterior rate per
	sublexicon (the shape is shared), for DPs whose sticks are indexed
	(sublexicon, cluster).
	"""
	def __init__(self, priors, num_sublex):
		self.prior_rate = priors[1]
		self.dps = []
		self.shape = priors[0]
		# Random initial rates (one per sublexicon); replaced on first update().
		self.rate = np.random.gamma(1, 20, size=num_sublex)

	def add_dp(self, dp):
		"""Register a DP; the shared shape grows by its per-sublexicon stick count."""
		self.dps.append(dp)
		self.shape += dp.varpar_stick.shape[1]
		self.mean = self.shape / self.rate

	def update(self):
		"""Refresh per-sublexicon rates from E[log(1 - V)] summed over DPs and sticks."""
		sum_E_log_one_minus_v = np.sum(
			[dp.E_log_stick[..., 1] for dp in self.dps],
			axis=(0, -1),
		)
		self.rate = self.prior_rate - sum_E_log_one_minus_v
		self.mean = self.shape / self.rate

	def get_var_bound(self):
		"""Bound contribution of q(alpha) summed over sublexica (constants dropped)."""
		return -(self.shape * np.sum(np.log(self.rate)) + self.prior_rate * np.sum(self.mean))
class DP_bottom(object):
	"""Leaf-level DP of the HDP n-gram tree: one per full (n-1)-length context.

	Customers are individual observed target symbols. The cluster assignment
	of each symbol is not a free variational parameter: it is computed in
	closed form from log_assignment, and expected counts are weighted by the
	wrapper's word-to-sublexicon responsibilities.
	"""
	def __init__(self, num_clusters, mother, context_length, context, varpar_concent, num_sublex, num_symbols, hdp):
		self.hdp=hdp
		self.mother = mother
		self.context_length=context_length
		self.context = context
		self.num_symbols = num_symbols
		self.varpar_concent = varpar_concent
		# Beta parameters of the sticks: num_sublex x (num_clusters-1) x 2.
		self.varpar_stick = np.random.gamma(1,#10,
										20,#0.1,
										size=(num_sublex,num_clusters-1,2)
										) # gamma in Blei and Jordan.
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:,:, np.newaxis]
		self.varpar_concent.add_dp(self)
		self.num_clusters = num_clusters
		self.new_id = 0
		self.customers = []
		self.word_ids = []
		# Register with the parent DP and remember the child id it issues.
		self.id = self.mother.add_customer(self)
	def add_customer(self, target_value, word_id):
		"""Record one observed target symbol from word word_id; return its id."""
		issued_id = self.new_id
		self.new_id+=1
		self.customers.append(target_value)
		self.word_ids.append(word_id)
		return issued_id
	def set_varpar_assignment(self):
		"""Initialize the closed-form assignment and symbol log likelihoods."""
		self._update_log_assignment()
		self._update_log_like()
	def _update_varpar_stick(self):
		"""Beta stick updates from expected per-cluster customer counts.

		Same tail-count construction as elsewhere: second parameter gets
		E[alpha] plus the reversed cumulative sum of later clusters' counts.
		"""
		self.varpar_stick[...,0] = np.sum(
									self.expected_customer_counts[:,:-1,:] # num_sublex x num_clusters x num_symbols
									,
									axis=-1
									)+1
		self.varpar_stick[...,1] = np.cumsum(
										np.sum(
											self.expected_customer_counts[:,:0:-1,:],
											axis=-1
											)
										,
										axis=1
									)[:,::-1]+self.varpar_concent.mean[:,np.newaxis]
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:, :, np.newaxis]
	def _update_log_assignment(self):
		"""Unnormalized log q(cluster, symbol) per sublexicon.

		Stick log weights (padded to num_clusters via the zero appendix) plus
		the parent's per-cluster symbol log likelihood message.
		"""
		appendix = np.zeros((self.E_log_stick.shape[0],1))
		self.log_assignment=(
			np.append(
				self.E_log_stick[...,0],
				appendix
				,
				axis=1
				)[:,:,np.newaxis]
			+np.append(
				appendix,
				np.cumsum(
					self.E_log_stick[...,1]
					,
					axis=1
					)
				,
				axis=1
				)[:,:,np.newaxis]
			+
			self.mother.log_like_top_down[self.id] # num_sublex x num_clusters x num_symbols
			)
	def _update_log_like(self):
		"""Marginalize the cluster axis: log likelihood per (sublexicon, symbol)."""
		self.log_like = spm.logsumexp(self.log_assignment, axis=1)
	def update_varpars(self):
		"""Coordinate updates for this leaf: sticks, then assignment, then likelihood."""
		self._update_varpar_stick()
		self._update_log_assignment()
		self._update_log_like()
	def _update_expected_customer_counts(self):
		"""Expected counts per (sublexicon, cluster, symbol).

		np.add.at scatter-adds each customer's sublexicon responsibilities
		onto its symbol row; the result is then spread over clusters by the
		normalized assignment probabilities.
		"""
		expected_customers_per_sublex_and_symbol = np.zeros((self.num_symbols, self.varpar_stick.shape[0]))
		np.add.at(
			expected_customers_per_sublex_and_symbol,
			self.customers,
			self.hdp.get_word_assignment_weights(self.word_ids) # data_size x num_sublex
			)
		self.expected_customer_counts=( # num_sublex x num_clusters x num_symbols
			expected_customers_per_sublex_and_symbol.T[:,np.newaxis,:]
			*
			self._get_assign_probs()
			)
	def _get_assign_probs(self):
		"""Cluster responsibilities, normalized over the cluster axis."""
		return np.exp(
				self.log_assignment # num_sublex x num_clusters x num_symbols
				-
				spm.logsumexp(self.log_assignment, axis=1)[:,np.newaxis,:]
				)
	def _update_log_like_bottom_up(self):
		# For a leaf the upward message is just the expected customer counts.
		self.log_like_bottom_up=self.expected_customer_counts # num_sublex x num_clusters x num_symbols
	def get_var_bound(self):
		"""Bound contribution; also refreshes counts and the upward message."""
		self._update_expected_customer_counts()
		self._update_log_like_bottom_up()
		return (
			self.get_sum_E_log_p_varpars()
			-self.get_E_log_q_varpar()
			)
	def get_sum_E_log_p_varpars(self):
		"""E[log p(V, alpha)] terms (constants dropped).

		NOTE(review): unlike DP.get_sum_E_log_p_varpars there is no explicit
		E[log p(Z | V)] term here -- presumably because leaf assignments are
		marginalized in closed form; confirm against the derivation.
		"""
		return np.sum(
				(self.varpar_concent.mean-1)*np.sum(self.E_log_stick[:,:,1], axis=1) # E[alpha-1]*E[log (1-V)]
				) # E[log p(V, alpha)] joint distr is easier to compute than likelihood and prior independently.
	def get_E_log_q_varpar(self):
		"""E[log q(V)] (constants dropped)."""
		return(
			(
				np.sum(self.E_log_stick*(self.varpar_stick-1))
				-
				np.sum(
					sps.gammaln(self.varpar_stick),
					)
				+
				np.sum(sps.gammaln(self.sum_stick))
			) # E[log q(V)]
			)
	def set_log_posterior_expectation(self):
		"""Unimplemented; draft retained in the comments below."""
		pass
		# self.log_posterior_expectation = spm.logsumexp(np.append(
		# 					np.log(self.varpar_stick[:,0])
		# 					-
		# 					np.log(self.sum_stick)
		# 					,
		# 					0
		# 				)[:,np.newaxis]
		# 				+
		# 				np.append(
		# 					0,
		# 					np.cumsum(
		# 						np.log(self.varpar_stick[:,1])
		# 						-
		# 						np.log(self.sum_stick)
		# 					)
		# 				)[:,np.newaxis]
		# 				+
		# 				self.mother.log_posterior_expectation_top_down[self.id]
		# 				,
		# 				axis=0
		# 			)
	def get_log_posterior_pred(self, value):
		"""Unimplemented; draft retained in the comment below."""
		pass
		# return self.log_posterior_expectation[value]
class DP(DP_bottom):
	"""Internal-level DP: its customers are child DPs one level below.

	varpar_assignment[j, s, t, k] is the probability (under sublexicon s)
	that table t of child j is seated at this DP's cluster k.
	"""
	def __init__(self, num_clusters, mother, context_length, context, varpar_concent, num_sublex, hdp):
		self.mother = mother
		self.hdp=hdp
		self.context_length=context_length
		self.context = context
		self.varpar_concent = varpar_concent
		self.varpar_stick = np.random.gamma(1,#10,
										20,#0.1,
										size=(num_sublex,num_clusters-1,2)
										) # gamma in Blei and Jordan.
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:,:,np.newaxis]
		self.varpar_concent.add_dp(self)
		self.num_clusters = num_clusters
		self.children = []
		self.new_id = 0
		# Register with the parent DP and remember the child id it issues.
		self.id = self.mother.add_customer(self)
	def add_customer(self, child):
		"""Register a child DP as a customer; return the issued child id."""
		self.children.append(child)
		issued_id = self.new_id
		self.new_id+=1
		return issued_id
	def set_varpar_assignment(self):
		"""Randomly initialize the child-table-to-cluster responsibilities.

		Deferred from __init__ because the number of children is only known
		after the whole tree has been wired up.
		"""
		num_children = len(self.children)
		num_tables_child = self.children[0].num_clusters # num_clusters for children.
		self.varpar_assignment = np.random.dirichlet(
									np.ones(self.num_clusters),
									(num_children, self.varpar_stick.shape[0], num_tables_child)
									) # phi in Blei and Jordan.
		assert self.varpar_assignment.size, 'Empty restaurant created.'
		self._update_log_like_top_down()
	def _update_varpar_stick(self):
		"""Beta stick updates from assignment counts summed over children and tables."""
		self.varpar_stick[...,0] = np.sum(self.varpar_assignment[...,:-1], axis=(0, 2))+1
		self.varpar_stick[...,1] = np.cumsum(
									np.sum(
										self.varpar_assignment[...,:0:-1],
										axis=(0, 2)
										)
									,
									axis=-1
									)[...,::-1]+self.varpar_concent.mean[:,np.newaxis]
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:,:,np.newaxis]
	def _update_varpar_assignment(self):
		"""Softmax update of q(Z): stick log weights + expected child/parent agreement.

		The last sum couples each child table's upward message with this DP's
		parent's downward per-cluster symbol log likelihoods.
		"""
		appendix = np.zeros((self.E_log_stick.shape[0],1))
		log_varpar_assignment = (
			np.append(
				self.E_log_stick[:,:,0],
				appendix
				,
				axis=1
				)[np.newaxis,:,np.newaxis,:]
			+np.append(
				appendix,
				np.cumsum(
					self.E_log_stick[:,:,1]
					,
					axis=1
					)
				,
				axis=1
				)[np.newaxis,:,np.newaxis,:]
			+
			np.sum(
				self.child_log_like_bottom_up[:,:,:,np.newaxis,:] # num_children x num_sublex x num_customers x num_symbols
				*
				self.mother.log_like_top_down[self.id,np.newaxis,:,np.newaxis,:,:] # num_sublex x num_clusters x num_symbols
				,
				axis=-1
				)
			)
		self.varpar_assignment=np.exp(log_varpar_assignment-spm.logsumexp(log_varpar_assignment, axis=-1)[:,:,:,np.newaxis])
	def update_varpars(self):
		"""Coordinate updates: sticks, assignments, then the downward message."""
		self._update_varpar_stick()
		self._update_varpar_assignment()
		self._update_log_like_top_down()
	def _set_child_log_like_bottom_up(self):
		"""Stack the children's upward messages: num_children x num_sublex x ..."""
		self.child_log_like_bottom_up = np.array(
										[child.log_like_bottom_up
											for child in self.children
										]
										)
	def get_var_bound(self):
		"""Bound contribution; refreshes this DP's upward message as a side effect."""
		self._update_log_like_bottom_up()
		return self.get_sum_E_log_p_varpars()-self.get_E_log_q_varpar()
	def _update_log_like_top_down(self):
		"""Downward message: parent's per-cluster symbol log likes, mixed by q(Z)."""
		self.log_like_top_down = np.sum(
									self.varpar_assignment[:,:,:,:,np.newaxis]
									*
									self.mother.log_like_top_down[self.id,np.newaxis,:,np.newaxis,:,:] # 1 x num_sublex x 1 x num_clusters x num_symbols
									,
									axis=-2
									)
	def _update_log_like_bottom_up(self):
		"""Upward message: children's messages re-binned onto this DP's clusters."""
		self._set_child_log_like_bottom_up()
		self.log_like_bottom_up=np.sum(
									self.child_log_like_bottom_up[:,:,:,np.newaxis,:] # num_children x num_sublex x num_customers x num_symbols
									*
									self.varpar_assignment[:,:,:,:,np.newaxis]
									,
									axis=(0,2)
									) # num_sublex x num_clusters x num_symbols
	def get_sum_E_log_p_varpars(self):
		"""E[log p(V, alpha)] + E[log p(Z | V)] (constants dropped)."""
		return (
			np.sum(
				(self.varpar_concent.mean-1)*np.sum(self.E_log_stick[:,:,1], axis=-1) # E[alpha-1]*E[log (1-V)]
				) # E[log p(V, alpha)] joint distr is easier to compute than likelihood and prior independently.
			+
			np.sum(
				self.E_log_stick[:,:,1]*np.cumsum(
										np.sum(
											self.varpar_assignment[...,:0:-1],
											axis=(0,2)
											)
										,
										axis=-1
										)[...,::-1]
				+
				self.E_log_stick[:,:,0]*np.sum(self.varpar_assignment[...,:-1], axis=(0,2))
				) # E[log p(Z | V)]
			)
	def get_E_log_q_varpar(self):
		"""E[log q(V)] + E[log q(Z)] (constants dropped; masked log for q(Z)=0)."""
		return(
			(
				np.sum(self.E_log_stick*(self.varpar_stick-1))
				-
				np.sum(
					sps.gammaln(self.varpar_stick),
					)
				+
				np.sum(sps.gammaln(self.sum_stick))
			) # E[log q(V)]
			+
			np.sum(self.varpar_assignment*np.ma.log(self.varpar_assignment)) # E[log q(Z)]
			)
	def set_log_posterior_expectation(self):
		"""Unimplemented; drafts retained in the comments below."""
		pass
		# self.log_posterior_expectation = spm.logsumexp(np.append(
		# 					np.log(self.varpar_stick[:,0])
		# 					-
		# 					np.log(self.sum_stick)
		# 					,
		# 					0
		# 				)[:,np.newaxis]
		# 				+
		# 				np.append(
		# 					0,
		# 					np.cumsum(
		# 						np.log(self.varpar_stick[:,1])
		# 						-
		# 						np.log(self.sum_stick)
		# 					)
		# 				)[:,np.newaxis]
		# 				+
		# 				self.mother.log_posterior_expectation_top_down[self.id]
		# 				,
		# 				axis=0
		# 			)
		# self.log_posterior_expectation_top_down = spm.logsumexp(
		# 									np.log(self.varpar_assignment)[:,:,:,np.newaxis] # num_children x num_customers x num_clusters
		# 									+
		# 									self.mother.log_posterior_expectation_top_down[self.id,np.newaxis,np.newaxis,:,:] # num_clusters x num_symbols
		# 									,
		# 									axis=-2
		# 								)
class DP_top(DP):
	"""Root DP (empty context) with a Dirichlet base distribution over symbols.

	In addition to stick weights it owns varpar_atom, the variational
	Dirichlet pseudo-counts over each cluster's symbol distribution. It has
	no mother, so the downward message is built from E[log U] directly.
	"""
	def __init__(self, num_clusters, context_length, context, varpar_concent, atom_base_counts, num_sublex, num_symbols, hdp):
		self.hdp=hdp
		self.context_length=context_length
		self.context = context
		self.varpar_concent = varpar_concent
		self.varpar_stick = np.random.gamma(1,#10,
										20,#0.1,
										size=(num_sublex, num_clusters-1,2)
										) # gamma in Blei and Jordan.
		self.sum_stick = np.sum(self.varpar_stick, axis=-1)
		self.E_log_stick = sps.digamma(self.varpar_stick)-sps.digamma(self.sum_stick)[:, :, np.newaxis]
		self.atom_base_counts = atom_base_counts
		# Dirichlet pseudo-counts per (sublexicon, cluster, symbol).
		self.varpar_atom = np.random.gamma(1, 20, size=(num_sublex, num_clusters, num_symbols))
		self.sum_atom=np.sum(self.varpar_atom, axis=-1)
		self.E_log_atom = (
			sps.digamma(self.varpar_atom)
			-
			sps.digamma(self.sum_atom)[:,:,np.newaxis]
			)
		self.varpar_concent.add_dp(self)
		self.num_clusters = num_clusters
		self.new_id=0
		self.children=[]
	def add_customer(self, child):
		"""Register a child DP as a customer; return the issued child id."""
		self.children.append(child)
		issued_id = self.new_id
		self.new_id+=1
		return issued_id
	def _update_varpar_assignment(self):
		"""Softmax update of q(Z), coupling child messages with E[log U] atoms."""
		appendix = np.zeros((self.E_log_stick.shape[0],1))
		log_varpar_assignment = (
			np.append(
				self.E_log_stick[:,:,0],
				appendix
				,
				axis=1
				)[np.newaxis,:,np.newaxis,:]
			+np.append(
				appendix,
				np.cumsum(
					self.E_log_stick[:,:,1]
					,
					axis=1
					)
				,
				axis=1
				)[np.newaxis,:,np.newaxis,:]
			+
			np.sum(
				self.child_log_like_bottom_up[:,:,:,np.newaxis,:] # num_children x num_sublex x num_customers x num_symbols
				*
				self.E_log_atom[np.newaxis,:,np.newaxis,:,:] # num_sublex x num_clusters x num_symbols
				,
				axis=-1
				)
			)
		self.varpar_assignment=np.exp(log_varpar_assignment-spm.logsumexp(log_varpar_assignment, axis=-1)[:,:,:,np.newaxis])
	def _update_varpar_atom(self):
		"""Dirichlet update: posterior pseudo-counts = expected counts + base counts."""
		self.varpar_atom=self.log_like_bottom_up + self.atom_base_counts[np.newaxis,np.newaxis,:]
		self.sum_atom = np.sum(self.varpar_atom, axis=-1)
		self.E_log_atom = (
			sps.digamma(self.varpar_atom)
			-
			sps.digamma(self.sum_atom)[:,:,np.newaxis]
			)
	def _update_log_like_top_down(self):
		"""Downward message: E[log U] atoms mixed by the assignment responsibilities."""
		self.log_like_top_down = np.sum(
									self.varpar_assignment[:,:,:,:,np.newaxis] # num_children x num_sublex x num_child_clusters x num_clusters
									*
									self.E_log_atom[np.newaxis,:,np.newaxis,:,:] # num_sublex x num_clusters x num_symbols
									,
									axis=-2
									)
	def update_varpars(self):
		"""Coordinate updates: sticks, atoms, assignments, then the downward message.

		NOTE(review): _update_varpar_atom reads log_like_bottom_up, which is
		refreshed in get_var_bound (called once per outer iteration before
		these updates) -- confirm the intended interleaving.
		"""
		self._update_varpar_stick()
		self._update_varpar_atom()
		self._update_varpar_assignment()
		self._update_log_like_top_down()
	def get_sum_E_log_p_varpars(self):
		"""E[log p(V, alpha)] + E[log p(Z | V)] + E[log p(U)] (constants dropped)."""
		return (
			np.sum(
				(self.varpar_concent.mean-1)*np.sum(self.E_log_stick[:,:,1], axis=-1) # E[alpha-1]*E[log (1-V)]
				) # E[log p(V, alpha)] joint distr is easier to compute than likelihood and prior independently.
			+
			np.sum(
				self.E_log_stick[:,:,1]*np.cumsum(
										np.sum(
											self.varpar_assignment[...,:0:-1],
											axis=(0,2)
											)
										,
										axis=-1
										)[...,::-1]
				+
				self.E_log_stick[:,:,0]*np.sum(self.varpar_assignment[...,:-1], axis=(0,2))
				) # E[log p(Z | V)]
			+
			np.sum(
				(self.atom_base_counts-1)
				*
				np.sum(self.E_log_atom, axis=(0,1))
				)
			)
	def get_E_log_q_varpar(self):
		"""E[log q(V)] + E[log q(Z)] + E[log q(U)] (constants dropped)."""
		return(
			(
				np.sum(self.E_log_stick*(self.varpar_stick-1))
				-
				np.sum(
					sps.gammaln(self.varpar_stick),
					)
				+
				np.sum(sps.gammaln(self.sum_stick))
			) # E[log q(V)]
			+
			np.sum(self.varpar_assignment*np.ma.log(self.varpar_assignment)) # E[log q(Z)]
			+ # E[log q(U)] below.
			np.sum(
				self.E_log_atom*(self.varpar_atom-1)
				)
			-
			np.sum(
				sps.gammaln(self.varpar_atom)
				)
			+
			np.sum(
				sps.gammaln(self.sum_atom)
				)
			)
	def set_log_posterior_expectation(self):
		"""Unimplemented; drafts retained in the comments below."""
		pass
		# log_expectation_atom = np.log(self.varpar_atom)-np.log(self.sum_atom)[:,np.newaxis]
		# self.log_posterior_expectation = spm.logsumexp(np.append(
		# 					np.log(self.varpar_stick[:,0])
		# 					-
		# 					np.log(self.sum_stick)
		# 					,
		# 					0
		# 				)[:,np.newaxis]
		# 				+
		# 				np.append(
		# 					0,
		# 					np.cumsum(
		# 						np.log(self.varpar_stick[:,1])
		# 						-
		# 						np.log(self.sum_stick)
		# 					)
		# 				)[:,np.newaxis]
		# 				+
		# 				log_expectation_atom
		# 				,
		# 				axis=0
		# 			)
		# self.log_posterior_expectation_top_down = spm.logsumexp(
		# 									np.log(self.varpar_assignment)[:,:,:,np.newaxis] # num_children x num_customers x num_clusters
		# 									+
		# 									log_expectation_atom[np.newaxis,np.newaxis,:,:] # num_clusters x num_symbols
		# 									,
		# 									axis=-2
		# 								)
class Poisson_length(DP_top):
	"""Per-sublexicon Poisson model of word length with a Gamma variational
	posterior over each rate.

	Subclasses DP_top only for interface compatibility; every method used at
	runtime is overridden and the parent constructor is deliberately not
	called.
	"""
	def __init__(self, priors, data, num_sublex, wrapper):
		self.prior_shape = priors[0]
		self.prior_rate = priors[1]
		self.wrapper = wrapper
		# Column 0 is the Gamma shape, column 1 the Gamma rate (per sublexicon).
		self.varpar_length = np.random.gamma(1, 20, size=(num_sublex, 2))
		self._refresh_expectations()
		self.data = data

	def _refresh_expectations(self):
		# E[lambda] and E[log lambda] under q(lambda) = Gamma(shape, rate).
		shape_par = self.varpar_length[:, 0]
		rate_par = self.varpar_length[:, 1]
		self.E_length = shape_par / rate_par
		self.E_log_length = sps.digamma(shape_par) - np.log(rate_par)

	def _update_varpar_length(self):
		"""Conjugate Gamma update from responsibility-weighted length statistics."""
		self.varpar_length[:, 0] = self.sum_expected_length + self.prior_shape
		self.varpar_length[:, 1] = self.expected_num_obs + self.prior_rate
		self._refresh_expectations()

	def get_log_like(self):
		"""Expected Poisson log likelihood per (word, sublexicon).

		k*E[log lambda] - E[lambda]; the log k! term is constant across
		sublexica and is dropped.
		"""
		return np.outer(self.data, self.E_log_length) - self.E_length[np.newaxis, :]

	def update_varpars(self):
		"""One coordinate update: expected counts, then the Gamma parameters."""
		self._update_expected_counts()
		self._update_varpar_length()

	def _update_expected_counts(self):
		# Responsibility-weighted total length and effective observation count
		# per sublexicon.
		responsibilities = self.wrapper.varpar_assignment  # num_words x num_sublex
		self.sum_expected_length = np.sum(responsibilities * self.data[:, np.newaxis], axis=0)
		self.expected_num_obs = np.sum(responsibilities, axis=0)

	def get_var_bound(self):
		"""Bound contribution E[log p] - E[log q] (constants dropped)."""
		return self.get_E_log_p() - self.get_E_log_q()

	def get_E_log_p(self):
		return np.sum(
			(self.prior_shape - 1) * self.E_log_length
			- self.prior_rate * self.E_length
		)

	def get_E_log_q(self):
		shape_par = self.varpar_length[:, 0]
		rate_par = self.varpar_length[:, 1]
		return np.sum(
			shape_par * np.log(rate_par)
			- sps.gammaln(shape_par)
			+ (shape_par - 1) * self.E_log_length
			- shape_par
		)
class Word(object):
	"""A word represented as the collection of its length-n windows.

	The code sequence is left-padded with n-1 copies of the padding code
	``num_symbols`` so that every symbol has a full-length context.
	"""
	def __init__(self, string, n, id, num_symbols):
		self.id = id
		padded = [num_symbols] * (n - 1) + string
		windows = zip(*[padded[offset:] for offset in range(n)])
		self.ngrams = [Ngram(window, self) for window in windows]

	def get_E_log_likelihoods(self):
		"""Per-sublexicon word log likelihood: sum over the word's n-grams."""
		return np.sum(
			[ngram.get_E_log_likelihoods() for ngram in self.ngrams],
			axis=0,
		)
class Ngram(object):
	"""One n-gram occurrence: a conditioning context tuple plus its target symbol."""

	def __init__(self, window, word):
		self.word = word
		# Split the length-n window into context (first n-1 codes) and target.
		self.context = window[:-1]
		self.target = window[-1]

	def enter_a_restaurant(self, restaurant):
		"""Register this n-gram as a customer of its context's leaf DP."""
		restaurant.add_customer(self.target, self.word.id)
		self.restaurant = restaurant

	def get_E_log_likelihoods(self):
		"""Per-sublexicon expected log likelihood of the target symbol."""
		return self.restaurant.log_like[..., self.target]
def code_data(training_data,test_data=None):
	"""Integer-code comma-separated symbol strings.

	training_data: iterable of strings like 'a,b,c' (e.g. a pandas Series).
	test_data: optional second iterable; if given, the symbol inventory is
	built over both sets so test symbols are always encodable.

	Returns (coded_data, encoder, decoder) or, with test_data,
	(coded_training_data, coded_test_data, encoder, decoder), where coded
	data are lists of lists of integer codes.

	Fixes: the original iterated via Series.iteritems (removed in pandas 2.0)
	and used py2 map-as-list; plain iteration and comprehensions behave
	identically on py2 and also work on py3 / modern pandas, and accept any
	iterable of strings.
	"""
	str_training_data=[word.split(',') for word in training_data]
	if test_data is None:
		str_data=str_training_data
	else:
		str_test_data=[word.split(',') for word in test_data]
		str_data=str_training_data+str_test_data
	inventory = list(set(itertools.chain.from_iterable(str_data)))
	encoder = {symbol:code for code,symbol in enumerate(inventory)}
	decoder = {code:symbol for code,symbol in enumerate(inventory)}
	# Extra code used as the left-padding context symbol in Word (a START
	# marker; the original comment mislabeled it an "end symbol").
	decoder[len(decoder)]='START'
	if test_data is None:
		coded_data = [[encoder[s] for s in phrase] for phrase in str_data]
		return (coded_data,encoder,decoder)
	else:
		coded_training_data=[[encoder[s] for s in phrase] for phrase in str_training_data]
		coded_test_data=[[encoder[s] for s in phrase] for phrase in str_test_data]
		return (coded_training_data,coded_test_data,encoder,decoder)
| 27.702365
| 140
| 0.606005
|
4a0255851d85800038e0c72823698ff66e3e39e3
| 3,520
|
py
|
Python
|
src/module/nesteddicttools.py
|
kthewhispers/Nested-Dictionary-Tools-Python
|
6d82061d6d736b1a822c0b408ae2f2e9b20b1ab1
|
[
"MIT"
] | null | null | null |
src/module/nesteddicttools.py
|
kthewhispers/Nested-Dictionary-Tools-Python
|
6d82061d6d736b1a822c0b408ae2f2e9b20b1ab1
|
[
"MIT"
] | null | null | null |
src/module/nesteddicttools.py
|
kthewhispers/Nested-Dictionary-Tools-Python
|
6d82061d6d736b1a822c0b408ae2f2e9b20b1ab1
|
[
"MIT"
] | 1
|
2019-07-02T23:35:46.000Z
|
2019-07-02T23:35:46.000Z
|
'''
MIT License
Copyright (c) 2019 Keith Cronin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def get_value(keystring, dictionary):
    """Return the value stored under a dot-separated key path.

    keystring: e.g. 'a.b.c' looks up dictionary['a']['b']['c'].
    dictionary: arbitrarily nested dict.
    Raises KeyError if any key along the path is missing.

    Fixes the single-key case: the original sliced with
    keystring.find('.') == -1, which silently dropped the last character of
    a path containing no dot.
    """
    value = dictionary
    for key in keystring.split('.'):
        value = value[key]
    return value
def set_value(keystring, dictionary, new_value):
    """Set the value at a dot-separated key path and return new_value.

    keystring: e.g. 'a.b' performs dictionary['a']['b'] = new_value.
    Intermediate keys must already exist; raises KeyError otherwise.

    Fixes the single-key case: the original's find('.') == -1 slice broke
    paths with no dot, and a dangling `value = new_value` obscured the
    return value (which was and remains new_value).
    """
    keys = keystring.split('.')
    node = dictionary
    for key in keys[:-1]:
        node = node[key]
    node[keys[-1]] = new_value
    return new_value
def del_entry(keystring, dictionary):
    """Delete the entry at a dot-separated key path.

    keystring: e.g. 'a.b' performs del dictionary['a']['b'].
    Raises KeyError if any key along the path is missing.

    Fixes the single-key case: the original's find('.') == -1 slice dropped
    the last character of a path with no dot.
    """
    keys = keystring.split('.')
    node = dictionary
    for key in keys[:-1]:
        node = node[key]
    del node[keys[-1]]
def add_entry(keystring, dictionary, entry_name, entry_value=None):
    """Add ``entry_name: entry_value`` to the dict addressed by *keystring*.

    ``add_entry("a.b", d, "x", 1)`` performs ``d["a"]["b"]["x"] = 1``.
    The container at the full key path must already exist.

    Fixes the original single-key case, which truncated the last
    character of the key (``find('.')`` returning -1).
    """
    target = dictionary
    for key in keystring.split('.'):
        target = target[key]
    target[entry_name] = entry_value
| 34.509804
| 92
| 0.65142
|
4a0255ef54e54ab43755cc7f21826d22eb05c172
| 4,142
|
py
|
Python
|
src/simmer/tests/test_contrast.py
|
arjunsavel/SImMer
|
71d9bf0bf329f597426ebcd71dd0cda731592ec6
|
[
"MIT"
] | null | null | null |
src/simmer/tests/test_contrast.py
|
arjunsavel/SImMer
|
71d9bf0bf329f597426ebcd71dd0cda731592ec6
|
[
"MIT"
] | 119
|
2020-03-17T20:32:13.000Z
|
2022-03-28T17:02:19.000Z
|
src/simmer/tests/test_contrast.py
|
arjunsavel/SImMer
|
71d9bf0bf329f597426ebcd71dd0cda731592ec6
|
[
"MIT"
] | 2
|
2020-03-25T22:23:01.000Z
|
2021-07-14T23:45:05.000Z
|
"""
Module to compute contrast curves.
author: @holdengill
isort:skip_file
"""
import yaml
import numpy as np
import scipy as sp
from astropy.stats import SigmaClip
from astropy.stats import sigma_clipped_stats
from astropy.io import fits
import math
from numpy import random
import unittest
from photutils import datasets
from astropy.table import Table
from simmer.contrast import ConCur as cc
from simmer.contrast import twoD_weighted_std as wstd
from simmer.contrast import find_best_center
from simmer.contrast import background_calc
from simmer.contrast import hot_pixels
def all_same(items):
    """Return True iff every element of *items* equals the first element.

    Bug fix: the previous body passed a generator expression to
    ``np.all``, which treats the generator object itself as a single
    truthy value and therefore always returned True.  The builtin
    ``all`` consumes the generator and performs the intended check.
    """
    return all(x == items[0] for x in items)
class TestContrastCurve(unittest.TestCase):
    """Sanity checks for the ConCur contrast-curve computation."""

    def test_constant_flux(self):
        # A uniform image should yield a flat contrast curve.
        uniform = np.full((600, 600), 1000)
        curve = cc(uniform)
        self.assertTrue(all_same(curve[1]))

    def test_zero_vals(self):
        # An all-zero image should produce NaNs throughout the curve.
        zeros = np.zeros((600, 600), dtype=int)
        curve = cc(zeros)
        nan_flags = [np.isnan(v) for v in curve[1]]
        self.assertTrue(np.all(nan_flags))

    def test_radius_size(self):
        # Larger annulus radii should produce fewer curve samples.
        uniform = np.full((600, 600), 1000)
        lengths = [len(cc(uniform, radius_size=r)[0]) for r in (1, 3, 6)]
        self.assertTrue(
            np.all(
                [
                    lengths[0] > lengths[1],
                    lengths[1] > lengths[2],
                ]
            )
        )

    def test_best_center(self):
        # A single Gaussian source centered at (299.5, 299.5) should be
        # recovered at the same position.
        source = Table()
        source["flux"] = [1000]
        source["x_mean"] = [299.5]
        source["y_mean"] = [299.5]
        image = datasets.make_gaussian_sources_image((600, 600), source)
        center = find_best_center(image, 3, [299.5, 299.5])
        self.assertTrue(np.allclose((center[0], center[1]), [299.5, 299.5]))

    def test_twoD_weighted_std_constants(self):
        # Constant data has zero spread regardless of weighting.
        constant = np.full((600, 600), 20)
        unit_weights = np.ones((600, 600), dtype=int)
        self.assertEqual(0, wstd(constant, unit_weights))

    def test_twoD_weighted_std_known_simple(self):
        # Hand-computed weighted std for a 3x3 example.
        rows = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
        weights = np.array([[1, 1, 1], [0, 0, 0], [1, 1, 1]])
        self.assertTrue(np.isclose(wstd(rows, weights), 1.095445115))
class TestHotPixels(unittest.TestCase):
    """Verify that the hot-pixel detector flags only isolated bad pixels."""

    def test_hot_pixels_one(self):
        # A single bright pixel on a dark frame is a hot pixel.
        frame = np.zeros((600, 600), dtype=int)
        frame[300, 300] = 1
        flagged = hot_pixels(frame, [300, 300], 1 / (3600), 0)
        self.assertEqual(1, len(flagged))

    def test_hot_pixels_multiple(self):
        # A 2x2 patch of bright pixels: all four are flagged.
        frame = np.zeros((600, 600), dtype=int)
        frame[300:302, 300:302] = 1
        flagged = hot_pixels(frame, [300, 300], 4 / 3600, 0)
        self.assertEqual(4, len(flagged))

    def test_hot_pixels_clump(self):
        # A 3x3 clump looks like a real source, not hot pixels.
        frame = np.zeros((600, 600), dtype=int)
        frame[300:303, 300:303] = 1
        flagged = hot_pixels(frame, [300, 300], 4 / 3600, 0)
        self.assertEqual(0, len(flagged))

    def test_hot_pixels_cold(self):
        # A uniform frame at the background level has no hot pixels.
        frame = np.ones((600, 600), dtype=int)
        flagged = hot_pixels(frame, [300, 300], 1, 0)
        self.assertEqual(0, len(flagged))
class TestBackgroundMethods(unittest.TestCase):
    """Checks for the two background-estimation strategies."""

    def test_background_outside(self):
        # Signal confined to the center: the "outside" estimate is zero.
        frame = np.zeros((600, 600), dtype=int)
        frame[270:330, 270:330] = 1
        estimate = background_calc(frame, "outside")
        self.assertEqual(estimate[0], 0)

    def test_background_boxes_vals(self):
        # Bright off-center boxes should raise the "boxes" estimate above zero.
        frame = np.zeros((600, 600), dtype=int)
        frame[100:150, 100:150] = 1
        frame[400:450, 400:450] = 1
        estimate = background_calc(frame, "boxes")
        self.assertTrue(estimate[0] > 0)
# Allow running this test module directly: `python test_contrast.py`.
if __name__ == "__main__":
    unittest.main()
| 31.142857
| 77
| 0.612989
|
4a02567b5a45463c0d1adb2d8dca56fb23baa301
| 703
|
py
|
Python
|
test_migrations/runners/unittest.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | null | null | null |
test_migrations/runners/unittest.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | null | null | null |
test_migrations/runners/unittest.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | null | null | null |
from django.test import runner
from test_migrations import settings
class MigrationTestRunnerMixin:
    """Excludes migration-tagged tests unless that tag was requested.

    If ``settings.MIGRATIONS_TEST_MARKER`` is not among the requested
    tags, it is added to the exclusion set before delegating to the
    wrapped runner.
    """

    def __init__(self, *args, tags=None, exclude_tags=None, **kwargs):
        requested_tags = tags or []
        if settings.MIGRATIONS_TEST_MARKER not in requested_tags:
            excluded = set(exclude_tags or [])
            excluded.add(settings.MIGRATIONS_TEST_MARKER)
            exclude_tags = excluded
        super().__init__(*args, tags=requested_tags, exclude_tags=exclude_tags, **kwargs)
class DiscoverRunner(MigrationTestRunnerMixin, runner.DiscoverRunner):
    """Django ``DiscoverRunner`` that ignores all migration tests by default.

    Tests tagged with ``settings.MIGRATIONS_TEST_MARKER`` are excluded
    unless that tag is explicitly requested (e.g. via ``--tag``); the
    tag handling lives in :class:`MigrationTestRunnerMixin`.
    """
| 33.47619
| 79
| 0.731152
|
4a02572cc8be15835054bff2eb2e19e6382c5c87
| 9,824
|
py
|
Python
|
generate.py
|
huez-dev/stylegan2-ada
|
ecea930ff4a5ad0a0c83f7097bca5680866f517d
|
[
"BSD-Source-Code"
] | null | null | null |
generate.py
|
huez-dev/stylegan2-ada
|
ecea930ff4a5ad0a0c83f7097bca5680866f517d
|
[
"BSD-Source-Code"
] | null | null | null |
generate.py
|
huez-dev/stylegan2-ada
|
ecea930ff4a5ad0a0c83f7097bca5680866f517d
|
[
"BSD-Source-Code"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import argparse
import logging
import math
import os
import pickle
import re
import sys
import time
import cv2
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
from pythonosc import dispatcher
from pythonosc import osc_server
from pythonosc import udp_client
def is_perfect_cube(x):
    """Return True if abs(x) is the cube of a non-negative integer.

    The float cube root ``x ** (1/3)`` can round to a neighbouring
    integer for large inputs, so the three nearest integer candidates
    are checked with exact integer arithmetic.
    """
    x = abs(x)
    root = int(round(x ** (1. / 3)))
    return any((root + d) ** 3 == x for d in (-1, 0, 1))
def get_cube(x):
    """Return the cube root of *x* (real-valued only for x >= 0)."""
    one_third = 1. / 3
    return x ** one_third
def remap(value, from1, to1, from2, to2):
    """Linearly map *value* from the range [from1, to1] onto [from2, to2]."""
    normalized = (value - from1) / (to1 - from1)
    return normalized * (to2 - from2) + from2
def remap2(value, low1, low2, high1, high2):
    """Linearly map *value* from the range [low1, high1] onto [low2, high2].

    NOTE(review): duplicates :func:`remap` with a different argument
    order — presumably kept for compatibility with existing callers.
    """
    span_ratio = (value - low1) / (high1 - low1)
    return low2 + span_ratio * (high2 - low2)
# ----------------------------------------------------------------------------
def generate_images(network_pkl, seeds, truncation_psi, outdir, class_idx, dlatents_npz, image_sum):
    """Generate images with a pretrained StyleGAN2 generator.

    Two modes:
      * ``dlatents_npz`` given — render each stored dlatent vector to
        ``<outdir>/dlatentNN.png`` and return.
      * otherwise — enter an endless polling loop that reads a latent
        vector (comma-separated floats) from ``request.txt`` roughly ten
        times per second and writes ``generate/generate<N>.jpg``,
        wrapping the counter once it exceeds ``image_sum``.

    Args:
        network_pkl: path/URL of the pickled (G, D, Gs) networks.
        seeds: unused in this mode (kept for CLI compatibility).
        truncation_psi: truncation-trick strength; None leaves default.
        outdir: directory for dlatent renderings.
        class_idx: class label index for conditional models, or None.
        dlatents_npz: .npz file with a 'dlatents' array, or None.
        image_sum: wrap the output-file counter after this many images.

    Fixes: ``val is not ""`` (identity test on a str literal — works only
    by CPython interning accident and raises SyntaxWarning on 3.8+) is
    now ``!=``; ``request.txt`` is opened with ``with`` so the handle is
    closed even when ``float()`` raises.  Dead commented-out code removed.
    """
    # mp4 encoder, kept from the original video-export path (currently unused).
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)
    os.makedirs(outdir, exist_ok=True)
    # Remove this line if we can use 'out' directory to save generated images
    os.makedirs("generate", exist_ok=True)
    # Render images for a given dlatent vector.
    if dlatents_npz is not None:
        print(f'Generating images from dlatents file "{dlatents_npz}"')
        dlatents = np.load(dlatents_npz)['dlatents']
        assert dlatents.shape[1:] == (18, 512)  # [N, 18, 512]
        imgs = Gs.components.synthesis.run(dlatents,
                                           output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
        for i, img in enumerate(imgs):
            fname = f'{outdir}/dlatent{i:02d}.png'
            print(f'Saved {fname}')
            PIL.Image.fromarray(img, 'RGB').save(fname)
        return
    # Render images for dlatents initialized from random seeds.
    Gs_kwargs = {
        'output_transform': dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True),
        'randomize_noise': False
    }
    if truncation_psi is not None:
        Gs_kwargs['truncation_psi'] = truncation_psi
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    label = np.zeros([1] + Gs.input_shapes[1][1:])
    if class_idx is not None:
        label[:, class_idx] = 1
    rnd = np.random.RandomState(600)
    z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]

    def pil2cv(image):
        '''Convert a PIL image to an OpenCV ndarray (RGB -> BGR).

        Kept for the video-export path even though the polling loop
        below does not use it.
        '''
        new_image = np.array(image, dtype=np.uint8)
        if new_image.ndim == 2:  # greyscale
            pass
        elif new_image.shape[2] == 3:  # colour
            new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
        elif new_image.shape[2] == 4:  # with alpha channel
            new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
        return new_image

    # Warm up with an all-zero latent so the first request renders fast.
    z[0] = [0.0 for val in range(512)]
    tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
    images = Gs.run(z, label, **Gs_kwargs)  # [minibatch, height, width, channel]
    number = 0
    while True:
        if (number > image_sum):
            number = 0
        try:
            data = []
            # 'with' guarantees the handle is closed even if float() raises.
            with open("request.txt") as f:
                for val in f.read().split(","):
                    if val != "":  # was `is not ""`: identity on str literals is unreliable
                        data.append(float(val))
            z[0] = data
        except ValueError:
            # Partially-written request file; retry on the next poll.
            continue
        # generate image
        print("------------------------------------------------------")
        print(data)
        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        images = Gs.run(z, label, **Gs_kwargs)  # [minibatch, height, width, channel]
        pil_image = PIL.Image.fromarray(images[0], 'RGB')
        try:
            number += 1
            # Use 'out' directory instead of 'generate' if we can use 'out' directory to save generated images
            pil_image.save("generate/generate" + str(number) + ".jpg")
        except PermissionError:
            logging.error("can not generate generate.jpg, Permission Error")
        # Poll roughly ten times per second.
        time.sleep(1.0 / 10)
# ----------------------------------------------------------------------------
def _parse_num_range(s):
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2)) + 1))
vals = s.split(',')
return [int(x) for x in vals]
# ----------------------------------------------------------------------------
# Epilog text appended to `--help` output; argparse expands %(prog)s to the
# script name.  The string content is user-facing and must stay verbatim.
_examples = '''examples:
# Generate curated MetFaces images without truncation (Fig.10 left)
python %(prog)s --outdir=out --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl
# Generate uncurated MetFaces images with truncation (Fig.12 upper left)
python %(prog)s --outdir=out --trunc=0.7 --seeds=600-605 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl
# Generate class conditional CIFAR-10 images (Fig.17 left, Car)
python %(prog)s --outdir=out --trunc=1 --seeds=0-35 --class=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/cifar10.pkl
# Render image from projected latent vector
python %(prog)s --outdir=out --dlatents=out/dlatents.npz \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/ffhq.pkl
'''
# ----------------------------------------------------------------------------
def main():
    """Command-line entry point: parse arguments and run image generation."""
    parser = argparse.ArgumentParser(
        description='Generate images using pretrained network pickle.',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
    # Exactly one latent source: random seeds or a saved dlatents file.
    source = parser.add_mutually_exclusive_group(required=True)
    source.add_argument('--seeds', type=_parse_num_range, help='List of random seeds')
    source.add_argument('--dlatents', dest='dlatents_npz', help='Generate images for saved dlatents')
    parser.add_argument('--trunc', dest='truncation_psi', type=float,
                        help='Truncation psi (default: %(default)s)', default=0.5)
    parser.add_argument('--class', dest='class_idx', type=int, help='Class label (default: unconditional)')
    parser.add_argument('--outdir', help='Where to save the output images', required=True, metavar='DIR')
    parser.add_argument('--sum', dest='image_sum', type=int, help='number to be generated',
                        default=100)
    generate_images(**vars(parser.parse_args()))
# ----------------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    main()
# ----------------------------------------------------------------------------
| 35.985348
| 120
| 0.579601
|
4a025937150593e5fc11350caf5ea66fb06cb50a
| 557
|
py
|
Python
|
env/Lib/site-packages/touch/__init__.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | 4
|
2022-02-06T00:54:58.000Z
|
2022-02-25T12:44:43.000Z
|
env/Lib/site-packages/touch/__init__.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/touch/__init__.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | 1
|
2022-02-08T13:43:20.000Z
|
2022-02-08T13:43:20.000Z
|
__all__ = ['touch']
import os
import values
def _fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def _mkdir(path):
if path.find("/") > 0 and not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
def _utime(path):
try:
os.utime(path, None)
except Exception:
open(path, 'a').close()
def touch(path):
    """mkdir + touch the given path(s); falsy entries are skipped."""
    for entry in values.get(path):
        if not entry:
            continue
        resolved = _fullpath(entry)
        _mkdir(resolved)
        _utime(resolved)
| 17.967742
| 72
| 0.583483
|
4a02595bfeed7fe1a670c06335bdebabd8fb14c8
| 15,156
|
py
|
Python
|
modelica_language/_parser.py
|
ijknabla/-ModelicaGrammarForPython
|
db316bd3d6eed984d519bf3160668c72b7fa353d
|
[
"MIT"
] | 1
|
2019-11-17T12:37:58.000Z
|
2019-11-17T12:37:58.000Z
|
modelica_language/_parser.py
|
ijknabla/-ModelicaGrammarForPython
|
db316bd3d6eed984d519bf3160668c72b7fa353d
|
[
"MIT"
] | null | null | null |
modelica_language/_parser.py
|
ijknabla/-ModelicaGrammarForPython
|
db316bd3d6eed984d519bf3160668c72b7fa353d
|
[
"MIT"
] | null | null | null |
__all__ = ("Parser",)
from arpeggio import (
Combine,
CrossRef,
EndOfFile,
Not,
OneOrMore,
Optional,
OrderedChoice,
PTNodeVisitor,
ParseTreeNode,
Parser as ArpeggioParser,
ParserPython as ArpeggioPythonParser,
ParsingExpression,
RegExMatch,
Sequence,
StrMatch,
ZeroOrMore,
visit_parse_tree,
)
from copy import copy
from typing import (
Any,
List,
MutableMapping,
Optional as NoneOr,
Set,
Tuple,
Union,
)
from typing_extensions import Final
import warnings
from .exceptions import ParserWarning, SemanticError
# Either a concrete arpeggio expression or a not-yet-resolved rule reference.
ParsingExpressionLike = Union[ParsingExpression, CrossRef]
# ## Lexical symbols & keywords
# "=" defines a lexical rule; "|=" extends an existing one (see __visit_rule).
LEXICAL_ASSIGNMENT_OPERATOR: Final[List[str]] = ["=", "|="]
# ":" defines a syntax rule; "|:" extends an existing one.
SYNTAX_ASSIGNMENT_OPERATOR: Final[List[str]] = [":", "|:"]
NOT_OPERATOR: Final[str] = "!"
OR_OPERATOR: Final[str] = "|"
# Special rule names available inside grammar files.
KEYWORD_RULE_NAME: Final[str] = "$KEYWORD"
COMMENT_RULE_NAME: Final[str] = "$COMMENT"
EOF_RULE_NAME: Final[str] = "$EOF"
# ## Lexical rules
def KEYWORD() -> RegExMatch:
    # A keyword literal in the grammar text: lowercase letters in backquotes.
    return RegExMatch("`[a-z]+`")
def TEXT() -> RegExMatch:
    # A double-quoted string literal.
    return RegExMatch(r'"[^"]*"+')
def REGEX() -> RegExMatch:
    # A regex literal of the form r'...' with backslash escapes allowed.
    return RegExMatch(r"""r'[^'\\]*(?:\\.[^'\\]*)*'""")
def LEXICAL_RULE_IDENTIFIER() -> RegExMatch:
    # Lexical rule names are UPPER-CASE; hyphens allowed.
    return RegExMatch("[A-Z]([0-9A-Z]|-)*")
def SYNTAX_RULE_IDENTIFIER() -> RegExMatch:
    # Syntax rule names are lower-case; hyphens allowed.
    return RegExMatch("[a-z]([0-9a-z]|-)*")
def PART_OF_WORD_REFERENCE() -> ParsingExpression:
    # A reference inside a lexical rule; the negative lookahead prevents a
    # rule *definition* (name followed by "="/"|=") from matching as a use.
    return Sequence(
        OrderedChoice([LEXICAL_RULE_IDENTIFIER, KEYWORD_RULE_NAME]),
        Not(LEXICAL_ASSIGNMENT_OPERATOR),
    )
def WORD_REFERENCE() -> ParsingExpression:
    # A lexical-rule reference used from a syntax rule.
    return Sequence(LEXICAL_RULE_IDENTIFIER, Not(LEXICAL_ASSIGNMENT_OPERATOR))
def SYNTAX_REFERENCE() -> ParsingExpression:
    # A syntax-rule reference, or the special $EOF rule.
    return OrderedChoice(
        [
            Sequence(SYNTAX_RULE_IDENTIFIER, Not(SYNTAX_ASSIGNMENT_OPERATOR)),
            EOF_RULE_NAME,
        ]
    )
# ## Syntax rules
def grammar() -> ParsingExpression:
    # A grammar file: one or more rules followed by end-of-file.
    return Sequence(OneOrMore([lexical_rule, syntax_rule]), EndOfFile())
def lexical_rule() -> ParsingExpression:
    # In the lexical rule, the special rules
    # $KEYWORD and $COMMENT can be defined.
    # However, $EOF cannot be defined.
    return Sequence(
        OrderedChoice(
            [
                KEYWORD_RULE_NAME,
                COMMENT_RULE_NAME,
                LEXICAL_RULE_IDENTIFIER,
            ]
        ),
        LEXICAL_ASSIGNMENT_OPERATOR,
        lexical_expression,
    )
def syntax_rule() -> ParsingExpression:
    # name, ":" or "|:", then the rule body.
    return Sequence(
        SYNTAX_RULE_IDENTIFIER,
        SYNTAX_ASSIGNMENT_OPERATOR,
        syntax_expression,
    )
# ## expression rule
def lexical_expression() -> ParsingExpression:
    return Sequence(
        lexical_ordered_choice,
    )
def lexical_ordered_choice() -> ParsingExpression:
    # Alternatives separated by "|"; the lookahead stops before "="/"|=".
    return OneOrMore(
        (Not(LEXICAL_ASSIGNMENT_OPERATOR), lexical_sequence),
        sep=OR_OPERATOR,
    )
def lexical_sequence() -> ParsingExpression:
    return OneOrMore(lexical_quantity)
def lexical_quantity() -> ParsingExpression:
    # "[x]" = optional, "{x}" = zero-or-more (EBNF-style repetition).
    return OrderedChoice(
        [
            Sequence("[", lexical_expression, "]"),
            Sequence("{", lexical_expression, "}"),
            lexical_term,
        ]
    )
def lexical_term() -> ParsingExpression:
    # An optional "!" prefix negates the following primary.
    return Sequence(Optional(NOT_OPERATOR), lexical_primary)
def lexical_primary() -> ParsingExpression:
    return OrderedChoice(
        [
            Sequence("(", lexical_expression, ")"),
            KEYWORD,
            TEXT,
            REGEX,
            PART_OF_WORD_REFERENCE,
        ]
    )
def syntax_expression() -> ParsingExpression:
    return Sequence(
        syntax_ordered_choice,
    )
def syntax_ordered_choice() -> ParsingExpression:
    return OneOrMore(syntax_sequence, sep=OR_OPERATOR)
def syntax_sequence() -> ParsingExpression:
    return OneOrMore(syntax_quantity)
def syntax_quantity() -> ParsingExpression:
    # Same "[x]" / "{x}" quantifiers as the lexical level.
    return OrderedChoice(
        [
            Sequence("[", syntax_expression, "]"),
            Sequence("{", syntax_expression, "}"),
            syntax_primary,
        ]
    )
def syntax_primary() -> ParsingExpression:
    return OrderedChoice(
        [
            Sequence("(", syntax_expression, ")"),
            KEYWORD,
            TEXT,
            WORD_REFERENCE,
            SYNTAX_REFERENCE,
        ]
    )
# ## Comment rule
def comment() -> ParsingExpression:
    # Grammar-file comments: "//" line comments and "/* ... */" blocks.
    return OrderedChoice(
        [
            RegExMatch(r"//.*"),
            RegExMatch(r"/\*([^*]|\*[^/])*\*/"),
        ]
    )
class GrammarVisitor(PTNodeVisitor):
    """Parse-tree visitor that converts a textual PEG grammar into
    arpeggio parsing-expression objects.

    Rules are collected into ``__rules`` while visiting; cross
    references are resolved in :meth:`visit_grammar`.  Lexical
    sequences are wrapped in ``Combine`` (see
    :meth:`visit_lexical_sequence`).
    """
    __root_rule_name: str
    __comment_rule_name: str
    __ignore_case: bool
    # Rule name -> (possibly still unresolved) parsing expression.
    __rules: MutableMapping[str, ParsingExpressionLike]
    __syntax_rule_names: Set[str]
    # $EOF is always available and is never defined by the grammar text.
    __DEFAULT_RULES = {
        EOF_RULE_NAME: EndOfFile(),
    }
    def __init__(
        self,
        root_rule_name: str,
        comment_rule_name: str,
        ignore_case: bool,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)
        # Grammar text uses "-" in rule names; Python-side names use "_".
        self.__root_rule_name = self.hyphen2underscore(root_rule_name)
        self.__comment_rule_name = self.hyphen2underscore(comment_rule_name)
        self.__ignore_case = ignore_case
        self.__rules = dict(self.__DEFAULT_RULES)
        self.__syntax_rule_names = set()
    def visit_KEYWORD(self, node: Any, children: Any) -> Any:
        # Strip the backquotes and forbid a following identifier character
        # so a keyword does not match the prefix of a longer identifier.
        match = RegExMatch(
            rf"{node.value[1:-1]}(?![0-9_a-zA-Z])",
            ignore_case=self.__ignore_case,
        )
        match.compile()
        return match
    def visit_TEXT(self, node: Any, children: Any) -> Any:
        # Drop the surrounding double quotes.
        return StrMatch(node.value[1:-1], ignore_case=self.__ignore_case)
    def visit_REGEX(self, node: Any, children: Any) -> Any:
        # Drop the leading r' and trailing ' of the regex literal.
        match = RegExMatch(node.value[2:-1], ignore_case=self.__ignore_case)
        match.compile()
        return match
    def __visit_IDENTIFIER(self, node: Any, children: Any) -> Any:
        return self.hyphen2underscore(node.value)
    visit_LEXICAL_RULE_IDENTIFIER = __visit_IDENTIFIER
    visit_SYNTAX_RULE_IDENTIFIER = __visit_IDENTIFIER
    def __visit_REFERENCE(self, node: Any, children: Any) -> Any:
        # References become CrossRefs, resolved later in visit_grammar.
        (identifier,) = children
        assert "-" not in identifier
        return CrossRef(identifier)
    visit_PART_OF_WORD_REFERENCE = __visit_REFERENCE
    def visit_WORD_REFERENCE(self, node: Any, children: Any) -> Any:
        # Word references from syntax rules may be preceded by whitespace,
        # hence the explicit skipws before the cross reference.
        crossref = self.__visit_REFERENCE(node, children)
        return Sequence(nodes=[self.skipws, crossref])
    visit_SYNTAX_REFERENCE = __visit_REFERENCE
    def visit_lexical_term(self, node: Any, children: Any) -> Any:
        # children is either [primary] or ["!", primary].
        if len(children) == 2:
            operator, child = children
        else:
            operator, (child,) = None, children
        if operator is None:
            return child
        elif operator == "!":
            return Not(nodes=[child])
        raise NotImplementedError()
    def __visit_quantity(self, node: Any, children: Any) -> Any:
        # Map "[x]" to Optional and "{x}" to ZeroOrMore; bare primaries
        # pass through unchanged.
        (child,) = children
        try:
            L, _, R = node
        except ValueError:
            L, R = None, None
        if (L, R) == (None, None):
            return child
        elif (L, R) == ("[", "]"):
            return Optional(nodes=[child])
        elif (L, R) == ("{", "}"):
            return ZeroOrMore(nodes=[child])
        raise NotImplementedError()
    visit_lexical_quantity = __visit_quantity
    visit_syntax_quantity = __visit_quantity
    def __visit_sequence(self, node: Any, children: Any) -> Any:
        head, *tail = children
        if not tail:
            return head
        else:
            return Sequence(nodes=[head, *tail])
    def visit_lexical_sequence(self, node: Any, children: Any) -> Any:
        # Lexical sequences form a single token, hence the Combine wrapper.
        head, *tail = children
        if not tail:
            return head
        else:
            return Combine(nodes=[Sequence(nodes=[head, *tail])])
    visit_syntax_sequence = __visit_sequence
    def __visit_ordered_choice(self, node: Any, children: Any) -> Any:
        # Filter out the "|" separators; keep only real expressions/refs.
        head, *tail = (
            child
            for child in children
            if isinstance(child, (ParsingExpression, CrossRef))
        )
        if not tail:
            return head
        else:
            return OrderedChoice(nodes=[head, *tail])
    visit_lexical_ordered_choice = __visit_ordered_choice
    visit_syntax_ordered_choice = __visit_ordered_choice
    def __visit_rule(self, node: Any, children: Any) -> Any:
        # "="/":" (re)define a rule; "|="/"|:" extend an existing rule by
        # appending the new body as a lower-priority alternative.
        rule_name, operator, new_rule = children
        assert "-" not in rule_name
        if operator in {"=", ":"}:
            rule = new_rule
        elif operator in {"|=", "|:"}:
            try:
                previous_rule = self.__rules[rule_name]
            except KeyError as keyError:
                raise SemanticError(
                    f'Rule "{rule_name}" does not exists.'
                ) from keyError
            rule = OrderedChoice(nodes=[previous_rule, new_rule])
        else:
            raise NotImplementedError()
        # Keep a map of parser rules for cross reference
        # resolving.
        rule.rule_name = rule_name
        rule.root = True
        self.__rules[rule_name] = rule
        return rule
    visit_lexical_rule = __visit_rule
    def visit_syntax_rule(self, node: Any, children: Any) -> Any:
        # Track syntax-rule names: only these may be the root rule
        # (validated in visit_grammar).
        rule_name, *_ = children
        self.__syntax_rule_names.add(rule_name)
        return self.__visit_rule(node, children)
    def visit_grammar(
        self, node: Any, children: Any
    ) -> Tuple[ParsingExpression, NoneOr[ParsingExpression]]:
        """Resolve all cross references; return (root_rule, comment_rule)."""
        resolved: Set[ParsingExpressionLike] = set()
        def _resolve(
            node: ParsingExpressionLike,
        ) -> ParsingExpression:
            """
            Resolves CrossRefs from the parser model.
            """
            if node in resolved:
                # Why? : The rule already included in `resolved`
                # has been determined to be ParsingExpression.
                assert isinstance(node, ParsingExpression)
                return node
            resolved.add(node)
            def get_rule_by_name(rule_name: str) -> ParsingExpressionLike:
                try:
                    return self.__rules[rule_name]
                except KeyError:
                    raise SemanticError(
                        'Rule "{}" does not exists.'.format(rule_name)
                    )
            def resolve_rule_by_name(
                rule_name: str,
            ) -> ParsingExpression:
                if self.debug:
                    self.dprint("Resolving crossref {}".format(rule_name))
                resolved_rule = get_rule_by_name(rule_name)
                # Follow chains of references until a real expression is hit.
                while isinstance(resolved_rule, CrossRef):
                    target_rule = resolved_rule.target_rule_name
                    resolved_rule = get_rule_by_name(target_rule)
                # If resolved rule hasn't got the same name it
                # should be cloned and preserved in the peg_rules cache
                if resolved_rule.rule_name != rule_name:
                    resolved_rule = copy(resolved_rule)
                    resolved_rule.rule_name = rule_name
                    self.__rules[rule_name] = resolved_rule
                    if self.debug:
                        self.dprint(
                            "Resolving: cloned to {} = > {}".format(
                                resolved_rule.rule_name, resolved_rule.name
                            )
                        )
                return resolved_rule
            if isinstance(node, CrossRef):
                # The root rule is a cross-ref
                resolved_rule = resolve_rule_by_name(node.target_rule_name)
                return _resolve(resolved_rule)
            else:
                # Resolve children nodes
                for i, n in enumerate(node.nodes):
                    node.nodes[i] = _resolve(n)
                resolved.add(node)
                return node
        # Find root and comment rules
        assert self.__syntax_rule_names <= self.__rules.keys()
        if self.__root_rule_name not in self.__syntax_rule_names:
            raise SemanticError(
                f'Root syntax rule "{self.__root_rule_name}" does not exists.'
            )
        else:
            root_rule = _resolve(self.__rules[self.__root_rule_name])
        comment_rule = None
        for rule in children:
            if rule.rule_name == self.__comment_rule_name:
                comment_rule = _resolve(rule)
        return root_rule, comment_rule
    # Utilities
    @property
    def skipws(self) -> RegExMatch:
        # A fresh, compiled whitespace matcher (prepended to word references).
        skipws = RegExMatch(r"\s*")
        skipws.compile()
        return skipws
    @staticmethod
    def hyphen2underscore(hyphen: str) -> str:
        # Map grammar-text rule names ("a-b") to Python-safe names ("a_b").
        return hyphen.replace("-", "_")
class Parser(ArpeggioParser):
    """arpeggio parser built at runtime from a textual PEG grammar."""
    def __init__(
        self,
        language_def: str,
        root_rule_name: str,
        comment_rule_name: str = COMMENT_RULE_NAME,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Constructs parser from textual PEG definition.
        Args:
            language_def (str): A string describing language grammar using
                PEG notation.
            root_rule_name(str): The name of the root rule.
            comment_rule_name(str): The name of the rule for comments.
        """
        # Modelica is case sensitive, so warn (but honor) ignore_case.
        ignore_case = kwargs.get("ignore_case", None)
        if ignore_case:
            warnings.warn(
                (
                    f"ignore_case is {ignore_case!r}\n"
                    "Modelica grammar should be Case sensitive."
                ),
                ParserWarning,
            )
        super().__init__(*args, **kwargs)
        self.root_rule_name = root_rule_name
        self.comment_rule_name = comment_rule_name
        # PEG Abstract Syntax Graph
        self.parser_model, self.comments_model = self._from_peg(language_def)
        # Comments should be optional and there can be more of them
        if self.comments_model:
            self.comments_model.root = True
            self.comments_model.rule_name = comment_rule_name
        # In debug mode export parser model to dot for
        # visualization
        if self.debug:
            from arpeggio.export import PMDOTExporter
            root_rule = self.parser_model.rule_name
            PMDOTExporter().exportFile(
                self.parser_model, "{}_peg_parser_model.dot".format(root_rule)
            )
    def _parse(self) -> ParseTreeNode:
        """Run the compiled parser model over the current input."""
        return self.parser_model.parse(self)
    def _from_peg(
        self, language_def: str
    ) -> Tuple[ParsingExpression, NoneOr[ParsingExpression]]:
        """Parse *language_def* and build (root_rule, comment_rule)."""
        parser = ArpeggioPythonParser(
            grammar, comment, reduce_tree=False, debug=self.debug
        )
        parse_tree = parser.parse(language_def)
        return visit_parse_tree(  # type: ignore
            parse_tree,
            GrammarVisitor(
                self.root_rule_name,
                self.comment_rule_name,
                self.ignore_case,
                debug=self.debug,
            ),
        )
| 28.650284
| 78
| 0.590591
|
4a02595cf4f0f270692b3506aced43cba0bac0de
| 90
|
py
|
Python
|
psy/settings/cat.py
|
cegfdb/IRT
|
20fcde3b385bce1644fecab7cdc8bda5beacda03
|
[
"MIT"
] | 169
|
2017-08-29T01:35:49.000Z
|
2022-03-01T05:03:02.000Z
|
psy/settings/cat.py
|
a854367688/pypsy
|
f055fe1f4901b654d99d9a776152e8192e014f5f
|
[
"MIT"
] | 8
|
2017-12-05T05:20:35.000Z
|
2021-10-03T05:40:45.000Z
|
psy/settings/cat.py
|
a854367688/pypsy
|
f055fe1f4901b654d99d9a776152e8192e014f5f
|
[
"MIT"
] | 67
|
2017-09-01T04:18:54.000Z
|
2022-02-24T08:21:18.000Z
|
# NOTE(review): six fixed 0/1 triplets; presumably the orderings consumed by
# the CAT (computerized adaptive testing) logic — confirm against callers.
TRIPLETS_PERMUTATION = [[1, 1, 1], [1, 1, 0], [0, 1, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0]]
| 45
| 89
| 0.422222
|
4a025a03f495be083d727741f9da1468d81aa7ac
| 14,104
|
py
|
Python
|
src/VarDACAE/train/trainer.py
|
scheng1992/Data_Assimilation
|
b4d43895229205ee2cd16b15ee20beccb33b71d6
|
[
"MIT"
] | 1
|
2021-11-25T12:46:48.000Z
|
2021-11-25T12:46:48.000Z
|
src/VarDACAE/train/trainer.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | null | null | null |
src/VarDACAE/train/trainer.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | 2
|
2021-03-02T13:29:34.000Z
|
2022-03-12T11:01:08.000Z
|
"""Run training for AE"""
import torch
import torch.optim as optim
import numpy as np
import pandas as pd
import pickle
from VarDACAE import ML_utils
from VarDACAE.AEs import Jacobian
from VarDACAE.utils.expdir import init_expdir
from VarDACAE.VarDA.batch_DA import BatchDA
import time
import os
BATCH_MULT = 1
BATCH_UNIT = 16
BATCH = BATCH_MULT * BATCH_UNIT #64
LARGE = 1e30
class TrainAE():
def __init__(self, AE_settings, expdir, batch_sz=BATCH,
model=None, start_epoch=None):
"""Initilaizes the AE training class.
::AE_settings - a settings.config.Config class with the DA settings
::expdir - a directory of form `experiments/<possible_path>` to keep logs
::calc_DA_MAE - boolean. If True, training will evaluate DA Mean Absolute Error
during the training cycle. Note: this is *MUCH* slower
"""
self.settings = AE_settings
err_msg = """AE_settings must be an AE configuration class"""
assert self.settings.COMPRESSION_METHOD == "AE", err_msg
if model is not None: #for retraining
assert start_epoch is not None, "If you are RE-training model you must pass start_epoch"
assert start_epoch >= 0
self.start_epoch = start_epoch
self.model = model
print("Loaded model, ", end="")
else:
self.start_epoch = 0
self.model = ML_utils.load_model_from_settings(AE_settings)
print("Initialized model, ", end="")
print("Number of parameters:", sum(p.numel() for p in self.model.parameters()))
self.batch_sz = batch_sz
self.settings.batch_sz = batch_sz
self.expdir = init_expdir(expdir)
self.settings_fp = self.expdir + "settings.txt"
if self.settings.SAVE == True:
with open(self.settings_fp, "wb") as f:
pickle.dump(self.settings, f)
ML_utils.set_seeds() #set seeds before init model
self.device = ML_utils.get_device()
self.columns = ["epoch","reconstruction_err","DA_MAE", "DA_ratio_improve_MAE", "time_DA(s)", "time_epoch(s)"]
def train(self, num_epochs = 100, learning_rate = 0.002, print_every=5,
        test_every=5, num_epochs_cv=0, num_workers=4, small_debug=False,
        calc_DA_MAE=False, loss="L2"):
    """Run the full autoencoder training procedure.

    Optionally cross-validates the learning rate first (``num_epochs_cv`` > 0),
    then trains for the remaining epochs, and writes losses + settings to
    ``self.expdir`` when ``self.settings.SAVE`` is True.

    Args:
        num_epochs (int): total number of epochs to train for.
        learning_rate (float): initial Adam learning rate (may be replaced
            by the cross-validation result).
        print_every (int): epoch interval for console logging.
        test_every (int): epoch interval for test evaluation / checkpointing.
        num_epochs_cv (int): epochs spent on learning-rate CV (0 disables it).
        num_workers (int): DataLoader worker processes.
        small_debug (bool): use a reduced dataset for quick debugging.
        calc_DA_MAE (bool): evaluate data-assimilation MAE during training.
        loss (str): reconstruction loss, either "L1" or "L2".

    Returns:
        The trained model (``self.model``).

    Raises:
        ValueError: if ``loss`` is neither "L1" nor "L2".
    """
    if self.settings.SAVE:
        # csv file names encode the epoch range covered by this run
        self.test_fp = self.expdir + "{}-{}_test.csv".format(self.start_epoch, self.start_epoch + num_epochs)
        self.train_fp = self.expdir + "{}-{}_train.csv".format(self.start_epoch, self.start_epoch + num_epochs)

    self.calc_DA_MAE = calc_DA_MAE
    self.learning_rate = learning_rate
    self.num_epoch = num_epochs #TODO: remove this doubling up
    self.print_every = print_every
    self.test_every = test_every
    self.small_debug = small_debug

    settings = self.settings
    if settings.SAVE == True:
        self.model_dir = self.expdir
    else:
        self.model_dir = None

    self.loader = settings.get_loader()
    self.train_loader, self.test_loader = self.loader.get_train_test_loaders(settings,
                                        self.batch_sz, num_workers=num_workers,
                                        small_debug=small_debug)

    # reduction="sum" is assumed downstream (losses are normalized by
    # dataset length in the train/test loops)
    if loss.upper() == "L2":
        self.loss_fn = torch.nn.MSELoss(reduction="sum")
    elif loss.upper() == "L1":
        self.loss_fn = torch.nn.L1Loss(reduction='sum')
    else:
        raise ValueError("`loss` must be either `L1` or `L2`")

    lr_res = self.__maybe_cross_val_lr(test_every=test_every, num_epochs_cv=num_epochs_cv)

    if not isinstance(lr_res, float):
        #unpack results from __maybe_cross_val_lr()
        self.learning_rate, train_losses, test_losses = lr_res
    else:
        self.learning_rate = lr_res
        train_losses, test_losses = [], []
        #if only lr was returned, no model/optimizers were selected. Init:
        self.optimizer = optim.Adam(self.model.parameters(), self.learning_rate)

    settings.learning_rate = self.learning_rate #for logging

    train_losses_, test_losses_ = self.training_loop_AE(self.device,
                            print_every=print_every, test_every=test_every,
                            model_dir = self.model_dir)
    if train_losses_:
        train_losses.extend(train_losses_)
    if test_losses_:
        test_losses.extend(test_losses_)

    #Save results and settings file (so that it can be exactly reproduced)
    if settings.SAVE == True:
        self.to_csv(train_losses, self.train_fp)
        self.to_csv(test_losses, self.test_fp)
        with open(self.settings_fp, "wb") as f:
            pickle.dump(settings, f)

    self.start_epoch = self.end #in case we retrain again with the same TrainAE class
    return self.model
def training_loop_AE(self, device=None, print_every=2,
                    test_every=5, save_every=5, model_dir=None):
    """Runs a torch AE model training loop.
    NOTE: Ensure that the loss_fn is in mode "sum"

    Iterates epochs in ``[start_epoch + num_epochs_cv, start_epoch + num_epoch)``,
    delegating each epoch to ``train_one_epoch`` and checkpointing afterwards.

    Returns:
        (train_losses, test_losses): lists of per-epoch result tuples whose
        layout matches ``self.columns``.
    """
    model = self.model
    self.model_dir = model_dir

    if device == None:
        device = ML_utils.get_device()
    self.device = device

    ML_utils.set_seeds()
    train_losses = []
    test_losses = []

    # any CV epochs were already consumed by __maybe_cross_val_lr, so skip them
    self.start = self.num_epochs_cv + self.start_epoch
    self.end = self.start_epoch + self.num_epoch
    epoch = self.end - 1 #for case where no training occurs

    for epoch in range(self.start, self.end):
        self.epoch = epoch
        train_loss, test_loss = self.train_one_epoch(epoch, print_every, test_every)
        train_losses.append(train_loss)
        if test_loss:
            test_losses.append(test_loss)

    # final checkpoint after the loop (train_one_epoch already saves on
    # test_every epochs, hence the "hasn't just been saved" guard)
    if epoch % save_every != 0 and self.model_dir != None:
        #Save model (if new model hasn't just been saved)
        model_fp_new = "{}{}.pth".format(self.model_dir, epoch)
        torch.save(model.state_dict(), model_fp_new)

    return train_losses, test_losses
def train_one_epoch(self, epoch, print_every, test_every):
    """Run one training epoch followed by a (possibly skipped) test pass.

    Returns:
        (train_result, test_result): per-epoch result tuples; test_result
        is None on epochs where the test loop does not run.
    """
    train_result = self.train_loop(epoch, print_every, test_every)
    test_result = self.test_loop(epoch, print_every, test_every)

    # Periodically checkpoint the weights, but only when a save dir is set.
    should_checkpoint = (epoch % test_every == 0) and (self.model_dir is not None)
    if should_checkpoint:
        checkpoint_fp = "{}{}.pth".format(self.model_dir, epoch)
        torch.save(self.model.state_dict(), checkpoint_fp)

    return train_result, test_result
def train_loop(self, epoch, print_every, test_every):
    """Run a single training epoch over the whole train loader.

    Returns:
        A tuple matching ``self.columns``: (epoch, mean reconstruction loss,
        DA MAE, DA ratio improvement, DA time, epoch wall time).
    """
    train_loss = 0
    mean_diff = 0
    self.model.to(self.device)
    t_start = time.time()
    for batch_idx, data in enumerate(self.train_loader):
        self.model.train()
        x, = data
        x = x.to(self.device)
        self.optimizer.zero_grad()
        y = self.model(x)
        loss = self.loss_fn(y, x)
        loss.backward()
        train_loss += loss.item()
        # batch-weighted running difference between input and output means
        mean_diff += torch.abs((x.mean() - y.mean())) * x.shape[0]
        self.optimizer.step()
    self.model.eval()
    # DA evaluation is expensive; returns a "NO_CALC" sentinel off-schedule
    train_DA_MAE, train_DA_ratio, train_DA_time = self.maybe_eval_DA_MAE("train")
    t_end = time.time()
    train_loss_res = (epoch, train_loss / len(self.train_loader.dataset),
                    train_DA_MAE, train_DA_ratio, train_DA_time, t_end - t_start)

    if epoch % print_every == 0 or epoch in [0, self.end - 1]:
        out_str = 'epoch [{}/{}], TRAIN: -loss:{:.2f}, av_diff: {:.2f}'
        out_str = out_str.format(epoch + 1, self.end,
                        train_loss / len(self.train_loader.dataset),
                        mean_diff / len(self.train_loader.dataset) )
        if self.calc_DA_MAE and (epoch % test_every == 0):
            out_str += ", -DA_%:{:.2f}%".format(train_DA_ratio * 100)
        out_str += ", time taken (m): {:.2f}m".format( (t_end - t_start) / 60.)
        print(out_str)

    return train_loss_res
def test_loop(self, epoch, print_every, test_every):
    """Evaluate the model on the test loader.

    Only runs on every ``test_every``-th epoch (and the final epoch);
    otherwise returns None implicitly (callers check for truthiness).

    Returns:
        A tuple matching ``self.columns`` on evaluation epochs, else None.
    """
    self.model.eval()
    # NOTE(review): there is no torch.no_grad() here, so autograd state is
    # still tracked during evaluation — confirm this is intended.
    if epoch % test_every == 0 or epoch == self.end - 1:
        t_start = time.time()
        test_loss = 0
        for batch_idx, data in enumerate(self.test_loader):
            x_test, = data
            x_test = x_test.to(self.device)
            y_test = self.model(x_test)
            loss = self.loss_fn(y_test, x_test)
            test_loss += loss.item()
        test_DA_MAE, test_DA_ratio, test_DA_time = self.maybe_eval_DA_MAE("test")
        t_end = time.time()
        if epoch % print_every == 0 or epoch == self.end - 1:
            out_str = "epoch [{}/{}], TEST: -loss:{:.4f}".format(epoch + 1, self.end, test_loss / len(self.test_loader.dataset))
            if self.calc_DA_MAE and (epoch % test_every == 0):
                out_str += ", -DA_%:{:.2f}%".format(test_DA_ratio * 100)
            out_str += ", time taken(m): {:.2f}m".format( (t_end - t_start) / 60.)
            print(out_str)
        test_loss_res = (epoch, test_loss/len(self.test_loader.dataset),
                    test_DA_MAE, test_DA_ratio, test_DA_time, t_end - t_start)
        return test_loss_res
def __maybe_cross_val_lr(self, test_every, num_epochs_cv = 8):
    """Optionally cross-validate the learning rate over a few epochs.

    Trains a fresh model for ``num_epochs_cv`` epochs at each candidate
    learning rate and keeps the one with the lowest final reconstruction
    error. The winning lr is scaled by 0.8 for the main training run.

    Args:
        test_every (int): epoch interval for test evaluation.
        num_epochs_cv (int): epochs to spend per candidate lr (0 disables CV).

    Returns:
        Either the learning rate alone (CV disabled), or a tuple
        (learning_rate, train_losses, test_losses) for the best candidate.
    """
    if not num_epochs_cv:
        self.num_epochs_cv = 0
        return self.learning_rate
    elif self.num_epoch < num_epochs_cv:
        self.num_epochs_cv = self.num_epoch
    else:
        self.num_epochs_cv = num_epochs_cv

    # BUGFIX: train_loop/test_loop read self.end, which was previously only
    # set later in training_loop_AE — set it here so CV epochs can log.
    self.end = self.start_epoch + self.num_epoch

    mult = 1
    if self.settings.BATCH_NORM: #i.e. generally larger learning_rate with BN
        mult = 5
    mult *= BATCH_MULT #linear multiply by size of batch: https://arxiv.org/abs/1706.02677
    lrs_base = [0.0001, 0.0003, 0.001]
    lrs = [mult * x for x in lrs_base]

    res = []
    optimizers = []

    for idx, lr in enumerate(lrs):
        ML_utils.set_seeds() #set seeds before init model
        self.model = ML_utils.load_model_from_settings(self.settings)
        self.optimizer = optim.Adam(self.model.parameters(), lr)
        test_losses = []
        train_losses = []
        print("learning rate:", lr)
        for epoch in range(self.start_epoch, self.num_epochs_cv + self.start_epoch):
            self.epoch = epoch
            # BUGFIX: train_one_epoch takes (epoch, print_every, test_every);
            # the previous extra num_epochs_cv argument raised a TypeError.
            train, test = self.train_one_epoch(epoch, self.print_every, test_every)
            if test:
                test_losses.append(test)
            train_losses.append(train)
        df = pd.DataFrame(train_losses, columns = self.columns)
        train_final = df.tail(1).reconstruction_err
        res.append(train_final.values[0])
        optimizers.append(self.optimizer)

        #save model if best so far
        if res[-1] == min(res):
            best_test = test_losses
            best_train = train_losses
            best_idx = idx
            # only checkpoint when a save dir exists (consistent with the
            # `self.model_dir != None` guards used elsewhere)
            if self.model_dir is not None:
                model_fp_new = "{}{}-{}.pth".format(self.model_dir, epoch, lr)
                torch.save(self.model.state_dict(), model_fp_new)
            best_model = self.model

    self.learning_rate = lrs[best_idx] * 0.8
    self.optimizer = optimizers[best_idx]
    self.model = best_model
    test_loss = best_test
    train_loss = best_train
    return self.learning_rate, train_loss, test_loss
def maybe_eval_DA_MAE(self, test_valid):
    """As the DA procedure is so expensive, only eval on a subset of states.

    Runs batch data assimilation on (a sample of) the train or test set and
    returns its accuracy statistics. Only evaluates when ``calc_DA_MAE`` is
    set and the current epoch is on the test schedule.

    Args:
        test_valid (str): "train" (shuffled 64-state sample) or "test".

    Returns:
        (da_mae, ratio_improve_mae, time_taken) on evaluation epochs, or the
        sentinel ("NO_CALC", "NO_CALC", "NO_CALC") otherwise.

    Raises:
        ValueError: if ``test_valid`` is neither "train" nor "test".
    """
    if self.calc_DA_MAE and (self.epoch % self.test_every == 0 or self.epoch == self.end - 1):
        if test_valid == "train":
            # random sample of the training states (copy so the loader's
            # array is not shuffled in place)
            u_c = self.loader.train_X.copy()
            np.random.shuffle(u_c) #random shuffle
            u_c = u_c[:64]
        elif test_valid == "test":
            u_c = self.loader.test_X
        else:
            raise ValueError("Can only evaluate DA_MAE on 'test' or 'train'")

        if self.settings.THREE_DIM:
            u_c = u_c.squeeze(1)
        if self.small_debug:
            u_c = u_c[:8]
        if self.print_every >= 10:
            DA_print = 200
        else:
            DA_print = self.print_every * 10

        csv_fp = "{}{}_{}.csv".format(self.expdir, self.epoch, test_valid)
        batcher = BatchDA(self.settings, u_c, csv_fp=csv_fp, AEModel=self.model,
                        reconstruction=True)
        batch_res = batcher.run(DA_print, True)
        results = batcher.get_tots(batch_res)

        da_mae = results["da_MAE_mean"]
        # Use the average per-state ratio improvement reported by BatchDA,
        # not (ref_mae - da_mae) / ref_mae (the ratio of the averages) —
        # the previously dead assignment computing the latter was removed.
        ratio_improve_mae = results["percent_improvement"] / 100
        # renamed from `time`, which shadowed the stdlib time module
        time_taken = results["time"]
        return da_mae, ratio_improve_mae, time_taken
    else:
        return "NO_CALC", "NO_CALC", "NO_CALC"
def slow_jac_wrapper(self, x):
    """Adapter exposing the accumulated slow-model Jacobian of `x`."""
    device = self.DA_data.get("device")
    return Jacobian.accumulated_slow_model(x, self.model, device)
def __da_data_wipe_some_values(self):
    """Reset per-run DA fields so successive maybe_eval_DA_MAE() calls do
    not leak state into one another."""
    for key in ("u_c", "w_0", "d"):
        self.DA_data[key] = None
def to_csv(self, np_array, fp):
    """Write per-epoch loss records to `fp` as csv (columns from self.columns)."""
    frame = pd.DataFrame(np_array, columns=self.columns)
    frame.to_csv(fp)
if __name__ == "__main__":
settings = settings.config.ToyAEConfig
main(settings)
| 35.888041
| 132
| 0.58962
|
4a025b8da8c46dcdf6517286541703165331faaa
| 12,114
|
py
|
Python
|
final_mulit_thread.py
|
h1542462994/4wd
|
b936e96b0b91fab261120394eedd61aa5e074820
|
[
"MIT"
] | 1
|
2020-07-08T05:08:21.000Z
|
2020-07-08T05:08:21.000Z
|
final_mulit_thread.py
|
h1542462994/4wd
|
b936e96b0b91fab261120394eedd61aa5e074820
|
[
"MIT"
] | null | null | null |
final_mulit_thread.py
|
h1542462994/4wd
|
b936e96b0b91fab261120394eedd61aa5e074820
|
[
"MIT"
] | null | null | null |
# -*- coding:UTF-8 -*-
import RPi.GPIO as GPIO
import time
from threading import Thread
class Const:
    """Hardware pin assignments and tuning constants for the 4WD car."""

    def __init__(self):
        # Ultrasonic sensor pins
        self.EchoPin = 0
        self.TrigPin = 1
        # Motor driver pins
        self.IN1 = 20
        self.IN2 = 21
        self.IN3 = 19
        self.IN4 = 26
        self.ENA = 16
        self.ENB = 13
        # RGB LED pins
        self.LED_R = 22
        self.LED_G = 27
        self.LED_B = 24
        # Push-button pin
        self.KEY = 8
        # Line-tracking IR sensor pins: left1=3, left2=5, right1=4, right2=18
        self.TrackSensorLeftPin1 = 3
        self.TrackSensorLeftPin2 = 5
        self.TrackSensorRightPin1 = 4
        self.TrackSensorRightPin2 = 18
        # Speed levels (PWM duty cycle values)
        self.SPEED_FAST = 30
        self.SPEED_MIDDLE = 25
        self.SPEED_SLOW = 20
        self.SPEED_VERY_SLOW = 20
        # Per-loop sleep intervals (seconds)
        self.RUN_SLEEP_CATCH = 0.005
        self.LIGHT_SLEEP = 0.01
        self.TRACK_SENSOR_SLEEP = 0.01
        self.SONIC_SLEEP = 0.01
        # Minimum seconds between sonic-triggered stops
        self.SONIC_SLEEP_SPACE = 10
        # Obstacle distance threshold (cm) and stop duration (seconds)
        self.SONIC_DISTANCE = 10
        self.DURING = 2
class ShareState:
    """Mutable state shared between all of the car's worker threads.

    A single module-level instance (`shareState`) is read and written by all
    threads; there is no locking, so consistency relies on the GIL and on
    each field having effectively one writer.
    """

    def __init__(self):
        self.STOP = False                # global shutdown flag for every thread
        self.car_state = 1               # 0: on track, 1: lost, 2: just left the track
        self.track_sensor = [1, 1, 1, 1]
        self.track_sensor_old = [1, 1, 1, 1]
        self.distance = 100              # latest ultrasonic reading (cm)
        self.distance_old = 100
        self.EVENT_TRACK_SENSOR = None   # optional callback on sensor change
        self.EVENT_SONIC = None          # optional callback on new distance
        self.THREAD_LIGHT = LightThread()
        self.TRACK_SENSOR_THREAD = TrackSensorThread()
        self.SONIC_THREAD = SonicThread()
        self.CAR_THREAD = CarThread()
        self.time_old = 0                # timestamp of the last sonic trigger
        self.ENABLE_SONIC = 1            # whether the sonic stop may fire now
        self.R = 0                       # requested RGB LED state (0/1 each)
        self.G = 0
        self.B = 0

    def default_event_track_sensor(self):
        # Default logging callback for a sensor change.
        # NOTE(review): reads the module-level `shareState` for the sensor
        # values but `self` for car_state — equivalent only because a single
        # instance exists; confirm.
        print("track_sensor changed:" + ",".join(map(str, shareState.track_sensor)))
        print("carstate" + str(self.car_state))

    def default_event_sonic(self):
        # Default logging callback for a new ultrasonic reading.
        print("distance: " + str(shareState.distance))

    def set_time(self, time):
        # Rate-limit obstacle reactions: re-enable the sonic stop only once
        # SONIC_SLEEP_SPACE seconds have passed since the last trigger.
        # (The parameter name `time` shadows the stdlib module locally.)
        if time - self.time_old > const.SONIC_SLEEP_SPACE:
            self.ENABLE_SONIC = 1
            self.time_old = time
        else:
            self.ENABLE_SONIC = 0

    def is_in_track(self, track_sensor):
        # The two middle IR sensors (indices 1 and 2) define "on the line":
        # on-line when either of them reads 0.
        if track_sensor[1] == 0 or track_sensor[2] == 0:
            return True
        else:
            return False

    def set_color(self, r, g, b):
        # Request an RGB LED colour; LightThread applies it asynchronously.
        self.R = r
        self.G = g
        self.B = b

    def set_track_sensor(self, track_sensor):
        # Store a new sensor reading (if changed), derive car_state and fire
        # the change callback.
        if track_sensor != self.track_sensor:
            self.track_sensor_old = self.track_sensor
            self.track_sensor = track_sensor
            if self.is_in_track(self.track_sensor):
                self.car_state = 0
            elif self.is_in_track(self.track_sensor_old):
                self.car_state = 2
            else:
                self.car_state = 1
            if self.EVENT_TRACK_SENSOR is not None:
                self.EVENT_TRACK_SENSOR()
            else:
                self.default_event_track_sensor()

    def set_distance(self, distance):
        # Record a new ultrasonic distance and fire the callback.
        self.distance_old = self.distance
        self.distance = distance
        if self.EVENT_SONIC is not None:
            self.EVENT_SONIC()
        else:
            self.default_event_sonic()
class LightThread(Thread):
    """Drives the RGB LED from shareState.R/G/B until shareState.STOP."""

    def __init__(self):
        # RGB LED pins set to output mode
        super(LightThread, self).__init__()
        # Use BCM pin numbering
        GPIO.setmode(GPIO.BCM)
        # Suppress GPIO warnings
        GPIO.setwarnings(False)
        GPIO.setup(const.LED_R, GPIO.OUT)
        GPIO.setup(const.LED_G, GPIO.OUT)
        GPIO.setup(const.LED_B, GPIO.OUT)

    def run(self):
        while not shareState.STOP:
            # NOTE(review): __init__ is re-run each iteration — presumably to
            # re-apply GPIO setup after a cleanup; re-initialising a started
            # Thread is normally unsupported, confirm this is intentional.
            self.__init__()
            try:
                # Mirror the requested colour onto the three LED channels.
                if shareState.R == 0:
                    GPIO.output(const.LED_R, GPIO.LOW)
                else:
                    GPIO.output(const.LED_R, GPIO.HIGH)
                if shareState.G == 0:
                    GPIO.output(const.LED_G, GPIO.LOW)
                else:
                    GPIO.output(const.LED_G, GPIO.HIGH)
                if shareState.B == 0:
                    GPIO.output(const.LED_B, GPIO.LOW)
                else:
                    GPIO.output(const.LED_B, GPIO.HIGH)
                time.sleep(const.LIGHT_SLEEP)
            except RuntimeError:
                # GPIO calls can raise RuntimeError (e.g. after cleanup);
                # keep the loop alive and retry.
                pass
class TrackSensorThread(Thread):
    """Polls the four line-tracking IR sensors into shareState."""

    def __init__(self):
        super(TrackSensorThread, self).__init__()
        GPIO.setmode(GPIO.BCM)
        # Suppress GPIO warnings
        GPIO.setwarnings(False)
        GPIO.setup(const.TrackSensorLeftPin1, GPIO.IN)
        GPIO.setup(const.TrackSensorLeftPin2, GPIO.IN)
        GPIO.setup(const.TrackSensorRightPin1, GPIO.IN)
        GPIO.setup(const.TrackSensorRightPin2, GPIO.IN)

    def track_sensor(self):
        # Read all four sensors, inverting the raw level (1 - input) so the
        # stored values use the polarity the rest of the code expects.
        return [
            1 - GPIO.input(const.TrackSensorLeftPin1),
            1 - GPIO.input(const.TrackSensorLeftPin2),
            1 - GPIO.input(const.TrackSensorRightPin1),
            1 - GPIO.input(const.TrackSensorRightPin2),
        ]

    def run(self):
        while not shareState.STOP:
            # NOTE(review): __init__ re-applies GPIO setup each iteration;
            # confirm intentional (see LightThread).
            self.__init__()
            try:
                time.sleep(const.TRACK_SENSOR_SLEEP)
                shareState.set_track_sensor(self.track_sensor())
            except RuntimeError:
                pass
class SonicThread(Thread):
    """Polls the ultrasonic ranger and publishes distances to shareState."""

    def __init__(self):
        super(SonicThread, self).__init__()
        GPIO.setmode(GPIO.BCM)
        # Suppress GPIO warnings
        GPIO.setwarnings(False)
        GPIO.setup(const.EchoPin, GPIO.IN)
        GPIO.setup(const.TrigPin, GPIO.OUT)

    @staticmethod
    def get_distance():
        # Emit a ~15 us trigger pulse, then time the echo pulse width.
        GPIO.output(const.TrigPin, GPIO.HIGH)
        time.sleep(0.000015)
        GPIO.output(const.TrigPin, GPIO.LOW)
        # NOTE(review): both busy-waits have no timeout — a missing echo
        # would hang this thread; confirm acceptable for this hardware.
        while not GPIO.input(const.EchoPin):
            pass
        t1 = time.time()
        while GPIO.input(const.EchoPin):
            pass
        t2 = time.time()
        time.sleep(const.TRACK_SENSOR_SLEEP)
        # distance (cm) = pulse width * speed of sound / 2 (round trip)
        value = ((t2 - t1) * 340 / 2) * 100
        return value

    def run(self):
        while not shareState.STOP:
            try:
                # NOTE(review): __init__ re-applies GPIO setup each iteration;
                # confirm intentional (see LightThread).
                self.__init__()
                # take three readings and keep the minimum (closest obstacle)
                distances = []
                for i in range(0, 3):
                    distances.append(self.get_distance())
                value = min(distances)
                shareState.set_distance(value)
            except RuntimeError:
                pass
class CarThread(Thread):
    """Main drive loop: line following plus an ultrasonic obstacle stop.

    The motor PWM handles are kept in the module-level globals
    pwm_ENA/pwm_ENB (assigned in __init__).
    """

    def __init__(self):
        global pwm_ENA
        global pwm_ENB
        # Use BCM pin numbering
        super(CarThread, self).__init__()
        GPIO.setmode(GPIO.BCM)
        # Suppress GPIO warnings
        GPIO.setwarnings(False)
        GPIO.setup(const.ENA, GPIO.OUT, initial=GPIO.HIGH)
        GPIO.setup(const.IN1, GPIO.OUT, initial=GPIO.LOW)
        GPIO.setup(const.IN2, GPIO.OUT, initial=GPIO.LOW)
        GPIO.setup(const.ENB, GPIO.OUT, initial=GPIO.HIGH)
        GPIO.setup(const.IN3, GPIO.OUT, initial=GPIO.LOW)
        GPIO.setup(const.IN4, GPIO.OUT, initial=GPIO.LOW)
        # PWM pins at 2000 Hz, started at 0% duty cycle
        pwm_ENA = GPIO.PWM(const.ENA, 2000)
        pwm_ENB = GPIO.PWM(const.ENB, 2000)
        pwm_ENA.start(0)
        pwm_ENB.start(0)

    # Drive forward
    def _run(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.HIGH)
        GPIO.output(const.IN2, GPIO.LOW)
        GPIO.output(const.IN3, GPIO.HIGH)
        GPIO.output(const.IN4, GPIO.LOW)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Reverse
    def _back(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.LOW)
        GPIO.output(const.IN2, GPIO.HIGH)
        GPIO.output(const.IN3, GPIO.LOW)
        GPIO.output(const.IN4, GPIO.HIGH)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Turn left (left wheels stopped)
    def _left(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.LOW)
        GPIO.output(const.IN2, GPIO.LOW)
        GPIO.output(const.IN3, GPIO.HIGH)
        GPIO.output(const.IN4, GPIO.LOW)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Turn right (right wheels stopped)
    def _right(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.HIGH)
        GPIO.output(const.IN2, GPIO.LOW)
        GPIO.output(const.IN3, GPIO.LOW)
        GPIO.output(const.IN4, GPIO.LOW)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Spin left in place (wheels counter-rotating)
    def _spin_left(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.LOW)
        GPIO.output(const.IN2, GPIO.HIGH)
        GPIO.output(const.IN3, GPIO.HIGH)
        GPIO.output(const.IN4, GPIO.LOW)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Spin right in place (wheels counter-rotating)
    def _spin_right(self, leftspeed, rightspeed):
        GPIO.output(const.IN1, GPIO.HIGH)
        GPIO.output(const.IN2, GPIO.LOW)
        GPIO.output(const.IN3, GPIO.LOW)
        GPIO.output(const.IN4, GPIO.HIGH)
        pwm_ENA.ChangeDutyCycle(leftspeed)
        pwm_ENB.ChangeDutyCycle(rightspeed)

    # Stop (all direction pins low)
    def _brake(self):
        GPIO.output(const.IN1, GPIO.LOW)
        GPIO.output(const.IN2, GPIO.LOW)
        GPIO.output(const.IN3, GPIO.LOW)
        GPIO.output(const.IN4, GPIO.LOW)

    def run(self):
        while not shareState.STOP:
            try:
                # Obstacle handling: when something is closer than the
                # threshold (and the rate limiter allows it), stop, show a
                # red LED for DURING seconds, then clear it.
                if shareState.distance < const.SONIC_DISTANCE:
                    shareState.set_time(time.time())
                    if shareState.ENABLE_SONIC:
                        self._brake()
                        start = time.time()
                        shareState.set_color(1, 0, 0)
                        while True:
                            end = time.time()
                            if end - start > const.DURING:
                                break
                        shareState.set_color(0, 0, 0)
                # Steering from the two middle sensors (X marks don't-care):
                # X 0 1 X -> gentle left
                if shareState.track_sensor[1] == 0 and shareState.track_sensor[2] == 1:
                    self._left(0, const.SPEED_MIDDLE)
                # X 1 0 X -> gentle right
                elif shareState.track_sensor[1] == 1 and shareState.track_sensor[2] == 0:
                    self._right(const.SPEED_MIDDLE, 0)
                # X 0 0 X -> straight ahead
                elif shareState.track_sensor[1] == 0 and shareState.track_sensor[2] == 0:
                    self._run(const.SPEED_FAST, const.SPEED_FAST)
                # NOTE(review): indentation of the following if/else was
                # reconstructed; in the original it may have been nested under
                # a lost-track branch rather than running every cycle — confirm.
                # X X X 0 (previous reading) -> pivot right
                if shareState.track_sensor_old[3] == 0:
                    self._spin_right(const.SPEED_SLOW, const.SPEED_VERY_SLOW)
                # 0 X X X (previous reading) -> pivot left
                # 1 X X 1 (previous reading) -> reverse
                else:
                    self._spin_left(const.SPEED_VERY_SLOW, const.SPEED_SLOW)
                # other sensor combinations: keep current motion
            except RuntimeError:
                pass
        # shutdown: release the PWM channels
        pwm_ENA.stop()
        pwm_ENB.stop()
class Environ:
    """Top-level controller: waits for the start button, launches the
    worker threads, and shuts everything down on Ctrl-C."""

    def __init__(self):
        # Use BCM pin numbering
        GPIO.setmode(GPIO.BCM)
        # Suppress GPIO warnings
        GPIO.setwarnings(False)
        GPIO.setup(const.KEY, GPIO.IN)

    def _key_scan(self):
        # Block until the push button is pressed and released, with a crude
        # 10 ms software debounce.
        while GPIO.input(const.KEY):
            pass
        while not GPIO.input(const.KEY):
            time.sleep(0.01)
            if not GPIO.input(const.KEY):
                time.sleep(0.01)
                while not GPIO.input(const.KEY):
                    pass

    def task(self):
        # Start the LED and sensor threads, then run the car loop.
        # NOTE(review): CAR_THREAD.run() (not start()) executes the drive
        # loop in *this* thread, so task() blocks until shareState.STOP —
        # the "doing main thread task" loop below never runs; confirm intended.
        shareState.THREAD_LIGHT.start()
        shareState.TRACK_SENSOR_THREAD.start()
        shareState.SONIC_THREAD.start()
        shareState.CAR_THREAD.run()

    def stop(self):
        # Signal every worker loop to exit.
        shareState.STOP = True

    def run(self):
        self._key_scan()
        try:
            self.task()
            while True:
                print("doing main thread task")
                time.sleep(10)
        except KeyboardInterrupt:
            pass
        self.stop()
        GPIO.cleanup()
# Module-level singletons shared by every thread/class above.
const = Const()
shareState = ShareState()

if __name__ == '__main__':
    environ = Environ()
    environ.run()
| 30.824427
| 92
| 0.560013
|
4a025bb27d33a73fb78047e7a08e1247d8f53558
| 742
|
py
|
Python
|
packages/mbed-greentea/mbed_greentea/mbed_coverage_api.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 29
|
2018-11-30T19:45:22.000Z
|
2022-03-29T17:02:16.000Z
|
packages/mbed-greentea/mbed_greentea/mbed_coverage_api.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 160
|
2018-11-30T21:55:52.000Z
|
2022-01-18T10:58:09.000Z
|
packages/mbed-greentea/mbed_greentea/mbed_coverage_api.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 73
|
2018-11-30T21:34:41.000Z
|
2021-10-02T05:51:40.000Z
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
from mbed_os_tools.test.mbed_coverage_api import (
coverage_pack_hex_payload,
coverage_dump_file,
)
| 30.916667
| 72
| 0.784367
|
4a025c007445fec590274df61db6a1a334fab314
| 2,176
|
py
|
Python
|
mockito/__init__.py
|
kaste/mockito-python
|
27e5587ac683262aa883bcb48130ed1a974df517
|
[
"MIT"
] | 90
|
2016-04-24T08:10:54.000Z
|
2022-03-18T18:04:29.000Z
|
mockito/__init__.py
|
kaste/mockito-python
|
27e5587ac683262aa883bcb48130ed1a974df517
|
[
"MIT"
] | 39
|
2016-05-19T16:16:07.000Z
|
2021-12-10T16:57:39.000Z
|
mockito/__init__.py
|
kaste/mockito-python
|
27e5587ac683262aa883bcb48130ed1a974df517
|
[
"MIT"
] | 17
|
2016-05-19T10:29:46.000Z
|
2022-03-14T15:53:29.000Z
|
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Mockito is a Test Spy framework.'''
from .mockito import (
when,
when2,
patch,
expect,
unstub,
forget_invocations,
verify,
verifyNoMoreInteractions,
verifyZeroInteractions,
verifyNoUnwantedInteractions,
verifyStubbedInvocationsAreUsed,
ArgumentError,
)
from . import inorder
from .spying import spy, spy2
from .mocking import mock
from .verification import VerificationError
from .matchers import * # noqa: F401 F403
from .matchers import any, contains, times
from .verification import never
# Package version; the "-dev" suffix marks an unreleased build.
__version__ = '1.3.1-dev'


# Names exported by `from mockito import *` — the package's public API.
__all__ = [
    'mock',
    'spy',
    'spy2',
    'when',
    'when2',
    'patch',
    'expect',
    'verify',
    'verifyNoMoreInteractions',
    'verifyZeroInteractions',
    'verifyNoUnwantedInteractions',
    'verifyStubbedInvocationsAreUsed',
    'inorder',
    'unstub',
    'forget_invocations',
    'VerificationError',
    'ArgumentError',
    'any',  # compatibility
    'contains',  # compatibility
    'never',  # compatibility
    'times',  # deprecated
]
| 30.222222
| 79
| 0.726563
|
4a025c3b1802173ac8b631919f97ee44f47fe148
| 3,753
|
py
|
Python
|
caffe2/contrib/aten/aten_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
caffe2/contrib/aten/aten_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
caffe2/contrib/aten/aten_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
from caffe2.python import core, dyndep
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestATen(hu.HypothesisTestCase):
    """Hypothesis-driven checks that the caffe2 "ATen" bridge operator
    matches equivalent numpy reference implementations."""

    @given(inputs=hu.tensors(n=2), **hu.gcs)
    def test_add(self, inputs, gc, dc):
        op = core.CreateOperator(
            "ATen",
            ["X", "Y"],
            ["Z"],
            operator="add")

        def ref(X, Y):
            # reference: elementwise sum
            return [X + Y]
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(inputs=hu.tensors(n=2, dtype=np.float16), **hu.gcs_gpu_only)
    def test_add_half(self, inputs, gc, dc):
        # same as test_add but exercising the fp16 path (GPU only)
        op = core.CreateOperator(
            "ATen",
            ["X", "Y"],
            ["Z"],
            operator="add")

        def ref(X, Y):
            return [X + Y]
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(inputs=hu.tensors(n=1), **hu.gcs)
    def test_pow(self, inputs, gc, dc):
        op = core.CreateOperator(
            "ATen",
            ["S"],
            ["Z"],
            operator="pow", exponent=2.0)

        def ref(X):
            # pow with exponent 2.0 is an elementwise square
            return [np.square(X)]
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(x=st.integers(min_value=2, max_value=8), **hu.gcs)
    def test_sort(self, x, gc, dc):
        # a random permutation of 0..x-1 so the expected argsort is unambiguous
        inputs = [np.random.permutation(x)]
        op = core.CreateOperator(
            "ATen",
            ["S"],
            ["Z", "I"],
            operator="sort")

        def ref(X):
            # sort returns both sorted values and their original indices
            return [np.sort(X), np.argsort(X)]
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(inputs=hu.tensors(n=1), **hu.gcs)
    def test_sum(self, inputs, gc, dc):
        op = core.CreateOperator(
            "ATen",
            ["S"],
            ["Z"],
            operator="sum")

        def ref(X):
            return [np.sum(X)]
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(**hu.gcs)
    def test_index_uint8(self, gc, dc):
        # Indexing with uint8 is deprecated, but we need to provide backward compatibility for some old models exported through ONNX
        op = core.CreateOperator(
            "ATen",
            ['self', 'mask'],
            ["Z"],
            operator="index")

        def ref(self, mask):
            # a uint8 mask must behave exactly like a bool mask
            return (self[mask.astype(np.bool_)],)

        tensor = np.random.randn(2, 3, 4).astype(np.float32)
        mask = np.array([[1, 0, 0], [1, 1, 0]]).astype(np.uint8)
        self.assertReferenceChecks(gc, op, [tensor, mask], ref)

    @given(**hu.gcs)
    def test_index_put(self, gc, dc):
        op = core.CreateOperator(
            "ATen",
            ['self', 'indices', 'values'],
            ["Z"],
            operator="index_put")

        def ref(self, indices, values):
            # in-place masked assignment, returning the mutated tensor
            self[indices] = values
            return (self,)

        tensor = np.random.randn(3, 3).astype(np.float32)
        mask = np.array([[True, True, True], [True, False, False], [True, True, False]])
        values = np.random.randn(6).astype(np.float32)
        self.assertReferenceChecks(gc, op, [tensor, mask, values], ref)

    @given(**hu.gcs)
    def test_unique(self, gc, dc):
        op = core.CreateOperator(
            "ATen",
            ['self'],
            ["output"],
            sorted=True,
            return_inverse=True,
            # return_counts=False,
            operator="_unique")

        def ref(self):
            # only the unique values are compared (the inverse is discarded)
            index, _ = np.unique(self, return_index=False, return_inverse=True, return_counts=False)
            return (index,)

        tensor = np.array([1, 2, 6, 4, 2, 3, 2])
        # NOTE(review): stray debug print left in — consider removing.
        print(ref(tensor))
        self.assertReferenceChecks(gc, op, [tensor], ref)
if __name__ == "__main__":
import unittest
unittest.main()
| 27.394161
| 132
| 0.526779
|
4a025c5e04402ff9c47954f12d3a5b74cdc1e254
| 437
|
py
|
Python
|
shelephant/external.py
|
tdegeus/shelephant
|
35727dce7ac88c1445d17642cd0af2da7714ac43
|
[
"MIT"
] | null | null | null |
shelephant/external.py
|
tdegeus/shelephant
|
35727dce7ac88c1445d17642cd0af2da7714ac43
|
[
"MIT"
] | 51
|
2020-11-12T15:02:03.000Z
|
2022-03-25T14:26:03.000Z
|
shelephant/external.py
|
tdegeus/shelephant
|
35727dce7ac88c1445d17642cd0af2da7714ac43
|
[
"MIT"
] | null | null | null |
import subprocess
def exec_cmd(cmd, verbose=False):
    r"""
    Run command, optionally verbose command and its output, and return output.

    :type cmd: str
    :param cmd: The command to run.

    :type verbose: bool
    :param verbose: Print command and its output.
    """
    # NOTE(review): cmd goes through the shell (shell=True) — callers must
    # never pass untrusted input.
    if verbose:
        print(cmd)

    output = subprocess.check_output(cmd, shell=True).decode("utf-8")

    if verbose:
        print(output)

    return output
| 18.208333
| 78
| 0.636156
|
4a025c7d0428bd87bbd98c6de54b424adde1e13b
| 44,160
|
py
|
Python
|
Python/libraries/recognizers-date-time/recognizers_date_time/resources/spanish_date_time.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | null | null | null |
Python/libraries/recognizers-date-time/recognizers_date_time/resources/spanish_date_time.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | null | null | null |
Python/libraries/recognizers-date-time/recognizers_date_time/resources/spanish_date_time.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# <auto-generated>
# This code was generated by a tool.
# Changes to this file may cause incorrect behavior and will be lost if
# the code is regenerated.
# </auto-generated>
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from .base_date_time import BaseDateTime
# pylint: disable=line-too-long
class SpanishDateTime:
LangMarker = 'Spa'
CheckBothBeforeAfter = False
TillRegex = f'(?<till>\\b(hasta|hacia|al?)\\b(\\s+(el|la(s)?)\\b)?|{BaseDateTime.RangeConnectorSymbolRegex})'
StrictTillRegex = f'(?<till>\\b(hasta|hacia|al?)(\\s+(el|la(s)?))?\\b|{BaseDateTime.RangeConnectorSymbolRegex}(?!\\s*[qt][1-4](?!(\\s+de|\\s*,\\s*))))'
RangeConnectorRegex = f'(?<and>\\b(y\\s*(el|(la(s)?)?))\\b|{BaseDateTime.RangeConnectorSymbolRegex})'
WrittenDayRegex = f'(?<day>uno|dos|tres|cuatro|cinco|seis|siete|ocho|nueve|diez|once|doce|trece|catorce|quince|dieciséis|diecisiete|dieciocho|diecinueve|veinte|veintiuno|veintidós|veintitrés|veinticuatro|veinticinco|veintiséis|veintisiete|veintiocho|veintinueve|treinta(\\s+y\\s+uno)?)'
DayRegex = f'\\b(?<day>01|02|03|04|05|06|07|08|09|10|11|12|13|14|15|16|17|18|19|1|20|21|22|23|24|25|26|27|28|29|2|30|31|3|4|5|6|7|8|9)(?:\\.[º°])?(?=\\b|t)'
MonthNumRegex = f'(?<month>1[0-2]|(0)?[1-9])\\b'
OclockRegex = f'(?<oclock>en\\s+punto)'
AmDescRegex = f'({BaseDateTime.BaseAmDescRegex})'
PmDescRegex = f'({BaseDateTime.BasePmDescRegex})'
AmPmDescRegex = f'({BaseDateTime.BaseAmPmDescRegex})'
DescRegex = f'(?<desc>({AmDescRegex}|{PmDescRegex}))'
OfPrepositionRegex = f'(\\bd(o|al?|el?)\\b)'
AfterNextSuffixRegex = f'\\b(despu[eé]s\\s+de\\s+la\\s+pr[oó]xima)\\b'
NextSuffixRegex = f'\\b(que\\s+viene|pr[oó]xim[oa]|siguiente)\\b'
PreviousSuffixRegex = f'\\b(pasad[ao]|anterior(?!\\s+(al?|del?)\\b))\\b'
RelativeSuffixRegex = f'({AfterNextSuffixRegex}|{NextSuffixRegex}|{PreviousSuffixRegex})'
RangePrefixRegex = f'((de(l|sde)?|entre)(\\s+la(s)?)?)'
TwoDigitYearRegex = f'\\b(?<![$])(?<year>([0-9]\\d))(?!(\\s*((\\:\\d)|{AmDescRegex}|{PmDescRegex}|\\.\\d))|\\.?[º°ª])\\b'
RelativeRegex = f'(?<rela>est[ae]|pr[oó]xim[oa]|siguiente|(([uú]ltim|pasad)(o|as|os)))\\b'
StrictRelativeRegex = f'(?<rela>est[ae]|pr[oó]xim[oa]|siguiente|(([uú]ltim|pasad)(o|as|os)))\\b'
WrittenOneToNineRegex = f'(un[ao]?|dos|tres|cuatro|cinco|seis|siete|ocho|nueve)'
WrittenOneHundredToNineHundredRegex = f'(doscient[oa]s|trescient[oa]s|cuatrocient[ao]s|quinient[ao]s|seiscient[ao]s|setecient[ao]s|ochocient[ao]s|novecient[ao]s|cien(to)?)'
WrittenOneToNinetyNineRegex = f'(((treinta|cuarenta|cincuenta|sesenta|setenta|ochenta|noventa)(\\s+y\\s+{WrittenOneToNineRegex})?)|diez|once|doce|trece|catorce|quince|dieciséis|dieciseis|diecisiete|dieciocho|diecinueve|veinte|veintiuno|veintiún|veintiun|veintiuna|veintidós|veintidos|veintitrés|veintitres|veinticuatro|veinticinco|veintiséis|veintisiete|veintiocho|veintinueve|un[ao]?|dos|tres|cuatro|cinco|seis|siete|ocho|nueve)'
FullTextYearRegex = f'\\b(?<fullyear>((dos\\s+)?mil)(\\s+{WrittenOneHundredToNineHundredRegex})?(\\s+{WrittenOneToNinetyNineRegex})?)'
YearRegex = f'({BaseDateTime.FourDigitYearRegex}|{FullTextYearRegex})'
RelativeMonthRegex = f'(?<relmonth>(de\\s+)?((este|pr[oó]ximo|([uú]ltim(o|as|os)))\\s+mes)|(del\\s+)?(mes\\s+((que\\s+viene)|pasado)))\\b'
MonthRegex = f'\\b(?<month>abr(\\.|(il)?\\b)|ago(\\.|(sto)?\\b)|dic(\\.|(iembre)?\\b)|feb(\\.|(rero)?\\b)|ene(\\.|(ro)?\\b)|ju[ln](\\.|(io)?\\b)|mar(\\.|(zo)?\\b)|may(\\.|(o)?\\b)|nov(\\.|(iembre)?\\b)|oct(\\.|(ubre)?\\b)|sep?t(\\.|(iembre)?\\b)|sep(\\.|\\b))'
MonthSuffixRegex = f'(?<msuf>((del?|la|el)\\s+)?({RelativeMonthRegex}|{MonthRegex}))'
DateUnitRegex = f'(?<unit>años?|mes(es)?|semanas?|d[ií]as?(?<business>\\s+(h[aá]biles|laborales))?)\\b'
PastRegex = f'(?<past>\\b(pasad(a|o)(s)?|[uú]ltim[oa](s)?|anterior(es)?|previo(s)?)\\b)'
FutureRegex = f'\\b(siguiente(s)?|pr[oó]xim[oa](s)?|dentro\\s+de|en)\\b'
SimpleCasesRegex = f'\\b((desde(\\s+el)?|entre|del?)\\s+)?({DayRegex})\\s*{TillRegex}\\s*({DayRegex})\\s+{MonthSuffixRegex}((\\s+|\\s*,\\s*)((en|del?)\\s+)?{YearRegex})?\\b'
MonthFrontSimpleCasesRegex = f'\\b{MonthSuffixRegex}\\s+((desde(\\s+el)?|entre|del)\\s+)?({DayRegex})\\s*{TillRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*)((en|del?)\\s+)?{YearRegex})?\\b'
MonthFrontBetweenRegex = f'\\b{MonthSuffixRegex}\\s+((entre(\\s+el)?)\\s+)({DayRegex})\\s*{RangeConnectorRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*)((en|del?)\\s+)?{YearRegex})?\\b'
DayBetweenRegex = f'\\b((entre(\\s+el)?)\\s+)({DayRegex})\\s*{RangeConnectorRegex}\\s*({DayRegex})\\s+{MonthSuffixRegex}((\\s+|\\s*,\\s*)((en|del?)\\s+)?{YearRegex})?\\b'
SpecialYearPrefixes = f'((del\\s+)?calend[aá]rio|(?<special>fiscal|escolar))'
OneWordPeriodRegex = f'\\b(((((la|el)\\s+)?mes\\s+(({OfPrepositionRegex})\\s+)?)|((pr[oó]xim[oa]?|est[ea]|[uú]ltim[oa]?)\\s+))?({MonthRegex})|(((la|el)\\s+)?((({RelativeRegex}\\s+)({DateUnitRegex}|(fin\\s+de\\s+)?semana|finde)(\\s+{RelativeSuffixRegex})?)|{DateUnitRegex}(\\s+{RelativeSuffixRegex}))|va\\s+de\\s+{DateUnitRegex}|((año|mes)|((el\\s+)?fin\\s+de\\s+)?semana|(el\\s+)?finde))\\b)'
MonthWithYearRegex = f'\\b(((pr[oó]xim[oa](s)?|est?[ae]|[uú]ltim[oa]?)\\s+)?({MonthRegex})(\\s+|(\\s*[,-]\\s*))((de(l|\\s+la)?|en)\\s+)?({YearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|este)\\s+año))\\b'
MonthNumWithYearRegex = f'\\b(({YearRegex}(\\s*?)[/\\-\\.~](\\s*?){MonthNumRegex})|({MonthNumRegex}(\\s*?)[/\\-\\.~](\\s*?){YearRegex}))\\b'
WeekOfMonthRegex = f'(?<wom>(la\\s+)?(?<cardinal>primera?|1ra|segunda|2da|tercera?|3ra|cuarta|4ta|quinta|5ta|([12345](\\.)?ª)|[uú]ltima)\\s+semana\\s+{MonthSuffixRegex}((\\s+de)?\\s+({BaseDateTime.FourDigitYearRegex}|{RelativeRegex}\\s+año))?)\\b'
WeekOfYearRegex = f'(?<woy>(la\\s+)?(?<cardinal>primera?|1ra|segunda|2da|tercera?|3ra|cuarta|4ta|quinta|5ta|[uú]ltima?|([12345]ª))\\s+semana(\\s+(del?|en))?\\s+({YearRegex}|(?<order>pr[oó]ximo|[uú]ltimo|este)\\s+año))'
FollowedDateUnit = f'^\\s*{DateUnitRegex}'
NumberCombinedWithDateUnit = f'\\b(?<num>\\d+(\\.\\d*)?){DateUnitRegex}'
QuarterTermRegex = f'\\b((?<cardinal>primer|1er|segundo|2do|tercer|3ro|4to|([1234](\\.)?º))\\s+(trimestre|cuarto)|[tq](?<number>[1-4]))\\b'
RelativeQuarterTermRegex = f'\\b((?<orderQuarter>{StrictRelativeRegex})\\s+(trimestre|cuarto)|(trimestre|cuarto)\\s+(?<orderQuarter>(actual|pr[oó]ximo|siguiente|pasado|anterior)))\\b'
QuarterRegex = f'(el\\s+)?{QuarterTermRegex}((\\s+(del?\\s+)?|\\s*[,-]\\s*)({YearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|este)\\s+a[ñn]o|a[ñn]o(\\s+{RelativeSuffixRegex}))|\\s+del\\s+a[ñn]o)?|{RelativeQuarterTermRegex}'
QuarterRegexYearFront = f'({YearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|este)\\s+a[ñn]o)(?:\\s*-\\s*|\\s+(el\\s+)?)?{QuarterTermRegex}'
AllHalfYearRegex = f'\\b(?<cardinal>primer|1er|segundo|2do|[12](\\.)?º)\\s+semestre(\\s+(de\\s+)?({YearRegex}|{RelativeRegex}\\s+año))?\\b'
EarlyPrefixRegex = f'\\b(?<EarlyPrefix>(?<RelEarly>m[aá]s\\s+temprano(\\s+(del?|en))?)|((comienzos?|inicios?|principios?|temprano)\\s+({OfPrepositionRegex}(\\s+d[ií]a)?)))(\\s+(el|las?|los?))?\\b'
MidPrefixRegex = f'\\b(?<MidPrefix>(media[dn]os\\s+({OfPrepositionRegex})))(\\s+(el|las?|los?))?\\b'
LaterPrefixRegex = f'\\b(?<LatePrefix>((fin(al)?(es)?|[uú]ltimos)\\s+({OfPrepositionRegex}))|(?<RelLate>m[aá]s\\s+tarde(\\s+(del?|en))?))(\\s+(el|las?|los?))?\\b'
PrefixPeriodRegex = f'({EarlyPrefixRegex}|{MidPrefixRegex}|{LaterPrefixRegex})'
PrefixDayRegex = f'\\b((?<EarlyPrefix>(comienzos?|inicios?|principios?|temprano))|(?<MidPrefix>mediados)|(?<LatePrefix>(fin((al)?es)?|m[aá]s\\s+tarde)))(\\s+(en|{OfPrepositionRegex}))?(\\s+([ae]l)(\\s+d[ií]a)?)?$'
CenturySuffixRegex = f'(^siglo)\\b'
SeasonRegex = f'\\b(?<season>(([uú]ltim[oa]|est[ea]|el|la|(pr[oó]xim[oa]s?|siguiente)|{PrefixPeriodRegex})\\s+)?(?<seas>primavera|verano|otoño|invierno)((\\s+(del?|en)|\\s*,\\s*)?\\s+({YearRegex}|(?<order>pr[oó]ximo|[uú]ltimo|este)\\s+año))?)\\b'
WhichWeekRegex = f'\\b(semana)(\\s*)(?<number>5[0-3]|[1-4]\\d|0?[1-9])\\b'
WeekOfRegex = f'((del?|el|la)\\s+)?(semana)(\\s*)({OfPrepositionRegex}|que\\s+(inicia|comienza)\\s+el|(que\\s+va|a\\s+partir)\\s+del)'
MonthOfRegex = f'(mes)(\\s+)({OfPrepositionRegex})'
RangeUnitRegex = f'\\b(?<unit>años?|mes(es)?|semanas?)\\b'
BeforeAfterRegex = f'^[.]'
InConnectorRegex = f'\\b(en)\\b'
SinceYearSuffixRegex = f'^[.]'
WithinNextPrefixRegex = f'\\b(dentro\\s+de)\\b'
TodayNowRegex = f'\\b(hoy|ahora|este entonces)\\b'
FromRegex = f'((\\bde(sde)?)(\\s*la(s)?)?)$'
BetweenRegex = f'(\\bentre\\s*(la(s)?)?)'
WeekDayRegex = f'\\b(?<weekday>(domingos?|lunes|martes|mi[eé]rcoles|jueves|viernes|s[aá]bados?)\\b|(lun|mar|mi[eé]|jue|vie|s[aá]b|dom|lu|ma|mi|ju|vi|s[aá]|do)(\\.|\\b))(?!ñ)'
OnRegex = f'((?<=\\b(e[ln])\\s+)|(\\be[ln]\\s+d[ií]a\\s+))({DayRegex}s?)(?![.,]\\d)\\b'
RelaxedOnRegex = f'(?<=\\b(en|d?el)\\s+)((?<day>10|11|12|13|14|15|16|17|18|19|1st|20|21|22|23|24|25|26|27|28|29|2|30|31|3|4|5|6|7|8|9)s?)(?![.,]\\d)\\b'
SpecialDayRegex = f'\\b((el\\s+)?(d[ií]a\\s+antes\\s+de\\s+ayer|anteayer)|((el\\s+)?d[ií]a\\s+(despu[eé]s\\s+)?de\\s+mañana|pasado\\s+mañana)|(el\\s)?d[ií]a\\s+(siguiente|anterior)|(el\\s)?pr[oó]ximo\\s+d[ií]a|(el\\s+)?[uú]ltimo\\s+d[ií]a|(d)?el\\s+d[ií]a(?!\\s+d)|ayer|mañana|hoy)\\b'
SpecialDayWithNumRegex = f'^[.]'
FlexibleDayRegex = f'(?<DayOfMonth>([A-Za-z]+\\s)?({WrittenDayRegex}|{DayRegex}))'
ForTheRegex = f'\\b((((?<=para\\s+el\\s+){FlexibleDayRegex})|((?<!(\\b{MonthRegex},?|\\bpara)\\s+(el\\s+)|{WeekDayRegex}\\s+)((?<=(e[ln]\\s+))|(\\be[ln]\\s+d[ií]a\\s+)){FlexibleDayRegex}))(?<end>\\s*(,|\\.(?![º°ª])|!|\\?|-|$))(?!\\d))'
WeekDayAndDayOfMonthRegex = f'\\b{WeekDayRegex}\\s+((el\\s+(d[ií]a\\s+)?){FlexibleDayRegex})\\b'
WeekDayAndDayRegex = f'\\b{WeekDayRegex}\\s+({DayRegex}|{WrittenDayRegex})(?!([-:/]|\\.\\d|(\\s+({AmDescRegex}|{PmDescRegex}|{OclockRegex}))))\\b'
WeekDayOfMonthRegex = f'(?<wom>(el\\s+)?(?<cardinal>primera?|1era?|segund[ao]|2d[ao]|tercera?|3era?|cuart[ao]|4t[ao]|quint[ao]|5t[ao]|((1|2|3|4|5)(\\.)?[ºª])|[uú]ltim[ao])\\s+(semana\\s+{MonthSuffixRegex}\\s+el\\s+{WeekDayRegex}|{WeekDayRegex}\\s+{MonthSuffixRegex}))'
RelativeWeekDayRegex = f'^[.]'
AmbiguousRangeModifierPrefix = f'^[.]'
NumberEndingPattern = f'^[.]'
DateTokenPrefix = 'en '
TimeTokenPrefix = 'a las '
TokenBeforeDate = 'el '
TokenBeforeTime = 'a las '
HalfTokenRegex = f'^((y\\s+)?media)'
QuarterTokenRegex = f'^((y\\s+)?cuarto|(?<neg>menos\\s+cuarto))'
PastTokenRegex = f'\\b(pasad[ao]s(\\s+(de\\s+)?las)?)$'
ToTokenRegex = f'\\b((para|antes)(\\s+(de\\s+)?las?)|(?<neg>^menos))$'
SpecialDateRegex = f'(?<=\\b(en)\\s+el\\s+){DayRegex}\\b'
OfMonthRegex = f'^\\s*((d[ií]a\\s+)?d[eo]\\s+)?{MonthSuffixRegex}'
MonthEndRegex = f'({MonthRegex}\\s*(el)?\\s*$)'
WeekDayEnd = f'{WeekDayRegex}\\s*,?\\s*$'
WeekDayStart = f'^[\\.]'
DateYearRegex = f'(?<year>{YearRegex}|(?<!,\\s?){TwoDigitYearRegex}|{TwoDigitYearRegex}(?=(\\.(?!\\d)|[?!;]|$)))'
DateExtractor1 = f'\\b({WeekDayRegex}(\\s+|\\s*,\\s*))?(?<!\\d[.,]){DayRegex}((\\s*(d[eo])|[/\\\\\\.\\-])\\s*)?{MonthRegex}\\b'
DateExtractor2 = f'\\b((el\\s+d[ií]a|{WeekDayRegex})(\\s+|\\s*,\\s*))?(?<!\\d[.,])(({DayRegex}(\\s+(d[eo]\\s+)?|\\s*[.,/-]\\s*){MonthRegex}((\\s+(del?\\s+)?|\\s*[.,/-]\\s*){DateYearRegex}\\b)?)|{BaseDateTime.FourDigitYearRegex}\\s*[.,/-]?\\s*(el\\s+d[ií]a\\s+)?{DayRegex}(\\s+(d[eo]\\s+)?|\\s*[.,/-]\\s*){MonthRegex})'
DateExtractor3 = f'\\b({WeekDayRegex}(\\s+|\\s*,\\s*))?{MonthRegex}(\\s*[.,/-]?\\s*)(el\\s+d[ií]a\\s+)?{DayRegex}((\\s+(del?\\s+)?|\\s*[.,/-]\\s*){DateYearRegex})?\\b'
DateExtractor4 = f'\\b(?<!\\d[.,]){MonthNumRegex}\\s*[/\\\\\\-]\\s*{DayRegex}\\s*[/\\\\\\-]\\s*{DateYearRegex}(?!\\s*[/\\\\\\-\\.]\\s*\\d+)'
DateExtractor5 = f'\\b(?<!\\d[.,]){DayRegex}\\s*[/\\\\\\-\\.]\\s*({MonthNumRegex}|{MonthRegex})\\s*[/\\\\\\-\\.]\\s*{DateYearRegex}(?!\\s*[/\\\\\\.]\\s*\\d+)'
DateExtractor6 = f'(?<=\\b(en|el)\\s+){MonthNumRegex}[\\-\\.]{DayRegex}{BaseDateTime.CheckDecimalRegex}\\b(?!\\s*[/\\\\\\.]\\s*\\d+)'
DateExtractor7 = f'\\b(?<!\\d[.,]){MonthNumRegex}\\s*/\\s*{DayRegex}((\\s+|\\s*,\\s*|\\s+d[eo]\\s+){DateYearRegex})?\\b{BaseDateTime.CheckDecimalRegex}(?!\\s*[/\\\\\\.]\\s*\\d+)'
DateExtractor8 = f'(?<=\\b(en|el)\\s+){DayRegex}[\\\\\\-]{MonthNumRegex}{BaseDateTime.CheckDecimalRegex}\\b(?!\\s*[/\\\\\\.]\\s*\\d+)'
DateExtractor9 = f'\\b({WeekDayRegex}\\s+)?(?<!\\d[.,]){DayRegex}\\s*/\\s*{MonthNumRegex}((\\s+|\\s*,\\s*|\\s+d[eo]\\s+){DateYearRegex})?\\b{BaseDateTime.CheckDecimalRegex}(?!\\s*[/\\\\\\.]\\s*\\d+)'
DateExtractor10 = f'\\b(?<!\\d[.,])(({YearRegex}\\s*[/\\\\\\-\\.]\\s*({MonthNumRegex}|{MonthRegex})\\s*[/\\\\\\-\\.]\\s*{DayRegex}(?!\\s*[/\\\\\\-\\.]\\s*\\d+))|({MonthRegex}\\s*[/\\\\\\-\\.]\\s*{BaseDateTime.FourDigitYearRegex}\\s*[/\\\\\\-\\.]\\s*{DayRegex})|({DayRegex}\\s*[/\\\\\\-\\.]\\s*{BaseDateTime.FourDigitYearRegex}\\s*[/\\\\\\-\\.]\\s*{MonthRegex}))'
HourNumRegex = f'\\b(?<hournum>cero|una|dos|tres|cuatro|cinco|seis|siete|ocho|nueve|diez|once|doce)\\b'
MinuteNumRegex = f'(?<minnum>uno?|d[óo]s|tr[eé]s|cuatro|cinco|s[eé]is|siete|ocho|nueve|diez|once|doce|trece|catorce|quince|diecis[eé]is|diecisiete|dieciocho|diecinueve|veinte|treinta|cuarenta|cincuenta)'
DeltaMinuteNumRegex = f'(?<deltaminnum>uno?|d[óo]s|tr[eé]s|cuatro|cinco|s[eé]is|siete|ocho|nueve|diez|once|doce|trece|catorce|quince|diecis[eé]is|diecisiete|dieciocho|diecinueve|veinte|treinta|cuarenta|cincuenta)'
PmRegex = f'(?<pm>((por|de|a|en)\\s+la)\\s+(tarde|noche))'
AmRegex = f'(?<am>((por|de|a|en)\\s+la)\\s+(mañana|madrugada))'
AmTimeRegex = f'(?<am>(esta|(por|de|a|en)\\s+la)\\s+(mañana|madrugada))'
PmTimeRegex = f'(?<pm>(esta|(por|de|a|en)\\s+la)\\s+(tarde|noche))'
NightTimeRegex = f'(noche)'
LastNightTimeRegex = f'(anoche)'
NowTimeRegex = f'(ahora|mismo|momento)'
RecentlyTimeRegex = f'(mente)'
AsapTimeRegex = f'(posible|pueda[ns]?|podamos)'
LessThanOneHour = f'(?<lth>((\\s+y\\s+)?cuarto|(\\s*)menos cuarto|(\\s+y\\s+)media|{BaseDateTime.DeltaMinuteRegex}(\\s+(minutos?|mins?))|{DeltaMinuteNumRegex}(\\s+(minutos?|mins?))))'
TensTimeRegex = f'(?<tens>diez|veint(i|e)|treinta|cuarenta|cincuenta)'
WrittenTimeRegex = f'(?<writtentime>{HourNumRegex}\\s*((y|(?<prefix>menos))\\s+)?(({TensTimeRegex}(\\s*y\\s+)?)?{MinuteNumRegex}))'
TimePrefix = f'(?<prefix>{LessThanOneHour}(\\s+(pasad[ao]s)\\s+(de\\s+las|las)?|\\s+(para|antes\\s+de)?\\s+(las?))?)'
TimeSuffix = f'(?<suffix>({LessThanOneHour}\\s+)?({AmRegex}|{PmRegex}|{OclockRegex}))'
GeneralDescRegex = f'({DescRegex}|(?<suffix>{AmRegex}|{PmRegex}))'
BasicTime = f'(?<basictime>{WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}:{BaseDateTime.MinuteRegex}(:{BaseDateTime.SecondRegex})?|{BaseDateTime.HourRegex})'
MidTimeRegex = f'(?<mid>((?<midnight>media\\s*noche)|(?<midmorning>media\\s*mañana)|(?<midafternoon>media\\s*tarde)|(?<midday>medio\\s*d[ií]a)))'
AtRegex = f'\\b((?<=\\b((a|de(sde)?)\\s+las?|al)\\s+)(({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex})\\b(\\s*\\bh\\b)?(DescRegex)?|{MidTimeRegex})|{MidTimeRegex})'
ConnectNumRegex = f'({BaseDateTime.HourRegex}(?<min>[0-5][0-9])\\s*{DescRegex})'
TimeRegexWithDotConnector = f'({BaseDateTime.HourRegex}\\.{BaseDateTime.MinuteRegex})'
TimeRegex1 = f'(\\b{TimePrefix}\\s+)?({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex})\\s*({DescRegex}|\\s*\\bh\\b)'
TimeRegex2 = f'(\\b{TimePrefix}\\s+)?(t)?{BaseDateTime.HourRegex}(\\s*)?:(\\s*)?{BaseDateTime.MinuteRegex}((\\s*)?:(\\s*)?{BaseDateTime.SecondRegex})?((\\s*{DescRegex})|\\b)'
TimeRegex3 = f'\\b(({TimePrefix}\\s+)?{TimeRegexWithDotConnector}(\\s*({DescRegex}|{TimeSuffix}|\\bh\\b))|((las\\s+{TimeRegexWithDotConnector})(?!\\s*(por\\s+cien(to)?|%))(\\s*({DescRegex}|{TimeSuffix}|\\bh\\b)|\\b)))'
TimeRegex4 = f'\\b(({DescRegex}?)|({BasicTime}\\s*)?({GeneralDescRegex}?)){TimePrefix}(\\s*({HourNumRegex}|{BaseDateTime.HourRegex}))?(\\s+{TensTimeRegex}(\\s*(y\\s+)?{MinuteNumRegex})?)?(\\s*({OclockRegex}|{DescRegex})|\\b)'
TimeRegex5 = f'\\b({TimePrefix}|{BasicTime}{TimePrefix})\\s+(\\s*{DescRegex})?{BasicTime}?\\s*{TimeSuffix}\\b'
TimeRegex6 = f'({BasicTime}(\\s*{DescRegex})?\\s+{TimeSuffix}\\b)'
TimeRegex7 = f'\\b{TimeSuffix}\\s+a\\s+las\\s+{BasicTime}((\\s*{DescRegex})|\\b)'
TimeRegex8 = f'\\b{TimeSuffix}\\s+{BasicTime}((\\s*{DescRegex})|\\b)'
TimeRegex9 = f'\\b(?<writtentime>{HourNumRegex}\\s+({TensTimeRegex}\\s*)(y\\s+)?{MinuteNumRegex}?)\\b'
TimeRegex10 = f'(a\\s+la|al)\\s+(madrugada|mañana|tarde|noche)'
TimeRegex11 = f'\\b({WrittenTimeRegex})(\\s+{DescRegex})?\\b'
TimeRegex12 = f'(\\b{TimePrefix}\\s+)?{BaseDateTime.HourRegex}(\\s*h\\s*){BaseDateTime.MinuteRegex}(\\s*{DescRegex})?'
PrepositionRegex = f'(?<prep>^(,\\s*)?(a(l)?|en|de(l)?)?(\\s*(la(s)?|el|los))?$)'
LaterEarlyRegex = f'((?<early>temprano)|(?<late>fin(al)?(\\s+de)?|m[aá]s\\s+tarde))'
NowRegex = f'\\b(?<now>(justo\\s+)?ahora(\\s+mismo)?|en\\s+este\\s+momento|tan\\s+pronto\\s+como\\s+sea\\s+posible|tan\\s+pronto\\s+como\\s+(pueda|puedas|podamos|puedan)|lo\\s+m[aá]s\\s+pronto\\s+posible|recientemente|previamente|este entonces)\\b'
SuffixRegex = f'^\\s*(((y|a|en|por)\\s+la|al)\\s+)?(mañana|madrugada|medio\\s*d[ií]a|(?<!(m[áa]s\\s+))tarde|noche)\\b'
TimeOfDayRegex = f'\\b((?<timeOfDay>(({LaterEarlyRegex}\\s+)((del?|en|por)(\\s+(el|los?|las?))?\\s+)?)?(mañana|madrugada|pasado\\s+(el\\s+)?medio\\s?d[ií]a|(?<!((m[áa]s|tan)\\s+))tarde|noche))|(en|por)\\s+las?\\s+mañana)\\b'
SpecificTimeOfDayRegex = f'\\b(((((a\\s+)?la|esta|siguiente|pr[oó]xim[oa]|[uú]ltim[oa])\\s+)?{TimeOfDayRegex})|anoche)\\b'
TimeOfTodayAfterRegex = f'^\\s*(,\\s*)?(en|de(l)?\\s+)?{SpecificTimeOfDayRegex}'
TimeOfTodayBeforeRegex = f'({SpecificTimeOfDayRegex}(\\s*,)?(\\s+(((cerca|alrededor)?\\s*(de|a)\\s+)la(s)?|para))?\\s*$)'
NonTimeContextTokens = f'(edificio)'
SimpleTimeOfTodayAfterRegex = f'(?<!{NonTimeContextTokens}\\s*)\\b({HourNumRegex}|{BaseDateTime.HourRegex})\\s*(,\\s*)?((en|de(l)?)?\\s+)?{SpecificTimeOfDayRegex}\\b'
SimpleTimeOfTodayBeforeRegex = f'({SpecificTimeOfDayRegex}(\\s*,)?(\\s+(((cerca|alrededor)?\\s*(de|a)\\s+)la(s)?|para))?\\s*({HourNumRegex}|{BaseDateTime.HourRegex}))\\b'
SpecificEndOfRegex = f'((a|e)l\\s+)?fin(alizar|al)?(\\s+(el|de(l)?)(\\s+d[ií]a)?(\\s+de)?)?\\s*$'
UnspecificEndOfRegex = f'\\b([ae]l\\s+)?(fin(al)?\\s+del?\\s+d[ií]a)\\b'
UnspecificEndOfRangeRegex = f'^[.]'
DateTimeTimeOfDayRegex = f'\\b(?<timeOfDay>mañana|madrugada|(?<pm>pasado\\s+(el\\s+)?medio\\s?d[ií]a|tarde|noche))\\b'
PeriodTimeOfDayRegex = f'\\b((en\\s+(el|la|lo)?\\s+)?({LaterEarlyRegex}\\s+)?(est[ae]\\s+)?{DateTimeTimeOfDayRegex})\\b'
PeriodSpecificTimeOfDayRegex = f'\\b(({LaterEarlyRegex}\\s+)?est[ae]\\s+{DateTimeTimeOfDayRegex}|({StrictRelativeRegex}\\s+{PeriodTimeOfDayRegex})|anoche)\\b'
UnitRegex = f'(?<unit>años?|(bi|tri|cuatri|se)mestre|mes(es)?|semanas?|fin(es)?\\s+de\\s+semana|finde|d[ií]as?|horas?|hra?s?|hs?|minutos?|mins?|segundos?|segs?|noches?)\\b'
ConnectorRegex = f'^(,|t|(para|y|a|en|por) las?|(\\s*,\\s*)?(cerca|alrededor) de las?)$'
TimeHourNumRegex = f'(?<hour>veint(i(uno|dos|tres|cuatro)|e)|cero|uno|dos|tres|cuatro|cinco|seis|siete|ocho|nueve|diez|once|doce|trece|catorce|quince|dieci(s([eé])is|siete|ocho|nueve))'
PureNumFromTo = f'((\\b(desde|de)\\s+(la(s)?\\s+)?)?({BaseDateTime.HourRegex}|{TimeHourNumRegex})(?!\\s+al?\\b)(\\s*(?<leftDesc>{DescRegex}))?|(\\b(desde|de)\\s+(la(s)?\\s+)?)({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?)\\s*{TillRegex}\\s*({BaseDateTime.HourRegex}|{TimeHourNumRegex})\\s*(?<rightDesc>{PmRegex}|{AmRegex}|{DescRegex})?'
PureNumBetweenAnd = f'(\\bentre\\s+(la(s)?\\s+)?)(({BaseDateTime.TwoDigitHourRegex}{BaseDateTime.TwoDigitMinuteRegex})|{BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?\\s*{RangeConnectorRegex}\\s*(({BaseDateTime.TwoDigitHourRegex}{BaseDateTime.TwoDigitMinuteRegex})|{BaseDateTime.HourRegex}|{TimeHourNumRegex})\\s*(?<rightDesc>{PmRegex}|{AmRegex}|{DescRegex})?'
SpecificTimeFromTo = f'({RangePrefixRegex}\\s+)?(?<time1>(({TimeRegex2}|{TimeRegexWithDotConnector}(\\s*{DescRegex})?)|({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?))\\s*{TillRegex}\\s*(?<time2>(({TimeRegex2}|{TimeRegexWithDotConnector}(\\s*{DescRegex})?)|({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<rightDesc>{DescRegex}))?))'
SpecificTimeBetweenAnd = f'({BetweenRegex}\\s+)(?<time1>(({TimeRegex1}|{TimeRegex2}|{TimeRegexWithDotConnector}(\\s*{DescRegex})?)|({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?))\\s*{RangeConnectorRegex}\\s*(?<time2>(({TimeRegex1}|{TimeRegex2}|{TimeRegexWithDotConnector}(\\s*{DescRegex})?)|({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<rightDesc>{DescRegex}))?))'
TimeUnitRegex = f'([^A-Za-z]{{1,}}|\\b)(?<unit>horas?|h|minutos?|mins?|segundos?|se[cg]s?)\\b'
TimeFollowedUnit = f'^\\s*{TimeUnitRegex}'
TimeNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\,\\d*)?)\\s*{TimeUnitRegex}'
DateTimePeriodNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\.\\d*)?)\\s*{TimeUnitRegex}'
PeriodTimeOfDayWithDateRegex = f'\\b(((y|a|en|por)\\s+(la\\s+)?|al\\s+)?((((?<early>primeras\\s+horas\\s+)|(?<late>(últimas|altas)\\s+horas\\s+))(de\\s+la\\s+)?|{LaterEarlyRegex}\\s+(est[ae]\\s+)?)?(?<timeOfDay>(mañana|madrugada|pasado\\s+(el\\s+)?medio\\s?d[ií]a|(?<!(m[áa]s\\s+))tarde|noche|anoche))))(\\s+(del|de))?\\b'
RelativeTimeUnitRegex = f'({PastRegex}|{FutureRegex})\\s+{TimeUnitRegex}'
LessThanRegex = f'\\b(dentro\\s+de\\s+menos\\s+de)\\b'
MoreThanRegex = f'\\b(durante\\s+(m[áa]s\\s+)?de)\\b'
SuffixAndRegex = f'(?<suffix>\\s*(y)\\s+((un[ao]?)\\s+)?(?<suffix_num>media|cuarto))'
FollowedUnit = f'^\\s*{UnitRegex}'
DurationNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\,\\d*)?){UnitRegex}'
AnUnitRegex = f'\\b(una?|otr[ao])\\s+{UnitRegex}'
DuringRegex = f'^[.]'
AllRegex = f'\\b(?<all>tod[oa]?\\s+(el|la)\\s+(?<unit>año|mes|semana|d[ií]a)|((una?|el|la)\\s+)?(?<unit>año|mes|semana|d[ií]a)\\s+enter[ao])\\b'
HalfRegex = f'\\b(?<half>medi[oa]\\s+(?<unit>ano|mes|semana|d[íi]a|hora))\\b'
ConjunctionRegex = f'^[.]'
InexactNumberRegex = f'\\b(pocos?|algo|vari[ao]s|algun[ao]s|un[ao]s)\\b'
InexactNumberUnitRegex = f'({InexactNumberRegex})\\s+{UnitRegex}'
HolidayRegex1 = f'\\b(?<holiday>viernes santo|mi[eé]rcoles de ceniza|martes de carnaval|d[ií]a (de|de los) presidentes?|clebraci[oó]n de mao|año nuevo chino|año nuevo|noche vieja|(festividad de )?los mayos|d[ií]a de los inocentes|navidad|noche buena|d[ií]a de acci[oó]n de gracias|acci[oó]n de gracias|yuandan|halloween|noches de brujas|pascuas)(\\s+(del?\\s+)?({YearRegex}|(?<order>(pr[oó]xim[oa]?|est[ea]|[uú]ltim[oa]?|en))\\s+año))?\\b'
HolidayRegex2 = f'\\b(?<holiday>(d[ií]a( del?( la)?)? )?(martin luther king|todos los santos|blanco|san patricio|san valent[ií]n|san jorge|cinco de mayo|independencia|raza|trabajador))(\\s+(del?\\s+)?({YearRegex}|(?<order>(pr[oó]xim[oa]?|est[ea]|[uú]ltim[oa]?|en))\\s+año))?\\b'
HolidayRegex3 = f'\\b(?<holiday>(d[ií]a( internacional)?( del?( l[ao]s?)?)? )(trabajador(es)?|madres?|padres?|[aá]rbol|mujer(es)?|solteros?|niños?|marmota|san valent[ií]n|maestro))(\\s+(del?\\s+)?({YearRegex}|(?<order>(pr[oó]xim[oa]?|est[ea]|[uú]ltim[oa]?|en))\\s+año))?\\b'
BeforeRegex = f'(\\b((ante(s|rior)|m[aá]s\\s+temprano|no\\s+m[aá]s\\s+tard(e|ar)|(?<include>tan\\s+tarde\\s+como))(\\s+(del?|a|que)(\\s+(el|las?|los?))?)?)|(?<!\\w|>)((?<include><\\s*=)|<))'
AfterRegex = f'((\\b(despu[eé]s|(año\\s+)?posterior|m[aá]s\\s+tarde|a\\s+primeros)(\\s*(del?|en|a)(\\s+(el|las?|los?))?)?|(empi?en?zando|comenzando)(\\s+(el|las?|los?))?)\\b|(?<!\\w|<)((?<include>>\\s*=)|>))'
SinceRegex = f'\\b(((cualquier\\s+tiempo\\s+)?(desde|a\\s+partir\\s+del?)|tan\\s+(temprano|pronto)\\s+como(\\s+(de|a))?)(\\s+(el|las?|los?))?)\\b'
SinceRegexExp = f'({SinceRegex}|\\bde\\b)'
AroundRegex = f'(?:\\b(?:cerca|alrededor|aproximadamente)(\\s+(de\\s+(las?|el)|del?))?\\s*\\b)'
PeriodicRegex = f'\\b(?<periodic>a\\s*diario|diaria(s|mente)|(bi|tri)?(semanal|quincenal|mensual|semestral|anual)(es|mente)?)\\b'
EachExpression = f'\\b(cada|tod[oa]s\\s*(l[oa]s)?)\\b\\s*(?!\\s*l[oa]\\b)'
EachUnitRegex = f'(?<each>({EachExpression})\\s*({UnitRegex}|(?<specialUnit>fin(es)?\\s+de\\s+semana|finde)\\b))'
EachPrefixRegex = f'(?<each>({EachExpression})\\s*$)'
EachDayRegex = f'\\s*({EachExpression})\\s*d[ií]as\\s*\\b'
BeforeEachDayRegex = f'({EachExpression})\\s*d[ií]as(\\s+a\\s+las?)?\\s*\\b'
SetEachRegex = f'(?<each>({EachExpression})\\s*)'
LaterEarlyPeriodRegex = f'\\b(({PrefixPeriodRegex})\\s+(?<suffix>{OneWordPeriodRegex}|(?<FourDigitYear>{BaseDateTime.FourDigitYearRegex}))|({UnspecificEndOfRangeRegex}))\\b'
RelativeWeekRegex = f'(((la|el)\\s+)?(((est[ae]|pr[oó]xim[oa]|[uú]ltim(o|as|os))\\s+semanas?)|(semanas?\\s+(que\\s+viene|pasad[oa]))))'
WeekWithWeekDayRangeRegex = f'\\b((({RelativeWeekRegex})((\\s+entre\\s+{WeekDayRegex}\\s+y\\s+{WeekDayRegex})|(\\s+de\\s+{WeekDayRegex}\\s+a\\s+{WeekDayRegex})))|((entre\\s+{WeekDayRegex}\\s+y\\s+{WeekDayRegex})|(de\\s+{WeekDayRegex}\\s+a\\s+{WeekDayRegex})){OfPrepositionRegex}\\s+{RelativeWeekRegex})\\b'
GeneralEndingRegex = f'^\\s*((\\.,)|\\.|,|!|\\?)?\\s*$'
MiddlePauseRegex = f'^[.]'
PrefixArticleRegex = f'\\b(e[ln]\\s+(d[ií]a\\s+)?)'
OrRegex = f'^[.]'
SpecialYearTermsRegex = f'\\b(años?\\s+({SpecialYearPrefixes}\\s+)?(de\\s+)?)'
YearPlusNumberRegex = f'\\b({SpecialYearTermsRegex}((?<year>(\\d{{2,4}}))|{FullTextYearRegex}))\\b'
NumberAsTimeRegex = f'^[.]'
TimeBeforeAfterRegex = f'\\b((?<=\\b(antes|no\\s+m[aá]s\\s+tard(e|ar)\\s+(de|a\\s+las?)|por| después)\\s+)({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}|{MidTimeRegex}))\\b'
DateNumberConnectorRegex = f'^[.]'
CenturyRegex = f'^[.]'
DecadeRegex = f'(?<decade>diez|veinte|treinta|cuarenta|cincuenta|se[st]enta|ochenta|noventa)'
DecadeWithCenturyRegex = f'(los\\s+)?((((d[ée]cada(\\s+de)?)\\s+)(((?<century>\\d|1\\d|2\\d)?(?<decade>\\d0))))|a[ñn]os\\s+((((dos\\s+)?mil\\s+)?({WrittenOneHundredToNineHundredRegex}\\s+)?{DecadeRegex})|((dos\\s+)?mil\\s+)?({WrittenOneHundredToNineHundredRegex})(\\s+{DecadeRegex}?)|((dos\\s+)?mil)(\\s+{WrittenOneHundredToNineHundredRegex}\\s+)?{DecadeRegex}?))'
RelativeDecadeRegex = f'\\b(((el|las?)\\s+)?{RelativeRegex}\\s+(((?<number>[\\d]+)|{WrittenOneToNineRegex})\\s+)?d[eé]cadas?)\\b'
ComplexDatePeriodRegex = f'(?:((de(sde)?)\\s+)?(?<start>.+)\\s*({StrictTillRegex})\\s*(?<end>.+)|((entre)\\s+)(?<start>.+)\\s*({RangeConnectorRegex})\\s*(?<end>.+))'
AmbiguousPointRangeRegex = f'^(mar\\.?)$'
YearSuffix = f'((,|\\sde)?\\s*({YearRegex}|{FullTextYearRegex}))'
AgoRegex = f'\\b(antes\\s+de\\s+(?<day>hoy|ayer|mañana)|antes)\\b'
LaterRegex = f'\\b(despu[eé]s(?!\\s+de\\b)|desde\\s+ahora|a\\s+partir\\s+de\\s+(?<day>hoy|ayer|mañana))\\b'
Tomorrow = 'mañana'
UnitMap = dict([("años", "Y"),
("año", "Y"),
("meses", "MON"),
("mes", "MON"),
("trimestre", "3MON"),
("trimestres", "3MON"),
("cuatrimestre", "4MON"),
("cuatrimestres", "4MON"),
("semestre", "6MON"),
("semestres", "6MON"),
("bimestre", "2MON"),
("bimestres", "2MON"),
("semanas", "W"),
("semana", "W"),
("fin de semana", "WE"),
("fines de semana", "WE"),
("finde", "WE"),
("dias", "D"),
("dia", "D"),
("días", "D"),
("día", "D"),
("jornada", "D"),
("noche", "D"),
("noches", "D"),
("horas", "H"),
("hora", "H"),
("hrs", "H"),
("hras", "H"),
("hra", "H"),
("hr", "H"),
("h", "H"),
("minutos", "M"),
("minuto", "M"),
("mins", "M"),
("min", "M"),
("segundos", "S"),
("segundo", "S"),
("segs", "S"),
("seg", "S")])
UnitValueMap = dict([("años", 31536000),
("año", 31536000),
("meses", 2592000),
("mes", 2592000),
("semanas", 604800),
("semana", 604800),
("fin de semana", 172800),
("fines de semana", 172800),
("finde", 172800),
("dias", 86400),
("dia", 86400),
("días", 86400),
("día", 86400),
("noche", 86400),
("noches", 86400),
("horas", 3600),
("hora", 3600),
("hrs", 3600),
("hras", 3600),
("hra", 3600),
("hr", 3600),
("h", 3600),
("minutos", 60),
("minuto", 60),
("mins", 60),
("min", 60),
("segundos", 1),
("segundo", 1),
("segs", 1),
("seg", 1)])
SpecialYearPrefixesMap = dict([("fiscal", "FY"),
("escolar", "SY")])
SeasonMap = dict([("primavera", "SP"),
("verano", "SU"),
("otoño", "FA"),
("invierno", "WI")])
SeasonValueMap = dict([("SP", 3),
("SU", 6),
("FA", 9),
("WI", 12)])
CardinalMap = dict([("primer", 1),
("primero", 1),
("primera", 1),
("1er", 1),
("1ro", 1),
("1ra", 1),
("1.º", 1),
("1º", 1),
("1ª", 1),
("segundo", 2),
("segunda", 2),
("2do", 2),
("2da", 2),
("2.º", 2),
("2º", 2),
("2ª", 2),
("tercer", 3),
("tercero", 3),
("tercera", 3),
("3er", 3),
("3ro", 3),
("3ra", 3),
("3.º", 3),
("3º", 3),
("3ª", 3),
("cuarto", 4),
("cuarta", 4),
("4to", 4),
("4ta", 4),
("4.º", 4),
("4º", 4),
("4ª", 4),
("quinto", 5),
("quinta", 5),
("5to", 5),
("5ta", 5),
("5.º", 5),
("5º", 5),
("5ª", 5)])
DayOfWeek = dict([("lunes", 1),
("martes", 2),
("miercoles", 3),
("miércoles", 3),
("jueves", 4),
("viernes", 5),
("sabado", 6),
("sábado", 6),
("domingo", 0),
("dom", 0),
("lun", 1),
("mar", 2),
("mie", 3),
("mié", 3),
("jue", 4),
("vie", 5),
("sab", 6),
("sáb", 6),
("dom.", 0),
("lun.", 1),
("mar.", 2),
("mie.", 3),
("mié.", 3),
("jue.", 4),
("vie.", 5),
("sab.", 6),
("sáb.", 6),
("do", 0),
("lu", 1),
("ma", 2),
("mi", 3),
("ju", 4),
("vi", 5),
("sa", 6)])
MonthOfYear = dict([("enero", 1),
("febrero", 2),
("marzo", 3),
("abril", 4),
("mayo", 5),
("junio", 6),
("julio", 7),
("agosto", 8),
("septiembre", 9),
("setiembre", 9),
("octubre", 10),
("noviembre", 11),
("diciembre", 12),
("ene", 1),
("feb", 2),
("mar", 3),
("abr", 4),
("may", 5),
("jun", 6),
("jul", 7),
("ago", 8),
("sept", 9),
("sep", 9),
("set", 9),
("oct", 10),
("nov", 11),
("dic", 12),
("ene.", 1),
("feb.", 2),
("mar.", 3),
("abr.", 4),
("may.", 5),
("jun.", 6),
("jul.", 7),
("ago.", 8),
("sept.", 9),
("sep.", 9),
("set.", 9),
("oct.", 10),
("nov.", 11),
("dic.", 12),
("1", 1),
("2", 2),
("3", 3),
("4", 4),
("5", 5),
("6", 6),
("7", 7),
("8", 8),
("9", 9),
("10", 10),
("11", 11),
("12", 12),
("01", 1),
("02", 2),
("03", 3),
("04", 4),
("05", 5),
("06", 6),
("07", 7),
("08", 8),
("09", 9)])
Numbers = dict([("cero", 0),
("un", 1),
("una", 1),
("uno", 1),
("dos", 2),
("dós", 2),
("tres", 3),
("trés", 3),
("cuatro", 4),
("cinco", 5),
("seis", 6),
("séis", 6),
("siete", 7),
("ocho", 8),
("nueve", 9),
("diez", 10),
("once", 11),
("doce", 12),
("docena", 12),
("docenas", 12),
("trece", 13),
("catorce", 14),
("quince", 15),
("dieciseis", 16),
("dieciséis", 16),
("diecisiete", 17),
("dieciocho", 18),
("diecinueve", 19),
("veinte", 20),
("veinti", 20),
("ventiuna", 21),
("ventiuno", 21),
("veintiun", 21),
("veintiún", 21),
("veintiuno", 21),
("veintiuna", 21),
("veintidos", 22),
("veintidós", 22),
("veintitres", 23),
("veintitrés", 23),
("veinticuatro", 24),
("veinticinco", 25),
("veintiseis", 26),
("veintiséis", 26),
("veintisiete", 27),
("veintiocho", 28),
("veintinueve", 29),
("treinta", 30),
("cuarenta", 40),
("cincuenta", 50)])
HolidayNames = dict([("padres", ["diadelpadre"]),
("madres", ["diadelamadre"]),
("acciondegracias", ["diadegracias", "diadeacciondegracias", "acciondegracias"]),
("trabajador", ["diadeltrabajador", "diainternacionaldelostrabajadores"]),
("delaraza", ["diadelaraza", "diadeladiversidadcultural"]),
("memoria", ["diadelamemoria"]),
("pascuas", ["diadepascuas", "pascuas"]),
("navidad", ["navidad", "diadenavidad"]),
("nochebuena", ["diadenochebuena", "nochebuena"]),
("añonuevo", ["a\u00f1onuevo", "diadea\u00f1onuevo"]),
("nochevieja", ["nochevieja", "diadenochevieja"]),
("yuandan", ["yuandan"]),
("earthday", ["diadelatierra"]),
("maestro", ["diadelmaestro"]),
("todoslossantos", ["todoslossantos"]),
("niño", ["diadelni\u00f1o"]),
("mujer", ["diadelamujer"]),
("independencia", ["diadelaindependencia", "diadeindependencia", "independencia"])])
VariableHolidaysTimexDictionary = dict([("padres", "-06-WXX-7-3"),
("madres", "-05-WXX-7-2"),
("acciondegracias", "-11-WXX-4-4"),
("delaraza", "-10-WXX-1-2"),
("memoria", "-03-WXX-2-4")])
DoubleNumbers = dict([("mitad", 0.5),
("cuarto", 0.25)])
UpcomingPrefixRegex = f'((este\\s+))'
NextPrefixRegex = f'\\b({UpcomingPrefixRegex}?pr[oó]xim[oa]s?|siguiente|que\\s+viene)\\b'
PastPrefixRegex = f'((este\\s+))'
PreviousPrefixRegex = f'\\b({PastPrefixRegex}?pasad[oa](?!(\\s+el)?\\s+medio\\s*d[ií]a)|[uú]ltim[oa]|anterior)\\b'
ThisPrefixRegex = f'(est?[ea]|actual)\\b'
PrefixWeekDayRegex = f'(\\s*((,?\\s*el)|[-—–]))'
ThisRegex = f'\\b((est[ae]\\s*)(semana{PrefixWeekDayRegex}?)?\\s*{WeekDayRegex})|({WeekDayRegex}\\s*((de\\s+)?esta\\s+semana))\\b'
LastDateRegex = f'\\b(({PreviousPrefixRegex}\\s+(semana{PrefixWeekDayRegex}?)?|(la\\s+)?semana\\s+{PreviousPrefixRegex}{PrefixWeekDayRegex})\\s*{WeekDayRegex})|(este\\s+)?({WeekDayRegex}\\s+([uú]ltimo|pasado|anterior))|({WeekDayRegex}(\\s+((de\\s+)?((esta|la)\\s+([uú]ltima\\s+)?semana)|(de\\s+)?(la\\s+)?semana\\s+(pasada|anterior))))\\b'
NextDateRegex = f'\\b((({NextPrefixRegex}\\s+)(semana{PrefixWeekDayRegex}?)?|(la\\s+)?semana\\s+{NextPrefixRegex}{PrefixWeekDayRegex})\\s*{WeekDayRegex})|(este\\s+)?({WeekDayRegex}\\s+(pr[oó]ximo|siguiente|que\\s+viene))|({WeekDayRegex}(\\s+(de\\s+)?(la\\s+)?((pr[oó]xima|siguiente)\\s+semana|semana\\s+(pr[oó]xima|siguiente))))\\b'
RelativeDayRegex = f'(?<relday>((este|pr[oó]ximo|([uú]ltim(o|as|os)))\\s+días)|(días\\s+((que\\s+viene)|pasado)))\\b'
RestOfDateRegex = f'\\bresto\\s+((del|de)\\s+)?((la|el|est?[ae])\\s+)?(?<duration>semana|mes|año|decada)(\\s+actual)?\\b'
DurationUnitRegex = f'(?<unit>{DateUnitRegex}|horas?|hra?s?|hs?|minutos?|mins?|segundos?|segs?|noches?)\\b'
DurationConnectorRegex = f'^[.]'
RelativeDurationUnitRegex = f'(?:(?<=({NextPrefixRegex}|{PreviousPrefixRegex}|{ThisPrefixRegex})\\s+)({DurationUnitRegex}))'
ReferencePrefixRegex = f'(mism[ao]|aquel|est?e)\\b'
ReferenceDatePeriodRegex = f'\\b{ReferencePrefixRegex}\\s+({DateUnitRegex}|fin\\s+de\\s+semana)\\b'
FromToRegex = f'\\b(from).+(to)\\b.+'
SingleAmbiguousMonthRegex = f'^(the\\s+)?(may|march)$'
UnspecificDatePeriodRegex = f'^[\\.]'
PrepositionSuffixRegex = f'\\b(en|el|la|cerca|alrededor|desde|durante|hasta|hacia)$'
RestOfDateTimeRegex = f'\\bresto\\s+((del?)\\s+)?((la|el|est[ae])\\s+)?(?<unit>(día|jornada))(\\s+de\\s+hoy)?\\b'
SetWeekDayRegex = f'^[\\.]'
NightRegex = f'\\b(medionoche|noche)\\b'
CommonDatePrefixRegex = f'^[\\.]'
SuffixAfterRegex = f'\\b((a\\s+)?(o|y)\\s+(arriba|despu[eé]s|posterior|mayor|m[aá]s\\s+tarde)(?!\\s+(que|de)))\\b'
YearPeriodRegex = f'((((de(sde)?|durante|en)\\s+)?{YearRegex}\\s*({TillRegex})\\s*{YearRegex})|(((entre)\\s+){YearRegex}\\s*({RangeConnectorRegex})\\s*{YearRegex}))'
FutureSuffixRegex = f'\\b(siguiente(s)?|pr[oó]xim[oa](s)?|(en\\s+el\\s+)?futuro|a\\s+partir\\s+de\\s+ahora)\\b'
WrittenDecades = dict([("", 0)])
SpecialDecadeCases = dict([("", 0)])
DefaultLanguageFallback = 'DMY'
DurationDateRestrictions = [r'hoy']
AmbiguityFiltersDict = dict([("^mi$", "\\bmi\\b"),
("^a[nñ]o$", "(?<!el\\s+)a[nñ]o"),
("^semana$", "(?<!la\\s+)semana"),
("^mes$", "(?<!el\\s+)mes"),
("^(abr|ago|dic|feb|ene|ju[ln]|mar|may|nov|oct|sep?t|sep)$", "([$%£&!?@#])(abr|ago|dic|feb|ene|ju[ln]|mar|may|nov|oct|sep?t|sep)|(abr|ago|dic|feb|ene|ju[ln]|mar|may|nov|oct|sep?t|sep)([$%£&@#])")])
EarlyMorningTermList = [r'madrugada']
MorningTermList = [r'mañana', r'la mañana']
AfternoonTermList = [r'pasado mediodia', r'pasado el mediodia', r'pasado mediodía', r'pasado el mediodía', r'pasado medio dia', r'pasado el medio dia', r'pasado medio día', r'pasado el medio día']
EveningTermList = [r'tarde']
NightTermList = [r'noche']
SameDayTerms = [r'hoy', r'el dia']
PlusOneDayTerms = [r'mañana', r'dia siguiente', r'el dia de mañana', r'proximo dia']
MinusOneDayTerms = [r'ayer', r'ultimo dia', r'dia anterior']
PlusTwoDayTerms = [r'pasado mañana', r'dia despues de mañana']
MinusTwoDayTerms = [r'anteayer', r'dia antes de ayer']
MonthTerms = [r'mes', r'meses']
MonthToDateTerms = [r'mes a la fecha', r'meses a la fecha']
WeekendTerms = [r'finde', r'fin de semana', r'fines de semana']
WeekTerms = [r'semana']
YearTerms = [r'año', r'años']
YearToDateTerms = [r'año a la fecha', r'años a la fecha']
SpecialCharactersEquivalent = dict([("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u")])
DoubleMultiplierRegex = f'^(bi)(-|\\s)?'
DayTypeRegex = f'(d[ií]as?|diari(o|as|amente))$'
WeekTypeRegex = f'(semanas?|semanalmente)$'
BiWeekTypeRegex = f'(quincenalmente)$'
WeekendTypeRegex = f'(fin(es)?\\s+de\\s+semana|finde)$'
MonthTypeRegex = f'(mes(es)?|mensual(es|mente)?)$'
YearTypeRegex = f'(años?|anualmente)$'
# pylint: enable=line-too-long
| 74.847458
| 443
| 0.503533
|
4a025cfee475a60f473590aa7ff73c52da637977
| 5,008
|
py
|
Python
|
u96v2_sbc_dualcam/dualcam.py
|
AlbertaBeef/u96v2_dualcam_python_examples
|
a8aa3fa14a1828af441c20c15d33c644ab686610
|
[
"Apache-2.0"
] | null | null | null |
u96v2_sbc_dualcam/dualcam.py
|
AlbertaBeef/u96v2_dualcam_python_examples
|
a8aa3fa14a1828af441c20c15d33c644ab686610
|
[
"Apache-2.0"
] | null | null | null |
u96v2_sbc_dualcam/dualcam.py
|
AlbertaBeef/u96v2_dualcam_python_examples
|
a8aa3fa14a1828af441c20c15d33c644ab686610
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2021 Avnet Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Based on DualCam 2020.2 Design
# reference : http://avnet.me/u96v2-dualcam-2020.2
import numpy as np
import cv2
import os
class DualCam():
def __init__(self, cap_config='ar0144_dual', cap_id=0, cap_width=1280, cap_height=800):
self.cap_config = cap_config
self.cap_id = cap_id
self.cap_width = cap_width
self.cap_height = cap_height
self.input_resolution = 'WxH'
self.output_width = 0
self.output_height = 0
self.output_resolution = 'WxH'
if cap_config == 'ar0144_dual':
self.input_resolution = '2560x800'
self.output_width = self.cap_width*2
self.output_height = self.cap_height
self.output_resolution = str(self.output_width)+'x'+str(self.output_height)
elif cap_config == 'ar0144_single':
self.input_resolution = '1280x800'
self.output_width = self.cap_width
self.output_height = self.cap_height
self.output_resolution = str(self.output_width)+'x'+str(self.output_height)
elif cap_config == 'ar1335_single':
self.input_resolution = '3840x2160'
self.output_width = self.cap_width
self.output_height = self.cap_height
self.output_resolution = str(self.output_width)+'x'+str(self.output_height)
else:
print("[DualCam] Invalid cap_config = ",cap_config," ! (must be ar0144_dual|ar0144_single|ar1335_single)")
return None
print("\n\r[DualCam] Initializing capture pipeline for ",self.cap_config,self.cap_id,self.cap_width,self.cap_height)
cmd = "media-ctl -d /dev/media0 -V \"'ap1302.4-003c':2 [fmt:UYVY8_1X16/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0000000.mipi_csi2_rx_subsystem':0 [fmt:UYVY8_1X16/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0000000.mipi_csi2_rx_subsystem':1 [fmt:UYVY8_1X16/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0010000.v_proc_ss':0 [fmt:UYVY8_1X16/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0010000.v_proc_ss':1 [fmt:RBG24/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0040000.v_proc_ss':0 [fmt:RBG24/"+self.input_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "media-ctl -d /dev/media0 -V \"'b0040000.v_proc_ss':1 [fmt:RBG24/"+self.output_resolution+" field:none]\""
print(cmd)
os.system(cmd)
cmd = "v4l2-ctl -d /dev/video0 --set-fmt-video=width="+str(self.output_width)+",height="+str(self.output_height)+",pixelformat=BGR3"
print(cmd)
os.system(cmd)
if cap_config == 'ar0144_dual':
print("\n\r[DualCam] Configuring AP1302 for left-right side-by-side configuration")
cmd = "i2cset -f -y 4 0x3c 0x10 0x0C 0x00 0x04 i"
print(cmd)
os.system(cmd)
if cap_config == 'ar1335_single':
print("\n\r[DualCam] Configuring AP1302 for no horizontal/vertical flip")
cmd = "i2cset -f -y 4 0x3c 0x10 0x0C 0x00 0x00 i"
print(cmd)
os.system(cmd)
print("\n\r[DualCam] Configuring AP1302 to enable auto-focus")
cmd = "i2cset -f -y 4 0x3c 0x50 0x58 0x11 0x86 i"
print(cmd)
os.system(cmd)
print("\n\r[DualCam] Opening cv2.VideoCapture for ",self.cap_id,self.output_width,self.output_height)
self.cap = cv2.VideoCapture(self.cap_id)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,self.output_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,self.output_height)
print("\n\r")
def capture(self):
if not (self.cap.grab()):
print("[DualCam] No more frames !")
return None
_, frame = self.cap.retrieve()
return frame
def capture_dual(self):
if not (self.cap.grab()):
print("[DualCam] No more frames !")
return None
_, frame = self.cap.retrieve()
left = frame[:,1:(self.cap_width)+1,:]
right = frame[:,(self.cap_width):(self.cap_width*2)+1,:]
return left,right
def release(self):
self.cap_id = 0
self.cap_dual = True
self.cap_width = 0
self.cap_height = 0
self.input_resolution = 'WxH'
self.output_width = 0
self.output_height = 0
self.output_resolution = 'WxH'
del self.cap
self.cap = None
| 31.696203
| 137
| 0.670527
|
4a025d5a24611ce2ef2cb6021c97accb894602da
| 151
|
py
|
Python
|
backend/apps/core/routing.py
|
jaliste/mi_coop
|
66be4328117c39026d85a3d6cc9b8635bde49077
|
[
"MIT"
] | 54
|
2019-08-11T03:39:22.000Z
|
2022-01-06T02:16:33.000Z
|
backend/apps/core/routing.py
|
jaliste/mi_coop
|
66be4328117c39026d85a3d6cc9b8635bde49077
|
[
"MIT"
] | 11
|
2020-02-12T01:17:38.000Z
|
2021-05-10T06:22:22.000Z
|
backend/apps/core/routing.py
|
jaliste/mi_coop
|
66be4328117c39026d85a3d6cc9b8635bde49077
|
[
"MIT"
] | 20
|
2019-09-07T20:04:08.000Z
|
2021-12-05T04:43:20.000Z
|
from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [
url(r"^ws/ping-pong/$", consumers.CoreConsumer.as_asgi(),),
]
| 18.875
| 63
| 0.721854
|
4a025d5e7118ee20f136c8a31b4c183de11f1e7f
| 8,884
|
py
|
Python
|
official/vision/detection/utils/object_detection/matcher.py
|
bamdada/UdacityProj10FinaltfModels
|
4701bfbc924539860f610fa4ceae484a7bf194c6
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/vision/detection/utils/object_detection/matcher.py
|
akshayjaryal603/models
|
db39ef826193d0802f644ba30397242a7272676e
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
official/vision/detection/utils/object_detection/matcher.py
|
akshayjaryal603/models
|
db39ef826193d0802f644ba30397242a7272676e
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
integer int32 scalar tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
raise ValueError('match_results should be an int32 or int64 scalar '
'tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(input=self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher(object):
"""Abstract base class for matcher.
"""
__metaclass__ = ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
if not scope:
scope = 'Match'
with tf.name_scope(scope) as scope:
return Match(self._match(similarity_matrix, **params))
@abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
| 36.409836
| 80
| 0.710716
|
4a025dab93d5f1129c91ae2dd773aed8cd992730
| 15,521
|
py
|
Python
|
spanner_orm/tests/model_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 37
|
2018-11-01T18:29:03.000Z
|
2022-03-30T17:24:39.000Z
|
spanner_orm/tests/model_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 48
|
2018-11-05T18:51:23.000Z
|
2021-12-17T20:28:11.000Z
|
spanner_orm/tests/model_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 19
|
2019-05-04T06:05:31.000Z
|
2021-12-17T20:52:53.000Z
|
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import typing
from typing import List
import unittest
from unittest import mock
from absl.testing import parameterized
from google.api_core import exceptions
from google.cloud import spanner
from spanner_orm import error
from spanner_orm import field
from spanner_orm.testlib.spanner_emulator import testlib as spanner_emulator_testlib
from spanner_orm.tests import models
_TIMESTAMP = datetime.datetime.now(tz=datetime.timezone.utc)
class ModelTest(
spanner_emulator_testlib.TestCase,
parameterized.TestCase,
):
def setUp(self):
super().setUp()
self.run_orm_migrations(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'migrations_for_emulator_test',
))
@mock.patch('spanner_orm.table_apis.find')
def test_find_calls_api(self, find):
mock_transaction = mock.Mock()
models.UnittestModel.find(
string='string',
int_=1,
float_=2.3,
bytes_=b'A1A1',
transaction=mock_transaction,
)
find.assert_called_once()
(transaction, table, columns, keyset), _ = find.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.UnittestModel.table)
self.assertEqual(columns, models.UnittestModel.columns)
self.assertEqual(keyset.keys, [[1, 2.3, 'string', b'A1A1']])
@mock.patch('spanner_orm.table_apis.find')
def test_find_result(self, find):
mock_transaction = mock.Mock()
find.return_value = [['key', 'value_1', None]]
result = models.SmallTestModel.find(key='key', transaction=mock_transaction)
if result:
self.assertEqual(result.key, 'key')
self.assertEqual(result.value_1, 'value_1')
self.assertIsNone(result.value_2)
else:
self.fail('Failed to find result')
def test_find_required(self):
test_model = models.SmallTestModel(
dict(
key='some-key',
value_1='foo',
value_2='bar',
))
test_model.save()
self.assertEqual(
test_model,
models.SmallTestModel.find_required(key='some-key'),
)
def test_find_required_not_found(self):
with self.assertRaisesRegex(exceptions.NotFound,
'SmallTestModel has no object'):
models.SmallTestModel.find_required(key='some-key')
@mock.patch('spanner_orm.table_apis.find')
def test_find_multi_calls_api(self, find):
mock_transaction = mock.Mock()
models.UnittestModel.find_multi(
[{
'string': 'string',
'bytes_': b'bytes',
'int_': 1,
'float_': 2.3
}],
transaction=mock_transaction,
)
find.assert_called_once()
(transaction, table, columns, keyset), _ = find.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.UnittestModel.table)
self.assertEqual(columns, models.UnittestModel.columns)
self.assertEqual(keyset.keys, [[1, 2.3, 'string', b'bytes']])
@mock.patch('spanner_orm.table_apis.find')
def test_find_multi_result(self, find):
mock_transaction = mock.Mock()
find.return_value = [['key', 'value_1', None]]
results = models.SmallTestModel.find_multi(
[{
'key': 'key'
}],
transaction=mock_transaction,
)
self.assertEqual(results[0].key, 'key')
self.assertEqual(results[0].value_1, 'value_1')
self.assertIsNone(results[0].value_2)
@mock.patch('spanner_orm.table_apis.insert')
def test_create_calls_api(self, insert):
mock_transaction = mock.Mock()
models.SmallTestModel.create(
key='key',
value_1='value',
transaction=mock_transaction,
)
insert.assert_called_once()
(transaction, table, columns, values), _ = insert.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.SmallTestModel.table)
self.assertEqual(list(columns), ['key', 'value_1'])
self.assertEqual(list(values), [['key', 'value']])
def test_create_error_on_invalid_keys(self):
with self.assertRaises(error.SpannerError):
models.SmallTestModel.create(key_2='key')
def assert_api_called(self, mock_api, mock_transaction):
mock_api.assert_called_once()
(transaction, table, columns, values), _ = mock_api.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.SmallTestModel.table)
self.assertEqual(list(columns), ['key', 'value_1', 'value_2'])
self.assertEqual(list(values), [['key', 'value', None]])
@mock.patch('spanner_orm.table_apis.insert')
def test_save_batch_inserts(self, insert):
mock_transaction = mock.Mock()
values = {'key': 'key', 'value_1': 'value'}
not_persisted = models.SmallTestModel(values)
models.SmallTestModel.save_batch([not_persisted],
transaction=mock_transaction)
self.assert_api_called(insert, mock_transaction)
@mock.patch('spanner_orm.table_apis.update')
def test_save_batch_updates(self, update):
mock_transaction = mock.Mock()
values = {'key': 'key', 'value_1': 'value'}
persisted = models.SmallTestModel(values, persisted=True)
models.SmallTestModel.save_batch([persisted], transaction=mock_transaction)
self.assert_api_called(update, mock_transaction)
@mock.patch('spanner_orm.table_apis.upsert')
def test_save_batch_force_write_upserts(self, upsert):
mock_transaction = mock.Mock()
values = {'key': 'key', 'value_1': 'value'}
not_persisted = models.SmallTestModel(values)
models.SmallTestModel.save_batch(
[not_persisted],
force_write=True,
transaction=mock_transaction,
)
self.assert_api_called(upsert, mock_transaction)
@mock.patch('spanner_orm.table_apis.delete')
def test_delete_batch_deletes(self, delete):
mock_transaction = mock.Mock()
values = {'key': 'key', 'value_1': 'value'}
model = models.SmallTestModel(values)
models.SmallTestModel.delete_batch([model], transaction=mock_transaction)
delete.assert_called_once()
(transaction, table, keyset), _ = delete.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.SmallTestModel.table)
self.assertEqual(keyset.keys, [[model.key]])
@mock.patch('spanner_orm.table_apis.delete')
def test_delete_by_key_deletes(self, delete):
mock_transaction = mock.Mock()
models.SmallTestModel.delete_by_key(
key='some-key',
transaction=mock_transaction,
)
delete.assert_called_once_with(
mock_transaction,
models.SmallTestModel.table,
spanner.KeySet(keys=[['some-key']]),
)
def test_set_attr(self):
test_model = models.SmallTestModel({'key': 'key', 'value_1': 'value'})
test_model.value_1 = 'value_1'
test_model.value_2 = 'value_2'
self.assertEqual(test_model.values, {
'key': 'key',
'value_1': 'value_1',
'value_2': 'value_2',
})
def test_set_error_on_primary_key(self):
test_model = models.SmallTestModel({'key': 'key', 'value_1': 'value'})
with self.assertRaises(AttributeError):
test_model.key = 'error'
@parameterized.parameters(('int_2', 'foo'), ('float_2', 'bar'),
('string_2', 5), ('bytes_2', 'string'),
('string_array', 'foo'), ('timestamp', 5))
def test_set_error_on_invalid_type(self, attribute, value):
string_array = ['foo', 'bar']
timestamp = datetime.datetime.now(tz=datetime.timezone.utc)
test_model = models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '',
'bytes_': b'',
'string_array': string_array,
'timestamp': timestamp
})
with self.assertRaises(AttributeError):
setattr(test_model, attribute, value)
def test_get_attr(self):
test_model = models.SmallTestModel({'key': 'key', 'value_1': 'value'})
self.assertEqual(test_model.key, 'key')
self.assertEqual(test_model.value_1, 'value')
self.assertEqual(test_model.value_2, None)
@parameterized.parameters(
(True, True),
(True, False),
(False, True),
)
def test_skip_validation(self, persisted, skip_validation):
models.SmallTestModel(
{'value_1': 'value'},
persisted=persisted,
skip_validation=skip_validation,
)
def test_validation(self):
with self.assertRaises(error.SpannerError):
models.SmallTestModel(
{'value_1': 'value'},
persisted=False,
skip_validation=False,
)
def test_model_equates(self):
timestamp = datetime.datetime.now(tz=datetime.timezone.utc)
test_model1 = models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '',
'bytes_': b'',
'string_array': ['foo', 'bar'],
'timestamp': timestamp,
})
test_model2 = models.UnittestModel({
'int_': 0,
'float_': 0.0,
'string': '',
'bytes_': b'',
'string_array': ['foo', 'bar'],
'timestamp': timestamp,
})
self.assertEqual(test_model1, test_model2)
@parameterized.parameters(
(models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '1',
'bytes_': b'1111',
'timestamp': _TIMESTAMP,
}),
models.UnittestModel({
'int_': 0,
'float_': 0,
'string': 'a',
'bytes_': b'A1A1',
'timestamp': _TIMESTAMP,
})),
(models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '',
'bytes_': b'A1A1',
'string_array': ['foo', 'bar'],
'timestamp': _TIMESTAMP,
}),
models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '',
'bytes_': b'A1A1',
'string_array': ['bar', 'foo'],
'timestamp': _TIMESTAMP,
})),
(models.SmallTestModel({
'key': 'key',
'value_1': 'value'
}), models.InheritanceTestModel({
'key': 'key',
'value_1': 'value'
})),
)
def test_model_are_different(self, test_model1, test_model2):
self.assertNotEqual(test_model1, test_model2)
def test_id(self):
primary_key = {'string': 'foo', 'int_': 5, 'float_': 2.3, 'bytes_': b'A1A1'}
all_data = primary_key.copy()
all_data.update({
'timestamp': datetime.datetime.now(tz=datetime.timezone.utc),
'string_array': ['foo', 'bar']
})
test_model = models.UnittestModel(all_data)
self.assertEqual(test_model.id(), primary_key)
def test_changes(self):
test_model = models.SmallTestModel({'key': 'key', 'value_1': 'value'})
test_model.value_1 = 'change'
self.assertEqual(test_model.changes(), {'value_1': 'change'})
test_model.value_1 = 'value'
self.assertEqual(test_model.changes(), {})
def test_object_changes(self):
array = ['foo', 'bar']
timestamp = datetime.datetime.now(tz=datetime.timezone.utc)
test_model = models.UnittestModel({
'int_': 0,
'float_': 0,
'string': '',
'bytes_': b'',
'string_array': array,
'timestamp': timestamp
})
# Make sure that changing an object on the model shows up in changes()
string_array = typing.cast(List[str], test_model.string_array)
string_array.append('bat')
self.assertIn('string_array', test_model.changes())
def test_field_exists_on_model_class(self):
self.assertIsInstance(models.SmallTestModel.key, field.Field)
self.assertEqual(models.SmallTestModel.key.field_type(), field.String)
self.assertFalse(models.SmallTestModel.key.nullable())
self.assertEqual(models.SmallTestModel.key.name, 'key')
def test_field_inheritance(self):
self.assertEqual(models.InheritanceTestModel.key, models.SmallTestModel.key)
values = {'key': 'key', 'value_1': 'value_1', 'value_3': 'value_3'}
test_model = models.InheritanceTestModel(values)
for name, value in values.items():
self.assertEqual(getattr(test_model, name), value)
def test_relation_get(self):
test_model = models.RelationshipTestModel({
'parent_key': 'parent',
'child_key': 'child',
'parent': []
})
self.assertEqual(test_model.parent, [])
def test_relation_get_error_on_unretrieved(self):
test_model = models.RelationshipTestModel({
'parent_key': 'parent',
'child_key': 'child'
})
with self.assertRaises(AttributeError):
_ = test_model.parent
def test_interleaved(self):
self.assertEqual(models.ChildTestModel.interleaved, models.SmallTestModel)
@mock.patch('spanner_orm.model.Model.find')
def test_reload(self, find):
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values, persisted=False)
find.return_value = None
self.assertIsNone(model.reload())
find.assert_called_once_with(**model.id(), transaction=None)
@mock.patch('spanner_orm.model.Model.find')
def test_reload_reloads(self, find):
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values, persisted=False)
updated_values = {'key': 'key', 'value_1': 'value_2'}
find.return_value = models.SmallTestModel(updated_values)
model.reload()
self.assertEqual(model.value_1, updated_values['value_1'])
self.assertEqual(model.changes(), {})
@mock.patch('spanner_orm.model.Model.create')
def test_save_creates(self, create):
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values, persisted=False)
model.save()
create.assert_called_once_with(**values, value_2=None, transaction=None)
@mock.patch('spanner_orm.model.Model.update')
def test_save_updates(self, update):
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values, persisted=True)
values['value_1'] = 'new_value'
model.value_1 = values['value_1']
model.save()
update.assert_called_once_with(**values, transaction=None)
@mock.patch('spanner_orm.model.Model.update')
def test_save_no_changes(self, update):
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values, persisted=True)
model.save()
update.assert_not_called()
@mock.patch('spanner_orm.table_apis.delete')
def test_delete_deletes(self, delete):
mock_transaction = mock.Mock()
values = {'key': 'key', 'value_1': 'value_1'}
model = models.SmallTestModel(values)
model.delete(transaction=mock_transaction)
delete.assert_called_once()
(transaction, table, keyset), _ = delete.call_args
self.assertEqual(transaction, mock_transaction)
self.assertEqual(table, models.SmallTestModel.table)
self.assertEqual(keyset.keys, [[model.key]])
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
| 33.306867
| 84
| 0.660138
|
4a025dc45461c38f500daef9f5647d5ae724bf04
| 1,793
|
py
|
Python
|
AcademicDealerBackend/users/migrations/0001_initial.py
|
Acciente717/AcademicDealerBackend
|
8024725f88997fa430fa92e1caa28161ffbb06f6
|
[
"MIT"
] | 5
|
2019-03-10T06:57:15.000Z
|
2019-03-17T03:04:40.000Z
|
AcademicDealerBackend/users/migrations/0001_initial.py
|
Acciente717/AcademicDealerBackend
|
8024725f88997fa430fa92e1caa28161ffbb06f6
|
[
"MIT"
] | 11
|
2019-05-14T15:13:48.000Z
|
2019-05-31T15:31:33.000Z
|
AcademicDealerBackend/users/migrations/0001_initial.py
|
Acciente717/AcademicDealerBackend
|
8024725f88997fa430fa92e1caa28161ffbb06f6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-05-26 13:34
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254, unique=True)),
('pw_hash', models.CharField(max_length=255)),
('create_time', models.DateTimeField(auto_now_add=True)),
('last_activated_time', models.DateTimeField(default=datetime.date.today)),
('real_name', models.CharField(max_length=255)),
('nick_name', models.CharField(max_length=255, unique=True)),
('pic_url', models.URLField(max_length=512)),
('school', models.CharField(max_length=255)),
('department', models.CharField(max_length=255)),
('title', models.CharField(max_length=255)),
('enrollment_date', models.DateField()),
('profile', models.TextField()),
],
),
migrations.CreateModel(
name='UserFollow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('follow_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follow_user', to='users.UserAccount')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='users.UserAccount')),
],
),
]
| 41.697674
| 148
| 0.597323
|
4a025e5e9dba893a1962371b31eb4830466d96d6
| 5,452
|
py
|
Python
|
docs/conf.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 1,992
|
2015-01-03T03:24:50.000Z
|
2022-03-27T15:28:30.000Z
|
docs/conf.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 408
|
2015-01-12T09:44:02.000Z
|
2022-03-25T15:06:03.000Z
|
docs/conf.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 248
|
2015-01-06T20:02:25.000Z
|
2022-03-11T03:03:47.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Flask-Migrate'
copyright = '2019, Miguel Grinberg'
author = 'Miguel Grinberg'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'miguelgrinberg',
'github_repo': 'flask-migrate',
'github_banner': True,
'github_button': True,
'github_type': 'star',
'fixed_sidebar': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
'**': [
'about.html',
'localtoc.html',
'searchbox.html'
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Migratedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-Migrate.tex', 'Flask-Migrate Documentation',
'Miguel Grinberg', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-migrate', 'Flask-Migrate Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-Migrate', 'Flask-Migrate Documentation',
author, 'Flask-Migrate', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 29.15508
| 79
| 0.64912
|
4a02609fa2618b7dc798a12928168ade98495a93
| 581
|
py
|
Python
|
cmd/read.py
|
onexi/threetiers-2022
|
07261cb00828ce45b801c19428acf4ef388e7162
|
[
"MIT"
] | null | null | null |
cmd/read.py
|
onexi/threetiers-2022
|
07261cb00828ce45b801c19428acf4ef388e7162
|
[
"MIT"
] | null | null | null |
cmd/read.py
|
onexi/threetiers-2022
|
07261cb00828ce45b801c19428acf4ef388e7162
|
[
"MIT"
] | 11
|
2022-03-14T13:46:04.000Z
|
2022-03-14T14:53:10.000Z
|
# To solve the driver problem
# uninstall the following
# -----------------------------------
# pip3 uninstall mysql-connector
# -----------------------------------
# Then install
# -----------------------------------
# pip3 install mysql-connector-python
# -----------------------------------
import mysql.connector
cnx = mysql.connector.connect(user='root',
password='MyNewPass',
host='127.0.0.1',
database='education',
auth_plugin='mysql_native_password')
# -----------------------------------
# YOUR CODE
# -----------------------------------
| 25.26087
| 43
| 0.428571
|
4a0262350e4fa84a0cb8578806520540cfc9fbd8
| 4,009
|
py
|
Python
|
rebrickable_color_to_bricklink.py
|
maczniak/brickstats
|
ac16a62242a374b15d46ccf4132db445dfa1fd76
|
[
"MIT"
] | 1
|
2015-09-10T00:04:23.000Z
|
2015-09-10T00:04:23.000Z
|
rebrickable_color_to_bricklink.py
|
maczniak/brickstats
|
ac16a62242a374b15d46ccf4132db445dfa1fd76
|
[
"MIT"
] | null | null | null |
rebrickable_color_to_bricklink.py
|
maczniak/brickstats
|
ac16a62242a374b15d46ccf4132db445dfa1fd76
|
[
"MIT"
] | null | null | null |
# color references:
# http://rebrickable.com/colors
# http://www.bricklink.com/catalogColors.asp
rebrickable_color_to_bricklink = {
# Solid Colors
15: (1, 'White'),
503: (49, 'Very Light Gray'),
151: (99, 'Very Light Bluish Gray'),
71: (86, 'Light Bluish Gray'),
7: (9, 'Light Gray'),
8: (10, 'Dark Gray'),
72: (85, 'Dark Bluish Gray'),
0: (11, 'Black'),
320: (59, 'Dark Red'),
4: (5, 'Red'),
216: (27, 'Rust'),
12: (25, 'Salmon'),
100: (26, 'Light Salmon'),
335: (58, 'Sand Red'),
70: (88, 'Reddish Brown'),
6: (8, 'Brown'),
308: (120, 'Dark Brown'),
28: (69, 'Dark Tan'),
19: (2, 'Tan'),
78: (90, 'Light Flesh'),
92: (28, 'Flesh'),
84: (150, 'Medium Dark Flesh'),
86: (91, 'Dark Flesh'),
450: (106, 'Fabuland Brown'),
366: (29, 'Earth Orange'),
484: (68, 'Dark Orange'),
25: (4, 'Orange'),
462: (31, 'Medium Orange'),
191: (110, 'Bright Light Orange'),
125: (32, 'Light Orange'),
68: (96, 'Very Light Orange'),
14: (3, 'Yellow'),
226: (103, 'Bright Light Yellow'),
18: (33, 'Light Yellow'),
120: (35, 'Light Lime'),
158: (158, 'Yellowish Green'),
115: (76, 'Medium Lime'),
27: (34, 'Lime'),
326: (155, 'Olive Green'),
288: (80, 'Dark Green'),
2: (6, 'Green'),
10: (36, 'Bright Green'),
74: (37, 'Medium Green'),
17: (38, 'Light Green'),
378: (48, 'Sand Green'),
3: (39, 'Dark Turquoise'),
11: (40, 'Light Turquoise'),
118: (41, 'Aqua'),
323: (152, 'Light Aqua'),
272: (63, 'Dark Blue'),
1: (7, 'Blue'),
321: (153, 'Dark Azure'),
322: (156, 'Medium Azure'),
73: (42, 'Medium Blue'),
313: (72, 'Maersk Blue'),
212: (105, 'Bright Light Blue'),
9: (62, 'Light Blue'),
232: (87, 'Sky Blue'),
379: (55, 'Sand Blue'),
112: (97, 'Blue-Violet'),
23: (109, 'Dark Blue-Violet'),
110: (43, 'Violet'),
1001: (73, 'Medium Violet'),
20: (44, 'Light Violet'),
85: (89, 'Dark Purple'),
22: (24, 'Purple'),
69: (93, 'Light Purple'),
30: (157, 'Medium Lavender'),
31: (154, 'Lavender'),
373: (54, 'Sand Purple'),
26: (71, 'Magenta'),
5: (47, 'Dark Pink'),
351: (94, 'Medium Dark Pink'),
29: (104, 'Bright Pink'),
13: (23, 'Pink'),
77: (56, 'Light Pink'),
# Transparent Colors
47: (12, 'Trans-Clear'),
40: (13, 'Trans-Black'),
36: (17, 'Trans-Red'),
57: (18, 'Trans-Neon Orange'),
182: (98, 'Trans-Orange'),
54: (121, 'Trans-Neon Yellow'),
46: (19, 'Trans-Yellow'),
42: (16, 'Trans-Neon Green'),
35: (108, 'Trans-Bright Green'),
34: (20, 'Trans-Green'),
33: (14, 'Trans-Dark Blue'),
143: (74, 'Trans-Medium Blue'),
41: (15, 'Trans-Light Blue'),
43: (113, 'Trans-Very Lt Blue'),
236: (114, 'Trans-Light Purple'),
52: (51, 'Trans-Purple'),
45: (50, 'Trans-Dark Pink'),
230: (107, 'Trans-Pink'),
# Chrome Colors
334: (21, 'Chrome Gold'),
383: (22, 'Chrome Silver'),
60: (57, 'Chrome Antique Brass'),
64: (122, 'Chrome Black'),
61: (52, 'Chrome Blue'),
62: (64, 'Chrome Green'),
63: (82, 'Chrome Pink'),
# Pearl Colors
183: (83, 'Pearl White'),
150: (119, 'Pearl Very Light Gray'),
135: (66, 'Pearl Light Gray'),
179: (95, 'Flat Silver'),
148: (77, 'Pearl Dark Gray'),
137: (78, 'Metal Blue'),
142: (61, 'Pearl Light Gold'),
297: (115, 'Pearl Gold'),
178: (81, 'Flat Dark Gold'),
134: (84, 'Copper'),
# Metallic Colors
80: (67, 'Metallic Silver'),
81: (70, 'Metallic Green'),
82: (65, 'Metallic Gold'),
# Milky Colors
79: (60, 'Milky White'),
1000: (159, 'Glow in Dark White'),
21: (46, 'Glow In Dark Opaque'),
294: (118, 'Glow In Dark Trans'),
# Glitter Colors
117: (101, 'Glitter Trans-Clear'),
1002: (163, 'Glitter Trans-Neon Green'),
1003: (162, 'Glitter Trans-Light Blue'),
129: (102, 'Glitter Trans-Purple'),
114: (100, 'Glitter Trans-Dark Pink'),
# Speckle Colors
132: (111, 'Speckle Black-Silver'),
133: (151, 'Speckle Black-Gold'),
75: (116, 'Speckle Black-Copper'),
76: (117, 'Speckle DBGray-Silver'),
# missing: (160, 'Fabuland Orange'), (161, 'Dark Yellow'), Modulex Colors
9999: (-1, '(No Color)'),
-1: (-1, 'Unknown'),
89: (-1, 'Royal Blue'), # part bb556 of set 2852725
}
| 27.458904
| 74
| 0.575954
|
4a02640060252f6410db47be68c57355772f2cbb
| 24,828
|
py
|
Python
|
api/base/filters.py
|
saradbowman/osf.io
|
085a3cd581625101e954e6df39940b4c4bd0a5a3
|
[
"Apache-2.0"
] | null | null | null |
api/base/filters.py
|
saradbowman/osf.io
|
085a3cd581625101e954e6df39940b4c4bd0a5a3
|
[
"Apache-2.0"
] | null | null | null |
api/base/filters.py
|
saradbowman/osf.io
|
085a3cd581625101e954e6df39940b4c4bd0a5a3
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import functools
import operator
import re
import pytz
from api.base import utils
from api.base.exceptions import (
InvalidFilterComparisonType,
InvalidFilterError, InvalidFilterFieldError,
InvalidFilterMatchType, InvalidFilterOperator,
InvalidFilterValue,
)
from api.base.serializers import RelationshipField, ShowIfVersion, TargetField
from dateutil import parser as date_parser
from django.core.exceptions import ValidationError
from django.db.models import QuerySet as DjangoQuerySet
from django.db.models import Q
from rest_framework import serializers as ser
from rest_framework.filters import OrderingFilter
from osf.models import Subject, Preprint
from osf.models.base import GuidMixin
from functools import cmp_to_key
def lowercase(lower):
if hasattr(lower, '__call__'):
return lower()
return lower
def sort_multiple(fields):
fields = list(fields)
def sort_fn(a, b):
sort_direction = 1
for field in fields:
if field[0] == '-':
sort_direction = -1
field = field[1:]
a_field = getattr(a, field)
b_field = getattr(b, field)
if a_field > b_field:
return 1 * sort_direction
elif a_field < b_field:
return -1 * sort_direction
return 0
return sort_fn
class OSFOrderingFilter(OrderingFilter):
"""Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm."""
# override
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if isinstance(queryset, DjangoQuerySet):
if queryset.ordered:
return queryset
elif ordering and getattr(queryset.query, 'distinct_fields', None):
order_fields = tuple([field.lstrip('-') for field in ordering])
distinct_fields = queryset.query.distinct_fields
queryset.query.distinct_fields = tuple(set(distinct_fields + order_fields))
return super(OSFOrderingFilter, self).filter_queryset(request, queryset, view)
if ordering:
if isinstance(ordering, (list, tuple)):
sorted_list = sorted(queryset, key=cmp_to_key(sort_multiple(ordering)))
return sorted_list
return queryset.sort(*ordering)
return queryset
def get_serializer_source_field(self, view, request):
"""
Returns a dictionary of serializer fields and source names. i.e. {'date_created': 'created'}
Logic borrowed from OrderingFilter.get_default_valid_fields with modifications to retrieve
source fields for serializer field names.
:param view api view
:
"""
field_to_source_mapping = {}
if hasattr(view, 'get_serializer_class'):
serializer_class = view.get_serializer_class()
else:
serializer_class = getattr(view, 'serializer_class', None)
# This will not allow any serializer fields with nested related fields to be sorted on
for field_name, field in serializer_class(context={'request': request}).fields.items():
if not getattr(field, 'write_only', False) and not field.source == '*' and field_name != field.source:
field_to_source_mapping[field_name] = field.source.replace('.', '_')
return field_to_source_mapping
# Overrides OrderingFilter
def remove_invalid_fields(self, queryset, fields, view, request):
"""
Returns an array of valid fields to be used for ordering.
Any valid source fields which are input remain in the valid fields list using the super method.
Serializer fields are mapped to their source fields and returned.
:param fields, array, input sort fields
:returns array of source fields for sorting.
"""
valid_fields = super(OSFOrderingFilter, self).remove_invalid_fields(queryset, fields, view, request)
if not valid_fields:
for invalid_field in fields:
ordering_sign = '-' if invalid_field[0] == '-' else ''
invalid_field = invalid_field.lstrip('-')
field_source_mapping = self.get_serializer_source_field(view, request)
source_field = field_source_mapping.get(invalid_field, None)
if source_field:
valid_fields.append(ordering_sign + source_field)
return valid_fields
class FilterMixin(object):
""" View mixin with helper functions for filtering. """
QUERY_PATTERN = re.compile(r'^filter\[(?P<fields>((?:,*\s*\w+)*))\](\[(?P<op>\w+)\])?$')
FILTER_FIELDS = re.compile(r'(?:,*\s*(\w+)+)')
MATCH_OPERATORS = ('contains', 'icontains')
MATCHABLE_FIELDS = (ser.CharField, ser.ListField)
DEFAULT_OPERATORS = ('eq', 'ne')
DEFAULT_OPERATOR_OVERRIDES = {
ser.CharField: 'icontains',
ser.ListField: 'contains',
}
NUMERIC_FIELDS = (ser.IntegerField, ser.DecimalField, ser.FloatField)
DATE_FIELDS = (ser.DateTimeField, ser.DateField)
DATETIME_PATTERN = re.compile(r'^\d{4}\-\d{2}\-\d{2}(?P<time>T\d{2}:\d{2}(:\d{2}(\.\d{1,6})?)?)$')
COMPARISON_OPERATORS = ('gt', 'gte', 'lt', 'lte')
COMPARABLE_FIELDS = NUMERIC_FIELDS + DATE_FIELDS
LIST_FIELDS = (ser.ListField, )
RELATIONSHIP_FIELDS = (RelationshipField, TargetField)
def __init__(self, *args, **kwargs):
super(FilterMixin, self).__init__(*args, **kwargs)
if not self.serializer_class:
raise NotImplementedError()
def _get_default_operator(self, field):
return self.DEFAULT_OPERATOR_OVERRIDES.get(type(field), 'eq')
def _get_valid_operators(self, field):
if isinstance(field, self.COMPARABLE_FIELDS):
return self.COMPARISON_OPERATORS + self.DEFAULT_OPERATORS
elif isinstance(field, self.MATCHABLE_FIELDS):
return self.MATCH_OPERATORS + self.DEFAULT_OPERATORS
else:
return self.DEFAULT_OPERATORS
def _get_field_or_error(self, field_name):
"""
Check that the attempted filter field is valid
:raises InvalidFilterError: If the filter field is not valid
"""
predeclared_fields = self.serializer_class._declared_fields
initialized_fields = self.get_serializer().fields if hasattr(self, 'get_serializer') else {}
serializer_fields = predeclared_fields.copy()
# Merges fields that were declared on serializer with fields that may have been dynamically added
serializer_fields.update(initialized_fields)
if field_name not in serializer_fields:
raise InvalidFilterError(detail="'{0}' is not a valid field for this endpoint.".format(field_name))
if field_name not in getattr(self.serializer_class, 'filterable_fields', set()):
raise InvalidFilterFieldError(parameter='filter', value=field_name)
field = serializer_fields[field_name]
# You cannot filter on deprecated fields.
if isinstance(field, ShowIfVersion) and utils.is_deprecated(self.request.version, field.min_version, field.max_version):
raise InvalidFilterFieldError(parameter='filter', value=field_name)
return serializer_fields[field_name]
def _validate_operator(self, field, field_name, op):
"""
Check that the operator and field combination is valid
:raises InvalidFilterComparisonType: If the query contains comparisons against non-date or non-numeric fields
:raises InvalidFilterMatchType: If the query contains comparisons against non-string or non-list fields
:raises InvalidFilterOperator: If the filter operator is not a member of self.COMPARISON_OPERATORS
"""
if op not in set(self.MATCH_OPERATORS + self.COMPARISON_OPERATORS + self.DEFAULT_OPERATORS):
valid_operators = self._get_valid_operators(field)
raise InvalidFilterOperator(value=op, valid_operators=valid_operators)
if op in self.COMPARISON_OPERATORS:
if not isinstance(field, self.COMPARABLE_FIELDS):
raise InvalidFilterComparisonType(
parameter='filter',
detail="Field '{0}' does not support comparison operators in a filter.".format(field_name),
)
if op in self.MATCH_OPERATORS:
if not isinstance(field, self.MATCHABLE_FIELDS):
raise InvalidFilterMatchType(
parameter='filter',
detail="Field '{0}' does not support match operators in a filter.".format(field_name),
)
def _parse_date_param(self, field, source_field_name, op, value):
"""
Allow for ambiguous date filters. This supports operations like finding Nodes created on a given day
even though Node.created is a specific datetime.
:return list<dict>: list of one (specific datetime) or more (date range) parsed query params
"""
time_match = self.DATETIME_PATTERN.match(value)
if op != 'eq' or time_match:
return {
'op': op,
'value': self.convert_value(value, field),
'source_field_name': source_field_name,
}
else: # TODO: let times be as generic as possible (i.e. whole month, whole year)
start = self.convert_value(value, field)
stop = start + datetime.timedelta(days=1)
return [
{
'op': 'gte',
'value': start,
'source_field_name': source_field_name,
}, {
'op': 'lt',
'value': stop,
'source_field_name': source_field_name,
},
]
def bulk_get_values(self, value, field):
"""
Returns list of values from query_param for IN query
If url contained `/nodes/?filter[id]=12345, abcde`, the returned values would be:
[u'12345', u'abcde']
"""
value = value.lstrip('[').rstrip(']')
separated_values = value.split(',')
values = [self.convert_value(val.strip(), field) for val in separated_values]
return values
def parse_query_params(self, query_params):
"""Maps query params to a dict usable for filtering
:param dict query_params:
:return dict: of the format {
<resolved_field_name>: {
'op': <comparison_operator>,
'value': <resolved_value>,
'source_field_name': <model_field_source_of_serializer_field>
}
}
"""
query = {}
for key, value in query_params.items():
match = self.QUERY_PATTERN.match(key)
if match:
match_dict = match.groupdict()
fields = match_dict['fields']
field_names = re.findall(self.FILTER_FIELDS, fields.strip())
query.update({key: {}})
for field_name in field_names:
field = self._get_field_or_error(field_name)
op = match_dict.get('op') or self._get_default_operator(field)
self._validate_operator(field, field_name, op)
source_field_name = field_name
if not isinstance(field, ser.SerializerMethodField):
source_field_name = self.convert_key(field_name, field)
# Special case date(time)s to allow for ambiguous date matches
if isinstance(field, self.DATE_FIELDS):
query.get(key).update({
field_name: self._parse_date_param(field, source_field_name, op, value),
})
elif not isinstance(value, int) and source_field_name in ['_id', 'guid._id', 'journal_id']:
query.get(key).update({
field_name: {
'op': 'in',
'value': self.bulk_get_values(value, field),
'source_field_name': source_field_name,
},
})
elif not isinstance(value, int) and source_field_name == 'root':
query.get(key).update({
field_name: {
'op': op,
'value': self.bulk_get_values(value, field),
'source_field_name': source_field_name,
},
})
elif self.should_parse_special_query_params(field_name):
query = self.parse_special_query_params(field_name, key, value, query)
else:
query.get(key).update({
field_name: {
'op': op,
'value': self.convert_value(value, field),
'source_field_name': source_field_name,
},
})
self.postprocess_query_param(key, field_name, query[key][field_name])
return query
def postprocess_query_param(self, key, field_name, operation):
"""Hook to update parsed query parameters. Overrides of this method should either
update ``operation`` in-place or do nothing.
"""
pass
def should_parse_special_query_params(self, field_name):
""" This should be overridden in subclasses for custom filtering behavior
"""
return False
def parse_special_query_params(self, field_name, key, value, query):
""" This should be overridden in subclasses for custom filtering behavior
"""
pass
def convert_key(self, field_name, field):
"""Used so that that queries on fields with the source attribute set will work
:param basestring field_name: text representation of the field name
:param rest_framework.fields.Field field: Field instance
"""
field = utils.decompose_field(field)
source = field.source
if source == '*':
source = getattr(field, 'filter_key', None)
return source or field_name
def convert_value(self, value, field):
"""Used to convert incoming values from query params to the appropriate types for filter comparisons
:param basestring value: value to be resolved
:param rest_framework.fields.Field field: Field instance
"""
field = utils.decompose_field(field)
if isinstance(field, ShowIfVersion):
field = field.field
if isinstance(field, ser.BooleanField):
if utils.is_truthy(value):
return True
elif utils.is_falsy(value):
return False
else:
raise InvalidFilterValue(
value=value,
field_type='bool',
)
elif isinstance(field, self.DATE_FIELDS):
try:
ret = date_parser.parse(value, ignoretz=False)
if not ret.tzinfo:
ret = ret.replace(tzinfo=pytz.utc)
return ret
except ValueError:
raise InvalidFilterValue(
value=value,
field_type='date',
)
elif isinstance(field, (self.RELATIONSHIP_FIELDS, ser.SerializerMethodField, ser.ManyRelatedField)):
if value == 'null':
value = None
return value
elif isinstance(field, self.LIST_FIELDS) or isinstance((getattr(field, 'field', None)), self.LIST_FIELDS):
if value == 'null':
value = []
return value
else:
try:
return field.to_internal_value(value)
except ValidationError:
raise InvalidFilterValue(
value=value,
)
class ListFilterMixin(FilterMixin):
"""View mixin that adds a get_queryset_from_request method which uses query params
of the form `filter[field_name]=value` to filter a list of objects.
Subclasses must define `get_default_queryset()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a frozenset of strings representing the field names as they appear in the serialization.
"""
FILTERS = {
'eq': operator.eq,
'lt': operator.lt,
'lte': operator.le,
'gt': operator.gt,
'gte': operator.ge,
}
def __init__(self, *args, **kwargs):
super(FilterMixin, self).__init__(*args, **kwargs)
if not self.serializer_class:
raise NotImplementedError()
def get_default_queryset(self):
raise NotImplementedError('Must define get_default_queryset')
def get_queryset_from_request(self):
default_queryset = self.get_default_queryset()
if not self.kwargs.get('is_embedded') and self.request.query_params:
param_queryset = self.param_queryset(self.request.query_params, default_queryset)
return param_queryset
else:
return default_queryset
def param_queryset(self, query_params, default_queryset):
"""filters default queryset based on query parameters"""
filters = self.parse_query_params(query_params)
queryset = default_queryset
query_parts = []
if filters:
for key, field_names in filters.items():
sub_query_parts = []
for field_name, data in field_names.items():
operations = data if isinstance(data, list) else [data]
if isinstance(queryset, list):
for operation in operations:
queryset = self.get_filtered_queryset(field_name, operation, queryset)
else:
sub_query_parts.append(
functools.reduce(
operator.and_, [
self.build_query_from_field(field_name, operation)
for operation in operations
],
),
)
if not isinstance(queryset, list):
sub_query = functools.reduce(operator.or_, sub_query_parts)
query_parts.append(sub_query)
if not isinstance(queryset, list):
for query in query_parts:
queryset = queryset.filter(query)
return queryset
def build_query_from_field(self, field_name, operation):
query_field_name = operation['source_field_name']
if operation['op'] == 'ne':
return ~Q(**{query_field_name: operation['value']})
elif operation['op'] != 'eq':
query_field_name = '{}__{}'.format(query_field_name, operation['op'])
return Q(**{query_field_name: operation['value']})
return Q(**{query_field_name: operation['value']})
def postprocess_query_param(self, key, field_name, operation):
# tag queries will usually be on Tag.name,
# ?filter[tags]=foo should be translated to MQ('tags__name', 'eq', 'foo')
# But queries on lists should be tags, e.g.
# ?filter[tags]=foo,bar should be translated to MQ('tags', 'isnull', True)
# ?filter[tags]=[] should be translated to MQ('tags', 'isnull', True)
if field_name == 'tags':
if operation['value'] not in (list(), tuple()):
operation['source_field_name'] = 'tags__name'
operation['op'] = 'iexact'
elif operation['value'] == []:
operation['source_field_name'] = 'tags__isnull'
operation['value'] = True
operation['op'] = 'eq'
# contributors iexact because guid matching
if field_name == 'contributors':
if operation['value'] not in (list(), tuple()):
operation['source_field_name'] = '_contributors__guids___id'
operation['op'] = 'iexact'
if field_name == 'kind':
operation['source_field_name'] = 'is_file'
# The value should be boolean
operation['value'] = operation['value'] == 'file'
if field_name == 'bibliographic':
operation['op'] = 'exact'
if field_name == 'permission':
operation['op'] = 'exact'
if field_name == 'id':
operation['source_field_name'] = (
'guids___id'
if issubclass(self.model_class, GuidMixin)
else self.model_class.primary_identifier_name
)
operation['op'] = 'in'
if field_name == 'subjects':
self.postprocess_subject_query_param(operation)
def postprocess_subject_query_param(self, operation):
if Subject.objects.filter(_id=operation['value']).exists():
operation['source_field_name'] = 'subjects___id'
else:
operation['source_field_name'] = 'subjects__text'
operation['op'] = 'iexact'
def get_filtered_queryset(self, field_name, params, default_queryset):
"""filters default queryset based on the serializer field type"""
field = self.serializer_class._declared_fields[field_name]
source_field_name = params['source_field_name']
if isinstance(field, ser.SerializerMethodField):
return_val = [
item for item in default_queryset
if self.FILTERS[params['op']](self.get_serializer_method(field_name)(item), params['value'])
]
elif isinstance(field, ser.CharField):
if source_field_name in ('_id', 'root'):
# Param parser treats certain ID fields as bulk queries: a list of options, instead of just one
# Respect special-case behavior, and enforce exact match for these list fields.
options = set(item.lower() for item in params['value'])
return_val = [
item for item in default_queryset
if getattr(item, source_field_name, '') in options
]
else:
# TODO: What is {}.lower()? Possible bug
return_val = [
item for item in default_queryset
if params['value'].lower() in getattr(item, source_field_name, {}).lower()
]
elif isinstance(field, ser.ListField):
return_val = [
item for item in default_queryset
if params['value'].lower() in [
lowercase(i.lower) for i in getattr(item, source_field_name, [])
]
]
else:
try:
return_val = [
item for item in default_queryset
if self.FILTERS[params['op']](getattr(item, source_field_name, None), params['value'])
]
except TypeError:
raise InvalidFilterValue(detail='Could not apply filter to specified field')
return return_val
def get_serializer_method(self, field_name):
"""
:param field_name: The name of a SerializerMethodField
:return: The function attached to the SerializerMethodField to get its value
"""
serializer = self.get_serializer()
serializer_method_name = 'get_' + field_name
return getattr(serializer, serializer_method_name)
class PreprintFilterMixin(ListFilterMixin):
    """View mixin that uses ListFilterMixin, adding postprocessing for preprint querying

    Subclasses must define `get_default_queryset()`.
    """
    def postprocess_query_param(self, key, field_name, operation):
        # Map serializer field names onto the ORM paths used for filtering.
        source_overrides = {
            'provider': 'provider___id',
            'id': 'guids___id',
        }
        if field_name in source_overrides:
            operation['source_field_name'] = source_overrides[field_name]
        if field_name == 'subjects':
            self.postprocess_subject_query_param(operation)

    def preprints_queryset(self, base_queryset, auth_user, allow_contribs=True, public_only=False):
        """Restrict *base_queryset* to the preprints visible to *auth_user*."""
        return Preprint.objects.can_view(
            base_queryset=base_queryset,
            user=auth_user,
            allow_contribs=allow_contribs,
            public_only=public_only,
        )
| 43.104167
| 128
| 0.596786
|
4a0264c0d77f22d3c389bdb9b353888bc49f4df7
| 22,279
|
py
|
Python
|
dataworkspaces/utils/git_utils.py
|
data-workspaces/data-workspaces-core
|
2fd457414a9481f78dbd689c92479fb3a6d838b0
|
[
"Apache-2.0"
] | 6
|
2019-04-16T10:44:41.000Z
|
2021-02-24T09:34:10.000Z
|
dataworkspaces/utils/git_utils.py
|
data-workspaces/data-workspaces-core
|
2fd457414a9481f78dbd689c92479fb3a6d838b0
|
[
"Apache-2.0"
] | 67
|
2019-03-08T13:32:31.000Z
|
2022-03-09T15:15:41.000Z
|
dataworkspaces/utils/git_utils.py
|
data-workspaces/data-workspaces-core
|
2fd457414a9481f78dbd689c92479fb3a6d838b0
|
[
"Apache-2.0"
] | 2
|
2020-04-24T02:48:56.000Z
|
2022-01-14T01:07:48.000Z
|
# Copyright 2018,2019 by MPI-SWS and Data-ken Research. Licensed under Apache 2.0. See LICENSE.txt.
"""
Utility functions related to interacting with git
"""
from os.path import isdir, join, dirname, exists
from subprocess import run, PIPE
import shutil
import re
import tempfile
import json
from typing import Any, List
import click
from .subprocess_utils import find_exe, call_subprocess, call_subprocess_for_rc
from .file_utils import remove_dir_if_empty
from dataworkspaces.errors import ConfigurationError, InternalError, UserAbort
def is_git_repo(dirpath):
    """Return True if *dirpath* contains a ``.git`` subdirectory.

    Note: this detects a repository root (or embedded .git directory), not
    an arbitrary path inside a working tree.
    """
    # isdir() already yields the boolean we want; no if/else needed.
    return isdir(join(dirpath, ".git"))
GIT_EXE_PATH = find_exe("git", "Please make sure that you have git installed on your machine.")
def is_git_dirty(cwd):
    """Return True when the repo at *cwd* has untracked files or staged /
    working-tree changes; False when clean.

    :raises ConfigurationError: if git is unavailable or the status call fails.
    """
    if GIT_EXE_PATH is None:
        raise ConfigurationError("git executable not found")
    proc = run([GIT_EXE_PATH, "status", "--porcelain"], cwd=cwd, stdout=PIPE, encoding="utf-8")
    dirty_flags = ("?", "D", "M", "A")
    for entry in proc.stdout.split("\n"):
        if len(entry) < 2:
            continue
        # Column 0 = staging status, column 1 = working-tree status.
        if entry[0] in dirty_flags or entry[1] in dirty_flags:
            return True
    if proc.returncode != 0:
        raise ConfigurationError("Problem invoking %s status on %s" % (GIT_EXE_PATH, cwd))
    return False
def is_git_subdir_dirty(cwd, subdir):
    """Like :func:`is_git_dirty`, but restricted to *subdir*: looks for
    untracked files, changes in staging, and changes in the working directory.
    """
    # Consistency fix: guard against a missing git executable, as
    # is_git_dirty does, instead of failing with an opaque TypeError.
    if GIT_EXE_PATH is None:
        raise ConfigurationError("git executable not found")
    cmd = [GIT_EXE_PATH, "status", "--porcelain", subdir]
    p = run(cmd, cwd=cwd, stdout=PIPE, encoding="utf-8")
    for line in p.stdout.split("\n"):
        if len(line) < 2:
            continue
        # Column 0 = staging status, column 1 = working-tree status.
        if (line[0] in ("?", "D", "M", "A")) or (line[1] in ("?", "D", "M", "A")):
            return True
    if p.returncode == 0:
        return False
    else:
        raise ConfigurationError(
            "Problem invoking %s status %s on %s" % (GIT_EXE_PATH, subdir, cwd)
        )
def is_git_staging_dirty(cwd, subdir=None):
    """See if the git repo has uncommitted changes in staging. If the
    subdirectory is specified, then we only look within that subdirectory.
    """
    cmd = [GIT_EXE_PATH, "status", "--porcelain"]
    if subdir is not None:
        cmd.append(subdir)
    p = run(cmd, cwd=cwd, stdout=PIPE, encoding="utf-8")
    for line in p.stdout.split("\n"):
        if len(line) < 2:
            continue
        # Column 0 of porcelain output is the staging-area status.  Unlike
        # is_git_dirty, untracked ('?') entries are deliberately ignored here.
        if line[0] in ("D", "M", "A"):
            return True
    if p.returncode == 0:
        return False
    else:
        raise ConfigurationError("Problem invoking %s status on %s" % (GIT_EXE_PATH, cwd))
def echo_git_status_for_user(cwd):
    """Run ``git status`` in *cwd*, letting its output stream to the console.

    :raises ConfigurationError: if git is missing or the command fails.
    """
    if GIT_EXE_PATH is None:
        raise ConfigurationError("git executable not found")
    cmd = [GIT_EXE_PATH, "status"]
    # Output is not captured on purpose -- it goes straight to the user.
    # (Removed dead commented-out capture/echo code.)
    p = run(cmd, cwd=cwd, encoding="utf-8")
    if p.returncode != 0:
        raise ConfigurationError("Problem invoking %s status on %s" % (GIT_EXE_PATH, cwd))
def is_pull_needed_from_remote(cwd: str, branch: str, verbose: bool) -> bool:
    """To check whether we need a pull, get the hash of the HEAD of the
    remote's branch, then see if we have this commit object locally.
    """
    hashval = get_remote_head_hash(cwd, branch, verbose)
    if hashval is None:
        # Remote branch has no commits yet -> nothing to pull.
        return False
    # "cat-file -e" exits non-zero when the object is absent locally.
    cmd = [GIT_EXE_PATH, "cat-file", "-e", hashval + "^{commit}"]
    rc = call_subprocess_for_rc(cmd, cwd, verbose=verbose)
    return rc != 0
def git_init(repo_dir, verbose=False):
    """Run ``git init`` in *repo_dir*."""
    cmd = [GIT_EXE_PATH, "init"]
    call_subprocess(cmd, cwd=repo_dir, verbose=verbose)
def git_add(repo_dir: str, relative_paths: List[str], verbose: bool = False) -> None:
    """Stage the given paths (relative to *repo_dir*) via ``git add``."""
    cmd = [GIT_EXE_PATH, "add", *relative_paths]
    call_subprocess(cmd, cwd=repo_dir, verbose=verbose)
def git_commit(repo_dir: str, message: str, verbose: bool = False) -> None:
    """Unconditional git commit with *message* (fails if nothing staged)."""
    cmd = [GIT_EXE_PATH, "commit", "-m", message]
    call_subprocess(cmd, cwd=repo_dir, verbose=verbose)
def get_branch_info(local_path, verbose=False):
    """Return ``(current_branch, other_branches)`` for the repo at *local_path*.

    :raises InternalError: if no current branch could be determined.
    """
    output = call_subprocess([GIT_EXE_PATH, "branch"], cwd=local_path, verbose=verbose)
    current = None
    others = []
    for raw_line in output.split("\n"):
        entry = raw_line.strip()
        if not entry:
            continue
        if entry.startswith("*"):
            # "* name" marks the checked-out branch; there can be only one.
            assert current is None
            current = entry[2:]
        else:
            others.append(entry)
    if current is None:
        raise InternalError(
            "Problem obtaining branch information for local git repo at %s" % local_path
        )
    return (current, others)
def switch_git_branch(local_path, branch, verbose):
    """Check out *branch* in the repo at *local_path*.

    :raises ConfigurationError: if the checkout fails for any reason.
    """
    cmd = [GIT_EXE_PATH, "checkout", branch]
    try:
        call_subprocess(cmd, cwd=local_path, verbose=verbose)
    except Exception as e:
        raise ConfigurationError(
            "Unable to switch git repo at %s to branch %s" % (local_path, branch)
        ) from e
def switch_git_branch_if_needed(local_path, branch, verbose, ok_if_not_present=False):
    """Switch to *branch* unless it is already checked out.

    :param ok_if_not_present: when True, attempt the checkout even if the
        branch is not in the local branch list.
    :raises InternalError: if the branch is unknown and ok_if_not_present is False.
    """
    (current, others) = get_branch_info(local_path, verbose)
    if branch == current:
        return
    if (branch not in others) and (not ok_if_not_present):
        # Bug fix: the original message said "at %s" but interpolated the
        # branch list instead of the repo path; report both explicitly.
        raise InternalError(
            "Trying to switch to branch %s not in repo at %s (known branches: %s)"
            % (branch, local_path, others)
        )
    switch_git_branch(local_path, branch, verbose)
def git_remove_subtree(
    repo_dir: str, relative_path: str, remove_history: bool = False, verbose: bool = False
) -> None:
    """Remove the directory tree at *relative_path* from the repo (``git rm -rf``).

    The *remove_history* path is intentionally disabled via the assert below;
    the filter-branch code after the assert is currently unreachable.
    """
    if remove_history:
        # removing history is problematic, as you need to --force the
        # next time you do a push. That also implies that you do a pull before
        # running the delete. See
        # https://help.github.com/en/articles/removing-sensitive-data-from-a-repository
        # for details.
        assert 0, "removing history not currently supported"
        if is_git_staging_dirty(repo_dir):
            # The history rewrite will fail if the repo is dirty, so
            # we will commit first.
            call_subprocess(
                [
                    GIT_EXE_PATH,
                    "commit",
                    "-m",
                    "commit before removing %s and its history" % relative_path,
                ],
                cwd=repo_dir,
                verbose=verbose,
            )
        # Rewrite every commit, dropping the subtree from each index.
        call_subprocess(
            [
                GIT_EXE_PATH,
                "filter-branch",
                "--index-filter",
                "%s rm --cached --ignore-unmatch -rf %s" % (GIT_EXE_PATH, relative_path),
                "--prune-empty",
                "-f",
                "HEAD",
            ],
            cwd=repo_dir,
            verbose=verbose,
        )
    else:
        call_subprocess([GIT_EXE_PATH, "rm", "-rf", relative_path], cwd=repo_dir, verbose=verbose)
def git_remove_file(
    repo_dir: str, relative_path: str, remove_history: bool = False, verbose: bool = False
) -> None:
    """Remove the single file at *relative_path* from the repo (``git rm``).

    The *remove_history* path is intentionally disabled via the assert below;
    the filter-branch code after the assert is currently unreachable.
    """
    if remove_history:
        # removing history is problematic, as you need to --force the
        # next time you do a push. That also implies that you do a pull before
        # running the delete. See
        # https://help.github.com/en/articles/removing-sensitive-data-from-a-repository
        # for details.
        assert 0, "removing history not currently supported"
        if is_git_staging_dirty(repo_dir):
            # The history rewrite will fail if the repo is dirty, so
            # we will commit first.
            call_subprocess(
                [
                    GIT_EXE_PATH,
                    "commit",
                    "-m",
                    "commit before removing %s and its history" % relative_path,
                ],
                cwd=repo_dir,
                verbose=verbose,
            )
        # Rewrite every commit, dropping the file from each index.
        call_subprocess(
            [
                GIT_EXE_PATH,
                "filter-branch",
                "--index-filter",
                "%s rm --cached --ignore-unmatch %s" % (GIT_EXE_PATH, relative_path),
                "--prune-empty",
                "-f",
                "HEAD",
            ],
            cwd=repo_dir,
            verbose=verbose,
        )
    else:
        call_subprocess([GIT_EXE_PATH, "rm", relative_path], cwd=repo_dir, verbose=verbose)
def commit_changes_in_repo(local_path, message, remove_empty_dirs=False, verbose=False):
    """Figure out what has changed in the working tree relative to
    HEAD and get those changes into HEAD. We only commit if there
    is something to be done.

    :param remove_empty_dirs: also prune directories left empty by deletions.
    """
    status = call_subprocess(
        [GIT_EXE_PATH, "status", "--porcelain"], cwd=local_path, verbose=verbose
    )
    maybe_delete_dirs = []
    need_to_commit = False
    # Porcelain format: column 0 = staging status, column 1 = working-tree
    # status, rest of line = path.
    for line in status.split("\n"):
        if len(line) < 2:
            continue
        relpath = line[2:].strip()
        if line[1] == "?":
            # untracked -> stage it
            call_subprocess([GIT_EXE_PATH, "add", relpath], cwd=local_path, verbose=verbose)
            need_to_commit = True
        elif line[1] == "D":
            # deleted in working tree -> stage the removal
            call_subprocess([GIT_EXE_PATH, "rm", relpath], cwd=local_path, verbose=verbose)
            maybe_delete_dirs.append(dirname(join(local_path, relpath)))
            need_to_commit = True
        elif line[1] == "M":
            # modified in working tree -> stage it
            call_subprocess([GIT_EXE_PATH, "add", relpath], cwd=local_path, verbose=verbose)
            need_to_commit = True
        elif line[0] in ("?", "A", "D", "M"):
            # change is already staged; nothing to add, but a commit is needed
            need_to_commit = True
            if line[0] == "D":
                maybe_delete_dirs.append(dirname(join(local_path, relpath)))
        elif verbose:
            click.echo("Skipping git status line: '%s'" % line)
    if remove_empty_dirs:
        for d in maybe_delete_dirs:
            remove_dir_if_empty(d, local_path, verbose=verbose)
    if need_to_commit:
        call_subprocess([GIT_EXE_PATH, "commit", "-m", message], cwd=local_path, verbose=verbose)
def checkout_and_apply_commit(local_path, commit_hash, verbose=False):
    """Checkout the commit and apply the changes to HEAD.

    Implemented by diffing HEAD against *commit_hash*, piping that patch
    through ``git apply``, and committing the result.
    """
    # make sure the repo is in a committed state
    commit_changes_in_repo(
        local_path, "Commit state of repo prior to restore of %s" % commit_hash, verbose=verbose
    )
    # make sure there are actually differences between the commits
    if (
        call_subprocess_for_rc(
            [GIT_EXE_PATH, "diff", "--exit-code", "--quiet", "HEAD", commit_hash],
            cwd=local_path,
            verbose=verbose,
        )
        == 0
    ):
        if verbose:
            click.echo("No changes for %s between HEAD and %s" % (local_path, commit_hash))
        return
    # ok, there are, apply the changes
    # NOTE(review): shell pipeline assumes GIT_EXE_PATH/commit_hash are
    # shell-safe; both come from trusted sources here.
    cmdstr = "%s diff HEAD %s | %s apply" % (GIT_EXE_PATH, commit_hash, GIT_EXE_PATH)
    if verbose:
        click.echo(cmdstr + "[run in %s]" % local_path)
    cp = run(cmdstr, cwd=local_path, shell=True)
    cp.check_returncode()
    commit_changes_in_repo(
        local_path, "Revert to commit %s" % commit_hash, remove_empty_dirs=True, verbose=verbose
    )
def commit_changes_in_repo_subdir(
    local_path, subdir, message, remove_empty_dirs=False, verbose=False
):
    """For only the specified subdirectory, figure out what has changed in
    the working tree relative to HEAD and get those changes into HEAD. We
    only commit if there is something to be done.
    """
    # Normalize so the startswith() check below matches whole path components.
    if not subdir.endswith("/"):
        subdir = subdir + "/"
    status = call_subprocess(
        [GIT_EXE_PATH, "status", "--porcelain", subdir], cwd=local_path, verbose=verbose
    )
    maybe_delete_dirs = []
    need_to_commit = False
    for line in status.split("\n"):
        if len(line) < 2:
            continue
        # first character is the staging area status, second character
        # is the working tree status, and rest is the relative path.
        relpath = line[2:].strip()
        if not relpath.startswith(subdir):
            raise InternalError("Git status line not in subdirectory %s: %s" % (subdir, line))
        elif line[1] == "?":
            # untracked -> stage it
            call_subprocess([GIT_EXE_PATH, "add", relpath], cwd=local_path, verbose=verbose)
            need_to_commit = True
        elif line[1] == "D":
            # deleted in working tree -> stage the removal
            call_subprocess([GIT_EXE_PATH, "rm", relpath], cwd=local_path, verbose=verbose)
            maybe_delete_dirs.append(dirname(join(local_path, relpath)))
            need_to_commit = True
        elif line[1] == "M":
            # modified in working tree -> stage it
            call_subprocess([GIT_EXE_PATH, "add", relpath], cwd=local_path, verbose=verbose)
            need_to_commit = True
        elif line[0] in ("?", "A", "D", "M"):
            # change is already staged; a commit is still needed
            need_to_commit = True
            if line[0] == "D":
                maybe_delete_dirs.append(dirname(join(local_path, relpath)))
        elif verbose:
            click.echo("Skipping git status line: '%s'" % line)
    if remove_empty_dirs:
        for d in maybe_delete_dirs:
            remove_dir_if_empty(d, join(local_path, subdir), verbose=verbose)
    if need_to_commit:
        # --only restricts the commit to the subdirectory's staged changes.
        call_subprocess(
            [GIT_EXE_PATH, "commit", "--only", subdir, "-m", message],
            cwd=local_path,
            verbose=verbose,
        )
def checkout_subdir_and_apply_commit(local_path, subdir, commit_hash, verbose=False):
    """Checkout the commit and apply the changes to HEAD, just for a specific
    subdirectory in the repo.
    """
    # make sure the subdirectory is in a committed state first
    commit_changes_in_repo_subdir(
        local_path,
        subdir,
        "Commit state of repo prior to restore of %s" % commit_hash,
        verbose=verbose,
    )
    # make sure there are actually differences between the commits
    if (
        call_subprocess_for_rc(
            [GIT_EXE_PATH, "diff", "--exit-code", "--quiet", "HEAD", commit_hash, subdir],
            cwd=local_path,
            verbose=verbose,
        )
        == 0
    ):
        if verbose:
            click.echo(
                "No changes for %s in %s between HEAD and %s" % (local_path, subdir, commit_hash)
            )
        return
    # ok, there are, apply the changes
    # NOTE(review): shell pipeline assumes the interpolated values are
    # shell-safe; all come from trusted sources here.
    cmdstr = "%s diff HEAD %s %s | %s apply" % (GIT_EXE_PATH, commit_hash, subdir, GIT_EXE_PATH)
    if verbose:
        click.echo(cmdstr + "[run in %s]" % local_path)
    cp = run(cmdstr, cwd=local_path, shell=True)
    cp.check_returncode()
    commit_changes_in_repo_subdir(
        local_path,
        subdir,
        "Revert to commit %s" % commit_hash,
        remove_empty_dirs=True,
        verbose=verbose,
    )
def is_file_tracked_by_git(filepath, cwd, verbose):
    """Return True iff ``git ls-files`` knows about *filepath*."""
    cmd = [GIT_EXE_PATH, "ls-files", "--error-unmatch", filepath]
    return call_subprocess_for_rc(cmd, cwd, verbose=verbose) == 0
def get_local_head_hash(git_root, verbose=False):
    """Return the commit hash of the local HEAD revision."""
    raw = call_subprocess([GIT_EXE_PATH, "rev-parse", "HEAD"], cwd=git_root, verbose=verbose)
    return raw.strip()
def get_remote_head_hash(cwd, branch, verbose):
    """Return the commit hash of origin/*branch*, or None when the remote
    branch has no commits yet.

    :raises ConfigurationError: if the remote cannot be queried.
    """
    cmd = [GIT_EXE_PATH, "ls-remote", "origin", "-h", "refs/heads/" + branch]
    try:
        output = call_subprocess(cmd, cwd, verbose).split("\n")[0].strip()
        if output == "":
            return None  # remote has no commits
        else:
            # ls-remote lines are "<hash>\t<ref>"; keep only the hash.
            hashval = output.split()[0]
            return hashval
    except Exception as e:
        raise ConfigurationError(
            "Problem in accessing remote repository associated with '%s'" % cwd
        ) from e
LS_TREE_RE = re.compile(r"^\d+\s+tree\s([0-9a-f]{40})\s+(\w+.*)$")
def get_subdirectory_hash(repo_dir, relpath, verbose=False):
    """Get the subdirectory hash for the HEAD revision of the
    specified path. This matches the hash that git is storing
    internally. You should be able to run: git cat-file -p HASH
    to see a listing of the contents.

    :raises InternalError: if *relpath* is not a tree in HEAD.
    """
    cmd = [GIT_EXE_PATH, "ls-tree", "-t", "HEAD", relpath]
    if verbose:
        click.echo("%s [run in %s]" % (" ".join(cmd), repo_dir))
    cp = run(cmd, cwd=repo_dir, encoding="utf-8", stdout=PIPE, stderr=PIPE)
    cp.check_returncode()
    for line in cp.stdout.split("\n"):
        m = LS_TREE_RE.match(line)
        if m is None:
            continue
        hashval = m.group(1)
        subdir = m.group(2)
        # "-t" also lists parent trees; only return the exact path requested.
        if subdir == relpath:
            return hashval
    raise InternalError("Did not find subdirectory '%s' in git ls-tree" % relpath)
def get_remote_origin_url(repo_dir: str, verbose: bool) -> str:
    """Return the configured ``remote.origin.url`` for the repo.

    :raises ConfigurationError: if no origin remote is configured.
    """
    cmd = [GIT_EXE_PATH, "config", "--get", "remote.origin.url"]
    try:
        return call_subprocess(cmd, cwd=repo_dir, verbose=verbose).strip()
    except Exception as e:
        raise ConfigurationError(
            "Problem getting remote origin from repository at %s. Do you have a remote origin configured?"
            % repo_dir
        ) from e
def get_json_file_from_remote(relpath: str, repo_dir: str, verbose: bool) -> Any:
    """Download a JSON file from the remote master, parse it,
    and return it.

    A shallow clone is used because GitHub does not support ``git archive``
    (see issue #30).

    :raises ConfigurationError: if the clone or the file read/parse fails.
    """
    remote_url = get_remote_origin_url(repo_dir, verbose)
    try:
        # TemporaryDirectory removes itself on exit (including on error), so
        # the old manual shutil.rmtree cleanup in the except clause was dead
        # code and has been removed.
        with tempfile.TemporaryDirectory() as tdir:
            call_subprocess(
                [GIT_EXE_PATH, "clone", "--depth=1", remote_url, "root"], cwd=tdir, verbose=verbose
            )
            with open(join(join(tdir, "root"), relpath), "r") as f:
                return json.load(f)
    except Exception as e:
        raise ConfigurationError("Problem retrieving file %s from remote" % relpath) from e
def set_remote_origin(repo_dir, remote_url, verbose):
    """Register *remote_url* as the repo's ``origin`` remote."""
    cmd = [GIT_EXE_PATH, "remote", "add", "origin", remote_url]
    call_subprocess(cmd, cwd=repo_dir, verbose=verbose)
def get_git_config_param(repo_dir, param_name, verbose):
    """Return the (stripped) value of a git config parameter."""
    raw = call_subprocess([GIT_EXE_PATH, "config", param_name], cwd=repo_dir, verbose=verbose)
    return raw.strip()
def ensure_entry_in_gitignore(
    repo_dir: str,
    gitignore_rel_path: str,
    entry: str,
    match_independent_of_slashes=False,
    commit: bool = False,
    verbose=False,
) -> bool:
    """Ensure that the specified entry is in the specified .gitignore file.
    Entries can have a leading slash (refers to an absolute path within the repo)
    and a trailing slash (matches only a directory, not a file).
    If match_independent_of_slashes is True, we match an existing
    entry, even if it differs on leading and/or trailing slashes. Otherwise,
    it must be an exact match.
    If a change was made, and commit is specified, commit the change. Otherwise,
    just add the file to the staging.
    Returns True if a change was made, False otherwise.
    """

    def strip_slashes(e):
        # Remove one leading and one trailing slash for slash-insensitive
        # comparison.  NOTE(review): an entry of just "/" would trip the
        # assert below -- callers presumably never pass that.
        if len(e) == 0:
            return ""
        if e.startswith("/"):
            e = e[1:]
        if e.endswith("/"):
            e = e[:-1]
        assert len(e) > 0
        return e

    entry_wo_slashes = strip_slashes(entry)
    abs_file_path = join(repo_dir, gitignore_rel_path)
    if exists(abs_file_path):
        # Track whether the file's final line ends with a newline so we can
        # append cleanly.
        last_has_newline = True
        with open(abs_file_path, "r") as f:
            for line in f:
                if line.endswith("\n"):
                    last_has_newline = True
                else:
                    last_has_newline = False
                line = line.rstrip()
                if line == entry or (
                    match_independent_of_slashes and strip_slashes(line) == entry_wo_slashes
                ):
                    return False  # entry already present, nothing to do
        with open(abs_file_path, "a") as f:
            if not last_has_newline:
                f.write("\n")
            f.write(entry + "\n")
    else:
        # No .gitignore yet; create it with just this entry.
        with open(abs_file_path, "a") as f:
            f.write(entry + "\n")
    call_subprocess([GIT_EXE_PATH, "add", gitignore_rel_path], cwd=repo_dir, verbose=verbose)
    if commit:
        call_subprocess(
            [GIT_EXE_PATH, "commit", "-m", "Add .gitignore entry for %s" % entry],
            cwd=repo_dir,
            verbose=verbose,
        )
    return True
def verify_git_config_initialized(cwd: str, batch: bool = False, verbose: bool = False):
    """When trying to clone or initialize a new git repo, git requires
    that user.name and user.email are set. If not, it will error
    out. We verify that they are set. If not, and in interactive
    mode, we can ask the user and set them. If not, and in batch mode,
    we will error out explaining the issue.

    :raises ConfigurationError: in batch mode when parameters are missing.
    :raises UserAbort: if the user declines to supply a value interactively.
    """
    # "git config <name>" exits non-zero when the parameter is unset.
    rc = call_subprocess_for_rc([GIT_EXE_PATH, "config", "user.name"], cwd=cwd, verbose=verbose)
    name_set = True if rc == 0 else False
    rc = call_subprocess_for_rc([GIT_EXE_PATH, "config", "user.email"], cwd=cwd, verbose=verbose)
    email_set = True if rc == 0 else False
    if name_set and email_set:
        if verbose:
            click.echo("Successfully verified that git user.name and user.email are configured.")
        return
    need_to_set = (["user.name"] if not name_set else []) + (
        ["user.email"] if not email_set else []
    )
    if batch:
        raise ConfigurationError(
            "Git is not fully configured, need to set the Git config parameter%s %s before running."
            % ("s" if len(need_to_set) == 2 else "", " and ".join(need_to_set))
        )
    # if we get here, we're going to interactively ask the user for the git config parameters
    for param in need_to_set:
        click.echo(
            "Git is not fully configured - you need to set the Git config parameter '%s'." % param
        )
        value = click.prompt("Please enter a value for %s or return to abort" % param, default="")
        if value == "":
            raise UserAbort("Need to configure git parameter %s" % param)
        # Set globally (not per-repo) so future repos work too.
        call_subprocess(
            [GIT_EXE_PATH, "config", "--global", param, value], cwd=cwd, verbose=verbose
        )
        click.echo("Successfully set Git parameter %s" % param)
| 37.131667
| 106
| 0.608151
|
4a026595f72b492182f3ae140ec5718087255a7f
| 10,398
|
py
|
Python
|
url2sql.py
|
renesugar/movenotes
|
e054a7599cb981d164b6593150896cdc51bcaa72
|
[
"MIT"
] | 3
|
2020-09-02T10:05:40.000Z
|
2022-03-26T04:19:59.000Z
|
url2sql.py
|
renesugar/movenotes
|
e054a7599cb981d164b6593150896cdc51bcaa72
|
[
"MIT"
] | null | null | null |
url2sql.py
|
renesugar/movenotes
|
e054a7599cb981d164b6593150896cdc51bcaa72
|
[
"MIT"
] | 1
|
2021-07-14T07:19:57.000Z
|
2021-07-14T07:19:57.000Z
|
import re
import os
import argparse
import sys
import errno
import optparse
import sqlite3
import uuid
import email
import email.utils
from email.message import EmailMessage
from email.parser import BytesParser, Parser
from email.policy import default
from bs4 import BeautifulSoup
from datetime import datetime, timedelta, timezone
import hashlib
import plistlib
import urllib
from urllib.parse import urlparse
import urllib.request
import html
import notesdb
import constants
import common
#
# MIT License
#
# https://opensource.org/licenses/MIT
#
# Copyright 2020 Rene Sugar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Description:
#
# This program loads URL bookmarks into a SQLite database.
#
# NOTE(review): ``global`` is a no-op at module scope -- these names are
# module-level regardless; statement kept as-is.
global __name__, __author__, __email__, __version__, __license__
# Program metadata used by the option parser and database settings checks.
__program_name__ = 'url2sql'
__author__ = 'Rene Sugar'
__email__ = 'rene.sugar@gmail.com'
__version__ = '1.00'
__license__ = 'MIT License (https://opensource.org/licenses/MIT)'
__website__ = 'https://github.com/renesugar'
# Schema version stamped into new databases / minimum version accepted.
__db_schema_version__ = '1'
__db_schema_min_version__ = '1'
def _get_option_parser():
  """Build the command-line option parser for this tool."""
  p = optparse.OptionParser('%prog [options]',
                            version='%prog ' + __version__)
  p.add_option('', "--email",
               action="store", dest="email_address", default=None,
               help="Email address")
  p.add_option("", "--input",
               action="store", dest="input_path", default=[],
               help="Path to input URL bookmarks")
  p.add_option('', "--output",
               action="store", dest="output_path", default=None,
               help="Path to output SQLite directory")
  p.add_option("", "--folder",
               action="store", dest="note_folder", default="Bookmarks",
               help="Folder name to store bookmark notes")
  return p
def process_url_note(sqlconn, columns):
  """Normalize the bookmark fields in *columns*, insert the note, commit.

  Fills in the note hash/title/body plus all the Apple-specific columns
  (unused for bookmarks, hence None) before handing off to notesdb.
  """
  raw_title = columns["note_title"]
  if raw_title is None:
    note_title = constants.NOTES_UNTITLED
  else:
    note_title = common.remove_line_breakers(raw_title).strip()
  print("processing '%s'" % (note_title,))

  note_data = columns['note_data']
  if note_data is None:
    note_data = ''
  note_data_format = columns['note_data_format']

  # Hash the markdown body (sha512) so duplicate notes can be detected.
  digest = hashlib.sha512()
  digest.update(note_data.encode('utf-8'))

  columns["note_type"] = "note"
  columns["note_uuid"] = None
  columns["note_parent_uuid"] = None
  # Original formats are: email, apple, icloud, joplin, bookmark.
  columns["note_original_format"] = "bookmark"
  columns["note_hash"] = digest.hexdigest()
  columns["note_title"] = note_title
  columns["note_data"] = note_data
  columns["note_data_format"] = note_data_format
  # Apple-specific fields do not apply to URL bookmarks.
  for apple_key in (
      "apple_id", "apple_title", "apple_snippet", "apple_data",
      "apple_attachment_id", "apple_attachment_path",
      "apple_account_description", "apple_account_identifier",
      "apple_account_username", "apple_version", "apple_user",
      "apple_source",
  ):
    columns[apple_key] = None

  notesdb.add_apple_note(sqlconn, columns)
  sqlconn.commit()
def main(args):
  """Command-line entry point: validate options, open or create the SQLite
  notes database, then load each 4-line bookmark record from the input file.

  :param args: argv-style list (without the program name)
  """
  parser = _get_option_parser()
  (options, args) = parser.parse_args(args)

  email_address = ''
  if hasattr(options, 'email_address') and options.email_address:
    email_address = options.email_address
    # Check if email address is valid
    if not common.check_email_address(email_address):
      common.error("email address '%s' is not valid." % (email_address,))
  else:
    common.error("email address not specified.")

  inputPath = ''
  if hasattr(options, 'input_path') and options.input_path:
    inputPath = os.path.abspath(os.path.expanduser(options.input_path))
    # Check if input file exists
    if not os.path.isfile(inputPath):
      common.error("input path '%s' does not exist." % (inputPath,))
  else:
    common.error("input path not specified.")

  outputPath = ''
  if hasattr(options, 'output_path') and options.output_path:
    outputPath = os.path.abspath(os.path.expanduser(options.output_path))
    # Check if output directory exists
    if not os.path.isdir(outputPath):
      common.error("output path '%s' does not exist." % (outputPath,))
  else:
    common.error("output path not specified.")

  note_folder = None
  if hasattr(options, 'note_folder') and options.note_folder:
    note_folder = options.note_folder

  notesdbfile = os.path.join(outputPath, 'notesdb.sqlite')
  new_database = (not os.path.isfile(notesdbfile))
  sqlconn = sqlite3.connect(notesdbfile,
                            detect_types=sqlite3.PARSE_DECLTYPES)
  sqlcur = sqlconn.cursor()
  if new_database:
    notesdb.create_database(sqlconn=sqlconn, db_schema_version=__db_schema_version__, email_address=options.email_address)
  db_settings = notesdb.get_db_settings(sqlcur, __db_schema_version__)
  notesdb.check_db_settings(db_settings, '%prog', __version__, __db_schema_min_version__, __db_schema_version__)

  # Extensions for which the HTTP title fetch is skipped (non-HTML content).
  data_file_extensions = [
    ".png",
    ".jpeg",
    ".jpg",
    ".bmp",
    ".txt",
    ".pdf",
    ".zip",
    ".csv",
    ".xls",
    ".gzip",
    ".gz",
    ".7z",
    ".xlsx",
  ]

  with open(inputPath, 'r') as fp:
    lines = fp.readlines()

  if (len(lines) % 4) != 0:
    # File consists of a title line followed by date created, date modified
    # and a URL line for each bookmark
    print("Error: Uneven number of lines in file.\n")
    sys.exit(1)

  titles = {}
  # Idiom fix: iterate 4-line records with a stepped range instead of a
  # manual while/index loop.
  for index in range(0, len(lines), 4):
    note_title = lines[index].strip()
    add_date = datetime.strptime(lines[index+1].strip(), '%Y-%m-%d %H:%M:%S.%f')
    last_modified = datetime.strptime(lines[index+2].strip(), '%Y-%m-%d %H:%M:%S.%f')
    note_url = lines[index+3].strip()

    # Check for missing URL scheme
    urlTuple = urllib.parse.urlparse(note_url)
    if urlTuple.scheme == '':
      note_url = urllib.parse.urlunparse(urllib.parse.ParseResult(scheme="http", netloc=urlTuple.netloc, path=urlTuple.path, params=urlTuple.params, query=urlTuple.query, fragment=urlTuple.fragment))

    # Get title for URL if possible
    if note_title == note_url or note_url.endswith(note_title):
      note_title = note_url
      try:
        if note_url in titles:
          # cached title
          note_title = titles[note_url]
        elif common.url_path_extension(note_url) in data_file_extensions:
          # Avoid HTTP request for non-HTML files
          pass
        else:
          # request title
          with urllib.request.urlopen(note_url) as response:
            http_message = response.info()
            if http_message['Content-type'].split(';')[0] == 'text/html':
              html_text = response.read()
              soup = BeautifulSoup(html_text, features="html.parser")
              note_title = soup.title.string
              if note_title is None:
                note_title = note_url
              else:
                # cache title
                titles[note_url] = note_title
      except Exception:
        # Bug fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Title lookup is best-effort;
        # fall back to the URL on any ordinary failure.
        pass

    if note_title == '':
      note_title = 'New Note'
    note_title = note_title.replace(u'\u808e', '')

    # Markdown text for note
    note_data = '[' + common.escape_html(note_title) + '](' + common.escape_url(note_url) + ')'

    columns = {}
    columns["note_type"] = "note"
    columns["note_uuid"] = None
    columns["note_parent_uuid"] = None
    columns["note_original_format"] = None
    columns["note_internal_date"] = add_date
    columns["note_hash"] = None
    columns["note_title"] = note_title
    columns["note_url"] = note_url
    columns["note_data"] = note_data
    columns["note_data_format"] = 'text/markdown'
    columns["apple_folder"] = note_folder
    columns["apple_created"] = add_date.strftime("%Y-%m-%d %H:%M:%S.%f")
    columns["apple_last_modified"] = last_modified.strftime("%Y-%m-%d %H:%M:%S.%f")

    process_url_note(sqlconn, columns)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
  main(sys.argv[1:])
| 30.763314
| 201
| 0.682535
|
4a0268b1f2eda5d8a94961e044f815640a5d9dda
| 1,862
|
py
|
Python
|
Guess_the_number(Week 2).py
|
ditiansm2015/Python-Game_Projects
|
07c172fc18e214cf78d5971691c470669357e55d
|
[
"Apache-2.0"
] | 1
|
2017-12-26T14:03:21.000Z
|
2017-12-26T14:03:21.000Z
|
Guess_the_number(Week 2).py
|
ditiansm2015/Python-Game_Projects
|
07c172fc18e214cf78d5971691c470669357e55d
|
[
"Apache-2.0"
] | null | null | null |
Guess_the_number(Week 2).py
|
ditiansm2015/Python-Game_Projects
|
07c172fc18e214cf78d5971691c470669357e55d
|
[
"Apache-2.0"
] | null | null | null |
# input will come from buttons and an input field
# all output for the game will be printed in the console
import random
import simplegui
# Game state: the number to guess, and how many guesses remain.
secret_number=0
counter=0
# helper function to start and restart the game
def new_game(): # initialize global variables used in your code here
    global secret_number
    # Default range is [0,100).  NOTE(review): counter is NOT reset here, so
    # guesses only become available once a range button has been pressed.
    secret_number=random.randrange(0,100)
# define event handlers for control panel
def range100():
    print "Range is from 0-99"
    # button that changes the range to [0,100) and starts a new game
    global secret_number
    secret_number=random.randrange(0,100)
    global counter
    # 7 guesses: enough for binary search over 100 values (2^7 = 128).
    counter=7
    print "You have",counter,"chances"
def range1000():
    # button that changes the range to [0,1000) and starts a new game
    print "Range is from 0-999"
    global secret_number
    secret_number=random.randrange(0,1000)
    global counter
    # 10 guesses: enough for binary search over 1000 values (2^10 = 1024).
    counter=10
    print "You have",counter,"chances"
def input_guess(guess):
# main game logic goes here
global counter
if counter>0:
counter=counter-1
guess_no = int(guess)
if guess_no==secret_number:
print "correct guess"
elif guess_no>secret_number:
print "Guess was",guess_no
print "Number of remaining guesses is",counter
print "Lower"
else:
print "Guess was",guess_no
print "Number of remaining guesses is",counter
print "Higher"
# create frame
# Build the 400x400 CodeSkulptor/simplegui window hosting the game controls.
frame=simplegui.create_frame("Guess The Number",400,400)
# register event handlers for control elements and start frame
frame.add_button("RANGE 0-100",range100)
frame.add_button("Range 0-1000",range1000)
# input field delivers the typed guess (as a string) to input_guess
frame.add_input('Your Guess', input_guess, 100)
frame.start()
# call new_game
# kick off the first round as soon as the frame is up
new_game()
| 23.871795
| 71
| 0.649839
|
4a026a36dd9bcc21ca0c5fdbbc37b73336667986
| 66
|
py
|
Python
|
procgame/_version.py
|
Curbfeeler/PinbotFromES
|
e12734afa37c4c0accef762f39d42503392c8f5b
|
[
"MIT"
] | 17
|
2015-05-14T23:35:59.000Z
|
2021-08-09T18:22:25.000Z
|
procgame/_version.py
|
horseyhorsey/SkeletonGameVisualPinball10
|
8ae19ce99d143c61f0bcb9e00259137e96a39b4b
|
[
"MIT"
] | 20
|
2016-06-09T16:42:52.000Z
|
2019-08-08T15:27:55.000Z
|
procgame/_version.py
|
horseyhorsey/SkeletonGameVisualPinball10
|
8ae19ce99d143c61f0bcb9e00259137e96a39b4b
|
[
"MIT"
] | 6
|
2015-08-07T01:00:43.000Z
|
2020-07-14T20:07:20.000Z
|
# Generated by: paver inc_version
# Version tuple (major, minor, patch, build); bump via `paver inc_version`,
# do not edit by hand.
__version_info__ = (1, 1, 2, 1)
| 22
| 33
| 0.712121
|
4a026a59ed498473e784b49c8acd6572e9d5f4dd
| 3,203
|
py
|
Python
|
pgoapi/protos/pogoprotos/enums/tutorial_state_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 14
|
2017-03-28T16:32:24.000Z
|
2021-03-13T23:03:57.000Z
|
pgoapi/protos/pogoprotos/enums/tutorial_state_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 8
|
2017-03-01T07:56:09.000Z
|
2017-08-15T07:37:12.000Z
|
pgoapi/protos/pogoprotos/enums/tutorial_state_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 15
|
2017-02-24T01:30:23.000Z
|
2021-06-27T08:46:43.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/enums/tutorial_state.proto
# NOTE(review): generated module - only comments were added in review;
# regenerate from the .proto file for any real change.
import sys
# _b: identity on py2, latin1-encode on py3, so the serialized descriptor
# below is always bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# File descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/enums/tutorial_state.proto',
  package='pogoprotos.enums',
  syntax='proto3',
  serialized_pb=_b('\n%pogoprotos/enums/tutorial_state.proto\x12\x10pogoprotos.enums*\xe4\x01\n\rTutorialState\x12\x10\n\x0cLEGAL_SCREEN\x10\x00\x12\x14\n\x10\x41VATAR_SELECTION\x10\x01\x12\x14\n\x10\x41\x43\x43OUNT_CREATION\x10\x02\x12\x13\n\x0fPOKEMON_CAPTURE\x10\x03\x12\x12\n\x0eNAME_SELECTION\x10\x04\x12\x11\n\rPOKEMON_BERRY\x10\x05\x12\x0c\n\x08USE_ITEM\x10\x06\x12\"\n\x1e\x46IRST_TIME_EXPERIENCE_COMPLETE\x10\x07\x12\x15\n\x11POKESTOP_TUTORIAL\x10\x08\x12\x10\n\x0cGYM_TUTORIAL\x10\tb\x06proto3')
)

# Descriptor for the TutorialState enum (values 0-9, mirrored below).
_TUTORIALSTATE = _descriptor.EnumDescriptor(
  name='TutorialState',
  full_name='pogoprotos.enums.TutorialState',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LEGAL_SCREEN', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AVATAR_SELECTION', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_CREATION', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POKEMON_CAPTURE', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='NAME_SELECTION', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POKEMON_BERRY', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='USE_ITEM', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FIRST_TIME_EXPERIENCE_COMPLETE', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POKESTOP_TUTORIAL', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='GYM_TUTORIAL', index=9, number=9,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=60,
  serialized_end=288,
)
_sym_db.RegisterEnumDescriptor(_TUTORIALSTATE)

# Python-level enum wrapper plus module-level aliases for each value.
TutorialState = enum_type_wrapper.EnumTypeWrapper(_TUTORIALSTATE)
LEGAL_SCREEN = 0
AVATAR_SELECTION = 1
ACCOUNT_CREATION = 2
POKEMON_CAPTURE = 3
NAME_SELECTION = 4
POKEMON_BERRY = 5
USE_ITEM = 6
FIRST_TIME_EXPERIENCE_COMPLETE = 7
POKESTOP_TUTORIAL = 8
GYM_TUTORIAL = 9
DESCRIPTOR.enum_types_by_name['TutorialState'] = _TUTORIALSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# @@protoc_insertion_point(module_scope)
| 32.683673
| 505
| 0.750859
|
4a026d144e86e4ae53488b4b7ddd3f4f8a62fb6a
| 3,945
|
py
|
Python
|
app/routes.py
|
vherolf/labelmaker
|
67e885ab04172ec5670e78485fe86a9470db742d
|
[
"MIT"
] | null | null | null |
app/routes.py
|
vherolf/labelmaker
|
67e885ab04172ec5670e78485fe86a9470db742d
|
[
"MIT"
] | 1
|
2020-07-06T19:59:52.000Z
|
2020-07-06T19:59:52.000Z
|
app/routes.py
|
vherolf/labelmaker
|
67e885ab04172ec5670e78485fe86a9470db742d
|
[
"MIT"
] | null | null | null |
from flask import render_template, flash, redirect, url_for
from app import app
from app.forms import LabelForm, TapeForm
import subprocess
@app.route('/')
@app.route('/index')
def index():
    """Landing page: render the static index template."""
    return render_template('index.html', title='Home')
@app.route('/label', methods=['GET', 'POST'])
def label():
    """Show the label form; on a valid POST, print the label and redirect
    back to the form (post/redirect/get)."""
    form = LabelForm()
    if not form.validate_on_submit():
        # GET, or a POST that failed validation: just show the form
        return render_template('label.html', title='Print Label', form=form)
    flash('printing label: {}'.format(form.labeltext.data))
    printLabel(form.labeltext.data, form.generate_pdf_only.data)
    return redirect(url_for('label'))
@app.route('/tape', methods=['GET', 'POST'])
def tape():
    """Show the tape form (12 mm preselected); on a valid POST, print the
    tape and redirect back to the form."""
    form = TapeForm(tapewidth='12')
    if not form.validate_on_submit():
        # GET, or a POST that failed validation: just show the form
        return render_template('tape.html', title='Print Tape', form=form)
    flash('printing tape: {} on {}'.format(form.tapetext.data, form.tapewidth.data))
    printTape(form.tapetext.data, form.generate_pdf_only.data, form.tapewidth.data)
    return redirect(url_for('tape'))
from matplotlib import pyplot as plot
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.font_manager import FontProperties
from matplotlib.cbook import get_sample_data
import matplotlib.pyplot as plt
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration
def printLabel( text_to_print = '', generate_pdf_only=True ):
    """Render *text_to_print* as a 3.5in x 0.9in PDF via WeasyPrint and,
    unless *generate_pdf_only* is set, send it to the configured label
    printer with `lpr`.

    The heading level (font size) scales with the number of lines.
    """
    import html  # stdlib; escape user text before embedding it in HTML

    font_config = FontConfiguration()
    print(text_to_print)
    lines = text_to_print.splitlines()
    # Bug fix: HTML only defines h1..h6; the original emitted <h7>, <h8>, ...
    # for labels with 6+ lines. Clamp the level.
    level = min(len(lines) + 1, 6)
    # Security fix: the original interpolated raw user text into the HTML,
    # so '<', '&', etc. could inject markup; escape each line.
    body = ''.join(html.escape(line) + '<br />' for line in lines)
    textInHtml = '<h%d><pre>%s</pre></h%d>' % (level, body, level)
    print(textInHtml)
    html_doc = HTML(string=textInHtml)
    css = CSS(string='''
        @page {
            size: 3.5in 0.9in;
            margin: 0em;
            margin-bottom: 0em;
            margin-top: 0em;
            vertical-align: center;
        }
        @font-face {
            font-family: 'Roboto Slab', serif;
            font-family: 'BioRhyme Expanded', serif;
            src: url(https://fonts.googleapis.com/css?family=BioRhyme+Expanded|Roboto+Slab);
        }
        h1 { font-family: 'BioRhyme Expanded', serif; }''', font_config=font_config)
    html_doc.write_pdf('text_to_print.pdf', stylesheets=[css], font_config=font_config)
    # lpr -o PrintQuality=Text text_to_print.pdf -P LabelWriter-450-DUO-Label
    command = [ "lpr", "-o", "PrintQuality=Graphics", "text_to_print.pdf", "-P" , app.config["LABELPRINTER"] ]
    print(command)
    if not generate_pdf_only:
        subprocess.call(command)
def printTape( text_to_print = '', generate_pdf_only=True, tapewidth='9' ):
    """Render *text_to_print* as a PDF sized for a label tape of
    *tapewidth* mm (supported: 9/11/12/19) and, unless
    *generate_pdf_only* is set, send it to the configured tape printer.
    """
    ## set width for cups - find all available with
    cupswidth = "PageSize="+ tapewidth +"_mm__1___Label__Auto_"
    # figure height in inches per tape width
    # NOTE(review): any other width raises KeyError below; presumably the
    # form restricts the choices - confirm against TapeForm.
    figwidth = {'11': 0.2, '12': 0.25, '9':0.13, '19':0.5 }
    ## generate dynamic pdf with matplotlib
    pdf_pages = PdfPages('text_to_print.pdf')
    # figsize width 0: relies on bbox_inches='tight' below to grow the page
    fig = plot.figure(figsize=(0, figwidth[tapewidth] ),facecolor='w')
    #fig.text(0, 0, text_to_print)
    fig.text(0, 0.25, text_to_print)
    ## bbox_inches='tight' resize automativally
    ## found here https://stackoverflow.com/questions/1271023/resize-a-figure-automatically-in-matplotlib
    # NOTE(review): fontsize/verticalalignment are not documented savefig
    # kwargs - verify they have any effect here.
    pdf_pages.savefig(fig, fontsize=tapewidth,verticalalignment='center', bbox_inches='tight',dpi=100)
    pdf_pages.close()
    ## lpr -o PageSize=9_mm__1___Label__Auto -o PrintQuality=Text text_to_print.pdf -P LabelWriter-450-DUO-Tape
    command = [ "lpr", "-o", cupswidth , "-o", "PrintQuality=Text", "text_to_print.pdf", "-P" , app.config["TAPEPRINTER"] ]
    print(command)
    if not generate_pdf_only:
        subprocess.call(command)
@app.route('/printers')
def printers():
    """List the CUPS printers (`lpstat -p -d`) and render the result."""
    cmd = ["lpstat", "-p", "-d"]
    output = subprocess.check_output(cmd)
    print(cmd)
    return render_template('printers.html', title='Printers', out=output.decode("utf-8"))
| 40.670103
| 123
| 0.676299
|
4a026d1622d67c1b73a18ca07578946ac8d6fa27
| 22,121
|
py
|
Python
|
bin/forms/staffForm.py
|
grinchios/college_project-commented
|
3cc5ccd6c1b946b625e1dcc0689c9273f6ba2178
|
[
"MIT"
] | null | null | null |
bin/forms/staffForm.py
|
grinchios/college_project-commented
|
3cc5ccd6c1b946b625e1dcc0689c9273f6ba2178
|
[
"MIT"
] | null | null | null |
bin/forms/staffForm.py
|
grinchios/college_project-commented
|
3cc5ccd6c1b946b625e1dcc0689c9273f6ba2178
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
sys.path = ['..'] + sys.path
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtWidgets import QMessageBox
import fileHandling
from validation import validate
from datetime import date
'''
S2
All customers details should be saved together (names, phone number, email address, DoB) Customer ID will be the key and then the details will be attached as subheadings in the file.
S3
Any details should be editable by the manager only except for changing comments.
I1
To ensure all dates are the same they’ll be selected using PyQt5’s calendar function.
I2
To check all numeric inputs against a validation class. (data type, range check, length check)
I3
Presence checks will be done on all required fields such as name and any other needed values (see design section)
I4
ID will be automatically entered to avoid errors
I5
All date of births will be validated between the range of 1/1/1930 - (Now-16 years)
P2
When adding anything to files it will be given a new ID if it doesn't have one already. This will involve adding one to the current maximum ID and then this being added to the JSON files.
P4
Using table view boxes to store data that can be selected for use. This was mentioned in #9 of input validation, they’ll be used for efficiency of data inputs, and improving the user interface. These will be filled with the use of threads.
P8
The processing of data for inputting into the files will be done as a Class system that can be easily called since each file is similar in layout.
O4
Data will be shown in tables, all the graphs on the manager panel will load with the page but any on the main chart form will have buttons to generate them.
'''
class staff():
'''
P8
The processing of data for inputting into the files will be done as a Class system that can be easily called since each file is similar in layout.
'''
    def __init__(self, ui_class):
        """Wire the staff page: load staff.json, cache the table data and
        column titles, and prepare the background table-fill thread."""
        # starts an object of the filehandling class
        self.file = fileHandling.fileHandling('database/staff.json')
        # staffSort is what field the table
        # is sorted by on double click
        self.sortValue = 'id'
        # full file contents and the (possibly sorted) data shown in the table
        self.fileContent = self.file.readFile()['data']
        self.tableData = self.file.readFile()['data']
        # sets column titles
        self.columnTitles = ['ID',
                             'Name',
                             'Surname',
                             'Sex',
                             'Username',
                             'Password',
                             'Admin',
                             'DOB']
        # assigning the ui to a variable for use
        self.ui = ui_class
        # threaded method to fill the table
        self.makeTable = self.fillTable(self.fileContent, '0', self.file.readFile(), self.ui)
        # set true on double click
        self.filter = False
        self.startTable()
def fillFields(self, rowNum):
'''
S3
Any details should be editable by the manager only except for changing comments.
'''
'''
I4
ID will be automatically entered to avoid errors
'''
self.ui.txtStaffID.setText(str(list(self.tableData[rowNum].keys())[0]))
self.ui.txtStaffUsername.setText(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['username'])
self.ui.txtStaffPassword.setText(str(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['password'][
'password']))
self.ui.checkBoxAdmin.setChecked(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['password']['admin'])
self.ui.txtStaffFirstName.setText(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['name'])
self.ui.checkBoxStaffSex.setChecked(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['sex'])
self.ui.txtStaffLastName.setText(
self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['surname'])
# DOB is in this format: 1980-01-01
# therefore must be altered for the dateEdit
dob = self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['dob']
# dob[0:4] is year YYYY
# dob[5:7] is month MM
# dob[8:10] is day DD
self.ui.dateStaffDOB.setDate(QtCore.QDate(int(dob[0:4])), int(dob[5:7], int(dob[8:10])))
def search(self):
if self.filter:
try:
searchIndex = self.file.binSearch(
self.tableData, self.ui.txtStaffSearch.text(), self.searchTerm)
self.fillFields(int(searchIndex))
except TypeError:
QMessageBox.about(self.ui, "Error", 'Field not found')
else:
QMessageBox.about(self.ui, "Error", 'Must double click on a field first to sort by')
    def checkFields(self, newField):
        '''
        Validate the form inputs; return True when all checks pass, else
        show a message box and return False. *newField* enables the DOB
        range check for brand-new records.

        I1
        To ensure all dates are the same they’ll be selected using PyQt5’s calendar function.
        '''
        '''
        I2
        To check all numeric inputs against a validation class. (data type, range check, length check)
        '''
        '''
        I3
        Presence checks will be done on all required fields such as name and any other needed values (see design section)
        '''
        '''
        I5
        All date of births will be validated between the range of 1/1/1930 - (Now-16 years)
        '''
        # interfacing with validation class
        # staff name
        val = validate.checkPresence(self.ui.txtStaffFirstName.text(), 'Staff name')
        if val is not True: QMessageBox.about(self.ui, "Error", val); return False
        # staff surname
        val = validate.checkPresence(self.ui.txtStaffLastName.text(), 'Staff surname')
        if val is not True: QMessageBox.about(self.ui, "Error", val); return False
        # username
        val = validate.checkPresence(self.ui.txtStaffUsername.text(), 'Staff username')
        if val is not True: QMessageBox.about(self.ui, "Error", val); return False
        # if the data is new then the date can be checked
        # relies on the fact that dob and appointment dates
        # will never or will rarely change
        # uses the default also since this needs to be changed
        dob = self.ui.dateStaffDOB.date().toPyDate().strftime('%Y-%m-%d')
        # the widget default (2000-01-01) means the user never touched it
        if dob == '2000-01-01': QMessageBox.about(self.ui, "Error", 'DOB must be changed'); return False
        if newField is True:
            # date range 80 years ago to 16 years ago
            # NOTE(review): naive year substitution - on Feb 29 the bounds
            # are not real dates; harmless if checkRange compares strings
            # lexicographically, but confirm.
            low = date.today().strftime('%Y-%m-%d')
            low = str(int(low[:4]) - 80) + low[4:]
            high = date.today().strftime('%Y-%m-%d')
            high = str(int(high[:4]) - 16) + high[4:]
            val = validate.checkRange(dob, low, high)
            if val is not True: QMessageBox.about(self.ui, "Error", val); return False
        return True
def saveFunc(self):
self.fileContent = self.file.readFile()['data']
if self.filter is True:
sortedData = self.file.bubblesort(self.columnTitles[self.sortValue].lower())
self.tableData = sortedData
self.makeTable.tableContent = sortedData
else:
self.fileContent = self.file.readFile()['data']
self.tableData = self.fileContent
self.makeTable.tableContent = self.fileContent
self.makeTable.staffFile = self.file.readFile()
self.startTable()
self.staffCancel()
    def saveStaff(self):
        """Save-button handler: validate the form and write the record
        (update an existing ID, or append a brand-new one)."""
        # activates when save button is pressed
        if self.ui.checkBoxAdmin.isChecked():
            admin = True
        else:
            admin = False
        # checkbox convention: checked = female (see tooltip in the form)
        if self.ui.checkBoxStaffSex.isChecked():
            sex = True
        else:
            sex = False
        ID = int(self.ui.txtStaffID.text())
        # a form showing the next free ID means this is a new record
        if ID != int(self.file.newID()):
            newField = False
        else:
            newField = True
        if self.checkFields(newField) is True:
            # creates a dictionary of the user for the file
            '''
            S2
            All customers details should be saved together (names, phone number, email address, DoB) Customer ID will be the key and then the details will be attached as subheadings in the file.
            '''
            '''
            S3
            Any details should be editable by the manager only except for changing comments.
            '''
            '''
            I1
            To ensure all dates are the same they’ll be selected using PyQt5’s calendar function.
            '''
            '''
            I4
            ID will be automatically entered to avoid errors
            '''
            '''
            P2
            When adding anything to files it will be given a new ID if it doesn't have one already. This will involve adding one to the current maximum ID and then this being added to the JSON files.
            '''
            arrUser = {
                ID: {
                    'name': self.ui.txtStaffFirstName.text(),
                    'surname': self.ui.txtStaffLastName.text(),
                    'sex': sex,
                    'username': self.ui.txtStaffUsername.text(),
                    'password': {
                        'password': self.ui.txtStaffPassword.text(),
                        'admin': admin
                    },
                    'dob': self.ui.dateStaffDOB.date().toPyDate().strftime('%Y-%m-%d')
                }}
            # if its an old ID thats updated
            # otherwise it's added to the end
            if ID != int(self.file.newID()):
                self.file.editOneField(arrUser, ID)
                self.saveFunc()
                QMessageBox.about(self.ui, "Field edited", 'Field has been updated')
            else:
                # will never execute but left as an error catch
                self.file.writeToFile(arrUser)
                self.saveFunc()
                QMessageBox.about(self.ui, "Field edited", 'Field has been added')
            # notify listeners (value 2 identifies the staff page)
            self.ui.valueChange.emit(2)
def startTable(self):
self.ui.listStaff.update()
# put the file into a variable
staffFile = self.file.readFile()
# row amount
rowNum = len(staffFile['data'])
# sets row and column amounts
self.ui.listStaff.setRowCount(rowNum)
self.ui.listStaff.setColumnCount(len(self.columnTitles))
self.ui.listStaff.setHorizontalHeaderLabels(self.columnTitles)
self.makeTable.start()
    def staffClick(self):
        '''
        Single-click handler: pre-fill the edit widgets from the selected
        table row(s).

        S3
        Any details should be editable by the manager only except for changing comments.
        '''
        # function allows user to pre fill boxes based on a selected row
        # NOTE(review): this duplicates fillFields() almost line for line;
        # consider delegating once fillFields is verified.
        rows = sorted(set(index.row() for index in self.ui.listStaff.selectedIndexes()))
        for row in rows:
            rowNum = int(row)
            self.ui.txtStaffID.setText(str(list(self.tableData[rowNum].keys())[0]))
            self.ui.txtStaffUsername.setText(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['username'])
            self.ui.txtStaffPassword.setText(str(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['password'][
                    'password']))
            self.ui.checkBoxAdmin.setChecked(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['password']['admin'])
            self.ui.txtStaffFirstName.setText(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['name'])
            self.ui.checkBoxStaffSex.setChecked(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['sex'])
            self.ui.txtStaffLastName.setText(
                self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['surname'])
            # DOB is in this format: 1980-01-01
            # therefore must be altered for the dateEdit
            dob = self.tableData[rowNum][str(list(self.tableData[rowNum].keys())[0])]['dob']
            # dob[0:4] is year YYYY
            # dob[5:7] is month MM
            # dob[8:10] is day DD
            self.ui.dateStaffDOB.setDate(QtCore.QDate(int(dob[0:4]), int(dob[5:7]), int(dob[8:])))
    def staffDoubleClick(self):
        """Double-click handler: pick the clicked column as the sort/search
        key; ID/Password/Admin columns reset to the unsorted view."""
        # allows a column to be selected for sorting and searching
        columns = sorted(set(index.column() for index in self.ui.listStaff.selectedIndexes()))
        for column in columns:
            self.staffColumn = column
            if self.columnTitles[self.staffColumn] == 'ID':
                # sets to default
                self.tableData = self.file.readFile()['data']
                self.makeTable.setTableContent([], tableFilter=False)
                self.makeTable.staffFile = self.file.readFile()
                self.filter = False
            elif self.columnTitles[self.staffColumn] == 'Password' or self.columnTitles[self.staffColumn] == 'Admin':
                # sets to default
                self.tableData = self.file.readFile()['data']
                self.makeTable.setTableContent([], tableFilter=False)
                self.makeTable.staffFile = self.file.readFile()
                self.filter = False
            else:
                # sorts data and displays
                # NOTE(review): quicksort here vs bubblesort in saveFunc -
                # presumably interchangeable; confirm in fileHandling.
                sortedData = self.file.quicksort(self.columnTitles[self.staffColumn].lower())
                self.searchTerm = self.columnTitles[self.staffColumn].lower()
                self.tableData = sortedData
                self.makeTable.setTableContent(sortedData)
                self.filter = True
            self.startTable()
    class fillTable(QThread):
        '''
        Background thread that writes the staff records into the
        QTableWidget cells, honouring any sorted view set by a double
        click.

        P4
        Using table view boxes to store data that can be selected for use. This was mentioned in #9 of input validation, they’ll be used for efficiency of data inputs, and improving the user interface. These will be filled with the use of threads.
        '''
        '''
        O4
        Data will be shown in tables, all the graphs on the manager panel will load with the page but any on the main chart form will have buttons to generate them.
        '''
        def __init__(self, content, index, file, ui):
            # content: initial table data; file: full staff.json payload
            QThread.__init__(self)
            self.fileContent = content
            self.sortBy = index
            self.tableContent = []
            self.staffFile = file
            self.ui = ui
            # True while a sorted/filtered view is active
            self.contentChange = False

        def setTableContent(self, data, tableFilter=True):
            # changed on double click
            self.tableContent = data
            self.contentChange = tableFilter

        def fillTable(self, data, i):
            """Write record *data* into table row *i* (unless a sorted view
            already supplied the content)."""
            # if table has been double clicked
            if self.contentChange:
                pass
            else:
                # if it's the first field
                if i == 0:
                    self.tableContent = []
                # fills the table with data
                self.tableContent.append(data)
            self.ui.listStaff.setItem(i, 0, QtWidgets.QTableWidgetItem(str(list(self.tableContent[i].keys())[0])))
            self.ui.listStaff.setItem(i, 1, QtWidgets.QTableWidgetItem(
                self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['name']))
            self.ui.listStaff.setItem(i, 2, QtWidgets.QTableWidgetItem(
                self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['surname']))
            # converts bool to male or female
            if self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['sex']:
                self.ui.listStaff.setItem(i, 3, QtWidgets.QTableWidgetItem('Female'))
            else:
                self.ui.listStaff.setItem(i, 3, QtWidgets.QTableWidgetItem('Male'))
            self.ui.listStaff.setItem(i, 4, QtWidgets.QTableWidgetItem(
                self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['username']))
            self.ui.listStaff.setItem(i, 5, QtWidgets.QTableWidgetItem(
                str(self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['password']['password'])))
            self.ui.listStaff.setItem(i, 6, QtWidgets.QTableWidgetItem(
                str(self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['password']['admin'])))
            self.ui.listStaff.setItem(i, 7, QtWidgets.QTableWidgetItem(
                self.tableContent[i][str(list(self.tableContent[i].keys())[0])]['dob']))

        def run(self):
            """Thread entry point: fill one row per record, then repaint."""
            # row amount
            rowNum = len(self.staffFile['data'])
            for i in range(rowNum):
                self.fillTable(self.staffFile['data'][i], i)
            self.ui.listStaff.update()
def staffCancel(self):
'''
I4
ID will be automatically entered to avoid errors
'''
# clears significant fields and assigns new ID
self.ui.txtStaffID.setText(str(self.file.newID()))
self.ui.txtStaffFirstName.setText('')
self.ui.txtStaffLastName.setText('')
self.ui.txtStaffUsername.setText('')
self.ui.txtStaffPassword.setText('')
# staff form
def createStaffForm(self):
    """Build the staff management page (table + edit widgets + buttons),
    add it to the stacked widget and wire the signals to a `staff`
    controller instance."""
    # page container
    self.pageStaff = QtWidgets.QWidget()
    self.pageStaff.setObjectName("pageStaff")
    # page title label
    self.label_8 = QtWidgets.QLabel(self.pageStaff)
    self.label_8.setGeometry(QtCore.QRect(10, 0, 91, 17))
    font = QtGui.QFont()
    font.setFamily("Arial Black")
    self.label_8.setFont(font)
    self.label_8.setObjectName("label_8")
    # read-only staff table
    self.listStaff = QtWidgets.QTableWidget(self.pageStaff)
    self.listStaff.setGeometry(QtCore.QRect(10, 30, 1111, 351))
    self.listStaff.setObjectName("listStaff")
    self.listStaff.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
    self.label_9 = QtWidgets.QLabel(self.pageStaff)
    self.label_9.setGeometry(QtCore.QRect(10, 390, 161, 17))
    self.label_9.setObjectName("label_9")
    self.label_10 = QtWidgets.QLabel(self.pageStaff)
    self.label_10.setGeometry(QtCore.QRect(10, 460, 151, 17))
    self.label_10.setObjectName("label_10")
    # editable name fields
    self.txtStaffFirstName = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffFirstName.setGeometry(QtCore.QRect(10, 480, 181, 29))
    self.txtStaffFirstName.setObjectName("txtStaffFirstName")
    self.txtStaffLastName = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffLastName.setGeometry(QtCore.QRect(10, 540, 181, 29))
    self.txtStaffLastName.setObjectName("txtStaffLastName")
    self.label_11 = QtWidgets.QLabel(self.pageStaff)
    self.label_11.setGeometry(QtCore.QRect(10, 520, 151, 17))
    self.label_11.setObjectName("label_11")
    # credentials are display-only on this page
    self.txtStaffPassword = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffPassword.setGeometry(QtCore.QRect(230, 540, 181, 29))
    self.txtStaffPassword.setObjectName("txtStaffPassword")
    self.txtStaffPassword.setReadOnly(True)
    self.txtStaffUsername = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffUsername.setGeometry(QtCore.QRect(230, 480, 181, 29))
    self.txtStaffUsername.setObjectName("txtStaffUsername")
    self.txtStaffUsername.setReadOnly(True)
    self.label_12 = QtWidgets.QLabel(self.pageStaff)
    self.label_12.setGeometry(QtCore.QRect(230, 460, 151, 17))
    self.label_12.setObjectName("label_12")
    self.label_13 = QtWidgets.QLabel(self.pageStaff)
    self.label_13.setGeometry(QtCore.QRect(230, 520, 151, 17))
    self.label_13.setObjectName("label_13")
    self.label_14 = QtWidgets.QLabel(self.pageStaff)
    self.label_14.setGeometry(QtCore.QRect(450, 460, 161, 17))
    self.label_14.setObjectName("label_14")
    self.checkBoxAdmin = QtWidgets.QCheckBox(self.pageStaff)
    self.checkBoxAdmin.setGeometry(QtCore.QRect(450, 480, 96, 22))
    self.checkBoxAdmin.setObjectName("checkBoxAdmin")
    # DOB picker with calendar popup (requirement I1)
    self.dateStaffDOB = QtWidgets.QDateEdit(self.pageStaff)
    self.dateStaffDOB.setGeometry(QtCore.QRect(450, 540, 111, 27))
    self.dateStaffDOB.setCalendarPopup(True)
    self.dateStaffDOB.setObjectName("dateStaffDOB")
    self.label_15 = QtWidgets.QLabel(self.pageStaff)
    self.label_15.setGeometry(QtCore.QRect(450, 520, 161, 17))
    self.label_15.setObjectName("label_15")
    self.label_16 = QtWidgets.QLabel(self.pageStaff)
    self.label_16.setGeometry(QtCore.QRect(10, 430, 67, 17))
    self.label_16.setObjectName("label_16")
    # auto-assigned ID, read-only (requirement I4)
    self.txtStaffID = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffID.setGeometry(QtCore.QRect(70, 420, 121, 29))
    self.txtStaffID.setReadOnly(True)
    self.txtStaffID.setObjectName("txtStaffID")
    self.label_staffsex = QtWidgets.QLabel(self.pageStaff)
    self.label_staffsex.setGeometry(QtCore.QRect(230, 420, 161, 17))
    self.label_staffsex.setObjectName("label_staffsex")
    self.checkBoxStaffSex = QtWidgets.QCheckBox(self.pageStaff)
    self.checkBoxStaffSex.setGeometry(QtCore.QRect(300, 420, 96, 22))
    self.checkBoxStaffSex.setObjectName("checkBoxStaffSex")
    self.checkBoxStaffSex.setToolTip("Select for female, leave unselected for male")
    self.btnSaveStaff = QtWidgets.QPushButton(self.pageStaff)
    self.btnSaveStaff.setGeometry(QtCore.QRect(10, 590, 101, 29))
    self.btnSaveStaff.setObjectName("btnSaveStaff")
    self.label_17 = QtWidgets.QLabel(self.pageStaff)
    self.label_17.setGeometry(QtCore.QRect(670, 400, 67, 17))
    self.label_17.setObjectName("label_17")
    self.txtStaffSearch = QtWidgets.QLineEdit(self.pageStaff)
    self.txtStaffSearch.setGeometry(QtCore.QRect(750, 390, 371, 29))
    self.txtStaffSearch.setObjectName("txtStaffSearch")
    self.btnStaffCancel = QtWidgets.QPushButton(self.pageStaff)
    self.btnStaffCancel.setGeometry(QtCore.QRect(120, 590, 101, 29))
    self.btnStaffCancel.setObjectName("btnStaffCancel")
    self.stackedWidget.addWidget(self.pageStaff)
    # starts an object of the staff class for less globals
    staffClass = staff(self)
    # restarts form
    staffClass.staffCancel()
    # connects a click on the table to the select the row
    self.listStaff.clicked.connect(lambda : staffClass.staffClick())
    self.listStaff.doubleClicked.connect(lambda : staffClass.staffDoubleClick())
    # connecting the buttons
    self.btnStaffCancel.clicked.connect(lambda : staffClass.staffCancel())
    self.btnSaveStaff.clicked.connect(lambda : staffClass.saveStaff())
    self.txtStaffSearch.textChanged.connect(lambda : staffClass.search())
| 39.501786
| 241
| 0.640297
|
4a026dcce05b88ca0acb70de36c78869db741aa1
| 10,428
|
py
|
Python
|
product.py
|
prakashpp/nereid-catalog-variants
|
51c195c2b492bd4b2d7f0b8de245392fe7e3df88
|
[
"BSD-3-Clause"
] | null | null | null |
product.py
|
prakashpp/nereid-catalog-variants
|
51c195c2b492bd4b2d7f0b8de245392fe7e3df88
|
[
"BSD-3-Clause"
] | null | null | null |
product.py
|
prakashpp/nereid-catalog-variants
|
51c195c2b492bd4b2d7f0b8de245392fe7e3df88
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from functools import partial
from trytond.model import ModelView, ModelSQL, fields, Unique
from trytond.pool import PoolMeta
from trytond.pyson import Eval
from trytond.transaction import Transaction
from nereid import url_for, current_website
from flask import json
from babel import numbers
__all__ = [
'Template', 'Product', 'ProductVariationAttributes', 'ProductAttribute',
]
__metaclass__ = PoolMeta
class Template:
    "Product Template"
    __name__ = 'product.template'

    # attributes along which this template's eshop variants vary
    variation_attributes = fields.One2Many(
        'product.variation_attributes', 'template', 'Variation Attributes',
    )

    @classmethod
    def __setup__(cls):
        # register the error message raised by the per-variant check
        super(Template, cls).__setup__()
        cls._error_messages.update({
            'missing_attributes':
                "Please define following attributes for product %s: %s"
        })
def validate_variation_attributes(self):
for product in self.products_displayed_on_eshop:
product.validate_attributes()
@classmethod
def validate(cls, templates):
super(Template, cls).validate(templates)
for template in templates:
template.validate_variation_attributes()
def _get_product_variation_data(self):
"""
This method returns the variation data in a serializable format
for the main API. Extend this module to add data that your
customization may need. In most cases, just extending the serialize
api method in product and variation should be sufficient.
"""
variation_attributes = map(
lambda variation: variation.serialize(),
self.variation_attributes
)
variants = []
for product in self.products_displayed_on_eshop:
variant_data = product.serialize(purpose='variant_selection')
variant_data['attributes'] = {}
for variation in self.variation_attributes:
if variation.attribute.type_ == 'selection':
# Selection option objects are obviously not serializable
# So get the name
variant_data['attributes'][variation.attribute.id] = \
str(
product.get_attribute_value(variation.attribute).id
)
else:
variant_data['attributes'][variation.attribute.name] = \
product.get_attribute_value(variation.attribute)
variants.append(variant_data)
rv = {
'variants': variants,
'variation_attributes': variation_attributes,
}
return rv
def get_product_variation_data(self):
"""
Returns json data for product for variants. The data returned
by this method should be sufficient to render a product selection
interface based on variation data.
The structure of the data returned is::
{
'variants': [
# A list of active records of the variants if not
# requested as JSON. If JSON, the record is serialized
# with type JSON.
{
# see documentation of the serialize method
# on product.product to see values sent.
}
],
'variation_attributes': [
{
# see documentation of the serialize method
# on product.varying_attribute to see values sent.
}
...
]
}
.. tip::
If your downstream module needs more information in the
JSON, subclass and implement _get_product_variation_data
which returns a dictionary. Otherwise, it would require you
to deserialize, add value and then serialize again.
"""
return json.dumps(self._get_product_variation_data())
class Product:
    "Product"
    __name__ = 'product.product'

    @classmethod
    def __setup__(cls):
        # same error message as on the template, raised per variant
        super(Product, cls).__setup__()
        cls._error_messages.update({
            'missing_attributes':
                "Please define following attributes for product %s: %s"
        })
    @classmethod
    def copy(cls, products, default=None):
        """Duplicate *products*, flagging the transaction so that
        validate_attributes skips its check (attributes are attached
        after the copy)."""
        with Transaction().set_context(_copy=True):
            # Inject a context variable to let other methods know that
            # control is coming from copy method.
            return super(Product, cls).copy(products, default)
def validate_attributes(self):
"""Check if product defines all the attributes specified in
template variation attributes.
"""
if Transaction().context.get('_copy'):
# While copying, attributes are added later so first time
# validation will always result in error saying there are
# missing attributes, hence skip the validation if its coming
# from copy method.
return
if not self.displayed_on_eshop:
return
required_attrs = set(
[v.attribute for v in self.template.variation_attributes]
)
missing = required_attrs - \
set(map(lambda attr: attr.attribute, self.attributes))
if missing:
missing = map(lambda attr: attr.name, missing)
self.raise_user_error(
"missing_attributes",
(self.rec_name, ', '.join(missing))
)
@classmethod
def validate(cls, products):
super(Product, cls).validate(products)
for product in products:
product.validate_attributes()
def get_attribute_value(self, attribute, silent=True):
"""
:param attribute: Active record of attribute
"""
for product_attr in self.attributes:
if product_attr.attribute == attribute:
return getattr(
product_attr,
'value_%s' % attribute.type_
)
else:
if silent:
return True
raise AttributeError(attribute.name)
def serialize(self, purpose=None):
"""
Return serializable dictionary suitable for use with variant
selection.
"""
if purpose != 'variant_selection':
return super(Product, self).serialize(purpose)
currency_format = partial(
numbers.format_currency,
currency=current_website.company.currency.code,
locale=current_website.default_locale.language.code
)
return {
'id': self.id,
'rec_name': self.rec_name,
'name': self.name,
'code': self.code,
'price': currency_format(self.sale_price(1)),
'url': url_for('product.product.render', uri=self.uri),
'image_urls': [
{
'large': (
image.transform_command().thumbnail(500, 500, 'a')
.url()
),
'thumbnail': (
image.transform_command().thumbnail(120, 120, 'a')
.url()
),
'regular': image.url,
}
for image in self.get_images()
],
}
class ProductVariationAttributes(ModelSQL, ModelView):
    "Variation attributes for product template"
    __name__ = 'product.variation_attributes'

    # Ordering of the attribute in variant-selection UIs.
    sequence = fields.Integer('Sequence')
    template = fields.Many2One('product.template', 'Template', required=True)
    # Only attributes belonging to the template's attribute set may be
    # chosen as a variation axis.
    attribute = fields.Many2One(
        'product.attribute', 'Attribute', required=True,
        domain=[('sets', '=',
            Eval('_parent_template', {}).get('attribute_set', -1))],
    )
    # Hint for the storefront about how to render the selector widget.
    widget = fields.Selection([
        ('dropdown', 'Dropdown'),
        ('swatches', 'Swatches'),
    ], 'Widget', required=True)

    @staticmethod
    def default_widget():
        return 'dropdown'

    @staticmethod
    def default_sequence():
        return 10

    def serialize(self, purpose=None):
        """
        Returns serialized version of the attribute::

            {
                'sequence': 1, # Integer id to determine order
                'name': 'shirt color', # Internal name of the attribute
                'display_name': 'Color', # (opt) display name of attr
                'rec_name': 'Color', # The name that should be shown
                'widget': 'swatch', # clue on how to render widget
                'options': [
                    # id, value of the options available to choose from
                    (12, 'Blue'),
                    (13, 'Yellow'),
                    ...
                ]
            }

        NOTE(review): the docstring above lists a 'rec_name' key but the
        returned dict below does not include it -- confirm which is right.
        """
        if self.attribute.type_ == 'selection':
            # The attribute type needs options to choose from.
            # Send only the options that the products displayed on webshop
            # can have, instead of the exhaustive list of attribute options
            # the attribute may have.
            #
            # For example, the color attribute values could be
            # ['red', 'yellow', 'orange', 'green', 'black', 'blue']
            # but the shirt itself might only be available in
            # ['red', 'yellow']
            #
            # This can be avoided by returning options based on the product
            # rather than on the attributes list of values
            options = set()
            for product in self.template.products_displayed_on_eshop:
                # value is expected to be a selection-option record with
                # id and name (see get_attribute_value on product.product).
                value = product.get_attribute_value(self.attribute)
                options.add((value.id, value.name))
        else:
            # Non-selection attributes have no enumerable options.
            options = []
        return {
            'sequence': self.sequence,
            'name': self.attribute.name,
            'display_name': self.attribute.display_name,
            'widget': self.widget,
            'options': list(options),
            'attribute_id': self.attribute.id,
        }
class ProductAttribute:
    # Extends product.attribute to enforce unique attribute names.
    # (No class docstring on purpose: Tryton treats it as the model
    # description, and the upstream class defines its own.)
    __name__ = 'product.attribute'

    @classmethod
    def __setup__(cls):
        super(ProductAttribute, cls).__setup__()
        attribute_table = cls.__table__()
        name_constraint = (
            'unique_name',
            Unique(attribute_table, attribute_table.name),
            'Attribute name must be unique!',
        )
        cls._sql_constraints += [name_constraint]
| 34.644518
| 79
| 0.56588
|
4a026eb1d46a148bb7c6e4aec6004138a2ac6dfa
| 2,631
|
py
|
Python
|
cbuild/util/cmake.py
|
mahiuchun/cports
|
37315875922d65a6d4f612cdc17bc0d88856d6d0
|
[
"BSD-2-Clause"
] | null | null | null |
cbuild/util/cmake.py
|
mahiuchun/cports
|
37315875922d65a6d4f612cdc17bc0d88856d6d0
|
[
"BSD-2-Clause"
] | null | null | null |
cbuild/util/cmake.py
|
mahiuchun/cports
|
37315875922d65a6d4f612cdc17bc0d88856d6d0
|
[
"BSD-2-Clause"
] | null | null | null |
from cbuild.core import paths
def configure(
    pkg, cmake_dir = None, build_dir = None, extra_args = None,
    cross_build = None
):
    """Run cmake's configure step for a package template.

    :param pkg: the package/template object driving the build
    :param cmake_dir: directory containing CMakeLists.txt, relative to
        the package's chroot cwd; defaults to the chroot cwd itself
    :param build_dir: out-of-tree build directory; defaults to
        pkg.make_dir
    :param extra_args: extra command line arguments appended to the
        cmake invocation (defaults to none; previously this used a
        shared mutable default list, which is a Python pitfall)
    :param cross_build: pass False to skip generating the cross
        toolchain file even when pkg.build_profile is a cross profile
    """
    # normalize instead of using a mutable default argument
    if extra_args is None:
        extra_args = []

    if cmake_dir:
        cdir = pkg.chroot_cwd / cmake_dir
    else:
        cdir = pkg.chroot_cwd

    if not build_dir:
        build_dir = pkg.make_dir

    (pkg.cwd / build_dir).mkdir(parents = True, exist_ok = True)

    cargs = []

    if pkg.bootstrapping:
        # early bootstrap: point cmake at the host toolchain and restrict
        # find_*() lookups to the bootstrap root
        with open(
            pkg.cwd / build_dir / "bootstrap.cmake", "w"
        ) as infile:
            infile.write(f"""
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_SYSTEM_VERSION 1)
SET(CMAKE_C_COMPILER {pkg.get_tool("CC")})
SET(CMAKE_CXX_COMPILER {pkg.get_tool("CXX")})
SET(CMAKE_FIND_ROOT_PATH "{paths.bldroot() / 'usr'};{paths.bldroot()}")
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
""")
        cargs.append("-DCMAKE_TOOLCHAIN_FILE=bootstrap.cmake")
    elif pkg.build_profile.cross and cross_build is not False:
        # map known profiles to cmake arch
        cmake_cpu = {
            "aarch64": "aarch64",
            "ppc64le": "ppc64le",
            "ppc64": "ppc64",
            "x86_64": "x86_64",
            "riscv64": "riscv64"
        }.get(pkg.build_profile.arch, None)
        if not cmake_cpu:
            pkg.error(f"unknown architecture: {pkg.build_profile.arch}")
        sroot = pkg.build_profile.sysroot
        # cross build: generate a toolchain file targeting the sysroot;
        # programs are found on the host, libraries/headers in the sysroot
        with open(
            pkg.cwd / build_dir / "cross.cmake", "w"
        ) as infile:
            infile.write(f"""
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_SYSTEM_VERSION 1)
SET(CMAKE_C_COMPILER {pkg.get_tool("CC")})
SET(CMAKE_CXX_COMPILER {pkg.get_tool("CXX")})
SET(CMAKE_C_COMPILER_TARGET {pkg.build_profile.short_triplet})
SET(CMAKE_CXX_COMPILER_TARGET {pkg.build_profile.short_triplet})
SET(CMAKE_ASM_COMPILER_TARGET {pkg.build_profile.short_triplet})
SET(CMAKE_CROSSCOMPILING TRUE)
SET(CMAKE_SYSROOT "{sroot}")
SET(CMAKE_SYSTEM_PROCESSOR {cmake_cpu})
SET(CMAKE_FIND_ROOT_PATH "{sroot / 'usr'};{sroot}")
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
""")
        cargs.append("-DCMAKE_TOOLCHAIN_FILE=cross.cmake")

    pkg.do(
        "cmake", cargs + [
            "-DCMAKE_INSTALL_PREFIX=/usr",
            "-DCMAKE_BUILD_TYPE=None",
            "-DCMAKE_INSTALL_LIBDIR=lib",
            "-DCMAKE_INSTALL_SBINDIR=bin"
        ] + pkg.configure_args + extra_args + [cdir],
        wrksrc = build_dir, env = {
            "CMAKE_GENERATOR": (
                "Ninja" if pkg.make_cmd == "ninja" else "Unix Makefiles"
            )
        }
    )
| 29.561798
| 72
| 0.643482
|
4a026fb90ee120f0fe8e613d83c89ba7dff84812
| 4,701
|
py
|
Python
|
models/preresnet.py
|
Pexure/FixMatch-pytorch
|
5b846fa44d434eeff97e4445da0577fa250c4963
|
[
"MIT"
] | null | null | null |
models/preresnet.py
|
Pexure/FixMatch-pytorch
|
5b846fa44d434eeff97e4445da0577fa250c4963
|
[
"MIT"
] | null | null | null |
models/preresnet.py
|
Pexure/FixMatch-pytorch
|
5b846fa44d434eeff97e4445da0577fa250c4963
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, no bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock.'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        # A 1x1 projection is needed only when the block changes the
        # spatial resolution or the channel count; otherwise the
        # shortcut is the identity (empty Sequential).
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes, self.expansion * planes,
                    kernel_size=1, stride=stride, bias=False,
                )
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        # Pre-activation: BN + ReLU come before each convolution, and
        # the shortcut branches off the activated input.
        pre = F.relu(self.bn1(x))
        identity = self.shortcut(pre)
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        return out + identity
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the original Bottleneck module.'''
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand, each
        # preceded by BN + ReLU (pre-activation ordering).
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1,
            bias=False,
        )
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, self.expansion * planes, kernel_size=1, bias=False,
        )
        # Identity shortcut unless resolution or channel count changes.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes, self.expansion * planes,
                    kernel_size=1, stride=stride, bias=False,
                )
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        identity = self.shortcut(pre)
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + identity
class PreResNet(nn.Module):
    """Pre-activation ResNet for small (CIFAR-style, 3x32x32) inputs.

    :param depth: network depth, one of 18, 34, 50, 101, 152
    :param num_classes: output size of the final linear classifier
    """

    def __init__(self, depth, num_classes=10):
        super().__init__()
        blocks = {18: PreActBlock, 34: PreActBlock, 50: PreActBottleneck,
                  101: PreActBottleneck, 152: PreActBottleneck}
        layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3],
                  101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}
        # Membership check before indexing: the original
        # `assert layers[depth]` raised a bare KeyError for unsupported
        # depths, so the assertion message was never shown.
        assert depth in layers, \
            'invalid depth for ResNet (depth should be one of 18, 34, 50, 101, 152)'
        self.in_planes = 64
        self.conv1 = conv3x3(3, 64)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0], stride=1)
        self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)
        self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)
        self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * blocks[depth].expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use
        # stride 1. self.in_planes tracks the running channel count.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, return_feat=False):
        """Classify x; with return_feat=True return the pooled features
        (before the final linear layer) instead of logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        if return_feat:
            return x
        x = self.fc(x)
        return x

    def forward2(self, x, lin=0, lout=5):
        """Partial forward pass between stage indices.

        Stages: 0 = stem (conv/bn/relu), 1-4 = residual layers,
        5 = pool + classifier. A stage s runs when lin <= s and
        lout >= s (expressed below as lin < s+1 and lout > s-1).
        """
        out = x
        if lin < 1 and lout > -1:
            out = self.conv1(out)
            out = self.bn1(out)
            out = F.relu(out)
        if lin < 2 and lout > 0:
            out = self.layer1(out)
        if lin < 3 and lout > 1:
            out = self.layer2(out)
        if lin < 4 and lout > 2:
            out = self.layer3(out)
        if lin < 5 and lout > 3:
            out = self.layer4(out)
        if lout > 4:
            # Fixed 4x4 average pool — matches 32x32 inputs after the
            # three stride-2 stages (feature map is 4x4 there).
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.fc(out)
        return out
if __name__ == '__main__':
    # Smoke test: build a PreResNet-18 and print the pooled feature shape.
    net = PreResNet(depth=18)
    batch = torch.rand(2, 3, 32, 32)
    features = net(batch, return_feat=True)
    print(features.shape)
| 35.345865
| 119
| 0.587535
|
4a02713c0e716642b00d65ed3ee655f95b74e968
| 14,337
|
py
|
Python
|
src/sparseml/pytorch/utils/logger.py
|
kevinaer/sparseml
|
067bb3e1e9ea35930eb7a105eac4864f9145678a
|
[
"Apache-2.0"
] | 2
|
2021-07-04T07:29:48.000Z
|
2021-08-17T05:59:40.000Z
|
src/sparseml/pytorch/utils/logger.py
|
kevinaer/sparseml
|
067bb3e1e9ea35930eb7a105eac4864f9145678a
|
[
"Apache-2.0"
] | null | null | null |
src/sparseml/pytorch/utils/logger.py
|
kevinaer/sparseml
|
067bb3e1e9ea35930eb7a105eac4864f9145678a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains code for loggers that help visualize the information from each modifier
"""
import logging
import os
import time
from abc import ABC, abstractmethod
from logging import Logger
from typing import Dict, Union
from numpy import ndarray
from torch import Tensor
try:
try:
from torch.utils.tensorboard import SummaryWriter
except (ModuleNotFoundError, ImportError):
from tensorboardX import SummaryWriter
tensorboard_import_error = None
except Exception as tensorboard_err:
SummaryWriter = None
tensorboard_import_error = tensorboard_err
from sparseml.utils import create_dirs
__all__ = ["PyTorchLogger", "PythonLogger", "TensorBoardLogger"]
class PyTorchLogger(ABC):
    """
    Base class that all modifier loggers must implement.

    Concrete subclasses (e.g. PythonLogger, TensorBoardLogger) must
    implement every abstract log_* hook below.

    :param name: name given to the logger, used for identification
    """

    def __init__(self, name: str):
        self._name = name

    @property
    def name(self) -> str:
        """
        :return: name given to the logger, used for identification
        """
        return self._name

    @abstractmethod
    def log_hyperparams(self, params: Dict):
        """
        :param params: Each key-value pair in the dictionary is the name of the
            hyper parameter and it's corresponding value.
        """
        raise NotImplementedError()

    @abstractmethod
    def log_scalar(
        self,
        tag: str,
        value: float,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the value with
        :param value: value to save
        :param step: global step for when the value was taken
        :param wall_time: global wall time for when the value was taken
        """
        raise NotImplementedError()

    @abstractmethod
    def log_scalars(
        self,
        tag: str,
        values: Dict[str, float],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the values with
        :param values: values to save
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken
        """
        raise NotImplementedError()

    @abstractmethod
    def log_histogram(
        self,
        tag: str,
        values: Union[Tensor, ndarray],
        bins: str = "tensorflow",
        max_bins: Union[int, None] = None,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the histogram with
        :param values: values to log as a histogram
        :param bins: the type of bins to use for grouping the values,
            follows tensorboard terminology
        :param max_bins: maximum number of bins to use (default None)
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken
        """
        raise NotImplementedError()

    @abstractmethod
    def log_histogram_raw(
        self,
        tag: str,
        min_val: Union[float, int],
        max_val: Union[float, int],
        num_vals: int,
        sum_vals: Union[float, int],
        sum_squares: Union[float, int],
        bucket_limits: Union[Tensor, ndarray],
        bucket_counts: Union[Tensor, ndarray],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        Log a histogram from pre-computed bucket statistics rather than
        raw values.

        :param tag: identifying tag to log the histogram with
        :param min_val: min value
        :param max_val: max value
        :param num_vals: number of values
        :param sum_vals: sum of all the values
        :param sum_squares: sum of the squares of all the values
        :param bucket_limits: upper value per bucket
        :param bucket_counts: number of values per bucket
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken
        """
        raise NotImplementedError()
class PythonLogger(PyTorchLogger):
    """
    Modifier logger that handles printing values into a python logger instance.

    :param logger: a logger instance to log to, if None then will create it's own
    :param name: name given to the logger, used for identification;
        defaults to python
    """

    def __init__(self, logger: Logger = None, name: str = "python"):
        super().__init__(name)
        # Fall back to a module-level logger when none was supplied.
        self._logger = logger or logging.getLogger(__name__)

    def __getattr__(self, item):
        # Delegate any unknown attribute access to the wrapped Logger.
        return getattr(self._logger, item)

    def log_hyperparams(self, params: Dict):
        """
        :param params: Each key-value pair in the dictionary is the name of the
            hyper parameter and it's corresponding value.
        """
        body = "\n".join(
            "    {}: {}".format(key, value) for key, value in params.items()
        )
        self._logger.info("{}-HYPERPARAMS:\n".format(self.name) + body)

    def log_scalar(
        self,
        tag: str,
        value: float,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the value with
        :param value: value to save
        :param step: global step for when the value was taken
        :param wall_time: global wall time for when the value was taken,
            defaults to time.time()
        """
        wall_time = time.time() if wall_time is None else wall_time
        self._logger.info(
            "{}-SCALAR {} [{} - {}]: {}".format(
                self.name, tag, step, wall_time, value
            )
        )

    def log_scalars(
        self,
        tag: str,
        values: Dict[str, float],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the values with
        :param values: values to save
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        wall_time = time.time() if wall_time is None else wall_time
        header = "{}-SCALARS {} [{} - {}]:\n".format(
            self.name, tag, step, wall_time
        )
        body = "\n".join(
            "{}: {}".format(key, value) for key, value in values.items()
        )
        self._logger.info(header + body)

    def log_histogram(
        self,
        tag: str,
        values: Union[Tensor, ndarray],
        bins: str = "tensorflow",
        max_bins: Union[int, None] = None,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the histogram with
        :param values: values to log as a histogram
        :param bins: the type of bins to use for grouping the values,
            follows tensorboard terminology
        :param max_bins: maximum number of bins to use (default None)
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        # Plain-text logging cannot render a histogram; note that fact.
        wall_time = time.time() if wall_time is None else wall_time
        self._logger.info(
            "{}-HISTOGRAM {} [{} - {}]: cannot log".format(
                self.name, tag, step, wall_time
            )
        )

    def log_histogram_raw(
        self,
        tag: str,
        min_val: Union[float, int],
        max_val: Union[float, int],
        num_vals: int,
        sum_vals: Union[float, int],
        sum_squares: Union[float, int],
        bucket_limits: Union[Tensor, ndarray],
        bucket_counts: Union[Tensor, ndarray],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the histogram with
        :param min_val: min value
        :param max_val: max value
        :param num_vals: number of values
        :param sum_vals: sum of all the values
        :param sum_squares: sum of the squares of all the values
        :param bucket_limits: upper value per bucket
        :param bucket_counts: number of values per bucket
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        # Plain-text logging cannot render a histogram; note that fact.
        wall_time = time.time() if wall_time is None else wall_time
        self._logger.info(
            "{}-HISTOGRAM {} [{} - {}]: cannot log".format(
                self.name, tag, step, wall_time
            )
        )
class TensorBoardLogger(PyTorchLogger):
    """
    Modifier logger that handles outputting values into a TensorBoard log directory
    for viewing in TensorBoard.

    :param log_path: the path to create a SummaryWriter at. writer must be None
        to use if not supplied (and writer is None),
        will create a TensorBoard dir in cwd
    :param writer: the writer to log results to,
        if none is given creates a new one at the log_path
    :param name: name given to the logger, used for identification;
        defaults to tensorboard
    """

    def __init__(
        self,
        log_path: str = None,
        writer: SummaryWriter = None,
        name: str = "tensorboard",
    ):
        super().__init__(name)

        # Surface the deferred import failure (neither torch tensorboard
        # nor tensorboardX available) at construction time, not on the
        # first log call.
        if tensorboard_import_error:
            raise tensorboard_import_error

        # log_path and writer are mutually exclusive configuration paths.
        if writer and log_path:
            raise ValueError(
                (
                    "log_path given:{} and writer object passed in, "
                    "to create a writer at the log path set writer=None"
                ).format(log_path)
            )
        elif not writer and not log_path:
            # Neither given: default to ./tensorboard in the cwd.
            log_path = os.path.join(".", "tensorboard")

        if log_path:
            create_dirs(log_path)

        self._writer = writer if writer is not None else SummaryWriter(log_path)

    def log_hyperparams(self, params: Dict):
        """
        :param params: Each key-value pair in the dictionary is the name of the
            hyper parameter and it's corresponding value.
        """
        try:
            self._writer.add_hparams(params, {})
        except Exception:
            # fall back in case add_hparams isn't available; log each
            # hyper parameter as an individual scalar instead
            for name, val in params.items():
                self.log_scalar(name, val)

    def log_scalar(
        self,
        tag: str,
        value: float,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the value with
        :param value: value to save
        :param step: global step for when the value was taken
        :param wall_time: global wall time for when the value was taken,
            defaults to time.time()
        """
        # Thin positional pass-through to SummaryWriter.add_scalar.
        self._writer.add_scalar(tag, value, step, wall_time)

    def log_scalars(
        self,
        tag: str,
        values: Dict[str, float],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the values with
        :param values: values to save
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        # Thin positional pass-through to SummaryWriter.add_scalars.
        self._writer.add_scalars(tag, values, step, wall_time)

    def log_histogram(
        self,
        tag: str,
        values: Union[Tensor, ndarray],
        bins: str = "tensorflow",
        max_bins: Union[int, None] = None,
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the histogram with
        :param values: values to log as a histogram
        :param bins: the type of bins to use for grouping the values,
            follows tensorboard terminology
        :param max_bins: maximum number of bins to use (default None)
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        # Positional order here is (tag, values, global_step, bins,
        # walltime, max_bins) — it differs from this method's signature.
        self._writer.add_histogram(tag, values, step, bins, wall_time, max_bins)

    def log_histogram_raw(
        self,
        tag: str,
        min_val: Union[float, int],
        max_val: Union[float, int],
        num_vals: int,
        sum_vals: Union[float, int],
        sum_squares: Union[float, int],
        bucket_limits: Union[Tensor, ndarray],
        bucket_counts: Union[Tensor, ndarray],
        step: Union[None, int] = None,
        wall_time: Union[None, float] = None,
    ):
        """
        :param tag: identifying tag to log the histogram with
        :param min_val: min value
        :param max_val: max value
        :param num_vals: number of values
        :param sum_vals: sum of all the values
        :param sum_squares: sum of the squares of all the values
        :param bucket_limits: upper value per bucket
        :param bucket_counts: number of values per bucket
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken,
            defaults to time.time()
        """
        # Thin positional pass-through to SummaryWriter.add_histogram_raw.
        self._writer.add_histogram_raw(
            tag,
            min_val,
            max_val,
            num_vals,
            sum_vals,
            sum_squares,
            bucket_limits,
            bucket_counts,
            step,
            wall_time,
        )
| 33.264501
| 83
| 0.601032
|
4a0271c22ef938f483063fe61369f5d3f8bfafca
| 13,861
|
py
|
Python
|
defunct/age-defunct/age/train_n_test_male_model.py
|
amriduls/Gender-Age-Classification-CNN
|
4e3e4f0e4e9ef373e7235b739f491afd062325d2
|
[
"MIT"
] | 43
|
2017-05-18T08:50:57.000Z
|
2022-03-29T07:40:24.000Z
|
defunct/age-defunct/age/train_n_test_male_model.py
|
amriduls/Gender-Age-Classification-CNN
|
4e3e4f0e4e9ef373e7235b739f491afd062325d2
|
[
"MIT"
] | 5
|
2017-08-07T17:08:43.000Z
|
2020-03-08T10:07:06.000Z
|
defunct/age-defunct/age/train_n_test_male_model.py
|
amriduls/Gender-Age-Classification-CNN
|
4e3e4f0e4e9ef373e7235b739f491afd062325d2
|
[
"MIT"
] | 31
|
2016-12-22T08:17:20.000Z
|
2021-06-20T19:32:05.000Z
|
import tensorflow as tf
import numpy as np
import sys
import pickle
import random
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import gridspec
from collections import Counter
# Selects the one-hot label width used by one_hot():
# 'gender' -> 2 classes, any other value (e.g. 'age') -> 8 age groups.
train_mode = 'age'
def load_train_file(name):
    """Unpickle and return the training data stored at ``<name>.pkl``."""
    pkl_path = name + '.pkl'
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
def load_val_file(name):
    """Unpickle and return the validation data stored at ``<name>.pkl``."""
    pkl_path = name + '.pkl'
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
def one_hot(y, mode=None):
    """One-hot encode integer class labels.

    :param y: 1-D array-like of integer labels
    :param mode: classification mode; 'gender' yields 2 columns, any
        other value yields 8 (age groups). Defaults to the module-level
        ``train_mode`` so existing callers keep their behavior, while
        allowing the mode to be injected explicitly for testing/reuse.
    :return: numpy array of shape (len(y), n_classes) with one 1 per row
    """
    if mode is None:
        # Preserve the original behavior of reading the module global.
        mode = train_mode
    n_classes = 2 if mode == 'gender' else 8
    y_ret = np.zeros((len(y), n_classes))
    # Set a single 1 per row at the label's column.
    y_ret[np.arange(len(y)), y.astype(int)] = 1
    return y_ret
def one_hot_to_number(y):
    """Convert one-hot encoded rows back to their integer class labels."""
    # np.where on a 2-D mask returns (row_indices, col_indices); the
    # column index of each 1 is the class label.
    _, cols = np.where(y == 1)
    return cols
def load_test_file(name):
    """Unpickle and return the test data stored at ``<name>.pkl``."""
    pkl_path = name + '.pkl'
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
def train_and_test():
#List of cv folds
cv_fold_names = ['0','1','2','3']
pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/male/'
#pickle_file_path_prefix = '/home/ubuntu/gender_age/gender_based_train_and_testing/gender_based_data/cv/male/'
past_tacc = 0
past_tloss = 3.0
test_fold_names = ['predicted_males_test']
#pickle_file_path_prefix = '/Volumes/Mac-B/faces-recognition/gender_neutral_data/'
pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/final_test_data_based_on_predicted_genders/male/'
#/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/final_test_data/male/'
print('Trying to read test fold: %s......' % test_fold_names[0])
test_file = load_test_file(pickle_file_path_prefix+test_fold_names[0])
test_images = []
test_ages = []
imgs = np.array(test_file['images'])
ages = np.array(test_file['gt_ages'])
one_hot1 = one_hot(ages)
test_images.append(imgs)
test_ages.append(one_hot1)
test_images = np.array(test_images)
test_images = np.vstack(test_images)
test_ages = np.array(test_ages)
test_ages = np.vstack(test_ages)
X_test = test_images
y_test = test_ages
print ("Test data done for fold: %s" % test_fold_names[0])
print X_test.shape
print y_test.shape
print(' ')
'''
for fold in cv_fold_names:
print('Trying to read training fold: %s......' % fold)
train_file = load_train_file(pickle_file_path_prefix+'male_cv_train_'+fold)
val_file = load_val_file(pickle_file_path_prefix+'male_cv_val_'+fold)
train_images = []
train_ages = []
val_images = []
val_ages = []
age_group_ratios = np.zeros(8)
#Load all the training images for CV fold. Implies: One CV fold has 3-sub folds.
#So, it'll load images from all the 3-sub folds
for i in range(len(train_file)):
current_file = train_file[i]
imgs = np.array(current_file['images'])
ages = np.array(current_file['ages'])
one_hot1 = one_hot(ages)
train_images.append(imgs)
train_ages.append(one_hot1)
val_images = np.array(val_file['images'])
val_ages = np.array(val_file['ages'])
val_ages = one_hot(val_ages)
train_images = np.array(train_images)
train_images = np.vstack(train_images)
train_ages = np.array(train_ages)
train_ages = np.vstack(train_ages)
X_train = train_images
y_train = train_ages
X_val = val_images
y_val = val_ages
print ("Train Details for fold: %s" % fold)
print X_train.shape
print y_train.shape
print ("Val Details for fold: %s" % fold)
print X_val.shape
print y_val.shape
#Find the weighted age_group_ratios
age_group_counters = Counter(one_hot_to_number(y_train))
age_group_counters_dict = dict(age_group_counters)
print age_group_counters_dict
sum_age_groups = sum(age_group_counters_dict.values())
for i in range(len(age_group_counters_dict)):
age_group_ratios[i] = (1 - (float(age_group_counters_dict[i])/sum_age_groups))
print('Age group ratios: ')
print age_group_ratios
print (' ')
X_train, y_train = shuffle(X_train, y_train, random_state=42)
print ('Training, Validation done for fold: %s\n' % fold)
'''
age_group_ratios = [0.90,0.90,0.90,0.8,0.8,0.90,0.90,0.90]
image_size = 227
num_channels = 3
batch_size = 50
width = 256
height = 256
new_width = 227
new_height = 227
num_labels = 8
sess = tf.InteractiveSession()
#layer initialization functions
def conv_ortho_weights(chan_in,filter_h,filter_w,chan_out):
bound = np.sqrt(6./(chan_in*filter_h*filter_w + chan_out*filter_h*filter_w))
W = np.random.random((chan_out, chan_in * filter_h * filter_w))
u, s, v = np.linalg.svd(W,full_matrices=False)
if u.shape[0] != u.shape[1]:
W = u.reshape((chan_in, filter_h, filter_w, chan_out))
else:
W = v.reshape((chan_in, filter_h, filter_w, chan_out))
return W.astype(np.float32)
def dense_ortho_weights(fan_in,fan_out):
bound = np.sqrt(2./(fan_in+fan_out))
W = np.random.randn(fan_in,fan_out)*bound
u, s, v = np.linalg.svd(W,full_matrices=False)
if u.shape[0] != u.shape[1]:
W = u
else:
W = v
return W.astype(np.float32)
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
return tf.float32
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.01)
return initial
def bias_variable(shape):
initial = tf.constant(0.0, shape=shape)
return initial
def conv2d(x, W, stride=[1,1,1,1], pad='SAME'):
return tf.nn.conv2d(x, W, strides=stride, padding=pad)
def max_pool(x,k,stride=[1,1,1,1],pad='SAME'):
return tf.nn.max_pool(x, k, strides=stride,padding=pad)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
tfx = tf.placeholder(tf.float32, shape=[None,image_size,image_size,num_channels])
tfy = tf.placeholder(tf.float32, shape=[None,num_labels])
class_weight = tf.constant(age_group_ratios,dtype=tf.float32)
#Conv Layer 1
w1 = tf.Variable(weight_variable([7,7,3,96]),name="w1")
b1 = tf.Variable(bias_variable([96]),name="b1")
c1 = tf.nn.relu(conv2d(tfx,w1,stride=[1,4,4,1],pad='SAME') + b1)
mxp1 = max_pool(c1,k=[1,3,3,1],stride=[1,2,2,1],pad='VALID')
lrn1 = tf.nn.local_response_normalization(mxp1, alpha=0.0001, beta=0.75)
#Conv Layer 2
w2 = tf.Variable(weight_variable([5,5,96,256]),name="w2")
b2 = tf.Variable(bias_variable([256]),name="b2")
c2 = tf.nn.relu(conv2d(lrn1,w2,stride=[1,1,1,1],pad='SAME') + b2)
mxp2 = max_pool(c2,k=[1,3,3,1],stride=[1,2,2,1],pad='SAME')
lrn2 = tf.nn.local_response_normalization(mxp2, alpha=0.0001, beta=0.75)
#Conv Layer 3
w3 = tf.Variable(weight_variable([3,3,256,384]),name="w3")
b3 = tf.Variable(bias_variable([384]),name="b3")
c3 = tf.nn.relu(conv2d(lrn2,w3,stride=[1,1,1,1],pad='SAME') + b3)
mxp3 = max_pool(c3,k=[1,3,3,1],stride=[1,2,2,1],pad='SAME')
#FC Layer 1
wfc1 = tf.Variable(weight_variable([7 * 7 * 384, 512]),name="wfc1")
bfc1 = tf.Variable(bias_variable([512]),name="bfc1")
mxp1_flat = tf.reshape(mxp3, [-1, 7 * 7 * 384])
fc1 = tf.nn.relu(tf.matmul(mxp1_flat, wfc1) + bfc1)
dfc1 = tf.nn.dropout(fc1, 0.5)
#FC Layer 2
wfc2 = tf.Variable(weight_variable([512, 512]),name="wfc2")
bfc2 = tf.Variable(bias_variable([512]),name="bfc2")
fc2 = tf.nn.relu(tf.matmul(dfc1, wfc2) + bfc2)
dfc2 = tf.nn.dropout(fc2, 0.7)
#FC Layer 3
wfc3 = tf.Variable(weight_variable([512, num_labels]),name="wfc3")
bfc3 = tf.Variable(bias_variable([num_labels]),name="bfc3")
fc3 = (tf.matmul(dfc2, wfc3) + bfc3)
print fc3.get_shape
weighted_logits = tf.mul(fc3, class_weight) # shape [batch_size, num_labels]
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(weighted_logits,tfy))
# L2 regularization for the fully connected parameters.
regularizers = ( tf.nn.l2_loss(wfc3) + tf.nn.l2_loss(bfc3) +
tf.nn.l2_loss(wfc2) + tf.nn.l2_loss(bfc2) +
tf.nn.l2_loss(wfc1) + tf.nn.l2_loss(bfc1) +
tf.nn.l2_loss(w2) + tf.nn.l2_loss(b2) +
tf.nn.l2_loss(w1) + tf.nn.l2_loss(b1) +
tf.nn.l2_loss(w3) + tf.nn.l2_loss(b3)
)
# Add the regularization term to the loss.
cross_entropy += 5e-4 * regularizers
prediction=tf.nn.softmax(fc3)
learning_rate = tf.placeholder(tf.float32, shape=[])
#train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
#train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
1e-3, # Base learning rate.
batch * batch_size, # Current index into the dataset.
10000, # Decay step.
0.0005, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
train_step = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(cross_entropy,global_step=batch)
# Add an op to initialize the variables.
init_op = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
sess.run(init_op)
num_steps = 50000
'''
for i in range(num_steps):
indices = np.random.permutation(X_train.shape[0])[:batch_size]
X_batch = X_train[indices,:,:,:]
y_batch = y_train[indices,:]
rowseed = random.randint(0,29)
colseed = random.randint(0,29)
X_batch = X_batch[:,rowseed:rowseed+227,colseed:colseed+227,:]
#random ud flipping
if random.random() < .5:
X_batch = X_batch[:,::-1,:,:]
#random lr flipping
if random.random() < .5:
X_batch = X_batch[:,:,::-1,:]
lr = 1e-3
feed_dict = {tfx : X_batch, tfy : y_batch, learning_rate: lr}
_, l, predictions = sess.run([train_step, cross_entropy, prediction], feed_dict=feed_dict)
if (i % 100 == 0):
print("Iteration: %i. Train loss %.5f, Minibatch accuracy: %.1f%%" % (i,l,accuracy(predictions,y_batch)))
#validation accuracy
if (i % 1000 == 0):
val_accuracies = []
val_losses = []
for j in range(0,X_val.shape[0],batch_size):
X_batch = X_val[j:j+batch_size,:,:,:]
y_batch = y_val[j:j+batch_size,:]
#Center Crop
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
X_batch = X_batch[:,left:right,top:bottom,:]
feed_dict = {tfx : X_batch, tfy : y_batch}
l, predictions = sess.run([cross_entropy,prediction], feed_dict=feed_dict)
val_accuracies.append(accuracy(predictions,y_batch))
val_losses.append(l)
print("Iteration: %i. Val loss %.5f Validation Minibatch accuracy: %.1f%%" % (i, np.mean(val_losses), np.mean(val_accuracies)))
if (i % 1000 == 0):
test_accuracies = []
test_losses = []
preds = []
for j in range(0,X_test.shape[0],batch_size):
X_batch = X_test[j:j+batch_size,:,:,:]
y_batch = y_test[j:j+batch_size,:]
#Center Crop
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
X_batch = X_batch[:,left:right,top:bottom,:]
feed_dict={tfx:X_batch,tfy:y_batch}
l, predictions = sess.run([cross_entropy,prediction], feed_dict=feed_dict)
test_accuracies.append(accuracy(predictions,y_batch))
test_losses.append(l)
preds.append(np.argmax(predictions, 1))
tacc = np.mean(test_accuracies)
print("Iteration: %i. Test loss %.5f. Test Minibatch accuracy: %.5f" % (i, np.mean(test_losses),tacc))
# Save the variables to disk.
if tacc > past_tacc:
past_tacc = tacc
save_path = saver.save(sess, "/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/male/saved_model/model.ckpt")
print("Model saved in file: %s" % save_path)
pred = np.concatenate(preds)
np.savetxt('/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/male/male_age_prediction.txt',pred,fmt='%.0f')
'''
print "Restoring the model and predicting....."
ckpt = tf.train.get_checkpoint_state("/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/old_but_successful_exps/male/best_observations/saved_model/")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, "/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/old_but_successful_exps/male/best_observations/saved_model/model.ckpt")
print "Model loaded"
for i in range(num_steps):
#run model on test
if (i % 1000 == 0):
print i
preds = []
for j in range(0,X_test.shape[0],batch_size):
X_batch = X_test[j:j+batch_size,:,:,:]
#Center Crop
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
X_batch = X_batch[:,left:right,top:bottom,:]
feed_dict={tfx:X_batch}
p = sess.run(prediction, feed_dict=feed_dict)
preds.append(np.argmax(p, 1))
pred = np.concatenate(preds)
p = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/final_test_data_based_on_predicted_genders/male/'
np.savetxt(p+'predicted_male_age_predictions_'+str(i)+'.txt',pred,fmt='%.0f')
print ("Done predicitng...")
else:
print ("No checkpoint file found")
def main():
    """Script entry point: delegate to the training/prediction routine."""
    train_and_test()


if __name__ == '__main__':
    main()
| 31.218468
| 206
| 0.681985
|
4a027435ecf3af6487f16abe96dd5f791b830c08
| 3,166
|
py
|
Python
|
{{cookiecutter.project_name}}/dodo_continue.py
|
carmelom/cookiecutter-xmds-gpe
|
b2aad5a6d448078e5d0442990ea96df3ea10e65c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/dodo_continue.py
|
carmelom/cookiecutter-xmds-gpe
|
b2aad5a6d448078e5d0442990ea96df3ea10e65c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/dodo_continue.py
|
carmelom/cookiecutter-xmds-gpe
|
b2aad5a6d448078e5d0442990ea96df3ea10e65c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Create: 01-2020 - Carmelo Mordini <carmelo> <carmelo.mordini@unitn.it>
"""Module docstring
"""
from pathlib import Path
from src import tasks
from doit import get_var
import numpy as np
import h5py
from ruamel import yaml
from copy import deepcopy
# doit configuration for the continuation workflow; a dedicated dep_file
# keeps its task state separate from the main dodo.py database.
DOIT_CONFIG = {
    'verbosity': 2,
    'backend': 'json',
    # 'default_tasks': [
    #     'merge',
    # ],
    'dep_file': '.doit_continue.db',
}

# The config file can be overridden on the command line (doit key=value syntax).
config_file = get_var('config_file', 'configure.yaml')
with open(config_file, 'r') as f:
    conf = yaml.safe_load(f)

build_dir = Path(conf['build_dir'])
build_dir.mkdir(parents=True, exist_ok=True)
run_dir = Path(conf['run_dir'])

# sequence_index is mandatory: without it get_var returns None and int(None)
# raises, aborting immediately.
sequence_index = int(get_var('sequence_index', None)) # this will raise a TypeError unless you specify it
run_number = int(get_var('run_number', 0))

# HDF5 file of the finished run that the continuation starts from.
h5filepath = run_dir / \
    conf['h5filepath'].format(
        sequence_index=sequence_index, run_number=run_number)

# Input/output files of the continuation run, both placed under build_dir.
init_filename = build_dir / conf['continue']['init_filename']
output_filename = build_dir / conf['continue']['output_filename']
def task_init():
    """Doit task: seed the continuation run from the last realtime frame."""

    def _write_initial_state(src, dst):
        # Pull the final wavefunction sample (real and imaginary parts)
        # plus the grid out of the finished realtime run. Datasets are
        # listed explicitly rather than copied generically.
        with h5py.File(src, 'r') as fin:
            grp = fin['realtime/1']
            grid = grp['x'][:]
            psi_im = grp['psiI'][-1, ...]
            psi_re = grp['psiR'][-1, ...]
        # Write them out as the initial condition for the continue run.
        with h5py.File(dst, 'w') as fout:
            out = fout.require_group('1')
            out['x'] = grid
            out['psiI'] = psi_im
            out['psiR'] = psi_re

    return {
        'actions': [(_write_initial_state, (h5filepath, init_filename))],
        'file_dep': [h5filepath],
        'targets': [init_filename],
    }
def task_continue():
    """Doit task: build and run the XMDS 'continue' simulation."""
    # Work on a copy so the shared module-level config stays untouched,
    # then overlay the task-specific section on top of the base settings.
    merged = deepcopy(conf)
    merged['exec_filename'] = 'realtime'
    merged = tasks.rec_update(merged, merged['continue'])
    return tasks.xmds_run(build_dir, merged)
def task_merge():
    """Doit task: append continuation output back onto the original run file."""
    tmpfile = run_dir / 'tmp.h5'

    def _merge(source, dest):
        # Read the continuation results first, then append them to the
        # original 'realtime/1' group of the destination file.
        with h5py.File(source, 'r') as new_file:
            new_grp = new_file['1']
            t_new = new_grp['t'][:]
            psi_im_new = new_grp['psiI'][:]
            psi_re_new = new_grp['psiR'][:]
        with h5py.File(dest, 'a') as base_file:
            base = base_file['realtime/1']
            # Assumes realtime has initial_sample="no"; otherwise the new
            # datasets would need to be read with [1:] to drop the overlap.
            t_old = base['t'][:]
            del base['t']
            # Continuation times restart from 0, so offset by the last old time.
            base['t'] = np.concatenate([t_old, t_new + t_old[-1]])
            psi_im_old = base['psiI'][:]
            del base['psiI']
            base['psiI'] = np.concatenate([psi_im_old, psi_im_new], axis=0)
            psi_re_old = base['psiR'][:]
            del base['psiR']
            base['psiR'] = np.concatenate([psi_re_old, psi_re_new], axis=0)

    return {
        'actions': [
            (_merge, (output_filename, h5filepath)),
            # Deleting an HDF5 dataset does not shrink the file, so repack
            # into a temp file and swap it in; faster than maintaining
            # expandable (maxshape=(None,)) datasets in the collect task.
            f"h5repack {h5filepath} {tmpfile}",
            f"mv {tmpfile} {h5filepath}"
        ]
    }
| 26.830508
| 106
| 0.554011
|
4a0274bc9245d99163cf8abf990e733bd3dc901d
| 4,474
|
py
|
Python
|
pymcuprog/serialupdi/readwrite.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 28
|
2021-05-08T19:28:33.000Z
|
2022-03-23T06:23:13.000Z
|
pymcuprog/serialupdi/readwrite.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 20
|
2021-05-24T19:20:39.000Z
|
2022-03-12T20:10:30.000Z
|
pymcuprog/serialupdi/readwrite.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 11
|
2021-06-24T20:59:16.000Z
|
2022-03-23T23:59:38.000Z
|
"""
Read/write access provider for UPDI
"""
from logging import getLogger
from pymcuprog.pymcuprog_errors import PymcuprogError
from . import constants
class UpdiReadWrite(object):
    """
    Read/write access primitives for UPDI applications.

    A thin convenience wrapper around the datalink layer supplied at
    construction time.
    """

    def __init__(self, datalink):
        self.logger = getLogger(__name__)
        self.datalink = datalink

    def read_cs(self, address):
        """
        Read a register from Control/Status space.

        :param address: address (index) to read
        :return: value read
        """
        return self.datalink.ldcs(address)

    def write_cs(self, address, value):
        """
        Write a register in Control/Status space.

        :param address: address (index) to write
        :param value: 8-bit value to write
        """
        return self.datalink.stcs(address, value)

    def write_key(self, size, key):
        """
        Send a KEY to UPDI.

        :param size: size of key to send
        :param key: key value
        """
        return self.datalink.key(size, key)

    def read_sib(self):
        """
        Read the System Information Block from UPDI.

        :return: SIB string (bytearray) read
        """
        return self.datalink.read_sib()

    def read_byte(self, address):
        """
        Read one byte from the UPDI address space.

        :param address: address to read from
        :return: value read
        """
        return self.datalink.ld(address)

    def write_byte(self, address, value):
        """
        Write one byte to the UPDI address space.

        :param address: address to write to
        :param value: value to write
        """
        return self.datalink.st(address, value)

    def read_data(self, address, size):
        """
        Bulk byte read via the UPDI pointer with auto-increment.

        :param address: address to read from
        :param size: number of bytes to read
        :raises PymcuprogError: if size exceeds the maximum repeat count
        """
        self.logger.debug("Reading %d bytes from 0x%04X", size, address)
        if size > constants.UPDI_MAX_REPEAT_SIZE:
            raise PymcuprogError("Cant read that many bytes in one go")
        link = self.datalink
        # Load the pointer, then arm a repeat so one load covers the block
        link.st_ptr(address)
        if size > 1:
            link.repeat(size)
        return link.ld_ptr_inc(size)

    def read_data_words(self, address, words):
        """
        Bulk word read via the UPDI pointer with auto-increment.

        :param address: address to read from
        :param words: number of words to read
        :raises PymcuprogError: if the word count exceeds the maximum repeat count
        """
        self.logger.debug("Reading %d words from 0x%04X", words, address)
        # A word is two bytes, hence half the byte repeat limit
        if words > constants.UPDI_MAX_REPEAT_SIZE >> 1:
            raise PymcuprogError("Cant read that many words in one go")
        link = self.datalink
        link.st_ptr(address)
        if words > 1:
            link.repeat(words)
        return link.ld_ptr_inc16(words)

    def write_data_words(self, address, data):
        """
        Bulk word write via the UPDI pointer with auto-increment.

        :param address: address to write to
        :param data: bytes to write; pairs are combined low-byte-first
        :raises PymcuprogError: if the data does not fit in one repeat
        """
        # A single word can be stored directly, no pointer setup needed
        if len(data) == 2:
            return self.datalink.st16(address, data[0] + (data[1] << 8))
        # Limit is expressed in bytes: max repeat count worth of words
        if len(data) > constants.UPDI_MAX_REPEAT_SIZE << 1:
            raise PymcuprogError("Invalid length")
        link = self.datalink
        link.st_ptr(address)
        link.repeat(len(data) >> 1)
        return link.st_ptr_inc16(data)

    def write_data(self, address, data):
        """
        Bulk byte write via the UPDI pointer with auto-increment.

        :param address: address to write to
        :param data: bytes to write
        :raises PymcuprogError: if the data does not fit in one repeat
        """
        # One or two bytes can be stored directly, no pointer setup needed
        if len(data) == 1:
            return self.datalink.st(address, data[0])
        if len(data) == 2:
            self.datalink.st(address, data[0])
            return self.datalink.st(address + 1, data[1])
        if len(data) > constants.UPDI_MAX_REPEAT_SIZE:
            raise PymcuprogError("Invalid length")
        link = self.datalink
        link.st_ptr(address)
        link.repeat(len(data))
        return link.st_ptr_inc(data)
| 27.115152
| 73
| 0.584935
|
4a027560f8313c509107b8f641b713c28a37b406
| 518
|
py
|
Python
|
python/MultipartFileUpload.py
|
partyyoung/mooncloud-fileupload-java-sdk
|
0db0fc52bf8c008a321b579be36e691bae9a4e25
|
[
"Apache-2.0"
] | null | null | null |
python/MultipartFileUpload.py
|
partyyoung/mooncloud-fileupload-java-sdk
|
0db0fc52bf8c008a321b579be36e691bae9a4e25
|
[
"Apache-2.0"
] | null | null | null |
python/MultipartFileUpload.py
|
partyyoung/mooncloud-fileupload-java-sdk
|
0db0fc52bf8c008a321b579be36e691bae9a4e25
|
[
"Apache-2.0"
] | null | null | null |
import requests

# For demonstration only -- any URL will do. This address returns 404,
# but the request body can still be inspected.
url = "http://127.0.0.1:8080/file/upload2path"

# Workaround: organize the parameters as below. This is the core trick for
# emulating a multipart/form-data request with the `files` argument --
# the plain-text 'path' field is sent as a form field (filename=None).
files = {'file': ('MultipartFileUpload.java', open('src/main/java/net/mooncloud/fileupload/MultipartFileUpload.java', 'rb'), 'application/octet-stream'), 'path': (None, "/tmp/upload/", 'text/plain')}
res = requests.post(url, files=files)

# Check whether the request body is built as expected; with a real endpoint
# a success response confirms it. These lines are for demonstration.
# print (res.request.body)
# Inspect the request headers
# print (res.request.headers)
# Inspect the response body
print (res.text)
| 37
| 199
| 0.745174
|
4a02758cdc0231313c81ed97a775c294ca10bbe7
| 4,460
|
py
|
Python
|
indicators/models/__init__.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
indicators/models/__init__.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
indicators/models/__init__.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
from django.db import models
from profiles.abstract_models import Described
from .source import Source, CensusSource, CKANSource, CKANGeomSource, CKANRegionalSource
from .time import TimeAxis, RelativeTimeAxis, StaticTimeAxis, StaticConsecutiveTimeAxis
from .variable import Variable, CensusVariable, CKANVariable, CensusVariableSource
from .viz import DataViz, Table, Chart, MiniMap, VizVariable
class SubdomainIndicator(models.Model):
    """Through model linking a Subdomain to an Indicator with an explicit order."""
    subdomain = models.ForeignKey('Subdomain', on_delete=models.CASCADE, related_name='subdomain_to_indicator')
    indicator = models.ForeignKey('Indicator', on_delete=models.CASCADE, related_name='indicator_to_subdomain')
    # Position of the indicator within its subdomain (lower sorts first).
    order = models.IntegerField(default=0)

    class Meta:
        # Each indicator may appear at most once per subdomain.
        unique_together = ('subdomain', 'indicator',)
        ordering = ('order',)
class Domain(Described):
    """ Main categories for organizing indicators """
    # Position of the domain in listings (lower sorts first).
    order = models.IntegerField(default=0)

    class Meta:
        ordering = ('order',)

    def __str__(self):
        return self.name
class Subdomain(Described):
    """Second-level grouping of indicators, nested under a Domain."""
    domain = models.ForeignKey('Domain', related_name='subdomains', on_delete=models.PROTECT)
    # Position of the subdomain within its domain (lower sorts first).
    order = models.IntegerField(default=0)
    # Indicators attached via the ordered SubdomainIndicator through-model.
    inds = models.ManyToManyField('Indicator', related_name='subdomains', through='SubdomainIndicator')

    @property
    def indicators(self):
        # Order by the through relation so the through-model's 'order' field
        # applies -- NOTE(review): confirm this produces the intended sort.
        return self.inds.order_by('indicator_to_subdomain')

    class Meta:
        ordering = ('order',)

    def __str__(self):
        return self.name
class IndicatorDataViz(models.Model):
    """Through model linking an Indicator to a DataViz with an explicit order."""
    indicator = models.ForeignKey('Indicator', related_name='indicator_to_dataviz', on_delete=models.CASCADE)
    data_viz = models.ForeignKey('DataViz', related_name='dataviz_to_indicator', on_delete=models.CASCADE)
    # Position of the visualization within its indicator (lower sorts first).
    order = models.IntegerField(default=0)

    def __str__(self):
        return f'{self.indicator.slug} - {self.data_viz.slug}'

    class Meta:
        ordering = ('order',)
        # Each visualization may appear at most once per indicator.
        unique_together = ('indicator', 'data_viz',)
class Indicator(Described):
    """ Indicators """
    # NOTE(review): LAYOUT_CHOICES is not referenced by any field in this
    # file chunk -- confirm whether it is used elsewhere or is dead code.
    LAYOUT_CHOICES = (
        ('A', 'Style A'),
        ('B', 'Style B'),
        ('C', 'Style C'),
        ('D', 'Style D'),
    )
    long_description = models.TextField(
        help_text='A thorough description for long-form representation.',
        blank=True,
        null=True,
    )
    limitations = models.TextField(
        help_text='Describe what limitations the data may have '
                  '(e.g. small sample size, difficulties in collecting data',
        blank=True,
        null=True,
    )
    # NOTE(review): the help_text for `importance`, `source` and `provenance`
    # below is identical (looks like a copy-paste of the same text) -- verify
    # the intended wording for each field before editing admin-facing text.
    importance = models.TextField(
        help_text='Describe the data collection process, highlighting areas '
                  'where bias and assumptions made during the collection '
                  'can impact how the data are interpreted',
        blank=True,
        null=True,
    )
    source = models.TextField(
        help_text='Describe the data collection process, highlighting areas '
                  'where bias and assumptions made during the collection '
                  'can impact how the data are interpreted',
        blank=True,
        null=True,
    )
    provenance = models.TextField(
        help_text='Describe the data collection process, highlighting areas '
                  'where bias and assumptions made during the collection '
                  'can impact how the data are interpreted',
        blank=True,
        null=True,
    )
    # Visualizations attached via the ordered IndicatorDataViz through-model.
    vizes = models.ManyToManyField('DataViz', related_name='new_indicator', through='IndicatorDataViz')

    @property
    def data_vizes(self):
        # Order by the through relation so the through-model's 'order' field
        # applies -- NOTE(review): confirm this produces the intended sort.
        return self.vizes.order_by('dataviz_to_indicator')

    def __str__(self):
        return f'{self.name} ({self.id})'

    @property
    def hierarchies(self):
        """ Collect possible hierarchies. """
        # One entry per subdomain this indicator belongs to, carrying both
        # the subdomain and its parent domain.
        result = []
        for subdomainThrough in SubdomainIndicator.objects.filter(indicator=self):
            subdomain = subdomainThrough.subdomain
            result.append({'domain': subdomain.domain, 'subdomain': subdomain})
        return result
class Value(models.Model):
    """One observed value of a Variable for a geographic region."""
    geog = models.ForeignKey('geo.AdminRegion', on_delete=models.CASCADE, db_index=True)
    variable = models.ForeignKey('Variable', on_delete=models.CASCADE, db_index=True)
    value = models.FloatField(null=True, blank=True)
    # Presumably the margin of error for `value` -- TODO confirm semantics.
    margin = models.FloatField(null=True, blank=True)

    def __str__(self):
        return f'{self.variable}/{self.geog} ({self.value}, {self.margin})'
| 34.045802
| 111
| 0.669731
|
4a02777d2326105d0ac3d482dc946eb9bce13e77
| 28,850
|
py
|
Python
|
superset/models/core.py
|
ds2g/superset
|
0cfb321e7ebfb5c26092f6493df890fed7dbc787
|
[
"Apache-2.0"
] | null | null | null |
superset/models/core.py
|
ds2g/superset
|
0cfb321e7ebfb5c26092f6493df890fed7dbc787
|
[
"Apache-2.0"
] | null | null | null |
superset/models/core.py
|
ds2g/superset
|
0cfb321e7ebfb5c26092f6493df890fed7dbc787
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""A collection of ORM sqlalchemy models for Superset"""
import enum
import json
import logging
import textwrap
from ast import literal_eval
from contextlib import closing
from copy import deepcopy
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type
import numpy
import pandas as pd
import sqlalchemy as sqla
import sqlparse
from flask import g, request
from flask_appbuilder import Model
from sqlalchemy import (
Boolean,
Column,
create_engine,
DateTime,
ForeignKey,
Integer,
MetaData,
String,
Table,
Text,
)
from sqlalchemy.engine import Connection, Dialect, Engine, url
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.exc import ArgumentError
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.pool import NullPool
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import expression, Select
from superset import app, db_engine_specs, is_feature_enabled
from superset.db_engine_specs.base import TimeGrain
from superset.extensions import cache_manager, encrypted_field_factory, security_manager
from superset.models.helpers import AuditMixinNullable, ImportExportMixin
from superset.models.tags import FavStarUpdater
from superset.result_set import SupersetResultSet
from superset.utils import cache as cache_util, core as utils
from superset.utils.memoized import memoized
# Module-level shortcuts into the application config.
config = app.config
custom_password_store = config["SQLALCHEMY_CUSTOM_PASSWORD_STORE"]
stats_logger = config["STATS_LOGGER"]
log_query = config["QUERY_LOGGER"]
metadata = Model.metadata  # pylint: disable=no-member
logger = logging.getLogger(__name__)

# Placeholder substituted for real passwords in stored/displayed URIs.
PASSWORD_MASK = "X" * 10

# Optional hook that can rewrite connection URL/params before engine creation.
DB_CONNECTION_MUTATOR = config["DB_CONNECTION_MUTATOR"]
class Url(Model, AuditMixinNullable):
    """Used for the short url feature"""

    __tablename__ = "url"
    id = Column(Integer, primary_key=True)
    url = Column(Text)
    # Short identifier associated with the stored url.
    slug = Column(String(255))
class KeyValue(Model):  # pylint: disable=too-few-public-methods
    """Used for any type of key-value store"""

    __tablename__ = "keyvalue"
    # No explicit key column -- the row id evidently serves as the key.
    id = Column(Integer, primary_key=True)
    value = Column(Text, nullable=False)
class CssTemplate(Model, AuditMixinNullable):
    """CSS templates for dashboards"""

    __tablename__ = "css_templates"
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250))
    # Raw CSS content; defaults to an empty stylesheet.
    css = Column(Text, default="")
class ConfigurationMethod(str, enum.Enum):
    """How a database connection was configured (str-valued for easy storage)."""

    SQLALCHEMY_FORM = "sqlalchemy_form"
    DYNAMIC_FORM = "dynamic_form"
class Database(
Model, AuditMixinNullable, ImportExportMixin
): # pylint: disable=too-many-public-methods
"""An ORM object that stores Database related information"""
__tablename__ = "dbs"
type = "table"
__table_args__ = (UniqueConstraint("database_name"),)
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
database_name = Column(String(250), unique=True, nullable=False)
sqlalchemy_uri = Column(String(1024), nullable=False)
password = Column(encrypted_field_factory.create(String(1024)))
cache_timeout = Column(Integer)
select_as_create_table_as = Column(Boolean, default=False)
expose_in_sqllab = Column(Boolean, default=True)
configuration_method = Column(
String(255), server_default=ConfigurationMethod.SQLALCHEMY_FORM.value
)
allow_run_async = Column(Boolean, default=False)
allow_file_upload = Column(Boolean, default=False)
allow_ctas = Column(Boolean, default=False)
allow_cvas = Column(Boolean, default=False)
allow_dml = Column(Boolean, default=False)
force_ctas_schema = Column(String(250))
allow_multi_schema_metadata_fetch = Column( # pylint: disable=invalid-name
Boolean, default=False
)
extra = Column(
Text,
default=textwrap.dedent(
"""\
{
"metadata_params": {},
"engine_params": {},
"metadata_cache_timeout": {},
"schemas_allowed_for_file_upload": []
}
"""
),
)
encrypted_extra = Column(encrypted_field_factory.create(Text), nullable=True)
impersonate_user = Column(Boolean, default=False)
server_cert = Column(encrypted_field_factory.create(Text), nullable=True)
export_fields = [
"database_name",
"sqlalchemy_uri",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_ctas",
"allow_cvas",
"allow_file_upload",
"extra",
]
extra_import_fields = ["password"]
export_children = ["tables"]
def __repr__(self) -> str:
return self.name
@property
def name(self) -> str:
return self.verbose_name if self.verbose_name else self.database_name
@property
def allows_subquery(self) -> bool:
return self.db_engine_spec.allows_subqueries
@property
def function_names(self) -> List[str]:
try:
return self.db_engine_spec.get_function_names(self)
except Exception as ex: # pylint: disable=broad-except
# function_names property is used in bulk APIs and should not hard crash
# more info in: https://github.com/apache/superset/issues/9678
logger.error(
"Failed to fetch database function names with error: %s",
str(ex),
exc_info=True,
)
return []
@property
def allows_cost_estimate(self) -> bool:
extra = self.get_extra() or {}
cost_estimate_enabled: bool = extra.get("cost_estimate_enabled") # type: ignore
return (
self.db_engine_spec.get_allow_cost_estimate(extra) and cost_estimate_enabled
)
@property
def allows_virtual_table_explore(self) -> bool:
extra = self.get_extra()
return bool(extra.get("allows_virtual_table_explore", True))
@property
def explore_database_id(self) -> int:
return self.get_extra().get("explore_database_id", self.id)
@property
def data(self) -> Dict[str, Any]:
return {
"id": self.id,
"name": self.database_name,
"backend": self.backend,
"configuration_method": self.configuration_method,
"allow_multi_schema_metadata_fetch": self.allow_multi_schema_metadata_fetch,
"allows_subquery": self.allows_subquery,
"allows_cost_estimate": self.allows_cost_estimate,
"allows_virtual_table_explore": self.allows_virtual_table_explore,
"explore_database_id": self.explore_database_id,
"parameters": self.parameters,
"parameters_schema": self.parameters_schema,
}
@property
def unique_name(self) -> str:
return self.database_name
@property
def url_object(self) -> URL:
return make_url(self.sqlalchemy_uri_decrypted)
@property
def backend(self) -> str:
sqlalchemy_url = make_url(self.sqlalchemy_uri_decrypted)
return sqlalchemy_url.get_backend_name() # pylint: disable=no-member
@property
def parameters(self) -> Dict[str, Any]:
uri = make_url(self.sqlalchemy_uri_decrypted)
encrypted_extra = self.get_encrypted_extra()
try:
parameters = self.db_engine_spec.get_parameters_from_uri(uri, encrypted_extra=encrypted_extra) # type: ignore # pylint: disable=line-too-long,useless-suppression
except Exception: # pylint: disable=broad-except
parameters = {}
return parameters
@property
def parameters_schema(self) -> Dict[str, Any]:
try:
parameters_schema = self.db_engine_spec.parameters_json_schema() # type: ignore
except Exception: # pylint: disable=broad-except
parameters_schema = {}
return parameters_schema
@property
def metadata_cache_timeout(self) -> Dict[str, Any]:
return self.get_extra().get("metadata_cache_timeout", {})
@property
def schema_cache_enabled(self) -> bool:
return "schema_cache_timeout" in self.metadata_cache_timeout
@property
def schema_cache_timeout(self) -> Optional[int]:
return self.metadata_cache_timeout.get("schema_cache_timeout")
@property
def table_cache_enabled(self) -> bool:
return "table_cache_timeout" in self.metadata_cache_timeout
@property
def table_cache_timeout(self) -> Optional[int]:
return self.metadata_cache_timeout.get("table_cache_timeout")
@property
def default_schemas(self) -> List[str]:
return self.get_extra().get("default_schemas", [])
@property
def connect_args(self) -> Dict[str, Any]:
return self.get_extra().get("engine_params", {}).get("connect_args", {})
@classmethod
def get_password_masked_url_from_uri( # pylint: disable=invalid-name
cls, uri: str
) -> URL:
sqlalchemy_url = make_url(uri)
return cls.get_password_masked_url(sqlalchemy_url)
@classmethod
def get_password_masked_url(cls, masked_url: URL) -> URL:
url_copy = deepcopy(masked_url)
if url_copy.password is not None:
url_copy.password = PASSWORD_MASK
return url_copy
def set_sqlalchemy_uri(self, uri: str) -> None:
conn = sqla.engine.url.make_url(uri.strip())
if conn.password != PASSWORD_MASK and not custom_password_store:
# do not over-write the password with the password mask
self.password = conn.password
conn.password = PASSWORD_MASK if conn.password else None
self.sqlalchemy_uri = str(conn) # hides the password
def get_effective_user(
self, object_url: URL, user_name: Optional[str] = None,
) -> Optional[str]:
"""
Get the effective user, especially during impersonation.
:param object_url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
"""
effective_username = None
if self.impersonate_user:
effective_username = object_url.username
if user_name:
effective_username = user_name
elif (
hasattr(g, "user")
and hasattr(g.user, "username")
and g.user.username is not None
):
effective_username = g.user.username
return effective_username
@memoized(watch=("impersonate_user", "sqlalchemy_uri_decrypted", "extra"))
def get_sqla_engine(
self,
schema: Optional[str] = None,
nullpool: bool = True,
user_name: Optional[str] = None,
source: Optional[utils.QuerySource] = None,
) -> Engine:
extra = self.get_extra()
sqlalchemy_url = make_url(self.sqlalchemy_uri_decrypted)
self.db_engine_spec.adjust_database_uri(sqlalchemy_url, schema)
effective_username = self.get_effective_user(sqlalchemy_url, user_name)
# If using MySQL or Presto for example, will set url.username
# If using Hive, will not do anything yet since that relies on a
# configuration parameter instead.
self.db_engine_spec.modify_url_for_impersonation(
sqlalchemy_url, self.impersonate_user, effective_username
)
masked_url = self.get_password_masked_url(sqlalchemy_url)
logger.debug("Database.get_sqla_engine(). Masked URL: %s", str(masked_url))
params = extra.get("engine_params", {})
if nullpool:
params["poolclass"] = NullPool
connect_args = params.get("connect_args", {})
if self.impersonate_user:
self.db_engine_spec.update_impersonation_config(
connect_args, str(sqlalchemy_url), effective_username
)
if connect_args:
params["connect_args"] = connect_args
params.update(self.get_encrypted_extra())
if DB_CONNECTION_MUTATOR:
if not source and request and request.referrer:
if "/superset/dashboard/" in request.referrer:
source = utils.QuerySource.DASHBOARD
elif "/superset/explore/" in request.referrer:
source = utils.QuerySource.CHART
elif "/superset/sqllab/" in request.referrer:
source = utils.QuerySource.SQL_LAB
sqlalchemy_url, params = DB_CONNECTION_MUTATOR(
sqlalchemy_url, params, effective_username, security_manager, source
)
try:
return create_engine(sqlalchemy_url, **params)
except Exception as ex:
raise self.db_engine_spec.get_dbapi_mapped_exception(ex)
def get_reserved_words(self) -> Set[str]:
return self.get_dialect().preparer.reserved_words
def get_quoter(self) -> Callable[[str, Any], str]:
return self.get_dialect().identifier_preparer.quote
def get_df( # pylint: disable=too-many-locals
self,
sql: str,
schema: Optional[str] = None,
mutator: Optional[Callable[[pd.DataFrame], None]] = None,
) -> pd.DataFrame:
sqls = [str(s).strip(" ;") for s in sqlparse.parse(sql)]
engine = self.get_sqla_engine(schema=schema)
username = utils.get_username()
def needs_conversion(df_series: pd.Series) -> bool:
return (
not df_series.empty
and isinstance(df_series, pd.Series)
and isinstance(df_series[0], (list, dict))
)
def _log_query(sql: str) -> None:
if log_query:
log_query(engine.url, sql, schema, username, __name__, security_manager)
with closing(engine.raw_connection()) as conn:
cursor = conn.cursor()
for sql_ in sqls[:-1]:
_log_query(sql_)
self.db_engine_spec.execute(cursor, sql_)
cursor.fetchall()
_log_query(sqls[-1])
self.db_engine_spec.execute(cursor, sqls[-1])
data = self.db_engine_spec.fetch_data(cursor)
result_set = SupersetResultSet(
data, cursor.description, self.db_engine_spec
)
df = result_set.to_pandas_df()
if mutator:
df = mutator(df)
for col, coltype in df.dtypes.to_dict().items():
if coltype == numpy.object_ and needs_conversion(df[col]):
df[col] = df[col].apply(utils.json_dumps_w_dates)
return df
def compile_sqla_query(self, qry: Select, schema: Optional[str] = None) -> str:
engine = self.get_sqla_engine(schema=schema)
sql = str(qry.compile(engine, compile_kwargs={"literal_binds": True}))
if (
engine.dialect.identifier_preparer._double_percents # pylint: disable=protected-access
):
sql = sql.replace("%%", "%")
return sql
def select_star( # pylint: disable=too-many-arguments
self,
table_name: str,
schema: Optional[str] = None,
limit: int = 100,
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = False,
cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""Generates a ``select *`` statement in the proper dialect"""
eng = self.get_sqla_engine(schema=schema, source=utils.QuerySource.SQL_LAB)
return self.db_engine_spec.select_star(
self,
table_name,
schema=schema,
engine=eng,
limit=limit,
show_cols=show_cols,
indent=indent,
latest_partition=latest_partition,
cols=cols,
)
def apply_limit_to_sql(
self, sql: str, limit: int = 1000, force: bool = False
) -> str:
return self.db_engine_spec.apply_limit_to_sql(sql, limit, self, force=force)
def safe_sqlalchemy_uri(self) -> str:
    # NOTE(review): despite the name, this returns the stored URI verbatim;
    # presumably the password is already masked in `sqlalchemy_uri` or a
    # subclass overrides this — confirm before exposing to users.
    return self.sqlalchemy_uri
@property
def inspector(self) -> Inspector:
    """A fresh SQLAlchemy Inspector bound to this database's engine."""
    engine = self.get_sqla_engine()
    return sqla.inspect(engine)
@cache_util.memoized_func(
    key=lambda self, *args, **kwargs: f"db:{self.id}:schema:None:table_list",
    cache=cache_manager.data_cache,
)
def get_all_table_names_in_database(  # pylint: disable=unused-argument
    self,
    cache: bool = False,
    cache_timeout: Optional[bool] = None,
    force: bool = False,
) -> List[utils.DatasourceName]:
    """Parameters need to be passed as keyword arguments."""
    # cache/cache_timeout/force are consumed by the memoized_func decorator
    if not self.allow_multi_schema_metadata_fetch:
        return []
    return self.db_engine_spec.get_all_datasource_names(self, "table")
@cache_util.memoized_func(
    key=lambda self, *args, **kwargs: f"db:{self.id}:schema:None:view_list",
    cache=cache_manager.data_cache,
)
def get_all_view_names_in_database(  # pylint: disable=unused-argument
    self,
    cache: bool = False,
    cache_timeout: Optional[bool] = None,
    force: bool = False,
) -> List[utils.DatasourceName]:
    """Parameters need to be passed as keyword arguments."""
    # cache/cache_timeout/force are consumed by the memoized_func decorator
    if not self.allow_multi_schema_metadata_fetch:
        return []
    return self.db_engine_spec.get_all_datasource_names(self, "view")
@cache_util.memoized_func(
    key=lambda self, schema, *args, **kwargs: f"db:{self.id}:schema:{schema}:table_list",  # pylint: disable=line-too-long,useless-suppression
    cache=cache_manager.data_cache,
)
def get_all_table_names_in_schema(  # pylint: disable=unused-argument
    self,
    schema: str,
    cache: bool = False,
    cache_timeout: Optional[int] = None,
    force: bool = False,
) -> List[utils.DatasourceName]:
    """Parameters need to be passed as keyword arguments.

    For unused parameters, they are referenced in
    cache_util.memoized_func decorator.

    :param schema: schema name
    :param cache: whether cache is enabled for the function
    :param cache_timeout: timeout in seconds for the cache
    :param force: whether to force refresh the cache
    :return: list of tables
    """
    try:
        tables = self.db_engine_spec.get_table_names(
            database=self, inspector=self.inspector, schema=schema
        )
        return [
            utils.DatasourceName(table=table, schema=schema) for table in tables
        ]
    except Exception as ex:  # pylint: disable=broad-except
        # metadata fetch failures degrade to an empty list instead of a 500
        logger.warning(ex)
        return []
@cache_util.memoized_func(
    key=lambda self, schema, *args, **kwargs: f"db:{self.id}:schema:{schema}:view_list",  # pylint: disable=line-too-long,useless-suppression
    cache=cache_manager.data_cache,
)
def get_all_view_names_in_schema(  # pylint: disable=unused-argument
    self,
    schema: str,
    cache: bool = False,
    cache_timeout: Optional[int] = None,
    force: bool = False,
) -> List[utils.DatasourceName]:
    """Parameters need to be passed as keyword arguments.

    For unused parameters, they are referenced in
    cache_util.memoized_func decorator.

    :param schema: schema name
    :param cache: whether cache is enabled for the function
    :param cache_timeout: timeout in seconds for the cache
    :param force: whether to force refresh the cache
    :return: list of views
    """
    try:
        views = self.db_engine_spec.get_view_names(
            database=self, inspector=self.inspector, schema=schema
        )
        return [utils.DatasourceName(table=view, schema=schema) for view in views]
    except Exception as ex:  # pylint: disable=broad-except
        # metadata fetch failures degrade to an empty list instead of a 500
        logger.warning(ex)
        return []
@cache_util.memoized_func(
    key=lambda self, *args, **kwargs: f"db:{self.id}:schema_list",
    cache=cache_manager.data_cache,
)
def get_all_schema_names(  # pylint: disable=unused-argument
    self,
    cache: bool = False,
    cache_timeout: Optional[int] = None,
    force: bool = False,
) -> List[str]:
    """Parameters need to be passed as keyword arguments.

    For unused parameters, they are referenced in
    cache_util.memoized_func decorator.

    :param cache: whether cache is enabled for the function
    :param cache_timeout: timeout in seconds for the cache
    :param force: whether to force refresh the cache
    :return: schema list
    """
    # unlike the table/view listers, errors here propagate to the caller
    return self.db_engine_spec.get_schema_names(self.inspector)
@property
def db_engine_spec(self) -> Type[db_engine_specs.BaseEngineSpec]:
    """The engine-spec class matching this database's backend."""
    return self.get_db_engine_spec_for_backend(self.backend)
@classmethod
@memoized
def get_db_engine_spec_for_backend(
    cls, backend: str
) -> Type[db_engine_specs.BaseEngineSpec]:
    """Look up the engine spec for *backend*, defaulting to BaseEngineSpec (memoized)."""
    engines = db_engine_specs.get_engine_specs()
    return engines.get(backend, db_engine_specs.BaseEngineSpec)
def grains(self) -> Tuple[TimeGrain, ...]:
    """Defines time granularity database-specific expressions.

    The idea here is to make it easy for users to change the time grain
    from a datetime (maybe the source grain is arbitrary timestamps, daily
    or 5 minutes increments) to another, "truncated" datetime. Since
    each database has slightly different but similar datetime functions,
    this allows a mapping between database engines and actual functions.
    """
    # delegated entirely to the engine spec for this backend
    return self.db_engine_spec.get_time_grains()
def get_extra(self) -> Dict[str, Any]:
    """Deserialized engine-specific ``extra`` configuration for this database."""
    return self.db_engine_spec.get_extra_params(self)
def get_encrypted_extra(self) -> Dict[str, Any]:
    """Return the deserialized ``encrypted_extra`` JSON blob ({} when unset).

    :raises json.JSONDecodeError: if the stored value is not valid JSON
    """
    encrypted_extra = {}
    if self.encrypted_extra:
        try:
            encrypted_extra = json.loads(self.encrypted_extra)
        except json.JSONDecodeError as ex:
            logger.error(ex, exc_info=True)
            # bug fix: `raise ex` rebinds the exception and loses the implicit
            # context; a bare `raise` re-raises with the original traceback
            raise
    return encrypted_extra
def get_table(self, table_name: str, schema: Optional[str] = None) -> Table:
    """Reflect *table_name* from the live database into a SQLAlchemy Table."""
    extra = self.get_extra()
    # metadata_params from `extra` configure the MetaData (e.g. naming options)
    meta = MetaData(**extra.get("metadata_params", {}))
    return Table(
        table_name,
        meta,
        schema=schema or None,
        autoload=True,
        autoload_with=self.get_sqla_engine(),
    )
def get_table_comment(
    self, table_name: str, schema: Optional[str] = None
) -> Optional[str]:
    """The table-level comment, or None when the dialect has none."""
    return self.db_engine_spec.get_table_comment(self.inspector, table_name, schema)
def get_columns(
    self, table_name: str, schema: Optional[str] = None
) -> List[Dict[str, Any]]:
    """Column metadata dicts for *table_name*, via the engine spec."""
    return self.db_engine_spec.get_columns(self.inspector, table_name, schema)
def get_indexes(
    self, table_name: str, schema: Optional[str] = None
) -> List[Dict[str, Any]]:
    """Indexes of *table_name*, normalized by the engine spec."""
    indexes = self.inspector.get_indexes(table_name, schema)
    return self.db_engine_spec.normalize_indexes(indexes)
def get_pk_constraint(
    self, table_name: str, schema: Optional[str] = None
) -> Dict[str, Any]:
    """Primary-key constraint info with values coerced to JSON-safe types."""
    pk_constraint = self.inspector.get_pk_constraint(table_name, schema) or {}
    return {
        key: utils.base_json_conv(value) for key, value in pk_constraint.items()
    }
def get_foreign_keys(
    self, table_name: str, schema: Optional[str] = None
) -> List[Dict[str, Any]]:
    """Foreign-key metadata dicts for *table_name*, straight from the inspector."""
    return self.inspector.get_foreign_keys(table_name, schema)
def get_schema_access_for_file_upload(  # pylint: disable=invalid-name
    self,
) -> List[str]:
    """Schemas the current user may upload files into (config + per-user hook)."""
    allowed_databases = self.get_extra().get("schemas_allowed_for_file_upload", [])

    if isinstance(allowed_databases, str):
        # legacy rows may store the list as a Python-literal string
        allowed_databases = literal_eval(allowed_databases)

    if hasattr(g, "user"):
        # merge in schemas granted by the configurable per-user hook
        extra_allowed_databases = config["ALLOWED_USER_CSV_SCHEMA_FUNC"](
            self, g.user
        )
        allowed_databases += extra_allowed_databases
    return sorted(set(allowed_databases))
@property
def sqlalchemy_uri_decrypted(self) -> str:
    """The connection URI with the real password substituted back in."""
    try:
        conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
    except (ArgumentError, ValueError):
        # if the URI is invalid, ignore and return a placeholder url
        # (so users see 500 less often)
        return "dialect://invalid_uri"
    if custom_password_store:
        # optional hook that resolves passwords from an external store
        conn.password = custom_password_store(conn)
    else:
        conn.password = self.password
    # NOTE(review): mutating URL.password requires SQLAlchemy < 1.4
    # (URL became immutable later) — confirm the pinned version.
    return str(conn)
@property
def sql_url(self) -> str:
    """URL of the SQL Lab endpoint pre-selecting this database."""
    return f"/superset/sql/{self.id}/"
@hybrid_property
def perm(self) -> str:
    """Permission string, e.g. ``[my_db].(id:42)`` (Python-side form)."""
    return f"[{self.database_name}].(id:{self.id})"

@perm.expression  # type: ignore
def perm(cls) -> str:  # pylint: disable=no-self-argument
    # SQL-side expression that builds the same string inside queries
    return (
        "[" + cls.database_name + "].(id:" + expression.cast(cls.id, String) + ")"
    )
def get_perm(self) -> str:
    """Plain-method accessor for the ``perm`` hybrid property."""
    return self.perm  # type: ignore
def has_table(self, table: Table) -> bool:
    """True if *table* (name + optional schema) exists in this database."""
    engine = self.get_sqla_engine()
    return engine.has_table(table.table_name, table.schema or None)
def has_table_by_name(self, table_name: str, schema: Optional[str] = None) -> bool:
    """Like :meth:`has_table` but takes the name/schema directly."""
    engine = self.get_sqla_engine()
    return engine.has_table(table_name, schema)
@classmethod
def _has_view(
    cls,
    conn: Connection,
    dialect: Dialect,
    view_name: str,
    schema: Optional[str] = None,
) -> bool:
    """True if *view_name* is among the dialect's views (False on any error)."""
    view_names: List[str] = []
    try:
        view_names = dialect.get_view_names(connection=conn, schema=schema)
    except Exception as ex:  # pylint: disable=broad-except
        # some dialects cannot list views; degrade to "not found"
        logger.warning(ex)
    return view_name in view_names
def has_view(self, view_name: str, schema: Optional[str] = None) -> bool:
    """True if the view exists; runs _has_view inside an engine callable."""
    engine = self.get_sqla_engine()
    return engine.run_callable(self._has_view, engine.dialect, view_name, schema)
def has_view_by_name(self, view_name: str, schema: Optional[str] = None) -> bool:
    """Alias of :meth:`has_view`, for symmetry with ``has_table_by_name``."""
    return self.has_view(view_name=view_name, schema=schema)
@memoized
def get_dialect(self) -> Dialect:
    """Instantiate (once per Database) the dialect for the decrypted URI."""
    sqla_url = url.make_url(self.sqlalchemy_uri_decrypted)
    return sqla_url.get_dialect()()
# keep FAB permissions in sync whenever a Database row is created or renamed
sqla.event.listen(Database, "after_insert", security_manager.set_perm)
sqla.event.listen(Database, "after_update", security_manager.set_perm)
class Log(Model):  # pylint: disable=too-few-public-methods
    """ORM object used to log Superset actions to the database"""

    __tablename__ = "logs"

    id = Column(Integer, primary_key=True)
    action = Column(String(512))  # name of the logged action/event
    user_id = Column(Integer, ForeignKey("ab_user.id"))
    # NOTE(review): dashboard_id/slice_id are plain ints, not FKs —
    # presumably so log rows survive object deletion; confirm.
    dashboard_id = Column(Integer)
    slice_id = Column(Integer)
    json = Column(Text)  # free-form JSON payload describing the action
    user = relationship(
        security_manager.user_model, backref="logs", foreign_keys=[user_id]
    )
    dttm = Column(DateTime, default=datetime.utcnow)
    duration_ms = Column(Integer)
    referrer = Column(String(1024))
class FavStarClassName(str, enum.Enum):
    # values must match the class_name strings stored in the favstar table
    # (note the historical lowercase "slice" vs capitalized "Dashboard")
    CHART = "slice"
    DASHBOARD = "Dashboard"
class FavStar(Model):  # pylint: disable=too-few-public-methods
    """A user's 'favorite' mark on an object (chart or dashboard)."""

    __tablename__ = "favstar"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("ab_user.id"))
    class_name = Column(String(50))  # FavStarClassName value of the target type
    obj_id = Column(Integer)  # id of the favorited object (no FK: polymorphic)
    dttm = Column(DateTime, default=datetime.utcnow)
# events for updating tags
if is_feature_enabled("TAGGING_SYSTEM"):
    # mirror favorite/unfavorite actions into the tagging system
    sqla.event.listen(FavStar, "after_insert", FavStarUpdater.after_insert)
    sqla.event.listen(FavStar, "after_delete", FavStarUpdater.after_delete)
| 35.705446
| 174
| 0.654523
|
4a02779bb071af9ef7c365c178574a6675c0e031
| 19,387
|
py
|
Python
|
pycoind/protocol/format.py
|
TemplarJQ/pycoindAnnotationVersion
|
d33b4c455c4654e0fde35500d230437210d5325e
|
[
"MIT"
] | null | null | null |
pycoind/protocol/format.py
|
TemplarJQ/pycoindAnnotationVersion
|
d33b4c455c4654e0fde35500d230437210d5325e
|
[
"MIT"
] | null | null | null |
pycoind/protocol/format.py
|
TemplarJQ/pycoindAnnotationVersion
|
d33b4c455c4654e0fde35500d230437210d5325e
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import struct
from .. import util
def parse_variable_set(data, kind):
    '''Reads a set of Parsable objects prefixed with a VarInteger.

    Any object can be used that supports parse(data), which returns
    a tuple of (bytes_consumed, value).'''

    (offset, count) = FormatTypeVarInteger.parse(data)

    items = [ ]
    for _ in range(count):
        (consumed, item) = kind.parse(data[offset:])
        items.append(item)
        offset += consumed

    return (offset, items)
class ParameterException(Exception):
    '''Raised when a property value fails validation or parsing.

    Records the property name, the offending value and its kind
    (defaulting to the value's type).'''

    def __init__(self, name, value, kind = None):
        kind = type(value) if kind is None else kind
        message = "Bad Parameter: %s = %r (%s)" % (name, value, kind)
        Exception.__init__(self, message)
        (self._name, self._value, self._kind) = (name, value, kind)

    name = property(lambda s: s._name)
    value = property(lambda s: s._value)
    kind = property(lambda s: s._kind)
# This metaclass will convert all the (name, kind) pairs in properties into
# class properties and if the base class has a register(cls) method, call it.
class _AutoPopulateAndRegister(type):
    def __init__(cls, name, bases, dct):
        super(_AutoPopulateAndRegister, cls).__init__(name, bases, dct)

        # expose each declared property name as a read-only attribute that
        # reads from the instance's _properties dict
        for (key, vt) in cls.properties:
            def get_parameter(k):
                # k is bound per call, so each property closes over its own key
                return property(lambda s: s._properties[k])
            setattr(cls, key, get_parameter(key))

        # remember the class name for CompoundType.__str__
        cls._name = name

        # register with the first base that has register(); registration only
        # actually happens when that base defines a falsy do_not_register flag
        for base in bases:
            if hasattr(base, 'register'):
                if hasattr(base, 'do_not_register') and not base.do_not_register:
                    base.register(cls)
                break
#import time
#profile = dict(count = 0)
class CompoundType(object):
    """Base class for binary-serializable structures.

    Subclasses declare a ``properties`` list of ``(name, format_type)``
    pairs; the (Python 2) metaclass turns each name into a read-only
    attribute and registers the subclass with its base, if supported.
    """

    properties = []

    def __init__(self, *args, **kw):
        keys = [k for (k, t) in self.properties]

        # convert the positional arguments into keywords
        params = dict(zip(keys, args))

        # did we specify a parameter both positionally and as a keyword?
        for k in kw:
            if k in params:
                raise TypeError('got multiple values for keyword argument %r' % k)

        # do we have any unknown keywords?
        keys = set(keys)
        for k in kw:
            if k not in keys:
                raise TypeError('got an unexpected keyword argument %r' % k)

        # add in the keyword arguments
        params.update(kw)

        # check for the correct number of properties
        if len(params) != len(keys):
            # bug fix: the suffix was computed by comparing an empty string
            # to 1 (always true, so always plural); pluralize on the count
            suffix = '' if len(keys) == 1 else 's'
            raise TypeError("takes exactly %d argument%s (%d given)" % (len(keys), suffix, len(params)))

        # verify all properties and convert to immutable types.
        for (key, vt) in self.properties:
            value = vt.validate(params[key])
            if value is None:
                raise ParameterException(key, params[key])
            params[key] = value

        self._properties = params

    __metaclass__ = _AutoPopulateAndRegister

    def binary(self):
        'Returns the binary representation of the message.'
        return "".join(vt.binary(self._properties[key]) for (key, vt) in self.properties)

    @classmethod
    def parse(cls, data):
        '''Parses data, returning (bytes_consumed, instance).

        The instance is created without calling __init__ so the already
        type-checked values are not re-validated.'''
        kw = dict()
        offset = 0
        for (key, vt) in cls.properties:
            try:
                (length, kw[key]) = vt.parse(data[offset:])
                offset += length
            except Exception as e:
                raise ParameterException(key, data[offset:], vt)

        # create without __init__ (would unnecessarily verify the parameters)
        self = cls.__new__(cls)
        self._properties = kw
        return (offset, self)

    def __str__(self):
        output = [self._name]
        for (key, vt) in self.properties:
            output.append('%s=%s' % (key, vt.str(self._properties[key])))
        return '<%s>' % " ".join(output)
class FormatType(object):
    """Abstract base class for all serialization formats.

    Subclasses implement validate/binary/parse; str() and __str__ have
    sensible defaults."""

    def validate(self, obj):
        '''Returns the object to store if obj is valid for this type, otherwise
        None. The type returned should be immutable.'''
        # bug fix: `raise NotImplemented()` raised a TypeError (the
        # NotImplemented singleton is not callable); raise the proper
        # NotImplementedError exception instead
        raise NotImplementedError()

    def binary(self, obj):
        'Returns the binary form for this type.'
        raise NotImplementedError()

    def parse(self, data):
        '''Returns a (length, value) tuple where length is the amount of
        data that was consumed.'''
        raise NotImplementedError()

    def str(self, obj):
        return str(obj)

    def __str__(self):
        cls = str(self.__class__).split('.')[-1].strip(">'")
        return '<%s>' % cls
class FormatTypeCompoundType(object):
    '''Adapter that lets a CompoundType subclass act as a format type.

    Subclasses set expected_type; validation is an isinstance check and
    (de)serialization is delegated to the wrapped compound type.'''

    expected_type = None

    @classmethod
    def validate(cls, obj):
        if not isinstance(obj, cls.expected_type):
            return None
        return obj

    @staticmethod
    def binary(obj):
        return obj.binary()

    @classmethod
    def parse(cls, data):
        return cls.expected_type.parse(data)

    @classmethod
    def str(cls, obj):
        return str(obj)
class FormatTypeOptional(FormatType):
    '''Wraps another format type, substituting a default value.

    validate() falls back to the default when the child rejects (or raises
    on) the value; parse() yields (0, default) when the child cannot parse
    the data, consuming no bytes.'''

    def __init__(self, child, default):
        self._child = child
        self._default = default

    def validate(self, obj):
        try:
            value = self._child.validate(obj)
            if value is not None:
                return value
        except Exception:
            # bug fix: this used to print() the exception, spamming stdout
            # during normal optional-field validation; swallow it silently
            pass
        return self._default

    def binary(self, obj):
        return self._child.binary(obj)

    def parse(self, data):
        try:
            return self._child.parse(data)
        except Exception:
            pass
        return (0, self._default)

    def __str__(self):
        return '<FormatTypeOptional child=%s default=%s>' % (self._child, self._default)

    def str(self, obj):
        return self._child.str(obj)
# Simple formats (don't use any CompoundTypes nor FormatTypes)
class FormatTypeNumber(FormatType):
    """Number format.

    Allows the object type to be an int (and optionally float) packed with
    the given endianness and struct format (default: little endian, signed
    4-byte integer).

    Possible Formats:
      b, B - signed, unsigned 1-byte char
      h, H - signed, unsigned 2-byte short
      i, I - signed, unsigned 4-byte integer
      q, Q - signed, unsigned 8-byte integer"""

    def __init__(self, format = 'i', big_endian = False, allow_float = False):
        if format not in self._ranges:
            raise ValueError('invalid format type: %s' % format)
        self._format = {True: '>', False: '<'}[big_endian] + format
        self._allow_float = allow_float

    # valid [min, max) value range for each struct format code
    _ranges = dict(
        b = (-128, 128),
        B = (0, 256),
        h = (-32768, 32768),
        H = (0, 65536),
        i = (-2147483648, 2147483648),
        I = (0, 4294967296),
        q = (-9223372036854775808, 9223372036854775808),
        Q = (0, 18446744073709551616)
    )

    def validate(self, obj):
        # check type; floats pass only when explicitly allowed.
        # (the old `(int, int)` tuple was a mangled 2to3 artifact of
        # `(int, long)`; a single int check is equivalent)
        if not (self._allow_float and isinstance(obj, float)):
            if not isinstance(obj, int):
                return None

        # check valid range
        (min_value, max_value) = self._ranges[self._format[1]]
        if min_value <= obj < max_value:
            return obj
        return None

    def binary(self, obj):
        # floats are truncated to int before packing
        return struct.pack(self._format, int(obj))

    def parse(self, data):
        length = dict(b = 1, h = 2, i = 4, q = 8)[self._format.lower()[-1]]
        return (length, struct.unpack(self._format, data[:length])[0])

    def __str__(self):
        # bug fix: this used to reference a nonexistent _expected_type and
        # pass a 2-tuple to a one-slot format string, raising TypeError
        return '<FormatTypeNumber format=%s>' % self._format
class FormatTypeVarInteger(FormatType):
    """Bitcoin variable-length integer (CompactSize).

    Values below 0xfd are one byte; larger values get a 0xfd/0xfe/0xff
    prefix followed by a little-endian 2/4/8-byte integer."""

    @staticmethod
    def validate(obj):
        if isinstance(obj, int):
            return obj
        return None

    @staticmethod
    def binary(obj):
        # struct.pack is used for the prefix byte too, so the result is a
        # consistent bytes/str on both Python 3 and Python 2 (chr() + pack
        # used to mix str and bytes on Python 3)
        if obj < 0xfd:
            return struct.pack('<B', obj)
        elif obj <= 0xffff:
            # bug fix: the boundary values 0xffff / 0xffffffff previously
            # fell through to a wider, non-canonical encoding
            return struct.pack('<BH', 0xfd, obj)
        elif obj <= 0xffffffff:
            return struct.pack('<BI', 0xfe, obj)
        return struct.pack('<BQ', 0xff, obj)

    @staticmethod
    def parse(data):
        value = data[0]
        if not isinstance(value, int):
            # Python 2: indexing a byte string yields a 1-char str
            value = ord(value)
        if value == 0xfd:
            return (3, struct.unpack('<H', data[1:3])[0])
        elif value == 0xfe:
            return (5, struct.unpack('<I', data[1:5])[0])
        elif value == 0xff:
            # bug fix: this branch previously re-tested 0xfd, so 8-byte
            # varints were mis-parsed as the literal value 255
            return (9, struct.unpack('<Q', data[1:9])[0])
        return (1, value)

    def str(self, obj):
        return str(obj)
# @TODO: test ipv6...
class FormatTypeIPAddress(FormatType):
@staticmethod
def _ipv4_groups(obj):
# convert each group to its value
try:
groups = map(int, obj.split('.'))
except ValueError as e:
return None
# too many or not enough groups
if len(groups) != 4:
return None
# is each group in the correct range?
for group in groups:
if not (0x00 <= group <= 0xff):
return None
return groups
@staticmethod
def _ipv6_groups(obj):
# multiple double-colons or more than 8 groups; bad address
objs = obj.split(':')
if objs.count('') > 1 or len(objs) > 8:
return None
# calculate each group's value
groups = [ ]
for group in objs:
if group == '':
groups.extend([ 0 ] * (8 - len(objs)))
else:
groups.append(int(group, 16))
# is each group in the correct range?
for group in groups:
if not (0x0000 <= group <= 0xffff):
return None
return groups
@staticmethod
def validate(obj):
if not isinstance(obj, str):
return None
if FormatTypeIPAddress._ipv4_groups(obj) is not None:
return obj
if FormatTypeIPAddress._ipv6_groups(obj) is not None:
return obj
return None
@staticmethod
def parse(data):
if data[0:10] == (chr(0) * 10) and data[10:12] == (chr(255) * 2):
return (16, '.'.join(str(i) for i in struct.unpack('>BBBB', data[12:16])))
return (16, ':'.join(("%x" % i) for i in struct.unpack('>HHHHHHHH', data[:16])))
def binary(self, obj):
groups = self._ipv4_groups(obj)
if groups is not None:
return (chr(0) * 10) + (chr(255) * 2) + struct.pack('>BBBB', * groups)
groups = self._ipv6_groups(obj)
if groups is not None:
return struct.pack('>HHHHHHHH', *groups)
raise Exception('should not be able to reach here')
class FormatTypeBytes(FormatType):
    '''String format.

    Accepts only strings of exactly the configured fixed length.'''

    def __init__(self, length):
        self._length = length

    def validate(self, obj):
        if not isinstance(obj, str):
            return None
        if len(obj) != self._length:
            return None
        return obj

    def binary(self, obj):
        return obj

    def parse(self, data):
        chunk = data[:self._length]
        return (self._length, chunk)

    def str(self, obj):
        return '0x' + obj.encode('hex')

    def __str__(self):
        return '<FormatTypeBytes length=%d>' % self._length
class FormatTypeVarString(FormatType):
    '''VarString format.

    A VarInteger length prefix followed by the string bytes; the value may
    have any length.'''

    @staticmethod
    def validate(obj):
        return obj if isinstance(obj, str) else None

    @staticmethod
    def binary(obj):
        prefix = FormatTypeVarInteger.binary(len(obj))
        return prefix + obj

    @staticmethod
    def parse(data):
        (consumed, length) = FormatTypeVarInteger.parse(data)
        payload = data[consumed:consumed + length]
        # len(payload) may be short if data was truncated
        return (consumed + len(payload), payload)

    def str(self, obj):
        return repr(obj)
class FormatTypeArray(FormatType):
    '''Array format.

    A VarInteger-prefixed sequence of child_type values with optional
    min_length/max_length bounds. Validation returns a tuple so the stored
    structure is immutable.'''

    def __init__(self, child_type, min_length = None, max_length = None):
        self._child_type = child_type
        self._min_length = min_length
        self._max_length = max_length

    def validate(self, obj):
        if not isinstance(obj, (list, tuple)):
            return None
        if self._min_length and len(obj) < self._min_length:
            return None
        if self._max_length and len(obj) > self._max_length:
            return None

        validated = [self._child_type.validate(item) for item in obj]
        if None in validated:
            return None
        return tuple(validated)

    def binary(self, obj):
        serialized = [self._child_type.binary(item) for item in obj]
        return FormatTypeVarInteger.binary(len(obj)) + "".join(serialized)

    def parse(self, data):
        return parse_variable_set(data, self._child_type)

    def str(self, obj):
        return "[%s]" % ", ".join(self._child_type.str(item) for item in obj)

    def __str__(self):
        return '<FormatTypeArray child=%s length=[%s, %s]>' % (self._child_type, self._min_length, self._max_length)
#class FormatTypeRemaining(FormatType):
# def validate(self, obj):
# if isinstance(obj, str):
# return obj
# return None
#
# def binary(self, obj):
# return obj
#
# def parse(self, data):
# return (len(data), data)
#
# def str(self, obj):
# return '0x' + obj.encode('hex')
# Network Address types and format
class NetworkAddress(CompoundType):
    # wire layout: uint32 timestamp, uint64 services bitfield,
    # 16-byte IPv6/IPv4-mapped address, big-endian uint16 port
    properties = [
        ('timestamp', FormatTypeNumber('I', allow_float = True)),
        ('services', FormatTypeNumber('Q')),
        ('address', FormatTypeIPAddress()),
        ('port', FormatTypeNumber('H', big_endian = True)),
    ]
class FormatTypeNetworkAddress(FormatTypeCompoundType):
    '''NetworkAddress format.

    The property value must be a NetworkAddress.'''

    expected_type = NetworkAddress
class FormatTypeNetworkAddressWithoutTimestamp(FormatTypeNetworkAddress):
    '''NetworkAddress format without the leading timestamp.

    The property value must be a NetworkAddress. The timestamp will be
    zero when deserialized and will be omitted when serialized.'''

    @classmethod
    def parse(cls, data):
        # splice a zero timestamp in front so the full parser can be reused.
        # bug fix: use a bytes literal — chr(0) * 4 cannot be concatenated
        # with Python 3 bytes (b'...' is also a valid Python 2 str)
        (consumed, obj) = FormatTypeNetworkAddress.parse(b'\x00' * 4 + data)
        return (consumed - 4, obj)

    def binary(self, obj):
        # serialize in full, then drop the 4 timestamp bytes
        return FormatTypeNetworkAddress.binary(obj)[4:]
# Inventory Vectors type and format
class InventoryVector(CompoundType):
    # wire layout: uint32 object type followed by a 32-byte hash
    properties = [
        ('object_type', FormatTypeNumber('I')),
        ('hash', FormatTypeBytes(32)),
    ]
class FormatTypeInventoryVector(FormatTypeCompoundType):
    '''InventoryVector format.

    The property value must be an InventoryVector.'''

    expected_type = InventoryVector
# Txn types and formats
class OutPoint(CompoundType):
    # reference to a specific output (hash, index) of a previous transaction
    properties = [
        ('hash', FormatTypeBytes(32)),
        ('index', FormatTypeNumber('I')),
    ]

    def __hash__(self):
        # hashable so outpoints can key dicts/sets
        return hash((self.hash, self.index))

    def __eq__(self, other):
        # NOTE(review): __ne__ is not defined; under Python 2 `!=` falls
        # back to identity comparison — confirm callers never rely on it
        if not isinstance(other, OutPoint):
            return False
        return (self.hash == other.hash) and (self.index == other.index)
class FormatTypeOutPoint(FormatTypeInventoryVector):
    # the property value must be an OutPoint
    expected_type = OutPoint
class TxnIn(CompoundType):
    # transaction input: previous outpoint, scriptSig, sequence number
    properties = [
        ('previous_output', FormatTypeOutPoint()),
        ('signature_script', FormatTypeVarString()),
        ('sequence', FormatTypeNumber('I')),
    ]
class FormatTypeTxnIn(FormatTypeCompoundType):
    """TxnIn format.

    The property value must be a TxnIn."""

    expected_type = TxnIn
class TxnOut(CompoundType):
    # transaction output: signed 64-bit value plus scriptPubKey
    properties = [
        ('value', FormatTypeNumber('q')),
        ('pk_script', FormatTypeVarString()),
    ]
class FormatTypeTxnOut(FormatTypeCompoundType):
    '''TxnOut format.

    The property value must be a TxnOut.'''

    expected_type = TxnOut
class Txn(CompoundType):
    # full transaction: version, inputs (>=1), outputs (>=1), lock time
    properties = [
        ('version', FormatTypeNumber('I')),
        ('tx_in', FormatTypeArray(FormatTypeTxnIn, 1)),
        ('tx_out', FormatTypeArray(FormatTypeTxnOut, 1)),
        ('lock_time', FormatTypeNumber('I')),
    ]

    @property
    def hash(self):
        # sha256d of the serialized txn, cached in _properties under a
        # dunder-style key that cannot collide with declared property names
        if '__hash' not in self._properties:
            self._properties['__hash'] = util.sha256d(self.binary())
        return self._properties['__hash']
class FormatTypeTxn(FormatTypeInventoryVector):
    '''Txn format.

    The property value must be a Txn.'''

    expected_type = Txn
# Block Header type and format
class BlockHeader(CompoundType):
    # 80-byte block header fields plus a trailing VarInteger txn count
    properties = [
        ('version', FormatTypeNumber('I')),
        ('prev_block', FormatTypeBytes(32)),
        ('merkle_root', FormatTypeBytes(32)),
        ('timestamp', FormatTypeNumber('I', allow_float = True)),
        ('bits', FormatTypeNumber('I')),
        ('nonce', FormatTypeNumber('I')),
        ('txn_count', FormatTypeVarInteger()),
    ]

    @staticmethod
    def from_block(block):
        # alternate constructor from a higher-level block object
        return BlockHeader(block.version, block.previous_hash,
                           block.merkle_root, block.timestamp,
                           block.bits, block.nonce,
                           len(block.transactions))

    @property
    def hash(self):
        # sha256d over the first 80 bytes only (txn_count is not hashed),
        # cached in _properties under a non-colliding key
        if '__hash' not in self._properties:
            self._properties['__hash'] = util.sha256d(self.binary()[:80])
        return self._properties['__hash']
class FormatTypeBlockHeader(FormatTypeInventoryVector):
    '''BlockHeader format.

    The property value must be a BlockHeader.'''

    expected_type = BlockHeader
| 27.695714
| 116
| 0.598803
|
4a0277d8203d3352a90dbb6cb0beac6b99cf85e5
| 598
|
py
|
Python
|
Mission_to_Mars/app.py
|
cceni03/Web-Scraping-Challenge
|
646da11477fbbfd9aa4a1c9900dd80d0b28199d5
|
[
"ADSL"
] | null | null | null |
Mission_to_Mars/app.py
|
cceni03/Web-Scraping-Challenge
|
646da11477fbbfd9aa4a1c9900dd80d0b28199d5
|
[
"ADSL"
] | null | null | null |
Mission_to_Mars/app.py
|
cceni03/Web-Scraping-Challenge
|
646da11477fbbfd9aa4a1c9900dd80d0b28199d5
|
[
"ADSL"
] | null | null | null |
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)

# Create Connection Variable
# connects to the local `mars_app` Mongo database; collections are
# accessed lazily through mongo.db.<collection_name>
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
@app.route("/")
def index():
    """Render the homepage from the latest scraped Mars data."""
    # find_one returns None when /scrape has never been run;
    # the template must tolerate a missing document
    mars_dict = mongo.db.mars_dict.find_one()
    return render_template("index.html", mars=mars_dict)
@app.route("/scrape")
def scrape():
    """Run the scraper, upsert the single Mars document, then redirect home."""
    mars_dict = mongo.db.mars_dict
    mars_data = scrape_mars.scrape()
    # bug fix: Collection.update() is deprecated and removed in PyMongo 4;
    # replace_one with an empty filter performs the same whole-document upsert
    mars_dict.replace_one({}, mars_data, upsert=True)
    return redirect("/", code=302)
if __name__ == "__main__":
    # debug server is for local development only
    app.run(debug=True)
| 24.916667
| 62
| 0.720736
|
4a0278914e11430cad7015bb090aaf823350d284
| 16,812
|
py
|
Python
|
toontown/minigame/DropPlacer.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 99
|
2019-11-02T22:25:00.000Z
|
2022-02-03T03:48:00.000Z
|
toontown/minigame/DropPlacer.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 42
|
2019-11-03T05:31:08.000Z
|
2022-03-16T22:50:32.000Z
|
toontown/minigame/DropPlacer.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 57
|
2019-11-03T07:47:37.000Z
|
2022-03-22T00:41:49.000Z
|
from direct.showbase.RandomNumGen import RandomNumGen
from . import CatchGameGlobals
from . import DropScheduler
from toontown.parties.PartyGlobals import CatchActivityDuration as PartyCatchDuration
class DropPlacer:
    """Decides when and what falls during the catch minigame.

    Wraps a DropScheduler for timing and walks the dropTypes list for the
    object type; subclasses decide placement via getNextDrop."""

    def __init__(self, game, numPlayers, dropTypes, startTime = None):
        self.game = game
        self.numPlayers = numPlayers
        self.dropTypes = dropTypes
        self.dtIndex = 0
        self._createScheduler(startTime)
        self._createRng()

    def _createScheduler(self, startTime):
        # scheduler parameters all come from the game's tuning constants
        self.scheduler = DropScheduler.DropScheduler(CatchGameGlobals.GameDuration, self.game.FirstDropDelay, self.game.DropPeriod, self.game.MaxDropDuration, self.game.FasterDropDelay, self.game.FasterDropPeriodMult, startTime=startTime)

    def _createRng(self):
        self.rng = self.game.randomNumGen

    def skipPercent(self, percent):
        # keep the drop-type cursor in step with the skipped schedule slots
        skipped = self.scheduler.skipPercent(percent)
        self.dtIndex += skipped
        return skipped

    def doneDropping(self, continuous = None):
        return self.scheduler.doneDropping(continuous)

    def getDuration(self):
        return self.scheduler.getDuration()

    def getT(self):
        return self.scheduler.getT()

    def stepT(self):
        self.scheduler.stepT()

    def getNextDropTypeName(self):
        # consume the next scheduled type, defaulting past the end of the list
        if self.dtIndex < len(self.dropTypes):
            typeName = self.dropTypes[self.dtIndex]
        else:
            self.game.notify.debug('warning: defaulting to anvil')
            typeName = 'anvil'
        self.dtIndex += 1
        return typeName

    def getRandomColRow(self):
        column = self.rng.randrange(0, self.game.DropColumns)
        row = self.rng.randrange(0, self.game.DropRows)
        return [column, row]

    def getNextDrop(self):
        # placement strategy is supplied by subclasses
        raise RuntimeError('DropPlacer.getNextDrop should never be called')
class RandomDropPlacer(DropPlacer):
    """Drop placer that picks a uniformly random grid cell for every drop."""

    def __init__(self, game, numPlayers, dropTypes, startTime = None):
        DropPlacer.__init__(self, game, numPlayers, dropTypes, startTime=startTime)

    def getNextDrop(self):
        (column, row) = self.getRandomColRow()
        nextDrop = [self.getT(), self.getNextDropTypeName(), [column, row]]
        self.stepT()
        return nextDrop
class RegionDropPlacer(DropPlacer):
DropRegionTables = [[[1,
1,
2,
3,
3],
[1,
1,
2,
3,
3],
[0,
1,
2,
3,
4],
[0,
1,
2,
3,
4],
[0,
1,
2,
3,
4]],
[[1,
2,
2,
3,
3,
4],
[1,
1,
2,
3,
4,
4],
[1,
1,
2,
3,
4,
4],
[0,
1,
2,
3,
4,
5],
[0,
1,
2,
3,
4,
5],
[0,
1,
2,
3,
4,
5]],
[[1,
1,
2,
2,
2,
3,
3],
[1,
1,
2,
2,
2,
3,
3],
[0,
1,
2,
2,
2,
3,
4],
[0,
1,
2,
2,
2,
3,
4],
[0,
1,
2,
2,
2,
3,
4],
[0,
1,
2,
2,
2,
3,
4],
[0,
1,
2,
2,
2,
3,
4]],
[[1,
2,
2,
5,
6,
7,
7,
3],
[1,
1,
2,
5,
6,
7,
3,
3],
[0,
1,
2,
5,
6,
7,
3,
4],
[0,
1,
2,
5,
6,
7,
3,
4],
[0,
1,
2,
5,
6,
7,
3,
4],
[0,
1,
2,
5,
6,
7,
3,
4],
[0,
1,
2,
5,
6,
7,
3,
4],
[0,
0,
1,
5,
6,
3,
4,
4]],
[[1,
2,
2,
5,
8,
6,
7,
7,
3],
[1,
1,
2,
5,
8,
6,
7,
3,
3],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
6,
7,
3,
4],
[0,
0,
1,
5,
8,
6,
3,
4,
4]],
[[1,
2,
2,
5,
8,
8,
6,
7,
7,
3],
[1,
1,
2,
5,
8,
8,
6,
7,
3,
3],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
8,
6,
7,
3,
4],
[0,
0,
1,
5,
8,
8,
6,
3,
4,
4]],
[[1,
2,
2,
5,
8,
10,
9,
6,
7,
7,
3],
[1,
1,
2,
5,
8,
10,
9,
6,
7,
3,
3],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
9,
6,
7,
3,
4],
[0,
0,
1,
5,
8,
10,
9,
6,
3,
4,
4]],
[[1,
2,
2,
5,
8,
10,
10,
9,
6,
7,
7,
3],
[1,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
3],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
10,
9,
6,
7,
3,
4],
[0,
0,
1,
5,
8,
10,
10,
9,
6,
3,
4,
4]],
[[1,
2,
2,
5,
8,
10,
11,
12,
9,
6,
7,
7,
3],
[1,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
3],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
1,
2,
5,
8,
10,
11,
12,
9,
6,
7,
3,
4],
[0,
0,
1,
5,
8,
10,
11,
12,
9,
6,
3,
4,
4]]]
Players2dropTable = [DropRegionTables[0],
DropRegionTables[0],
DropRegionTables[0],
DropRegionTables[1],
DropRegionTables[1],
DropRegionTables[2],
DropRegionTables[3],
DropRegionTables[3],
DropRegionTables[4],
DropRegionTables[4],
DropRegionTables[5],
DropRegionTables[5],
DropRegionTables[5],
DropRegionTables[6],
DropRegionTables[6],
DropRegionTables[7],
DropRegionTables[7],
DropRegionTables[7],
DropRegionTables[8],
DropRegionTables[8]]
@classmethod
def getDropRegionTable(cls, numPlayers):
return cls.Players2dropTable[min(len(cls.Players2dropTable) - 1, numPlayers)]
def __init__(self, game, numPlayers, dropTypes, startTime = None):
DropPlacer.__init__(self, game, numPlayers, dropTypes, startTime=startTime)
self.DropRegionTable = self.getDropRegionTable(self.numPlayers)
self.DropRegion2GridCoordList = {}
for row in range(len(self.DropRegionTable)):
rowList = self.DropRegionTable[row]
for column in range(len(rowList)):
region = rowList[column]
if region not in self.DropRegion2GridCoordList:
self.DropRegion2GridCoordList[region] = []
self.DropRegion2GridCoordList[region].append([row, column])
self.DropRegions = list(self.DropRegion2GridCoordList.keys())
self.DropRegions.sort()
self.emptyDropRegions = self.DropRegions[:]
self.fallingObjs = []
def getNextDrop(self):
t = self.getT()
while len(self.fallingObjs):
landTime, dropRegion = self.fallingObjs[0]
if landTime > t:
break
self.fallingObjs = self.fallingObjs[1:]
if dropRegion not in self.emptyDropRegions:
self.emptyDropRegions.append(dropRegion)
candidates = self.emptyDropRegions
if len(candidates) == 0:
candidates = self.DropRegions
dropRegion = self.rng.choice(candidates)
row, col = self.rng.choice(self.DropRegion2GridCoordList[dropRegion])
dropTypeName = self.getNextDropTypeName()
drop = [t, dropTypeName, [row, col]]
duration = self.game.BaselineDropDuration
self.fallingObjs.append([t + duration, dropRegion])
if dropRegion in self.emptyDropRegions:
self.emptyDropRegions.remove(dropRegion)
self.stepT()
return drop
class PartyRegionDropPlacer(RegionDropPlacer):
def __init__(self, game, numPlayers, generationId, dropTypes, startTime = None):
self.generationId = generationId
RegionDropPlacer.__init__(self, game, numPlayers, dropTypes, startTime=startTime)
def _createRng(self):
self.rng = RandomNumGen(self.generationId + self.game.doId)
def _createScheduler(self, startTime):
self.scheduler = DropScheduler.ThreePhaseDropScheduler(PartyCatchDuration, self.game.FirstDropDelay, self.game.DropPeriod, self.game.MaxDropDuration, self.game.SlowerDropPeriodMult, self.game.NormalDropDelay, self.game.FasterDropDelay, self.game.FasterDropPeriodMult, startTime=startTime)
class PathDropPlacer(DropPlacer):
def __init__(self, game, numPlayers, dropTypes, startTime = None):
DropPlacer.__init__(self, game, numPlayers, dropTypes, startTime=startTime)
self.moves = [[0, -1],
[1, -1],
[1, 0],
[1, 1],
[0, 1],
[-1, 1],
[-1, 0],
[-1, -1]]
self.paths = []
for i in range(self.numPlayers):
dir = self.rng.randrange(0, len(self.moves))
col, row = self.getRandomColRow()
path = {'direction': dir,
'location': [col, row]}
self.paths.append(path)
self.curPathIndex = 0
def getValidDirection(self, col, row, dir):
redirectTop = [(6, 2),
2,
2,
3,
4,
5,
6,
6]
redirectRight = [0,
0,
(0, 4),
4,
4,
5,
6,
7]
redirectBottom = [0,
1,
2,
2,
(2, 6),
6,
6,
7]
redirectLeft = [0,
1,
2,
3,
4,
4,
(4, 0),
0]
redirectTopRight = [6,
(6, 4),
4,
4,
4,
5,
6,
6]
redirectBottomRight = [0,
0,
0,
(0, 6),
6,
6,
6,
7]
redirectBottomLeft = [0,
1,
2,
2,
2,
(2, 0),
0,
0]
redirectTopLeft = [2,
2,
2,
3,
4,
4,
4,
(4, 2)]
tables = [None,
redirectTop,
redirectBottom,
None,
redirectLeft,
redirectTopLeft,
redirectBottomLeft,
None,
redirectRight,
redirectTopRight,
redirectBottomRight]
if col == 0:
colIndex = 1
elif col == self.game.DropColumns - 1:
colIndex = 2
else:
colIndex = 0
if row == 0:
rowIndex = 1
elif row == self.game.DropRows - 1:
rowIndex = 2
else:
rowIndex = 0
index = (colIndex << 2) + rowIndex
redirectTable = tables[index]
if not redirectTable:
return dir
newDir = redirectTable[dir]
if type(newDir) != type(1):
newDir = self.rng.choice(newDir)
return newDir
def getNextDrop(self):
path = self.paths[self.curPathIndex]
col, row = path['location']
dir = path['direction']
turns = [-1,
0,
0,
1]
turn = self.rng.choice(turns)
if turn:
dir = (dir + turn) % len(self.moves)
dir = self.getValidDirection(col, row, dir)
dCol, dRow = self.moves[dir]
col += dCol
row += dRow
col = min(max(col, 0), self.game.DropColumns - 1)
row = min(max(row, 0), self.game.DropRows - 1)
path['location'] = [col, row]
path['direction'] = dir
self.curPathIndex = (self.curPathIndex + 1) % len(self.paths)
drop = [self.getT(), self.getNextDropTypeName(), [col, row]]
self.stepT()
return drop
| 15.552266
| 296
| 0.353616
|
4a027906c171767165b39bd29c637b396c765a47
| 217
|
py
|
Python
|
helpingnetwork/organization/admin.py
|
neopentane/Techathon_19
|
defaea24e55bdc4a26138a7a8c8813f5b12156f2
|
[
"MIT"
] | null | null | null |
helpingnetwork/organization/admin.py
|
neopentane/Techathon_19
|
defaea24e55bdc4a26138a7a8c8813f5b12156f2
|
[
"MIT"
] | 22
|
2020-02-12T00:01:41.000Z
|
2022-03-11T23:44:42.000Z
|
helpingnetwork/organization/admin.py
|
neopentane/Techathon_19
|
defaea24e55bdc4a26138a7a8c8813f5b12156f2
|
[
"MIT"
] | 2
|
2019-04-03T18:01:54.000Z
|
2019-04-04T03:57:55.000Z
|
from django.contrib import admin
from .models import Organization,OrganizationImages
admin.site.register(Organization)
#admin.site.register(Event)
#admin.site.register(Signup)
admin.site.register(OrganizationImages)
| 27.125
| 51
| 0.843318
|
4a0279a5efc8435ccfc41d688a41f9a7bca8d8ff
| 632
|
py
|
Python
|
backend/manage.py
|
mabdullahabid/test-app-1-1
|
9fe7689c8e6734db626dc8fd790cdb9397475ff2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
mabdullahabid/test-app-1-1
|
9fe7689c8e6734db626dc8fd790cdb9397475ff2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
mabdullahabid/test-app-1-1
|
9fe7689c8e6734db626dc8fd790cdb9397475ff2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_app_1_1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.727273
| 76
| 0.685127
|
4a027a0a3a4cecf3908949c11ba72ee8e3c01e29
| 8,629
|
py
|
Python
|
preprocessing.py
|
tomjoju/Fast-AI-Disaster-Tweet-Classifier
|
589aa763296a98fde6f9c9eb1cba7b0fecd330aa
|
[
"MIT"
] | 3
|
2020-07-17T14:55:15.000Z
|
2020-07-26T03:03:27.000Z
|
preprocessing.py
|
blackonyyx/Fast-AI-Disaster-Tweet-Classifier
|
589aa763296a98fde6f9c9eb1cba7b0fecd330aa
|
[
"MIT"
] | null | null | null |
preprocessing.py
|
blackonyyx/Fast-AI-Disaster-Tweet-Classifier
|
589aa763296a98fde6f9c9eb1cba7b0fecd330aa
|
[
"MIT"
] | 2
|
2020-07-30T14:24:54.000Z
|
2020-08-18T05:04:51.000Z
|
"""
preprocessing.py
Contains preprocessing methods to apply on the dataset.
Dependencies
!pip install numpy pandas pyspellchecker spacy nltk
!python -m spacy download en_core_web_sm
"""
import string
import numpy as np
import pandas as pd
import re
from spellchecker import SpellChecker
spell = SpellChecker()
import spacy
nlp = spacy.load("en_core_web_sm")
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
stop = stopwords.words('english')
emoticons_happy = {':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)', '<3'}
emoticons_sad = {':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<', ':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c', ':c', ':{', '>:\\', ';('}
emotes = emoticons_happy.union(emoticons_sad)
# ===== TRANSFORM METHODS =====
def tokenize(text):
"""Given string, apply Spacy's nlp then return list of text"""
return [token.text for token in nlp(text)]
def spellcorrect(text):
"""Given string, list-split, apply SpellChecker's correction,
return space-delimited list and no. of misspelt words"""
original_text = text.split()
corrected_text = [spell.correction(word) for word in original_text]
return " ".join(corrected_text)
def remove_url(text):
"""Given string, remove url by regex."""
# url = re.compile(r'https?://\S+|www\.\S+') # Axel
url = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') # Tom
return url.sub(r'',text)
def remove_html(text):
"""Given string, remove html by regex."""
html=re.compile(r'<.*?>')
return html.sub(r'',text)
def remove_digits(text):
"""Given string, remove digits."""
text = ''.join([i for i in text if not i.isdigit()])
return text
def remove_punctuations(text):
"""Given string, remove punctuations."""
table=str.maketrans('','',string.punctuation)
return text.translate(table)
def transform_lower_chars(text):
"""Given string, transform into lower characters."""
return str(text).lower()
def remove_emojis(text):
"""Given text, remove emojis."""
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
# ===== COLLECT METHODS =====
def collect_url(string):
"""Given string, collect urls by regex"""
text = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',string)
return "".join(text)
def collect_stopwords(tokens):
"""Given list of words, collect only NLTK stopwords"""
return [token for token in tokens if token in stop]
def collect_punctuations(text):
"""Given list of words, collect only string punctuations"""
return [c for c in text if c in string.punctuation]
def collect_digits(text):
"""Given string, collect only digits"""
return " ".join([c for c in text if c.isdigit()])
def collect_uppercase_words(tokens):
"""Given list of tokens, collect only uppercase words"""
return [1 for token in tokens if token.isupper()]
def collect_uppercase_chars(text):
"""Given string, collect only uppercase characters"""
return [1 for c in text if c.isupper()]
def collect_url(string):
"""Given string, collect urls by regex."""
text = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',string)
return "".join(text)
def collect_at_mentions(text):
"""Given string, collect @mentions by regex."""
line=re.findall(r'(?<=@)\w+',text)
return " ".join(line)
def collect_hashtags(text):
"""Given string, collect #hashtags by regex."""
line=re.findall(r'(?<=#)\w+',text)
return " ".join(line)
def collect_numbers(text):
"""Given string, collect raw numbers by regex."""
line=re.findall(r'[0-9]+',text)
return " ".join(line)
def collect_entities(text):
"""Given list of tokens, collect entities using Spacy."""
return [token.text for token in nlp(text).ents]
# ===== NUMERIC METHODS =====
def num_words(tokens):
"""Given list of words, return no. of words (int)"""
return len(tokens)
def num_chars(text):
"""Given string, return no. of characters (int)"""
return len(text)
def num_stopwords(tokens):
"""Given list of words, return no. of NLTK stopwords (int)"""
return len(collect_stopwords(tokens))
def num_special_chars(text):
"""Given string, return no. of punctuation characters (int)"""
return len(collect_punctuations(text))
def num_numeric(text):
"""Given string, return no. of digits (int)"""
return len(collect_digits(text))
def num_uppercase_words(tokens):
"""Given list of words, return no. of uppercase words (int)"""
return len(collect_uppercase_words(tokens))
def num_uppercase_chars(text):
"""Given string, return no. of uppercase characters (int)"""
return len(collect_uppercase_chars(text))
def num_misspelt_words(text):
"""Given string, return no. of misspelt words."""
original_text = text.split()
corrected_text = spellcorrect(text)
return sum([1 for o, c in zip(original_text, corrected_text) if o != c])
def num_entities(text):
"""Given text, get no. of entities."""
return len(collect_entities(text))
# ===== DERIVED FEATURES =====
def sum_word_len(tokens):
"""Given list of words, return sum of length of words (int)"""
return sum([len(token) for token in tokens])
def avg_word_len(tokens):
"""Given list of words, return average word length (int)"""
return sum_word_len(tokens) / num_words(tokens)
def ratio_uppercase_chars(text):
"""Given text, return ratio of uppercase words (float)"""
return num_uppercase_chars(text) / num_chars(text)
# ===== BOOLEAN METHODS =====
def is_emote(tokens):
return [1 for token in tokens if token in emotes]
"""
preprocess(df) creates columns of preprocessed data in the DataFrame in-place.
"""
def preprocess(df):
# Transformations
df['text'] = df['text'].apply(remove_html)
df['num_misspelt_words'] = df['text'].apply(num_misspelt_words)
df['text'] = df['text'].apply(spellcorrect)
df['location'].fillna(0, inplace=True)
df['keyword'].fillna(0, inplace=True)
# Feature creation
df['tokens'] = df['text'].apply(tokenize)
df['url'] = df['text'].apply(collect_url)
df['at_mentions'] = df['text'].apply(collect_at_mentions)
df['hashtags'] = df['text'].apply(collect_hashtags)
df['numbers'] = df['text'].apply(collect_numbers)
df['digits'] = df['text'].apply(collect_digits)
# Numeric features
df['num_special_chars'] = df['text'].apply(num_special_chars)
df['num_chars'] = df['text'].apply(num_chars)
df['num_words'] = df['tokens'].apply(num_words)
df['num_stopwords'] = df['tokens'].apply(num_stopwords)
df['num_numeric'] = df['text'].apply(num_numeric)
df['num_uppercase_words'] = df['tokens'].apply(num_uppercase_words)
df['num_uppercase_chars'] = df['text'].apply(num_uppercase_chars)
df['length'] = df['text'].apply(len)
df['num_hashtags'] = df['text'].apply(lambda x: len([c for c in str(x) if c == '#']))
df['num_mentions'] = df['text'].apply(lambda x: len([c for c in str(x) if c == '@']))
df['count_capital_letters'] = df['text'].apply(lambda x: len(re.findall(r'[A-Z]', x)))
df['ratio_capital_letters'] = df['length'] / df['count of capital letters']
df['external_url'] = df['text'].apply(collect_url)
df['num_entities'] = df['text'].apply(num_entities)
# Derived features
df['sum_word_len'] = df['tokens'].apply(sum_word_len)
df['avg_word_len'] = df['tokens'].apply(avg_word_len)
df['ratio_uppercase_chars'] = df['text'].apply(ratio_uppercase_chars)
# Final text cleaning
df['text'] = df['text'].apply(remove_url)
df['text'] = df['text'].apply(transform_lower_chars)
df['text'] = df['text'].apply(remove_digits)
df['text'] = df['text'].apply(remove_punctuations)
df['text'] = df['text'].apply(remove_emojis)
| 37.193966
| 320
| 0.618959
|
4a027a6658cf463a27ae6b51a4100b2f294871f3
| 7,668
|
py
|
Python
|
source/assetmgr_hiero/utils/defaults.py
|
IngenuityEngine/ftrack-connect-foundry
|
a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d
|
[
"MIT"
] | 1
|
2019-10-22T06:33:08.000Z
|
2019-10-22T06:33:08.000Z
|
source/assetmgr_hiero/utils/defaults.py
|
IngenuityEngine/ftrack-connect-foundry
|
a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d
|
[
"MIT"
] | null | null | null |
source/assetmgr_hiero/utils/defaults.py
|
IngenuityEngine/ftrack-connect-foundry
|
a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d
|
[
"MIT"
] | null | null | null |
import hiero.core
import FnAssetAPI
from FnAssetAPI.core.decorators import debugStaticCall
from . import _utils
from . import entity as entityUtils
from . import tag as tagUtils
from .. import constants
kTrackItemTimingOptionsKey = "trackItemTimingsOptions"
def managerSpecificKey(key):
"""
Produces a manager-localised key for persistent options, etc..
"""
manager = FnAssetAPI.SessionManager.currentManager()
if not manager:
return key
identifier = manager.getIdentifier()
# Hiero doesn't seem to like dots in tag names
safeIdentifier = identifier.replace(".", "-")
localizedKey = "%s_%s" % (key, safeIdentifier)
return localizedKey
@debugStaticCall
def getDefaultParentEntityForProjects(projects, context):
if not projects:
return None
projects = _utils.ensureList(projects)
session = FnAssetAPI.SessionManager.currentSession()
if not session:
return None
manager = session.currentManager()
if not manager:
return
try:
# 1) Check the last used parent (stored on the somewhere randomly in memory
# so we don't dirty the document state)
field = constants.kHieroField_defaultProjectParent
ref = tagUtils.getTemporaryAssetTagField(projects[0], field, None)
if ref:
return session.getEntity(ref, context)
# 2) Look on our projects to see if any have been published already
# or if there are any entities in the project
parent = entityUtils.getFirstParentOfSomeEntity(projects, context)
if parent:
return parent
# 3) Ask the Asset Manager if it has any opinion
with context.scopedOverride():
context.access = context.kWriteMultiple
spec = FnAssetAPI.specifications.HieroProjectSpecification()
entity = manager.getDefaultEntity(spec, context)
if entity:
return entity
except Exception as e:
FnAssetAPI.logging.debug("Exception trying to find default parent "+
"entity for Project: %s" % e)
return None
@debugStaticCall
def setDefaultParentEntityForProjects(entity, projects):
for p in projects:
ref = entity.reference
field = constants.kHieroField_defaultProjectParent
tagUtils.setTemporaryAssetTagField(p, field, ref)
@debugStaticCall
def getDefaultParentEntityForClips(objs, context):
if not objs:
return None
objs = _utils.ensureList(objs)
session = FnAssetAPI.SessionManager.currentSession()
if not session:
return None
manager = session.currentManager()
if not manager:
return
try:
# 1) Check the last used parent (stored on the project)
project = objs[0].project() if hasattr(objs[0], 'project') else None
sequence = objs[0].parentSequence() if hasattr(objs[0], 'parentSequence') else None
for o in (sequence, project):
if o:
field = constants.kHieroField_defaultClipParent
ref = tagUtils.getAssetTagField(o, field, None)
if ref:
return session.getEntity(ref, context)
# 2) Look for some entity that we know about (prob a clip or a project)
parent = entityUtils.getFirstParentOfSomeEntity(objs, context)
if parent:
return parent
# 3) Finally ask the Asset Manager if it has any opinion
with context.scopedOverride():
context.access = context.kWriteMultiple
spec = FnAssetAPI.specifications.ImageSpecification()
entity = manager.getDefaultEntity(spec, context)
if entity:
return entity
except Exception as e:
FnAssetAPI.logging.debug("Exception trying to find default parent "+
"entity for Clips: %s" % e)
return None
@debugStaticCall
def setDefaultParentEntityForClips(entity, objs):
if not objs:
return
objs = _utils.ensureList(objs)
targetObj = None
if hasattr(objs[0], 'parentSequence'):
targetObj = objs[0].parentSequence()
else:
targetObj = objs[0].project() if hasattr(objs[0], 'project') else None
if targetObj:
ref = entity.reference
field = constants.kHieroField_defaultClipParent
tagUtils.setAssetTagField(targetObj, field, ref)
@debugStaticCall
def getDefaultParentEntityForShots(trackItemsOrSeq, context):
if not trackItemsOrSeq:
return None
trackItems = _utils.ensureList(trackItemsOrSeq)
session = FnAssetAPI.SessionManager.currentSession()
if not session:
return None
manager = session.currentManager()
if not manager:
return
try:
# 1) Check the last used parent (stored on a sequence)
sequence = trackItems[0]
if isinstance(sequence, hiero.core.TrackItem):
sequence = sequence.parentSequence()
if sequence:
field = constants.kHieroField_defaultShotParent
ref = tagUtils.getAssetTagField(sequence, field, None)
if ref:
return session.getEntity(ref, context)
# 2) Try and find a parent based on any entities in the selection
shot = None
relationship = FnAssetAPI.specifications.ParentGroupingRelationship()
shotEntities = entityUtils.entitiesFromObjs(trackItems, sparse=False)
if shotEntities:
shot = shotEntities[0]
if not shot:
# see if we have any clip entities, and get a parent of one of them
# This is not necessarily a 'shot' (but does it matter?)
clipEntities = entityUtils.someEntitiesFromObjs(trackItems,
includeChildren=True, includeParents=False, sparse=False)
if clipEntities:
# We need an additional query for grouping parent here as we want the
# parent of the *shot* the clip is in not the parent of the clip.
shots = clipEntities[0].getRelatedEntities([relationship,], context)[0]
if shots:
shot = shots[0]
if shot:
# For now, we'll be lazy and get the parent of the first shot
parents = shot.getRelatedEntities([relationship,], context)[0]
if parents:
return parents[0]
# 3) Finally see if the manager has any opinion
with context.scopedOverride():
context.access = context.kWrite
spec = FnAssetAPI.specifications.ShotSpecification()
entity = manager.getDefaultEntity(spec, context)
if entity:
return entity
except Exception as e:
FnAssetAPI.logging.debug("Exception trying to find default parent "+
"entity for Clips: %s" % e)
return None
@debugStaticCall
def setDefaultParentEntityForShots(entity, trackItemsOrSeq):
if not trackItemsOrSeq:
return
trackItems = _utils.ensureList(trackItemsOrSeq)
sequence = trackItems[0]
if isinstance(sequence, hiero.core.TrackItem):
sequence = sequence.parentSequence()
if sequence:
ref = entity.reference
field = constants.kHieroField_defaultShotParent
tagUtils.setAssetTagField(sequence, field, ref)
@debugStaticCall
def setDefaultsInObjTag(obj, key, options):
"""
Stores the supplied options in a tag on the object using the supplied key.
"""
tagName = "%s_%s" % (tagUtils.kAssetTag, key)
tag = tagUtils.getNamedTag(obj, tagName, create=True)
if not tag:
return
data = tag.metadata()
if not data.readOnly():
for k,v in options.items():
# Hiero presently only supports string data
data.setValue(k, repr(v))
@debugStaticCall
def getDefaultsFromObjTag(obj, key):
opts = {}
tagName = "%s_%s" % (tagUtils.kAssetTag, key)
tag = tagUtils.getNamedTag(obj, tagName)
if not tag:
return opts
data = tag.metadata()
for k,v in data.dict().items():
# Hiero adds its own metadata
if k.startswith('tag.'): continue
# Hiero only supports string data so we repr'd on the way in
try:
opts[k] = eval(v)
except NameError:
# It was a string, and its lost its quotes...
opts[k] = v
return opts
| 26.905263
| 87
| 0.704747
|
4a027ae024f24dca4a3cf947e1385a03e76d4b4f
| 983
|
py
|
Python
|
app/__init__.py
|
Mutembeijoe/personal_blog
|
bab1dcff6f9b2a24b96cef20a5c57baab7433f8e
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Mutembeijoe/personal_blog
|
bab1dcff6f9b2a24b96cef20a5c57baab7433f8e
|
[
"MIT"
] | 3
|
2021-09-08T01:23:44.000Z
|
2022-03-12T00:03:29.000Z
|
app/__init__.py
|
Mutembeijoe/personal_blog
|
bab1dcff6f9b2a24b96cef20a5c57baab7433f8e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from app.config import config_options
db = SQLAlchemy()
bcrypt = Bcrypt()
migrate = Migrate()
mail = Mail()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
migrate.init_app(app,db)
mail.init_app(app)
from app.main.routes import main
from app.users.routes import users
from app.posts.routes import posts
from app.comments.routes import comments
app.register_blueprint(main)
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(comments)
return app
| 24.575
| 55
| 0.769074
|
4a027af22d16dec478a45af668250e4da83860c0
| 504
|
py
|
Python
|
python_experiments/paper_figures_performance_study/run_all_py_scripts.py
|
CheYulin/AccTriCnt
|
fb62c9e821a453ed6027e85ec3b52d7ddc642a96
|
[
"MIT"
] | 3
|
2019-07-28T14:35:00.000Z
|
2019-07-30T12:35:39.000Z
|
python_experiments/paper_figures_performance_study/run_all_py_scripts.py
|
CheYulin/AccTriCnt
|
fb62c9e821a453ed6027e85ec3b52d7ddc642a96
|
[
"MIT"
] | null | null | null |
python_experiments/paper_figures_performance_study/run_all_py_scripts.py
|
CheYulin/AccTriCnt
|
fb62c9e821a453ed6027e85ec3b52d7ddc642a96
|
[
"MIT"
] | 1
|
2020-01-02T19:04:05.000Z
|
2020-01-02T19:04:05.000Z
|
import os
if __name__ == '__main__':
for file in ['draw_bitmap_range_filtering_optimization.py',
'draw_comparison_of_algorithms.py',
'draw_degree_skew_handling.py',
'draw_mcdram_opt.py',
'draw_multi_pass.py',
'draw_scalability_to_num_of_threads.py',
'draw_simd_optimization.py',
'draw_varying_block_size.py'
]:
os.system('~/anaconda2/bin/python ' + file)
| 36
| 63
| 0.563492
|
4a027bd5444be733fdaa5d06b10f3ac5224beeaa
| 52,859
|
py
|
Python
|
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
"""
Bases classes
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from __future__ import annotations
import functools
import inspect
import itertools
import json
import logging
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
NamedTuple,
Sequence,
Set,
Tuple,
Union,
)
import numba as nb
import numpy as np
from numba.extending import is_jitted, register_jitable
from ..tools.cache import cached_method, cached_property
from ..tools.docstrings import fill_in_docstring
from ..tools.misc import Number, classproperty
from ..tools.numba import jit, jit_allocate_out
from ..tools.typing import CellVolume, FloatNumerical, NumberOrArray, OperatorType
if TYPE_CHECKING:
from .boundaries.axes import Boundaries, BoundariesData # @UnusedImport
PI_4 = 4 * np.pi
PI_43 = 4 / 3 * np.pi
class OperatorInfo(NamedTuple):
"""stores information about an operator"""
factory: Callable[..., OperatorType]
rank_in: int
rank_out: int
def _check_shape(shape) -> Tuple[int, ...]:
"""checks the consistency of shape tuples"""
if not hasattr(shape, "__iter__"):
shape = [shape] # support single numbers
if len(shape) == 0:
raise ValueError("Require at least one dimension")
# convert the shape to a tuple of integers
result = []
for dim in shape:
if dim == int(dim) and dim >= 1:
result.append(int(dim))
else:
raise ValueError(f"{repr(dim)} is not a valid number of support points")
return tuple(result)
def discretize_interval(
x_min: float, x_max: float, num: int
) -> Tuple[np.ndarray, float]:
r""" construct a list of equidistantly placed intervals
The discretization is defined as
.. math::
x_i &= x_\mathrm{min} + \left(i + \frac12\right) \Delta x
\quad \text{for} \quad i = 0, \ldots, N - 1
\\
\Delta x &= \frac{x_\mathrm{max} - x_\mathrm{min}}{N}
where :math:`N` is the number of intervals given by `num`.
Args:
x_min (float): Minimal value of the axis
x_max (float): Maximal value of the axis
num (int): Number of intervals
Returns:
tuple: (midpoints, dx): the midpoints of the intervals and the used
discretization `dx`.
"""
dx = (x_max - x_min) / num
return (np.arange(num) + 0.5) * dx + x_min, dx
class DomainError(ValueError):
"""exception indicating that point lies outside domain"""
pass
class DimensionError(ValueError):
"""exception indicating that dimensions were inconsistent"""
pass
class PeriodicityError(RuntimeError):
"""exception indicating that the grid periodicity is inconsistent"""
pass
class GridBase(metaclass=ABCMeta):
"""Base class for all grids defining common methods and interfaces"""
_subclasses: Dict[str, "GridBase"] = {} # all classes inheriting from this
_operators: Dict[str, OperatorInfo] = {} # all operators defined for the grid
# properties that are defined in subclasses
dim: int # int: The spatial dimension in which the grid is embedded
axes: List[str] # list: Name of all axes that are described by the grid
axes_symmetric: List[str] = []
""" list: The names of the additional axes that the fields do not depend on,
e.g. along which they are constant. """
cell_volume_data: Sequence[FloatNumerical]
coordinate_constraints: List[int] = [] # axes not described explicitly
num_axes: int
periodic: List[bool]
# mandatory, immutable, private attributes
_axes_bounds: Tuple[Tuple[float, float], ...]
_axes_coords: Tuple[np.ndarray, ...]
_discretization: np.ndarray
_shape: Tuple[int, ...]
# to help sphinx, we here list docstrings for classproperties
operators: Set[str]
""" set: names of all operators defined for this grid """
def __init__(self):
"""initialize the grid"""
self._logger = logging.getLogger(self.__class__.__name__)
def __init_subclass__(cls, **kwargs): # @NoSelf
"""register all subclassess to reconstruct them later"""
super().__init_subclass__(**kwargs)
cls._subclasses[cls.__name__] = cls
cls._operators: Dict[str, Callable] = {}
@classmethod
def from_state(cls, state: Union[str, Dict[str, Any]]) -> GridBase:
"""create a field from a stored `state`.
Args:
state (`str` or `dict`):
The state from which the grid is reconstructed. If `state` is a
string, it is decoded as JSON, which should yield a `dict`.
"""
# decode the json data
if isinstance(state, str):
state = dict(json.loads(state))
# create the instance
# create the instance of the correct class
class_name = state.pop("class")
if class_name == cls.__name__:
raise RuntimeError(f"Cannot reconstruct abstract class `{class_name}`")
grid_cls = cls._subclasses[class_name]
return grid_cls.from_state(state)
@property
def axes_bounds(self) -> Tuple[Tuple[float, float], ...]:
"""tuple: lower and upper bounds of each axis"""
return self._axes_bounds
@property
def axes_coords(self) -> Tuple[np.ndarray, ...]:
"""tuple: coordinates of the cells for each axis"""
return self._axes_coords
def get_axis_index(self, key: Union[int, str], allow_symmetric: bool = True) -> int:
"""return the index belonging to an axis
Args:
key (int or str): The index or name of an axis
allow_symmetric (bool): Whether axes with assumed symmetry are included
Returns:
int: The index of the axis
"""
if isinstance(key, str):
# determine key index from name of the axis
if allow_symmetric:
axes = self.axes + self.axes_symmetric
else:
axes = self.axes
if key in axes:
return axes.index(key)
else:
raise IndexError(f"`{key}` is not in the axes {axes}")
elif isinstance(key, int):
# assume that it is already an index
return key
raise IndexError("Index must be an integer or the name of an axes")
@property
def discretization(self) -> np.ndarray:
""":class:`numpy.array`: the linear size of a cell along each axis"""
return self._discretization
@property
def shape(self) -> Tuple[int, ...]:
"""tuple of int: the number of support points of each axis"""
return self._shape
@property
def _shape_full(self) -> Tuple[int, ...]:
"""tuple of int: number of support points including ghost points"""
return tuple(num + 2 for num in self.shape)
@property
def _idx_valid(self) -> Tuple[slice, ...]:
"""tuple: slices to extract valid data from full data"""
return tuple(slice(1, s + 1) for s in self.shape)
def _make_get_valid(self) -> Callable[[np.ndarray], np.ndarray]:
"""callable: function to extract the valid part of a full data array"""
num_axes = self.num_axes
@register_jitable
def get_valid(arr: np.ndarray) -> np.ndarray:
"""return valid part of the data (without ghost cells)"""
if num_axes == 1:
return arr[..., 1:-1] # type: ignore
elif num_axes == 2:
return arr[..., 1:-1, 1:-1] # type: ignore
elif num_axes == 3:
return arr[..., 1:-1, 1:-1, 1:-1] # type: ignore
else:
raise NotImplementedError
return get_valid # type: ignore
def _make_set_valid(self) -> Callable[[np.ndarray, np.ndarray], None]:
"""callable: function to extract the valid part of a full data array"""
num_axes = self.num_axes
@register_jitable
def set_valid(arr: np.ndarray, value: np.ndarray) -> None:
"""return valid part of the data (without ghost cells)"""
if num_axes == 1:
arr[..., 1:-1] = value
elif num_axes == 2:
arr[..., 1:-1, 1:-1] = value
elif num_axes == 3:
arr[..., 1:-1, 1:-1, 1:-1] = value
else:
raise NotImplementedError
return set_valid # type: ignore
    @abstractproperty
    def state(self) -> Dict[str, Any]:
        """dict: all information required to recreate an equivalent grid

        Subclasses must return data that their `from_state` constructor
        accepts; the dict is also used by :meth:`copy` and
        :attr:`state_serialized`.
        """
        pass
@property
def state_serialized(self) -> str:
"""str: JSON-serialized version of the state of this grid"""
state = self.state
state["class"] = self.__class__.__name__
return json.dumps(state)
def copy(self) -> GridBase:
"""return a copy of the grid"""
return self.__class__.from_state(self.state)
__copy__ = copy
def __deepcopy__(self, memo: Dict[int, Any]) -> GridBase:
"""create a deep copy of the grid. This function is for instance called when
a grid instance appears in another object that is copied using `copy.deepcopy`
"""
# this implementation assumes that a simple call to copy is sufficient
result = self.copy()
memo[id(self)] = result
return result
def __repr__(self) -> str:
"""return instance as string"""
args = ", ".join(str(k) + "=" + str(v) for k, v in self.state.items())
return f"{self.__class__.__name__}({args})"
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.shape == other.shape
and self.axes_bounds == other.axes_bounds
and self.periodic == other.periodic
)
def _cache_hash(self) -> int:
"""returns a value to determine when a cache needs to be updated"""
return hash(
(
self.__class__.__name__,
self.shape,
self.axes_bounds,
hash(tuple(self.periodic)),
)
)
def compatible_with(self, other: GridBase) -> bool:
"""tests whether this class is compatible with other grids.
Grids are compatible when they cover the same area with the same
discretization. The difference to equality is that compatible grids do
not need to have the same periodicity in their boundaries.
Args:
other (:class:`~pde.grids.base.GridBase`):
The other grid to test against
Returns:
bool: Whether the grid is compatible
"""
return (
self.__class__ == other.__class__
and self.shape == other.shape
and self.axes_bounds == other.axes_bounds
)
def assert_grid_compatible(self, other: GridBase) -> None:
"""checks whether `other` is compatible with the current grid
Args:
other (:class:`~pde.grids.base.GridBase`):
The grid compared to this one
Raises:
ValueError: if grids are not compatible
"""
if not self.compatible_with(other):
raise ValueError(f"Grids {self} and {other} are incompatible")
@property
def numba_type(self) -> str:
"""str: represents type of the grid data in numba signatures"""
return "f8[" + ", ".join([":"] * self.num_axes) + "]"
@cached_property()
def coordinate_arrays(self) -> Tuple[np.ndarray, ...]:
"""tuple: for each axes: coordinate values for all cells"""
return tuple(np.meshgrid(*self.axes_coords, indexing="ij"))
@cached_property()
def cell_coords(self) -> np.ndarray:
""":class:`~numpy.ndarray`: coordinate values for all axes of each cell"""
return np.moveaxis(self.coordinate_arrays, 0, -1)
@cached_property()
def cell_volumes(self) -> np.ndarray:
""":class:`~numpy.ndarray`: volume of each cell"""
vols = functools.reduce(np.outer, self.cell_volume_data)
return np.broadcast_to(vols, self.shape) # type: ignore
@cached_property()
def uniform_cell_volumes(self) -> bool:
"""bool: returns True if all cell volumes are the same"""
return all(np.asarray(vols).ndim == 0 for vols in self.cell_volume_data)
def distance_real(self, p1: np.ndarray, p2: np.ndarray) -> float:
"""Calculate the distance between two points given in real coordinates
This takes periodic boundary conditions into account if need be
Args:
p1 (:class:`~numpy.ndarray`): First position
p2 (:class:`~numpy.ndarray`): Second position
Returns:
float: Distance between the two positions
"""
diff = self.difference_vector_real(p1, p2)
return np.linalg.norm(diff, axis=-1) # type: ignore
def _iter_boundaries(self) -> Iterator[Tuple[int, bool]]:
"""iterate over all boundaries of the grid
Yields:
tuple: for each boundary, the generator returns a tuple indicating
the axis of the boundary together with a boolean value indicating
whether the boundary lies on the upper side of the axis.
"""
return itertools.product(range(self.num_axes), [True, False])
def _boundary_coordinates(self, axis: int, upper: bool) -> np.ndarray:
"""get coordinates of points on the boundary
Args:
axis (int):
The axis perpendicular to the boundary
upper (bool):
Whether the boundary is at the upper side of the axis
Returns:
:class:`~numpy.ndarray`: Coordinates of the boundary points. This array has
one less dimension than the grid has axes.
"""
# get coordinate along the axis determining the boundary
if upper:
c_bndry = np.array([self._axes_bounds[axis][1]])
else:
c_bndry = np.array([self._axes_bounds[axis][0]])
# get orthogonal coordinates
coords = tuple(
c_bndry if i == axis else self._axes_coords[i] for i in range(self.num_axes)
)
points = np.meshgrid(*coords, indexing="ij")
# assemble into array
shape_bndry = tuple(self.shape[i] for i in range(self.num_axes) if i != axis)
shape = shape_bndry + (self.num_axes,)
return np.stack(points, -1).reshape(shape)
    @abstractproperty
    def volume(self) -> float:
        """float: volume of the grid; implemented by subclasses"""
        pass
    @abstractmethod
    def cell_to_point(self, cells: np.ndarray, cartesian: bool = True) -> np.ndarray:
        """abstract: map cell indices to point coordinates; see subclasses"""
        pass

    @abstractmethod
    def point_to_cell(self, points: np.ndarray) -> np.ndarray:
        """abstract: map point coordinates to cell indices; see subclasses"""
        pass

    @abstractmethod
    def point_to_cartesian(self, points: np.ndarray) -> np.ndarray:
        """abstract: convert grid points to Cartesian coordinates; see subclasses"""
        pass

    @abstractmethod
    def point_from_cartesian(self, points: np.ndarray) -> np.ndarray:
        """abstract: convert Cartesian coordinates to grid points; see subclasses"""
        pass

    @abstractmethod
    def difference_vector_real(self, p1: np.ndarray, p2: np.ndarray):
        """abstract: difference vector between two points; see subclasses"""
        pass

    @abstractmethod
    def polar_coordinates_real(
        self, origin: np.ndarray, *, ret_angle: bool = False
    ) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
        """abstract: polar coordinates relative to `origin`; see subclasses"""
        pass

    @abstractmethod
    def contains_point(self, point: np.ndarray) -> np.ndarray:
        """abstract: test whether points lie inside the grid; see subclasses"""
        pass

    @abstractmethod
    def iter_mirror_points(
        self, point: np.ndarray, with_self: bool = False, only_periodic: bool = True
    ) -> Generator:
        """abstract: iterate over mirror images of a point; see subclasses"""
        pass

    @abstractmethod
    def get_boundary_conditions(
        self, bc: BoundariesData = "natural", rank: int = 0
    ) -> Boundaries:
        """abstract: construct boundary conditions from `bc`; see subclasses"""
        pass

    @abstractmethod
    def get_line_data(self, data: np.ndarray, extract: str = "auto") -> Dict[str, Any]:
        """abstract: prepare discretized data for a line plot; see subclasses"""
        pass

    @abstractmethod
    def get_image_data(self, data: np.ndarray) -> Dict[str, Any]:
        """abstract: prepare discretized data for an image plot; see subclasses"""
        pass

    @abstractmethod
    def get_random_point(
        self, boundary_distance: float = 0, cartesian: bool = True
    ) -> np.ndarray:
        """abstract: sample a random point in the grid; see subclasses"""
        pass
    def normalize_point(self, point: np.ndarray, reflect: bool = True) -> np.ndarray:
        """normalize coordinates by applying periodic boundary conditions

        Here, the point is assumed to be specified by the physical values along
        the non-symmetric axes of the grid. Normalizing points is useful to make sure
        they lie within the domain of the grid. This function respects periodic
        boundary conditions and can also reflect points off the boundary.

        Args:
            point (:class:`~numpy.ndarray`):
                Coordinates of a single point
            reflect (bool):
                Flag determining whether coordinates along non-periodic axes are
                reflected to lie in the valid range. If `False`, such coordinates are
                left unchanged and only periodic boundary conditions are enforced.

        Returns:
            :class:`~numpy.ndarray`: The respective coordinates with periodic
            boundary conditions applied.
        """
        point = np.asarray(point, dtype=np.double)
        # NOTE(review): if the input is already a float64 ndarray, `np.asarray`
        # returns that same object and the multi-axes branch below writes into
        # it, mutating the caller's array — confirm this is intended
        if point.size == 0:
            return np.zeros((0, self.num_axes))

        # check that the dimensionality of `point` matches the grid
        if point.ndim == 0:
            if self.num_axes > 1:
                raise DimensionError(
                    f"Point {point} is not of dimension {self.num_axes}"
                )
        elif point.shape[-1] != self.num_axes:
            raise DimensionError(
                f"Array of shape {point.shape} does not describe points of dimension "
                f"{self.num_axes}"
            )

        # normalize the coordinates for the periodic dimensions
        bounds = np.array(self.axes_bounds)
        xmin = bounds[:, 0]
        xmax = bounds[:, 1]
        xdim = xmax - xmin

        if self.num_axes == 1:
            # single dimension
            if self.periodic[0]:
                # wrap the coordinate back into the primary interval
                point = (point - xmin[0]) % xdim[0] + xmin[0]
            elif reflect:
                # fold the coordinate into [xmin, xmax] by reflection
                arg = (point - xmax[0]) % (2 * xdim[0]) - xdim[0]
                point = xmin[0] + np.abs(arg)

        else:
            # multiple dimensions; each axis is treated independently
            for i in range(self.num_axes):
                if self.periodic[i]:
                    point[..., i] = (point[..., i] - xmin[i]) % xdim[i] + xmin[i]
                elif reflect:
                    arg = (point[..., i] - xmax[i]) % (2 * xdim[i]) - xdim[i]
                    point[..., i] = xmin[i] + np.abs(arg)

        return point
@classmethod
def register_operator(
cls,
name: str,
factory_func: Callable = None,
rank_in: int = 0,
rank_out: int = 0,
):
"""register an operator for this grid
Example:
The method can either be used directly::
GridClass.register_operator("operator", make_operator)
or as a decorator for the factory function::
@GridClass.register_operator("operator")
def make_operator(bcs: Boundaries):
...
Args:
name (str):
The name of the operator to register
factory_func (callable):
A function with signature ``(bcs: Boundaries, **kwargs)``, which
takes boundary conditions and optional keyword arguments and
returns an implementation of the given operator. This
implementation is a function that takes a
:class:`~numpy.ndarray` of discretized values as arguments and
returns the resulting discretized data in a
:class:`~numpy.ndarray` after applying the operator.
rank_in (int):
The rank of the input field for the operator
rank_out (int):
The rank of the field that is returned by the operator
"""
def register_operator(factor_func_arg: Callable):
"""helper function to register the operator"""
cls._operators[name] = OperatorInfo(
factory=factor_func_arg, rank_in=rank_in, rank_out=rank_out
)
return factor_func_arg
if factory_func is None:
# method is used as a decorator, so return the helper function
return register_operator
else:
# method is used directly
register_operator(factory_func)
@classproperty # type: ignore
def operators(cls) -> Set[str]: # @NoSelf
"""set: all operators defined for this class"""
result = set()
classes = inspect.getmro(cls)[:-1] # type: ignore
for anycls in classes:
result |= set(anycls._operators.keys()) # type: ignore
return result
def _get_operator_info(self, operator: Union[str, OperatorInfo]) -> OperatorInfo:
"""return the operator defined on this grid
Args:
operator (str):
Identifier for the operator. Some examples are 'laplace', 'gradient', or
'divergence'. The registered operators for this grid can be obtained
from the :attr:`~pde.grids.base.GridBase.operators` attribute.
Returns:
:class:`~pde.grids.base.OperatorInfo`: information for the operator
"""
if isinstance(operator, OperatorInfo):
return operator
# obtain all parent classes, except `object`
classes = inspect.getmro(self.__class__)[:-1]
for cls in classes:
if operator in cls._operators: # type: ignore
return cls._operators[operator] # type: ignore
# operator was not found
op_list = ", ".join(sorted(self.operators))
raise ValueError(
f"'{operator}' is not one of the defined operators ({op_list}). Custom "
"operators can be added using the `register_operator` method."
)
@cached_method()
def make_operator_no_bc(
self,
operator: Union[str, OperatorInfo],
**kwargs,
) -> OperatorType:
"""return a compiled function applying an operator without boundary conditions
A function that takes the discretized full data as an input and an array of
valid data points to which the result of applying the operator is written.
Note:
The resulting function does not check whether the ghost cells of the input
array have been supplied with sensible values. It is the responsibility of
the user to set the values of the ghost cells beforehand. Use this function
only if you absolutely know what you're doing. In all other cases,
:meth:`make_operator` is probably the better choice.
Args:
operator (str):
Identifier for the operator. Some examples are 'laplace', 'gradient', or
'divergence'. The registered operators for this grid can be obtained
from the :attr:`~pde.grids.base.GridBase.operators` attribute.
**kwargs:
Specifies extra arguments influencing how the operator is created.
Returns:
callable: the function that applies the operator
"""
return self._get_operator_info(operator).factory(self, **kwargs)
    @cached_method()
    @fill_in_docstring
    def make_operator(
        self,
        operator: Union[str, OperatorInfo],
        bc: BoundariesData,
        **kwargs,
    ) -> Callable[[np.ndarray], np.ndarray]:
        """return a compiled function applying an operator with boundary conditions

        The returned function takes the discretized data on the grid as an input and
        returns the data to which the operator `operator` has been applied. The function
        only takes the valid grid points and allocates memory for the ghost points
        internally to apply the boundary conditions specified as `bc`. Note that the
        function supports an optional argument `out`, which if given should provide
        space for the valid output array without the ghost cells. The result of the
        operator is then written into this output array.

        Args:
            operator (str):
                Identifier for the operator. Some examples are 'laplace', 'gradient', or
                'divergence'. The registered operators for this grid can be obtained
                from the :attr:`~pde.grids.base.GridBase.operators` attribute.
            bc (str or list or tuple or dict):
                The boundary conditions applied to the field.
                {ARG_BOUNDARIES}
            **kwargs:
                Specifies extra arguments influencing how the operator is created.

        Returns:
            callable: the function that applies the operator
        """
        backend = kwargs.get("backend", "numba")  # numba is the default backend

        # instantiate the operator
        operator = self._get_operator_info(operator)
        operator_raw = operator.factory(self, **kwargs)

        # set the boundary conditions before applying this operator
        bcs = self.get_boundary_conditions(bc, rank=operator.rank_in)

        # calculate shapes of the full data (including ghost cells) and output
        shape_in_full = (self.dim,) * operator.rank_in + self._shape_full
        shape_out = (self.dim,) * operator.rank_out + self.shape

        if backend == "numba":
            # create a compiled function to apply to the operator
            set_ghost_cells = bcs.make_ghost_cell_setter()
            get_valid = self._make_get_valid()

            # ensure the raw operator itself is compiled before embedding it
            if not is_jitted(operator_raw):
                operator_raw = jit(operator_raw)

            @jit_allocate_out(out_shape=shape_out)
            def apply_op(arr: np.ndarray, out: np.ndarray = None) -> np.ndarray:
                """applies operator to the data"""
                # prepare input with boundary conditions: copy the valid data
                # into a freshly allocated full array, then fill ghost cells
                arr_full = np.empty(shape_in_full, dtype=arr.dtype)
                arr_valid = get_valid(arr_full)
                arr_valid[:] = arr
                set_ghost_cells(arr_full)

                # apply operator
                operator_raw(arr_full, out)  # type: ignore

                # return valid part of the output
                return out  # type: ignore

        elif backend in {"numpy", "scipy"}:
            # create a numpy/scipy function to apply to the operator

            def apply_op(arr: np.ndarray, out: np.ndarray = None) -> np.ndarray:
                """set boundary conditions and apply operator"""
                # prepare input with boundary conditions
                arr_full = np.empty(shape_in_full, dtype=arr.dtype)
                arr_full[(...,) + self._idx_valid] = arr
                bcs.set_ghost_cells(arr_full)

                # apply operator; allocate the output array if not supplied
                if out is None:
                    out = np.empty(shape_out, dtype=arr.dtype)
                else:
                    assert out.shape == shape_out
                operator_raw(arr_full, out)

                # return valid part of the output
                return out

        else:
            raise NotImplementedError(f"Undefined backend '{backend}'")

        return apply_op  # type: ignore
def get_operator(
self,
operator: Union[str, OperatorInfo],
bc: BoundariesData,
**kwargs,
) -> Callable[[np.ndarray], np.ndarray]:
"""deprecated alias of method `make_operator`"""
# this was deprecated on 2021-08-05
warnings.warn(
"`get_operator` is deprecated. Use `make_operator` instead",
DeprecationWarning,
)
return self.make_operator(operator, bc, **kwargs)
def get_subgrid(self, indices: Sequence[int]) -> GridBase:
"""return a subgrid of only the specified axes"""
raise NotImplementedError(
f"Subgrids are not implemented for class {self.__class__.__name__}"
)
def plot(self):
"""visualize the grid"""
raise NotImplementedError(
f"Plotting is not implemented for class {self.__class__.__name__}"
)
@property
def typical_discretization(self) -> float:
"""float: the average side length of the cells"""
return np.mean(self.discretization) # type: ignore
    def integrate(
        self, data: NumberOrArray, axes: Union[int, Sequence[int]] = None
    ) -> np.ndarray:
        """Integrates the discretized data over the grid

        Args:
            data (:class:`~numpy.ndarray`):
                The values at the support points of the grid that need to be
                integrated.
            axes (list of int, optional):
                The axes along which the integral is performed. If omitted, all
                axes are integrated over.

        Returns:
            :class:`~numpy.ndarray`: The values integrated over the entire grid
        """
        # determine the volumes of the individual cells
        if axes is None:
            # integrate over all axes, so use the stored volume factors directly
            volume_list = self.cell_volume_data
        else:
            # only integrate over the chosen axes; other axes get weight 1
            if isinstance(axes, int):
                axes = (axes,)
            else:
                axes = tuple(axes)  # required for numpy.sum
            volume_list = [
                cell_vol if ax in axes else 1
                for ax, cell_vol in enumerate(self.cell_volume_data)
            ]
        cell_volumes = functools.reduce(np.outer, volume_list)

        # determine the axes over which we will integrate
        if not isinstance(data, np.ndarray) or data.ndim < self.num_axes:
            # deal with the case where data is not supplied for each support
            # point, e.g., when a single scalar is integrated over the grid
            data = np.broadcast_to(data, self.shape)

        elif data.ndim > self.num_axes:
            # deal with the case where more than a single value is provided per
            # support point, e.g., when a tensorial field is integrated
            offset = data.ndim - self.num_axes
            if axes is None:
                # integrate over all axes of the grid
                axes = tuple(range(offset, data.ndim))
            else:
                # shift the indices to account for the data shape
                axes = tuple(offset + i for i in axes)

        # calculate integral using a weighted sum along the chosen axes
        return (data * cell_volumes).sum(axis=axes)  # type: ignore
    @cached_method()
    def make_normalize_point_compiled(
        self, reflect: bool = True
    ) -> Callable[[np.ndarray], None]:
        """return a compiled function that normalizes a point

        Here, the point is assumed to be specified by the physical values along
        the non-symmetric axes of the grid. Normalizing points is useful to make sure
        they lie within the domain of the grid. This function respects periodic
        boundary conditions and can also reflect points off the boundary.

        Args:
            reflect (bool):
                Flag determining whether coordinates along non-periodic axes are
                reflected to lie in the valid range. If `False`, such coordinates are
                left unchanged and only periodic boundary conditions are enforced.

        Returns:
            callable: A function that takes a :class:`~numpy.ndarray` as an argument,
            which describes the coordinates of the points. This array is modified
            in-place!
        """
        # capture the grid geometry as plain arrays for the compiled closure
        num_axes = self.num_axes
        periodic = np.array(self.periodic)  # using a tuple instead led to a numba error
        bounds = np.array(self.axes_bounds)
        xmin = bounds[:, 0]
        xmax = bounds[:, 1]
        size = bounds[:, 1] - bounds[:, 0]

        @jit
        def normalize_point(point: np.ndarray) -> None:
            """helper function normalizing a single point"""
            assert point.ndim == 1  # only support single points
            for i in range(num_axes):
                if periodic[i]:
                    # wrap the coordinate back into the primary interval
                    point[i] = (point[i] - xmin[i]) % size[i] + xmin[i]
                elif reflect:
                    # fold the coordinate into [xmin, xmax] by reflection
                    arg = (point[i] - xmax[i]) % (2 * size[i]) - size[i]
                    point[i] = xmin[i] + abs(arg)
                # else: do nothing

        return normalize_point  # type: ignore
@cached_method()
def make_cell_volume_compiled(self, flat_index: bool = False) -> CellVolume:
"""return a compiled function returning the volume of a grid cell
Args:
flat_index (bool):
When True, cell_volumes are indexed by a single integer into the
flattened array.
Returns:
function: returning the volume of the chosen cell
"""
if all(np.isscalar(d) for d in self.cell_volume_data):
# all cells have the same volume
cell_volume = np.product(self.cell_volume_data)
@jit
def get_cell_volume(*args) -> float:
return cell_volume # type: ignore
else:
# some cells have a different volume
cell_volumes = self.cell_volumes
if flat_index:
@jit
def get_cell_volume(idx: int) -> float:
return cell_volumes.flat[idx] # type: ignore
else:
@jit
def get_cell_volume(*args) -> float:
return cell_volumes[args] # type: ignore
return get_cell_volume # type: ignore
    def _make_interpolation_axis_data(
        self,
        axis: int,
        *,
        full_data: bool = False,
        cell_coords: bool = False,
    ) -> Callable[[float], Tuple[int, int, float, float]]:
        """factory for obtaining interpolation information

        Args:
            axis (int):
                The axis along which interpolation is performed
            full_data (bool):
                Flag indicating that the interpolator should work on the full data array
                that includes values for the grid points. If this is the case, the
                boundaries are not checked and the coordinates are used as is.
            cell_coords (bool):
                Flag indicating whether points are given in cell coordinates or actual
                point coordinates.

        Returns:
            A function that is called with a coordinate value for the axis. The function
            returns the indices of the neighboring support points as well as the
            associated weights
        """
        # obtain information on how this axis is discretized
        size = self.shape[axis]
        periodic = self.periodic[axis]
        lo = self.axes_bounds[axis][0]
        dx = self.discretization[axis]

        @register_jitable
        def get_axis_data(coord: float) -> Tuple[int, int, float, float]:
            """determines data for interpolating along one axis"""
            # split the coordinate into the integer index of the left support
            # point (c_l) and the fractional offset toward the right (d_l)
            if cell_coords:
                c_l, d_l = divmod(coord, 1.0)
            else:
                c_l, d_l = divmod((coord - lo) / dx - 0.5, 1.0)

            if full_data:
                # shift by one to account for the ghost cell at the lower end
                c_li = int(c_l) + 1  # left support point
                c_hi = c_li + 1  # right support point
            elif periodic:  # periodic domain
                c_li = int(c_l) % size  # left support point
                c_hi = (c_li + 1) % size  # right support point
            elif 0 <= c_l + d_l < size - 1:  # in bulk part of domain
                c_li = int(c_l)  # left support point
                c_hi = c_li + 1  # right support point
            elif size - 1 <= c_l + d_l <= size - 0.5:  # close to upper boundary
                c_li = c_hi = int(c_l)  # both support points close to boundary
                # This branch also covers the special case, where size == 1 and data is
                # evaluated at the only support point (c_l == d_l == 0.)
            elif -0.5 <= c_l + d_l <= 0:  # close to lower boundary
                c_li = c_hi = int(c_l) + 1  # both support points close to boundary
            else:
                return -42, -42, 0.0, 0.0  # indicates out of bounds

            # determine the weights
            w_l, w_h = 1 - d_l, d_l
            # set small weights to zero. If this is not done, invalid data at the corner
            # of the grid (where two rows of ghost cells intersect) could be accessed.
            # If this random data is very large, e.g., 1e100, it contributes
            # significantly, even if the weight is low, e.g., 1e-16.
            if w_l < 1e-15:
                w_l = 0
            if w_h < 1e-15:
                w_h = 0

            return c_li, c_hi, w_l, w_h

        return get_axis_data  # type: ignore
    def _make_interpolator_compiled(
        self, *, fill: Number = None, full_data: bool = False, cell_coords: bool = False
    ) -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
        """return a compiled function for linear interpolation on the grid

        Args:
            fill (Number, optional):
                Determines how values out of bounds are handled. If `None`, a
                `ValueError` is raised when out-of-bounds points are requested.
                Otherwise, the given value is returned.
            full_data (bool):
                Flag indicating that the interpolator should work on the full data array
                that includes values for the grid points. If this is the case, the
                boundaries are not checked and the coordinates are used as is.
            cell_coords (bool):
                Flag indicating whether points are given in cell coordinates or actual
                point coordinates.

        Returns:
            A function which returns interpolated values when called with
            arbitrary positions within the space of the grid. The signature of
            this function is (data, point), where `data` is the numpy array
            containing the field data and position is denotes the position in
            grid coordinates.
        """
        if full_data and fill is not None:
            self._logger.warning("Interpolation of full data does not use `fill`.")

        args = {"full_data": full_data, "cell_coords": cell_coords}
        # the interpolator is specialized per dimensionality, since the number
        # of index/weight lookups differs for each case
        if self.num_axes == 1:
            # specialize for 1-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, **args)

            @jit
            def interpolate_single(
                data: np.ndarray, point: np.ndarray
            ) -> NumberOrArray:
                """obtain interpolated value of data at a point

                Args:
                    data (:class:`~numpy.ndarray`):
                        A 1d array of valid values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate
                        system

                Returns:
                    :class:`~numpy.ndarray`: The interpolated value at the point
                """
                c_li, c_hi, w_l, w_h = data_x(point[0])

                if c_li == -42:  # out of bounds
                    if fill is None:  # outside the domain
                        raise DomainError("Point lies outside the grid domain")
                    else:
                        return fill

                # do the linear interpolation
                return w_l * data[..., c_li] + w_h * data[..., c_hi]  # type: ignore

        elif self.num_axes == 2:
            # specialize for 2-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, **args)
            data_y = self._make_interpolation_axis_data(1, **args)

            @jit
            def interpolate_single(
                data: np.ndarray, point: np.ndarray
            ) -> NumberOrArray:
                """obtain interpolated value of data at a point

                Args:
                    data (:class:`~numpy.ndarray`):
                        A 2d array of valid values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate
                        system

                Returns:
                    :class:`~numpy.ndarray`: The interpolated value at the point
                """
                # determine surrounding points and their weights
                c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
                c_yli, c_yhi, w_yl, w_yh = data_y(point[1])

                if c_xli == -42 or c_yli == -42:  # out of bounds
                    if fill is None:  # outside the domain
                        raise DomainError("Point lies outside the grid domain")
                    else:
                        return fill

                # do the linear interpolation (bilinear: 4 corner terms)
                return (  # type: ignore
                    w_xl * w_yl * data[..., c_xli, c_yli]
                    + w_xl * w_yh * data[..., c_xli, c_yhi]
                    + w_xh * w_yl * data[..., c_xhi, c_yli]
                    + w_xh * w_yh * data[..., c_xhi, c_yhi]
                )

        elif self.num_axes == 3:
            # specialize for 3-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, **args)
            data_y = self._make_interpolation_axis_data(1, **args)
            data_z = self._make_interpolation_axis_data(2, **args)

            @jit
            def interpolate_single(
                data: np.ndarray, point: np.ndarray
            ) -> NumberOrArray:
                """obtain interpolated value of data at a point

                Args:
                    data (:class:`~numpy.ndarray`):
                        A 2d array of valid values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate
                        system

                Returns:
                    :class:`~numpy.ndarray`: The interpolated value at the point
                """
                # determine surrounding points and their weights
                c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
                c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
                c_zli, c_zhi, w_zl, w_zh = data_z(point[2])

                if c_xli == -42 or c_yli == -42 or c_zli == -42:  # out of bounds
                    if fill is None:  # outside the domain
                        raise DomainError("Point lies outside the grid domain")
                    else:
                        return fill

                # do the linear interpolation (trilinear: 8 corner terms)
                return (  # type: ignore
                    w_xl * w_yl * w_zl * data[..., c_xli, c_yli, c_zli]
                    + w_xl * w_yl * w_zh * data[..., c_xli, c_yli, c_zhi]
                    + w_xl * w_yh * w_zl * data[..., c_xli, c_yhi, c_zli]
                    + w_xl * w_yh * w_zh * data[..., c_xli, c_yhi, c_zhi]
                    + w_xh * w_yl * w_zl * data[..., c_xhi, c_yli, c_zli]
                    + w_xh * w_yl * w_zh * data[..., c_xhi, c_yli, c_zhi]
                    + w_xh * w_yh * w_zl * data[..., c_xhi, c_yhi, c_zli]
                    + w_xh * w_yh * w_zh * data[..., c_xhi, c_yhi, c_zhi]
                )

        else:
            raise NotImplementedError(
                f"Compiled interpolation not implemented for dimension {self.num_axes}"
            )

        return interpolate_single  # type: ignore
    def make_inserter_compiled(
        self, *, full_data: bool = False
    ) -> Callable[[np.ndarray, np.ndarray, NumberOrArray], None]:
        """return a compiled function to insert values at interpolated positions

        Args:
            full_data (bool):
                Flag indicating that the interpolator should work on the full data array
                that includes values for the grid points. If this is the case, the
                boundaries are not checked and the coordinates are used as is.

        Returns:
            A function with signature (data, position, amount), where `data` is
            the numpy array containing the field data, position is denotes the
            position in grid coordinates, and `amount` is the that is to be
            added to the field.
        """
        # the amount is divided by the cell volume so the inserted quantity is
        # interpreted as an integrated (extensive) value
        cell_volume = self.make_cell_volume_compiled()

        if self.num_axes == 1:
            # specialize for 1-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, full_data=full_data)

            @jit
            def insert(
                data: np.ndarray, point: np.ndarray, amount: NumberOrArray
            ) -> None:
                """add an amount to a field at an interpolated position

                Args:
                    data (:class:`~numpy.ndarray`):
                        The values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate system
                    amount (Number or :class:`~numpy.ndarray`):
                        The amount that will be added to the data. This value describes
                        an integrated quantity (given by the field value times the
                        discretization volume). This is important for consistency with
                        different discretizations and in particular grids with
                        non-uniform discretizations
                """
                c_li, c_hi, w_l, w_h = data_x(point[0])

                if c_li == -42:  # out of bounds
                    raise DomainError("Point lies outside the grid domain")

                # distribute the amount to the two neighboring support points
                data[..., c_li] += w_l * amount / cell_volume(c_li)
                data[..., c_hi] += w_h * amount / cell_volume(c_hi)

        elif self.num_axes == 2:
            # specialize for 2-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, full_data=full_data)
            data_y = self._make_interpolation_axis_data(1, full_data=full_data)

            @jit
            def insert(
                data: np.ndarray, point: np.ndarray, amount: NumberOrArray
            ) -> None:
                """add an amount to a field at an interpolated position

                Args:
                    data (:class:`~numpy.ndarray`):
                        The values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate system
                    amount (Number or :class:`~numpy.ndarray`):
                        The amount that will be added to the data. This value describes
                        an integrated quantity (given by the field value times the
                        discretization volume). This is important for consistency with
                        different discretizations and in particular grids with
                        non-uniform discretizations
                """
                # determine surrounding points and their weights
                c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
                c_yli, c_yhi, w_yl, w_yh = data_y(point[1])

                if c_xli == -42 or c_yli == -42:  # out of bounds
                    raise DomainError("Point lies outside the grid domain")

                # distribute the amount bilinearly to the four neighboring cells
                cell_vol = cell_volume(c_xli, c_yli)
                data[..., c_xli, c_yli] += w_xl * w_yl * amount / cell_vol
                cell_vol = cell_volume(c_xli, c_yhi)
                data[..., c_xli, c_yhi] += w_xl * w_yh * amount / cell_vol

                cell_vol = cell_volume(c_xhi, c_yli)
                data[..., c_xhi, c_yli] += w_xh * w_yl * amount / cell_vol
                cell_vol = cell_volume(c_xhi, c_yhi)
                data[..., c_xhi, c_yhi] += w_xh * w_yh * amount / cell_vol

        elif self.num_axes == 3:
            # specialize for 3-dimensional interpolation
            data_x = self._make_interpolation_axis_data(0, full_data=full_data)
            data_y = self._make_interpolation_axis_data(1, full_data=full_data)
            data_z = self._make_interpolation_axis_data(2, full_data=full_data)

            @jit
            def insert(
                data: np.ndarray, point: np.ndarray, amount: NumberOrArray
            ) -> None:
                """add an amount to a field at an interpolated position

                Args:
                    data (:class:`~numpy.ndarray`):
                        The values at the grid points
                    point (:class:`~numpy.ndarray`):
                        Coordinates of a single point in the grid coordinate system
                    amount (Number or :class:`~numpy.ndarray`):
                        The amount that will be added to the data. This value describes
                        an integrated quantity (given by the field value times the
                        discretization volume). This is important for consistency with
                        different discretizations and in particular grids with
                        non-uniform discretizations
                """
                # determine surrounding points and their weights
                c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
                c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
                c_zli, c_zhi, w_zl, w_zh = data_z(point[2])

                if c_xli == -42 or c_yli == -42 or c_zli == -42:  # out of bounds
                    raise DomainError("Point lies outside the grid domain")

                # distribute the amount trilinearly to the eight neighboring cells
                cell_vol = cell_volume(c_xli, c_yli, c_zli)
                data[..., c_xli, c_yli, c_zli] += w_xl * w_yl * w_zl * amount / cell_vol
                cell_vol = cell_volume(c_xli, c_yli, c_zhi)
                data[..., c_xli, c_yli, c_zhi] += w_xl * w_yl * w_zh * amount / cell_vol
                cell_vol = cell_volume(c_xli, c_yhi, c_zli)
                data[..., c_xli, c_yhi, c_zli] += w_xl * w_yh * w_zl * amount / cell_vol
                cell_vol = cell_volume(c_xli, c_yhi, c_zhi)
                data[..., c_xli, c_yhi, c_zhi] += w_xl * w_yh * w_zh * amount / cell_vol

                cell_vol = cell_volume(c_xhi, c_yli, c_zli)
                data[..., c_xhi, c_yli, c_zli] += w_xh * w_yl * w_zl * amount / cell_vol
                cell_vol = cell_volume(c_xhi, c_yli, c_zhi)
                data[..., c_xhi, c_yli, c_zhi] += w_xh * w_yl * w_zh * amount / cell_vol
                cell_vol = cell_volume(c_xhi, c_yhi, c_zli)
                data[..., c_xhi, c_yhi, c_zli] += w_xh * w_yh * w_zl * amount / cell_vol
                cell_vol = cell_volume(c_xhi, c_yhi, c_zhi)
                data[..., c_xhi, c_yhi, c_zhi] += w_xh * w_yh * w_zh * amount / cell_vol

        else:
            raise NotImplementedError(
                f"Compiled interpolation not implemented for dimension {self.num_axes}"
            )

        return insert  # type: ignore
def make_integrator(self) -> Callable[[np.ndarray], np.ndarray]:
    """Return a function that integrates discretized data over the grid.

    Note that currently only scalar fields are supported.

    Returns:
        callable: A function that takes a numpy array and returns the integral
        with the correct weights given by the cell volumes.
    """
    num_axes = self.num_axes

    if self.uniform_cell_volumes:
        # all cells have the same volume, so the integral is a plain sum
        # scaled by that volume.
        # NOTE: np.prod replaces np.product, a deprecated alias removed
        # in NumPy 2.0.
        cell_volume = np.prod(self.cell_volume_data)

        @jit
        def integrate(arr: np.ndarray) -> Number:
            """function that integrates data over a uniform grid"""
            assert arr.ndim == num_axes
            return cell_volume * arr.sum()  # type: ignore

    else:
        # cell volume varies with position, so every value needs its own
        # weight obtained from the compiled cell-volume lookup
        get_cell_volume = self.make_cell_volume_compiled(flat_index=True)

        @jit
        def integrate(arr: np.ndarray) -> Number:
            """function that integrates scalar data over a non-uniform grid"""
            assert arr.ndim == num_axes
            total = 0
            for i in nb.prange(arr.size):
                total += get_cell_volume(i) * arr.flat[i]
            return total

    return integrate  # type: ignore
def registered_operators() -> Dict[str, List[str]]:
    """returns all operators that are currently defined

    Returns:
        dict: a dictionary with the names of the operators defined for each
        grid class
    """
    result: Dict[str, List[str]] = {}
    for grid_name, grid_cls in GridBase._subclasses.items():
        # abstract base classes are not reported
        if grid_name.endswith("Base"):
            continue
        # deprecated grid classes are not reported either
        if hasattr(grid_cls, "deprecated") and grid_cls.deprecated:  # type: ignore
            continue
        result[grid_name] = sorted(grid_cls.operators)
    return result
| 39.125833
| 103
| 0.57572
|
4a027c4b638b0b50d68c4b230e7aff2e70182ee6
| 5,908
|
py
|
Python
|
typed_python/ast_util.py
|
mjwoolf/nativepython
|
3f469f6d3c8c0f03cb9f51eb2a851d68310c7f90
|
[
"Apache-2.0"
] | 7
|
2018-08-07T15:41:54.000Z
|
2019-02-19T12:47:57.000Z
|
typed_python/ast_util.py
|
mjwoolf/nativepython
|
3f469f6d3c8c0f03cb9f51eb2a851d68310c7f90
|
[
"Apache-2.0"
] | 38
|
2018-10-17T13:37:46.000Z
|
2019-04-11T20:50:14.000Z
|
typed_python/ast_util.py
|
mjwoolf/nativepython
|
3f469f6d3c8c0f03cb9f51eb2a851d68310c7f90
|
[
"Apache-2.0"
] | 4
|
2019-02-11T17:44:55.000Z
|
2019-03-20T07:38:18.000Z
|
# Copyright 2017 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typed_python.inspect_override as inspect
import ast
import os
import textwrap
# Attribute name used to detect AST nodes that carry a source position.
LINENO_ATTRIBUTE_NAME = 'lineno'
class CantGetSourceTextError(Exception):
    """Raised when the source text for a Python object cannot be retrieved."""
    pass
class VisitDone(Exception):
    """Raise this exception to short-circuit the visitor once we're done
    searching.

    This is a control-flow signal, not an error condition.
    """
    pass
def areAstsIdentical(ast1, ast2):
    """Return True if the two AST nodes serialize to the same structure."""
    dump1 = ast.dump(ast1)
    dump2 = ast.dump(ast2)
    return dump1 == dump2
# Registry of every memo cache created by CachedByArgs, so that all of
# them can be emptied together.
_all_caches = []


def clearAllCaches():
    """Empty every registered memo cache plus the path-existence cache."""
    # pathExistsOnDiskCache_ lives in the inspect override module;
    # presumably it caches filesystem existence checks — confirm there.
    inspect.pathExistsOnDiskCache_.clear()
    for a in _all_caches:
        a.clear()
def CachedByArgs(f):
    """Function decorator that adds a simple memo to 'f' on its arguments.

    The cache key is the positional-argument tuple extended with the
    keyword items in sorted key order, so all arguments must be hashable.
    The cache is registered in ``_all_caches`` so that ``clearAllCaches``
    can empty it.
    """
    # local import keeps the block self-contained
    from functools import wraps

    cache = {}
    _all_caches.append(cache)

    @wraps(f)  # preserve f's name/docstring for introspection
    def inner(*args, **kwargs):
        # normalize kwargs into a deterministic, hashable key component
        key = args + tuple(sorted(kwargs.items()))
        if key not in cache:
            cache[key] = f(*args, **kwargs)
        return cache[key]

    return inner
def getSourceText(pyObject):
    """Return dedented source for *pyObject*, padded so line numbers match.

    Raises:
        CantGetSourceTextError: if the source cannot be located.
    """
    source, lineno = getSourceLines(pyObject)

    # Create a prefix of (lineno - 1) blank lines to keep track of line numbers
    # for error reporting
    blankLines = os.linesep * (lineno - 1)

    # We don't know how to avoid the use of `textwrap.dedent to get the code
    # through `ast.parse, which means that the computed column_numbers may be
    # off and we shouldn't report them.
    return textwrap.dedent(blankLines + "".join(source))
# Cache mapping a source-file path to its full joined text.
sourceFileCache_ = {}


def getSourceFilenameAndText(pyObject):
    """Return ``(file_text, filename)`` for the file defining *pyObject*.

    Raises:
        CantGetSourceTextError: if the file or its lines cannot be found.
    """
    try:
        sourceFile = inspect.getsourcefile(pyObject)
    except TypeError as e:
        # BUGFIX: exceptions have no `.message` attribute on Python 3;
        # use str(e), matching getSourceLines below.
        raise CantGetSourceTextError(str(e))

    if sourceFile is None:
        # BUGFIX: the old message interpolated `sourceFile`, which is
        # always None on this path, producing "file None".
        raise CantGetSourceTextError(
            "can't get a source file for object %s" % pyObject
        )

    linesOrNone = inspect.getlines(sourceFile)
    if linesOrNone is None:
        raise CantGetSourceTextError(
            "can't get source lines for file %s" % sourceFile
        )

    if sourceFile not in sourceFileCache_:
        sourceFileCache_[sourceFile] = "".join(linesOrNone)

    return sourceFileCache_[sourceFile], sourceFile
def getSourceLines(pyObject):
    """Return ``(lines, starting_lineno)`` for *pyObject*.

    Wraps inspect.getsourcelines, converting lookup failures into
    CantGetSourceTextError.
    """
    try:
        tr = inspect.getsourcelines(pyObject)
    except (TypeError, IOError) as e:
        raise CantGetSourceTextError(str(e))
    return tr
@CachedByArgs
def pyAstFromText(text):
    """Parse *text* into a Python AST (memoized on the text)."""
    return ast.parse(text)
def pyAstFor(pyObject):
    """Return the (cached) AST of the source that defines *pyObject*."""
    return pyAstFromText(getSourceText(pyObject))
@CachedByArgs
def getAstFromFilePath(filename):
    """Parse and cache the AST for *filename*; None if it can't be read."""
    lines = inspect.getlines(filename)
    if lines is None:
        return None
    return pyAstFromText("".join(lines))
class FindEnclosingFunctionVisitor(ast.NodeVisitor):
    """Visitor used to find the enclosing function at a given line of code.

    The class method 'find' is the preferred API entry point.
    """
    # NOTE: the original docstring opened with four quotes (`""""`), which
    # left a stray quote at the start of the text; fixed here.

    def __init__(self, line):
        self.targetLine = line
        self.enclosingFunction = None
        self._currentFunction = None
        self._stash = []

    def generic_visit(self, node):
        # The first node at or past the target line fixes the answer to
        # whatever function we are currently inside.
        if hasattr(node, LINENO_ATTRIBUTE_NAME):
            if node.lineno >= self.targetLine:
                self.enclosingFunction = self._currentFunction
                raise VisitDone
        super(FindEnclosingFunctionVisitor, self).generic_visit(node)

    def visit_FunctionDef(self, node):
        if node.lineno > self.targetLine:
            raise VisitDone
        # Track the innermost function name while descending into the body.
        self._stash.append(self._currentFunction)
        self._currentFunction = node.name
        self.generic_visit(node)
        self._currentFunction = self._stash.pop()

    # 'async def' bodies should resolve exactly like regular functions
    visit_AsyncFunctionDef = visit_FunctionDef

    def find(self, node):
        """Return the name of the function enclosing targetLine, or None."""
        if hasattr(node, LINENO_ATTRIBUTE_NAME):
            if node.lineno > self.targetLine:
                return None
        try:
            self.visit(node)
        except VisitDone:
            return self.enclosingFunction
        return None
def findEnclosingFunctionName(astNode, lineno):
    """Return the name of the function containing *lineno*, or None."""
    vis = FindEnclosingFunctionVisitor(lineno)
    return vis.find(astNode)
class _AtLineNumberVisitor(ast.NodeVisitor):
"""Collects various types of nodes occurring at a given line number."""
def __init__(self, lineNumber):
self.funcDefSubnodesAtLineNumber = []
self.lambdaSubnodesAtLineNumber = []
self.lineNumber = lineNumber
def visit_FunctionDef(self, node):
if node.lineno == self.lineNumber:
self.funcDefSubnodesAtLineNumber.append(node)
ast.NodeVisitor.generic_visit(self, node)
def visit_Lambda(self, node):
if node.lineno == self.lineNumber:
self.lambdaSubnodesAtLineNumber.append(node)
ast.NodeVisitor.generic_visit(self, node)
@CachedByArgs
def functionDefOrLambdaAtLineNumber(sourceAst, lineNumber):
    """Return the unique FunctionDef or Lambda node at *lineNumber*.

    Raises:
        CantGetSourceTextError: if no definition, or more than one,
            starts on that line.
    """
    visitor = _AtLineNumberVisitor(lineNumber)
    visitor.visit(sourceAst)

    subnodesAtLineNumber = (
        visitor.funcDefSubnodesAtLineNumber +
        visitor.lambdaSubnodesAtLineNumber
    )

    if len(subnodesAtLineNumber) == 0:
        raise CantGetSourceTextError(
            "can't find a function definition at line %s." % lineNumber
        )

    if len(subnodesAtLineNumber) > 1:
        # BUGFIX: '%' previously bound only to the second string literal,
        # which has no placeholder, so raising here raised a TypeError
        # instead of the intended error message.
        raise CantGetSourceTextError(
            ("can't find a unique function definition at line %s. Do you "
             "have two lambdas on the same line?") % lineNumber
        )

    return subnodesAtLineNumber[0]
| 28.403846
| 79
| 0.680095
|
4a027ce357c3a746bfccc84aa5a830523d0b0ff4
| 636
|
py
|
Python
|
orchespy/devicetype/base.py
|
SX-Aurora/orchespy
|
6b85a78831c8e3e05df7143101ca3418817fcbbd
|
[
"BSD-3-Clause"
] | null | null | null |
orchespy/devicetype/base.py
|
SX-Aurora/orchespy
|
6b85a78831c8e3e05df7143101ca3418817fcbbd
|
[
"BSD-3-Clause"
] | null | null | null |
orchespy/devicetype/base.py
|
SX-Aurora/orchespy
|
6b85a78831c8e3e05df7143101ca3418817fcbbd
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import ABC, abstractmethod
class Base(ABC):
    """Abstract interface for a device type.

    Subclasses describe how ndarrays move between the host and the
    device and expose the array module used on that device.
    """

    @abstractmethod
    def get_ndarray_on_host(self, ary):
        """Return *ary* as an ndarray in host memory."""

    @abstractmethod
    def get_ndarray_on_device(self, ary):
        """Return *ary* as an ndarray on this device."""

    @property
    @abstractmethod
    def numpy_class(self):
        """The numpy-like array module for this device (see __enter__)."""

    @classmethod
    @abstractmethod
    def find_device(cls, ary):
        # NOTE: first parameter renamed from 'self' to 'cls' — this is a
        # classmethod, so it receives the class, not an instance.
        """Return the device type that owns *ary*."""

    def __enter__(self):
        # entering the context exposes the device's array module
        return self.numpy_class

    def __exit__(self, exc_type, exc_value, traceback):
        # nothing to release; exceptions are not suppressed
        pass

    def func_to_transfer_ndarray_from(self, srcdev):
        # None means "no specialized transfer from srcdev is available"
        return None

    def func_to_transfer_ndarray_to(self, dstdev):
        # None means "no specialized transfer to dstdev is available"
        return None
| 18.705882
| 55
| 0.654088
|
4a027d684854301c86a867f863c5f24b52a86038
| 236
|
py
|
Python
|
clarifai/rest/__init__.py
|
jdehotin/clarifaipy
|
aaf1eb272bd21601e530bd07378d1190815bb0c1
|
[
"Apache-2.0"
] | null | null | null |
clarifai/rest/__init__.py
|
jdehotin/clarifaipy
|
aaf1eb272bd21601e530bd07378d1190815bb0c1
|
[
"Apache-2.0"
] | null | null | null |
clarifai/rest/__init__.py
|
jdehotin/clarifaipy
|
aaf1eb272bd21601e530bd07378d1190815bb0c1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .client import ApiClient, ApiError, UserError, TokenError
from .client import ClarifaiApp
from .client import Model, Image, Concept
from .client import InputSearchTerm, OutputSearchTerm, SearchQueryBuilder
| 33.714286
| 73
| 0.792373
|
4a027e1ea60b707e7b1e604edc1b6846afc844db
| 172,355
|
py
|
Python
|
nova/tests/unit/virt/xenapi/test_xenapi.py
|
jeckxie/gxzw-nova
|
edbc620439cf3dfc959c6eb8355ab35adc8268d7
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/xenapi/test_xenapi.py
|
jeckxie/gxzw-nova
|
edbc620439cf3dfc959c6eb8355ab35adc8268d7
|
[
"Apache-2.0"
] | 11
|
2017-06-19T01:28:55.000Z
|
2017-06-23T02:01:47.000Z
|
nova/tests/unit/virt/xenapi/test_xenapi.py
|
jeckxie/gxzw-nova
|
edbc620439cf3dfc959c6eb8355ab35adc8268d7
|
[
"Apache-2.0"
] | 1
|
2020-07-22T22:06:24.000Z
|
2020-07-22T22:06:24.000Z
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import mock
from mox3 import mox
from os_xenapi.client import session as xenapi_session
from os_xenapi.client import XenAPI
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import uuidutils
import six
import testtools
from nova.compute import api as compute_api
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_processutils
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_aggregate
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.xenapi import stubs
from nova.tests import uuidsentinel as uuids
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF

# Sentinel image ids used by the fake image service; each maps to an
# entry in IMAGE_FIXTURES below.
IMAGE_MACHINE = uuids.image_ref
IMAGE_KERNEL = uuids.image_kernel_id
IMAGE_RAMDISK = uuids.image_ramdisk_id
IMAGE_RAW = uuids.image_raw
IMAGE_VHD = uuids.image_vhd
IMAGE_ISO = uuids.image_iso
IMAGE_IPXE_ISO = uuids.image_ipxe_iso
IMAGE_FROM_VOLUME = uuids.image_from_volume

# Image metadata registered with the fake image service by
# set_image_fixtures(); one entry per disk/container format combination
# the driver is exercised with.
IMAGE_FIXTURES = {
    IMAGE_MACHINE: {
        'image_meta': {'name': 'fakemachine', 'size': 0,
                       'disk_format': 'ami',
                       'container_format': 'ami',
                       'id': 'fake-image'},
    },
    IMAGE_KERNEL: {
        'image_meta': {'name': 'fakekernel', 'size': 0,
                       'disk_format': 'aki',
                       'container_format': 'aki',
                       'id': 'fake-kernel'},
    },
    IMAGE_RAMDISK: {
        'image_meta': {'name': 'fakeramdisk', 'size': 0,
                       'disk_format': 'ari',
                       'container_format': 'ari',
                       'id': 'fake-ramdisk'},
    },
    IMAGE_RAW: {
        'image_meta': {'name': 'fakeraw', 'size': 0,
                       'disk_format': 'raw',
                       'container_format': 'bare',
                       'id': 'fake-image-raw'},
    },
    IMAGE_VHD: {
        'image_meta': {'name': 'fakevhd', 'size': 0,
                       'disk_format': 'vhd',
                       'container_format': 'ovf',
                       'id': 'fake-image-vhd'},
    },
    IMAGE_ISO: {
        'image_meta': {'name': 'fakeiso', 'size': 0,
                       'disk_format': 'iso',
                       'container_format': 'bare',
                       'id': 'fake-image-iso'},
    },
    IMAGE_IPXE_ISO: {
        'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
                       'disk_format': 'iso',
                       'container_format': 'bare',
                       'id': 'fake-image-pxe',
                       'properties': {'ipxe_boot': 'true'}},
    },
    IMAGE_FROM_VOLUME: {
        'image_meta': {'name': 'fake_ipxe_iso',
                       'id': 'fake-image-volume',
                       'properties': {'foo': 'bar'}},
    },
}
def get_session():
    """Create a XenAPI session against the fake test endpoint."""
    url, user, password = 'test_url', 'root', 'test_pass'
    return xenapi_session.XenAPISession(url, user, password)
def set_image_fixtures():
    """Register every IMAGE_FIXTURES entry with the fake image service."""
    image_service = fake_image.FakeImageService()
    image_service.images.clear()
    for image_id, fixture in IMAGE_FIXTURES.items():
        meta = fixture['image_meta']
        # the fixture key (a uuid sentinel) wins over the embedded id;
        # note this mutates the module-level fixture dict, as before
        meta['id'] = image_id
        image_service.create(None, meta)
def get_fake_device_info():
    """Build fake block_device_info with one attached iSCSI volume.

    Returns:
        dict: block device info containing a single iSCSI mapping on
        'vda', no ephemerals and no swap.
    """
    # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
    # can be removed from the dict when LP bug #1087308 is fixed
    fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
    fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
    # NOTE: local renamed from 'fake' to avoid shadowing the imported
    # nova.virt.fake module used elsewhere in this file.
    device_info = {'block_device_mapping':
            [{'connection_info': {'driver_volume_type': 'iscsi',
                                  'data': {'sr_uuid': 'falseSR',
                                           'introduce_sr_keys': ['sr_type'],
                                           'sr_type': 'iscsi',
                                           'vdi_uuid': fake_vdi_uuid,
                                           'target_discovered': False,
                                           'target_iqn': 'foo_iqn:foo_volid',
                                           'target_portal': 'localhost:3260',
                                           'volume_id': 'foo_volid',
                                           'target_lun': 1,
                                           'auth_password': 'my-p@55w0rd',
                                           'auth_username': 'johndoe',
                                           'auth_method': u'CHAP'}, },
              'mount_device': 'vda',
              'delete_on_termination': False}, ],
            'root_device_name': '/dev/sda',
            'ephemerals': [],
            'swap': None, }
    return device_info
def stub_vm_utils_with_vdi_attached(function):
    """vm_utils.with_vdi_attached needs to be stubbed out because it
    calls down to the filesystem to attach a vdi. This provides a
    decorator to handle that.
    """
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):

        @contextlib.contextmanager
        def fake_vdi_attached(*args, **kwargs):
            # pretend the VDI is attached and hand back a fake device name
            fake_dev = 'fakedev'
            yield fake_dev

        def fake_image_download(*args, **kwargs):
            pass

        orig_vdi_attached = vm_utils.vdi_attached
        orig_image_download = fake_image._FakeImageService.download
        try:
            vm_utils.vdi_attached = fake_vdi_attached
            fake_image._FakeImageService.download = fake_image_download
            return function(self, *args, **kwargs)
        finally:
            # always restore the real implementations, even on failure
            fake_image._FakeImageService.download = orig_image_download
            vm_utils.vdi_attached = orig_vdi_attached

    return decorated_function
def create_instance_with_system_metadata(context, instance_values):
    """Build, persist and return an Instance object from instance_values."""
    instance = objects.Instance(context=context,
                                system_metadata={})
    for key, value in instance_values.items():
        setattr(instance, key, value)
    flavor_id = instance_values['instance_type_id']
    instance.flavor = objects.Flavor.get_by_id(context, flavor_id)
    instance.old_flavor = None
    instance.new_flavor = None
    instance.create()
    instance.pci_devices = objects.PciDeviceList(objects=[])
    return instance
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for Volume operations."""

    def setUp(self):
        # configure a fake XenServer connection and a fake db instance
        super(XenAPIVolumeTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.instance = fake_instance.fake_db_instance(name='foo')

    @classmethod
    def _make_connection_info(cls):
        """Return a canned iSCSI connection_info dict."""
        target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
        return {'driver_volume_type': 'iscsi',
                'data': {'volume_id': 1,
                         'target_iqn': target_iqn,
                         'target_portal': '127.0.0.1:3260,fake',
                         'target_lun': None,
                         'auth_method': 'CHAP',
                         'auth_username': 'username',
                         'auth_password': 'password'}}

    def test_attach_volume(self):
        # This shows how to test Ops classes' methods.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
        conn_info = self._make_connection_info()
        self.assertIsNone(
            conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))

        # check that the VM has a VBD attached to it
        # Get XenAPI record for VBD
        vbds = xenapi_fake.get_all('VBD')
        vbd = xenapi_fake.get_record('VBD', vbds[0])
        vm_ref = vbd['VM']
        self.assertEqual(vm_ref, vm)

    def test_attach_volume_raise_exception(self):
        # This shows how to test when exceptions are raised.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(self.instance['name'], 'Running')
        # an unknown driver_volume_type must be rejected
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume,
                          None, {'driver_volume_type': 'nonexist'},
                          self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
    """Wire up fake XenAPI session, networking, image service and stubs."""
    super(XenAPIVMTestCase, self).setUp()
    self.useFixture(test.SampleNetworks())
    self.network = importutils.import_object(CONF.network_manager)
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(disable_process_locking=True,
                        group='oslo_concurrency')
    self.flags(instance_name_template='%d',
               firewall_driver='nova.virt.xenapi.firewall.'
                               'Dom0IptablesFirewallDriver')
    self.flags(connection_url='test_url',
               connection_password='test_pass',
               group='xenserver')
    db_fakes.stub_out_db_instance_api(self)
    xenapi_fake.create_network('fake', 'fake_br1')
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    stubs.stubout_get_this_vm_uuid(self.stubs)
    stubs.stub_out_vm_methods(self.stubs)
    fake_processutils.stub_out_processutils_execute(self)
    self.user_id = 'fake'
    self.project_id = fakes.FAKE_PROJECT_ID
    self.context = context.RequestContext(self.user_id, self.project_id)
    self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self.conn._session.is_local_connection = False

    fake_image.stub_out_image_service(self)
    set_image_fixtures()
    stubs.stubout_image_service_download(self.stubs)
    stubs.stubout_stream_disk(self.stubs)

    # metadata injection would touch xenstore; skip it entirely
    def fake_inject_instance_metadata(self, instance, vm):
        pass
    self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                   fake_inject_instance_metadata)

    # replace the real VDI copy with a cheap create_vdi call
    def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
        name_label = "fakenamelabel"
        disk_type = "fakedisktype"
        virtual_size = 777
        return vm_utils.create_vdi(
            session, sr_ref, instance, name_label, disk_type,
            virtual_size)
    self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)

    # skip the real unpause/wait cycle but keep dom-id bookkeeping
    def fake_unpause_and_wait(self, vm_ref, instance, power_on):
        self._update_last_dom_id(vm_ref)
    self.stubs.Set(vmops.VMOps, '_unpause_and_wait',
                   fake_unpause_and_wait)
def tearDown(self):
    """Reset the fake image service so fixtures don't leak across tests."""
    fake_image.FakeImageService_reset()
    super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
    """init_host must detach only VDIs tagged with a nova instance uuid."""
    session = get_session()
    vm = vm_utils._get_this_vm_ref(session)
    # Local root disk
    vdi0 = xenapi_fake.create_vdi('compute', None)
    vbd0 = xenapi_fake.create_vbd(vm, vdi0)
    # Instance VDI
    vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
            other_config={'nova_instance_uuid': 'aaaa'})
    xenapi_fake.create_vbd(vm, vdi1)
    # Only looks like instance VDI
    vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
    vbd2 = xenapi_fake.create_vbd(vm, vdi2)
    self.conn.init_host(None)
    # only the tagged VBD should have been removed
    self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
@mock.patch.object(vm_utils, 'lookup', return_value=True)
def test_instance_exists(self, mock_lookup):
    """instance_exists is True when vm_utils.lookup finds the VM."""
    self.stubs.Set(objects.Instance, 'name', 'foo')
    instance = objects.Instance(uuid=uuids.instance)
    self.assertTrue(self.conn.instance_exists(instance))
    mock_lookup.assert_called_once_with(mock.ANY, 'foo')
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_instance_not_exists(self, mock_lookup):
    """instance_exists is False when vm_utils.lookup returns None."""
    self.stubs.Set(objects.Instance, 'name', 'bar')
    instance = objects.Instance(uuid=uuids.instance)
    self.assertFalse(self.conn.instance_exists(instance))
    mock_lookup.assert_called_once_with(mock.ANY, 'bar')
def test_list_instances_0(self):
    """With no instances created, list_instances is empty."""
    self.assertEqual([], self.conn.list_instances())
def test_list_instance_uuids_0(self):
    """With no instances created, list_instance_uuids is empty."""
    self.assertEqual([], self.conn.list_instance_uuids())
def test_list_instance_uuids(self):
    """list_instance_uuids returns the uuid of every created instance."""
    # NOTE: local renamed from 'uuids' — it shadowed the module-level
    # 'from nova.tests import uuidsentinel as uuids' import.
    expected_uuids = []
    for x in range(1, 4):
        instance = self._create_instance()
        expected_uuids.append(instance['uuid'])
    instance_uuids = self.conn.list_instance_uuids()
    self.assertEqual(len(expected_uuids), len(instance_uuids))
    self.assertEqual(set(expected_uuids), set(instance_uuids))
def test_get_rrd_server(self):
    """_get_rrd_server splits the connection URL into scheme and address."""
    self.flags(connection_url='myscheme://myaddress/',
               group='xenserver')
    server_info = vm_utils._get_rrd_server()
    self.assertEqual(server_info[0], 'myscheme')
    self.assertEqual(server_info[1], 'myaddress')
# Expected parse of the vm_rrd.xml fixture used by test_get_diagnostics.
expected_raw_diagnostics = {
    'vbd_xvdb_write': '0.0',
    'memory_target': '4294967296.0000',
    'memory_internal_free': '1415564.0000',
    'memory': '4294967296.0000',
    'vbd_xvda_write': '0.0',
    'cpu0': '0.0042',
    'vif_0_tx': '287.4134',
    'vbd_xvda_read': '0.0',
    'vif_0_rx': '1816.0144',
    'vif_2_rx': '0.0',
    'vif_2_tx': '0.0',
    'vbd_xvdb_read': '0.0',
    'last_update': '1328795567',
}
def test_get_diagnostics(self):
    """get_diagnostics parses RRD xml into the expected raw dict."""
    def fake_get_rrd(host, vm_uuid):
        # serve the vm_rrd.xml fixture with all whitespace stripped
        path = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(path, 'vm_rrd.xml')) as f:
            return re.sub(r'\s', '', f.read())
    self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)

    expected = self.expected_raw_diagnostics
    instance = self._create_instance()
    actual = self.conn.get_diagnostics(instance)
    self.assertThat(actual, matchers.DictMatches(expected))
def test_get_instance_diagnostics(self):
    """get_instance_diagnostics serializes into the standard diag schema."""
    def fake_get_rrd(host, vm_uuid):
        # serve the vm_rrd.xml fixture with all whitespace stripped
        path = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(path, 'vm_rrd.xml')) as f:
            return re.sub(r'\s', '', f.read())
    self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)

    expected = {
        'config_drive': False,
        'state': 'running',
        'driver': 'xenapi',
        'version': '1.0',
        'uptime': 0,
        'hypervisor_os': None,
        'cpu_details': [{'time': 0}, {'time': 0},
                        {'time': 0}, {'time': 0}],
        'nic_details': [{'mac_address': '00:00:00:00:00:00',
                         'rx_drop': 0,
                         'rx_errors': 0,
                         'rx_octets': 0,
                         'rx_packets': 0,
                         'tx_drop': 0,
                         'tx_errors': 0,
                         'tx_octets': 0,
                         'tx_packets': 0}],
        'disk_details': [{'errors_count': 0,
                          'id': '',
                          'read_bytes': 0,
                          'read_requests': 0,
                          'write_bytes': 0,
                          'write_requests': 0}],
        'memory_details': {'maximum': 8192, 'used': 0}}

    instance = self._create_instance(obj=True)
    actual = self.conn.get_instance_diagnostics(instance)
    self.assertEqual(expected, actual.serialize())
def test_get_vnc_console(self):
    """VNC console path references the VM ref of a running instance."""
    instance = self._create_instance(obj=True)
    session = get_session()
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    vm_ref = vm_utils.lookup(session, instance['name'])

    console = conn.get_vnc_console(self.context, instance)

    # Note(sulo): We don't care about session id in test
    # they will always differ so strip that out
    actual_path = console.internal_access_path.split('&')[0]
    expected_path = "/console?ref=%s" % str(vm_ref)
    self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
    """For a rescued instance the console points at the -rescue VM."""
    instance = self._create_instance(obj=True)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
                                      'Running')
    # Set instance state to rescued
    instance['vm_state'] = 'rescued'

    console = conn.get_vnc_console(self.context, instance)

    # Note(sulo): We don't care about session id in test
    # they will always differ so strip that out
    actual_path = console.internal_access_path.split('&')[0]
    expected_path = "/console?ref=%s" % str(rescue_vm)
    self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
    """A building (never-spawned) instance has no VM -> InstanceNotFound."""
    instance = self._create_instance(obj=True, spawn=False)
    instance.vm_state = 'building'

    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.InstanceNotFound,
                      conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
    """A rescued instance without a rescue VM raises InstanceNotReady."""
    instance = self._create_instance(obj=True, spawn=False)
    instance.vm_state = 'rescued'

    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.InstanceNotReady,
                      conn.get_vnc_console, self.context, instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
    """Snapshot must fail when no VBD is marked as the primary root disk."""
    def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
                       vbd_type='disk', read_only=False, bootable=False,
                       osvol=False):
        # 'userdevice' is deliberately bogus so no VBD looks like root
        vbd_rec = {'VM': vm_ref,
           'VDI': vdi_ref,
           'userdevice': 'fake',
           'currently_attached': False}
        vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
        xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
        return vbd_ref

    self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
    stubs.stubout_instance_snapshot(self.stubs)
    # Stubbing out firewall driver as previous stub sets alters
    # xml rpc result parsing
    stubs.stubout_firewall_driver(self.stubs, self.conn)
    instance = self._create_instance()

    image_id = "my_snapshot_id"
    self.assertRaises(exception.NovaException, self.conn.snapshot,
                      self.context, instance, image_id,
                      lambda *args, **kwargs: None)
def test_instance_snapshot(self):
    """Snapshot uploads the VDI chain and cleans up all temp objects."""
    # task_state must move PENDING_UPLOAD -> UPLOADING, in that order
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    image_id = "my_snapshot_id"

    stubs.stubout_instance_snapshot(self.stubs)
    stubs.stubout_is_snapshot(self.stubs)
    # Stubbing out firewall driver as previous stub sets alters
    # xml rpc result parsing
    stubs.stubout_firewall_driver(self.stubs, self.conn)

    instance = self._create_instance()

    self.fake_upload_called = False

    def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
        # capture the upload call and verify its arguments
        self.fake_upload_called = True
        self.assertEqual(ctx, self.context)
        self.assertEqual(inst, instance)
        self.assertIsInstance(vdi_uuids, list)
        self.assertEqual(img_id, image_id)

    self.stubs.Set(glance.GlanceStore, 'upload_image',
                   fake_image_upload)

    self.conn.snapshot(self.context, instance, image_id,
                       func_call_matcher.call)

    # Ensure VM was torn down
    vm_labels = []
    for vm_ref in xenapi_fake.get_all('VM'):
        vm_rec = xenapi_fake.get_record('VM', vm_ref)
        if not vm_rec["is_control_domain"]:
            vm_labels.append(vm_rec["name_label"])

    self.assertEqual(vm_labels, [instance['name']])

    # Ensure VBDs were torn down
    vbd_labels = []
    for vbd_ref in xenapi_fake.get_all('VBD'):
        vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
        vbd_labels.append(vbd_rec["vm_name_label"])

    self.assertEqual(vbd_labels, [instance['name']])

    # Ensure task states changed in correct order
    self.assertIsNone(func_call_matcher.match())

    # Ensure VDIs were torn down
    for vdi_ref in xenapi_fake.get_all('VDI'):
        vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
        name_label = vdi_rec["name_label"]
        self.assertFalse(name_label.endswith('snapshot'))

    self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
    """Fetch the Nova and XenAPI records for *name* into self.vm_info/self.vm."""
    instances = conn.list_instances()
    self.assertEqual(instances, [name])

    # Get Nova record for VM
    vm_info = conn.get_info({'name': name})
    # Get XenAPI record for VM
    vms = [rec for ref, rec
           in six.iteritems(xenapi_fake.get_all_records('VM'))
           if not rec['is_control_domain']]
    vm = vms[0]
    self.vm_info = vm_info
    self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
    """Assert memory/vcpu sizing, power state and (optionally) injected
    networking of the VM recorded by create_vm_record."""
    flavor = objects.Flavor.get_by_id(self.context, instance_type_id)
    mem_kib = int(flavor['memory_mb']) << 10
    mem_bytes = str(mem_kib << 10)
    vcpus = flavor['vcpus']
    vcpu_weight = flavor['vcpu_weight']

    self.assertEqual(self.vm_info.max_mem_kb, mem_kib)
    self.assertEqual(self.vm_info.mem_kb, mem_kib)
    self.assertEqual(self.vm['memory_static_max'], mem_bytes)
    self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
    self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
    self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
    self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
    if vcpu_weight is None:
        self.assertEqual(self.vm['VCPUs_params'], {})
    else:
        self.assertEqual(self.vm['VCPUs_params'],
                         {'weight': str(vcpu_weight), 'cap': '0'})

    # Check that the VM is running according to Nova
    self.assertEqual(self.vm_info.state, power_state.RUNNING)

    # Check that the VM is running according to XenAPI.
    self.assertEqual(self.vm['power_state'], 'Running')

    if check_injection:
        xenstore_data = self.vm['xenstore_data']
        self.assertNotIn('vm-data/hostname', xenstore_data)
        key = 'vm-data/networking/DEADBEEF0001'
        xenstore_value = xenstore_data[key]
        tcpip_data = ast.literal_eval(xenstore_value)
        self.assertJsonEqual({'broadcast': '192.168.1.255',
                              'dns': ['192.168.1.4', '192.168.1.3'],
                              'gateway': '192.168.1.1',
                              'gateway_v6': '2001:db8:0:1::1',
                              'ip6s': [{'enabled': '1',
                                        'ip': '2001:db8:0:1:dcad:beff:feef:1',
                                        'netmask': 64,
                                        'gateway': '2001:db8:0:1::1'}],
                              'ips': [{'enabled': '1',
                                       'ip': '192.168.1.100',
                                       'netmask': '255.255.255.0',
                                       'gateway': '192.168.1.1'},
                                      {'enabled': '1',
                                       'ip': '192.168.1.101',
                                       'netmask': '255.255.255.0',
                                       'gateway': '192.168.1.1'}],
                              'label': 'test1',
                              'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data)
def check_vm_params_for_windows(self):
    """Windows guests boot HVM (BIOS order), never via PV fields."""
    self.assertEqual(self.vm['platform']['nx'], 'true')
    self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
    self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')

    # check that these are not set
    self.assertEqual(self.vm['PV_args'], '')
    self.assertEqual(self.vm['PV_bootloader'], '')
    self.assertEqual(self.vm['PV_kernel'], '')
    self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
    """PV Linux guests boot through pygrub, never via HVM fields."""
    self.assertEqual(self.vm['platform']['nx'], 'false')
    self.assertEqual(self.vm['PV_args'], '')
    self.assertEqual(self.vm['PV_bootloader'], 'pygrub')

    # check that these are not set
    self.assertEqual(self.vm['PV_kernel'], '')
    self.assertEqual(self.vm['PV_ramdisk'], '')
    self.assertEqual(self.vm['HVM_boot_params'], {})
    self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
    """With an external kernel/ramdisk, PV kernel fields must be set."""
    self.assertEqual(self.vm['platform']['nx'], 'false')
    self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
    self.assertNotEqual(self.vm['PV_kernel'], '')
    self.assertNotEqual(self.vm['PV_ramdisk'], '')

    # check that these are not set
    self.assertEqual(self.vm['HVM_boot_params'], {})
    self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
    """Return all VDI refs known to the fake XenAPI session."""
    session = get_session()
    return session.call_xenapi('VDI.get_all')
def _list_vms(self):
    """Return all VM refs known to the fake XenAPI session."""
    session = get_session()
    return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
    """Fail if any VDI appeared since start_list, except cached base disks."""
    for vdi_ref in end_list:
        if vdi_ref not in start_list:
            vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
            # If the cache is turned on then the base disk will be
            # there even after the cleanup
            if 'other_config' in vdi_rec:
                if 'image-id' not in vdi_rec['other_config']:
                    self.fail('Found unexpected VDI:%s' % vdi_ref)
            else:
                self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                instance_type_id="3", os_type="linux",
                hostname="test", architecture="x86-64", instance_id=1,
                injected_files=None, check_injection=False,
                create_record=True, empty_dns=False,
                block_device_info=None,
                key_data=None):
    """Spawn an instance through the driver and verify its VM record.

    When create_record is True a fresh Instance is built from the given
    image/kernel/ramdisk and flavor; otherwise instance_id is loaded
    from the fake db.
    """
    if injected_files is None:
        injected_files = []

    # Fake out inject_instance_metadata
    def fake_inject_instance_metadata(self, instance, vm):
        pass
    self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                   fake_inject_instance_metadata)

    if create_record:
        flavor = objects.Flavor.get_by_id(self.context,
                                          instance_type_id)
        instance = objects.Instance(context=self.context)
        instance.project_id = self.project_id
        instance.user_id = self.user_id
        instance.image_ref = image_ref
        instance.kernel_id = kernel_id
        instance.ramdisk_id = ramdisk_id
        instance.root_gb = flavor.root_gb
        instance.ephemeral_gb = flavor.ephemeral_gb
        instance.instance_type_id = instance_type_id
        instance.os_type = os_type
        instance.hostname = hostname
        instance.key_data = key_data
        instance.architecture = architecture
        instance.system_metadata = {}
        instance.flavor = flavor
        instance.create()
    else:
        instance = objects.Instance.get_by_id(self.context, instance_id,
                                              expected_attrs=['flavor'])

    network_info = fake_network.fake_get_instance_nw_info(self)
    if empty_dns:
        # NOTE(tr3buchet): this is a terrible way to do this...
        network_info[0]['network']['subnets'][0]['dns'] = []

    image_meta = objects.ImageMeta.from_dict(
        IMAGE_FIXTURES[image_ref]["image_meta"])
    self.conn.spawn(self.context, instance, image_meta, injected_files,
                    'herp', network_info, block_device_info)
    self.create_vm_record(self.conn, os_type, instance['name'])
    self.check_vm_record(self.conn, instance_type_id, check_injection)
    self.assertEqual(instance['os_type'], os_type)
    self.assertEqual(instance['architecture'], architecture)
    def test_spawn_ipxe_iso_success(self):
        """With full iPXE config, spawn calls the ipxe.py inject plugin."""
        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
        vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url='http://boot.example.com',
                   ipxe_mkisofs_cmd='/root/mkisofs',
                   group='xenserver')
        # mox expectations: both plugin calls must happen, in this order.
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized(
            'ipxe.py', 'inject', '/sr/path', mox.IgnoreArg(),
            'http://boot.example.com', '192.168.1.100', '255.255.255.0',
            '192.168.1.1', '192.168.1.3', '/root/mkisofs')
        self.conn._session.call_plugin_serialized('partition_utils.py',
                                                  'make_partition',
                                                  'fakedev', '2048', '-')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_network_name(self):
        """Without ipxe_network_name the ipxe inject plugin must be skipped."""
        self.flags(ipxe_network_name=None,
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')
        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized('partition_utils.py',
                                                  'make_partition',
                                                  'fakedev', '2048', '-')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_boot_menu_url(self):
        """Without ipxe_boot_menu_url the ipxe inject plugin must be skipped."""
        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url=None,
                   group='xenserver')
        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized('partition_utils.py',
                                                  'make_partition',
                                                  'fakedev', '2048', '-')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_unknown_network_name(self):
        """A network name with no matching network skips the ipxe inject."""
        self.flags(ipxe_network_name='test2',
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')
        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized('partition_utils.py',
                                                  'make_partition',
                                                  'fakedev', '2048', '-')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_empty_dns(self):
        # Test spawning with an empty dns list.
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         empty_dns=True)
        self.check_vm_params_for_linux()
    def test_spawn_not_enough_memory(self):
        """Spawning a flavor too large for the host raises the memory error."""
        self.assertRaises(exception.InsufficientFreeMemory, self._test_spawn,
                          IMAGE_MACHINE, IMAGE_KERNEL,
                          IMAGE_RAMDISK, "4")  # m1.xlarge
    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.

        Verifies that the VM and VDIs created are properly cleaned up:
        no extra VDIs or VMs remain after the failed spawn.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
        self.assertRaises(XenAPI.Failure, self._test_spawn,
                          IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.

        Verifies that the VM and VDIs created are properly cleaned up:
        no extra VDIs or VMs remain after the failed spawn.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(XenAPI.Failure, self._test_spawn,
                          IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_3(self):
        """Simulates an error while attaching disks.

        Verifies that the VM and VDIs created are properly cleaned up:
        no extra VDIs or VMs remain after the failed spawn.
        """
        stubs.stubout_attach_disks(self.stubs)
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        self.assertRaises(XenAPI.Failure, self._test_spawn,
                          IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_raw_glance(self):
        # NOTE(review): with os_type=None the VM is expected to get the
        # HVM/Windows-style boot parameters — confirm against the driver.
        self._test_spawn(IMAGE_RAW, None, None, os_type=None)
        self.check_vm_params_for_windows()
    def test_spawn_vhd_glance_linux(self):
        """Spawn a Linux VHD Glance image; expect PV boot parameters."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.check_vm_params_for_linux()
    def test_spawn_vhd_glance_windows(self):
        """Spawn a Windows VHD Glance image; expect HVM boot parameters."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="windows", architecture="i386",
                         instance_type_id=5)
        self.check_vm_params_for_windows()
    def test_spawn_iso_glance(self):
        """Spawn a Windows ISO Glance image; expect HVM boot parameters."""
        self._test_spawn(IMAGE_ISO, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()
    def test_spawn_glance(self):
        """Spawn with separate kernel/ramdisk images fetched from Glance."""
        # Replace the real image download with a stub that just creates a
        # VDI of the right role in the fake SR.
        def fake_fetch_disk_image(context, session, instance, name_label,
                                  image_id, image_type):
            sr_ref = vm_utils.safe_find_sr(session)
            image_type_str = vm_utils.ImageType.to_string(image_type)
            vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
                                          name_label, image_type_str, "20")
            vdi_role = vm_utils.ImageType.get_role(image_type)
            vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
        self.stubs.Set(vm_utils, '_fetch_disk_image',
                       fake_fetch_disk_image)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()
    def test_spawn_boot_from_volume_no_glance_image_meta(self):
        """Boot from volume succeeds without Glance image metadata."""
        dev_info = get_fake_device_info()
        self._test_spawn(IMAGE_FROM_VOLUME, None, None,
                         block_device_info=dev_info)
    def test_spawn_boot_from_volume_with_image_meta(self):
        """Boot from volume succeeds when Glance image metadata is present."""
        dev_info = get_fake_device_info()
        self._test_spawn(IMAGE_VHD, None, None,
                         block_device_info=dev_info)
    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_spawn_netinject_file(self):
        """File-based network injection writes the expected interfaces file."""
        self.flags(flat_injected=True)
        db_fakes.stub_out_db_instance_api(self, injected=True)
        self._tee_executed = False
        # Capture the content piped to `tee .../etc/network/interfaces` and
        # compare it byte-for-byte against the expected Debian-style config.
        def _tee_handler(cmd, **kwargs):
            actual = kwargs.get('process_input', None)
            expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
    hwaddress ether DE:AD:BE:EF:00:01
    address 192.168.1.100
    netmask 255.255.255.0
    broadcast 192.168.1.255
    gateway 192.168.1.1
    dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
    hwaddress ether DE:AD:BE:EF:00:01
    address 2001:db8:0:1:dcad:beff:feef:1
    netmask 64
    gateway 2001:db8:0:1::1
"""
            self.assertEqual(expected, actual)
            self._tee_executed = True
            return '', ''
        def _readlink_handler(cmd_parts, **kwargs):
            return os.path.realpath(cmd_parts[2]), ''
        fake_processutils.fake_execute_set_repliers([
            # Capture the tee .../etc/network/interfaces command
            (r'tee.*interfaces', _tee_handler),
            (r'readlink -nm.*', _readlink_handler),
        ])
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         check_injection=True)
        self.assertTrue(self._tee_executed)
    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_spawn_netinject_xenstore(self):
        """If a guest agent is detected, file injection (tee) must not run."""
        db_fakes.stub_out_db_instance_api(self, injected=True)
        self._tee_executed = False
        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem
            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug('Creating files in %s to simulate guest agent',
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                              'xe-update-networking'), 'w').close()
            return '', ''
        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug('Removing simulated guest agent files in %s',
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                                   'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''
        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''
        fake_processutils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL,
                         IMAGE_RAMDISK, check_injection=True)
        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
    def test_spawn_injects_auto_disk_config_to_xenstore(self):
        """spawn() must call _inject_auto_disk_config on the new VM."""
        instance = self._create_instance(spawn=False, obj=True)
        self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
        self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
        self.mox.ReplayAll()
        image_meta = objects.ImageMeta.from_dict(
            IMAGE_FIXTURES[IMAGE_MACHINE]["image_meta"])
        self.conn.spawn(self.context, instance, image_meta, [], 'herp', '')
    def test_spawn_vlanmanager(self):
        """Spawn an instance whose networking comes from VlanManager."""
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')
        def dummy(*args, **kwargs):
            pass
        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        inst2 = self._create_instance(False, obj=True)
        networks = self.network.db.network_get_all(ctxt)
        with mock.patch('nova.objects.network.Network._from_db_object'):
            for network in networks:
                self.network.set_network_host(ctxt, network)
        self.network.allocate_for_instance(ctxt,
                                           instance_id=inst2.id,
                                           instance_uuid=inst2.uuid,
                                           host=CONF.host,
                                           vpn=None,
                                           rxtx_factor=3,
                                           project_id=self.project_id,
                                           macs=None)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=inst2.id,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
    def test_spawn_with_network_qos(self):
        """Every VIF of a spawned instance carries ratelimit QoS params."""
        self._create_instance()
        for vif_ref in xenapi_fake.get_all('VIF'):
            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
            self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
            # rxtx_factor 3 * rxtx_base 10 Mb expressed in kbps
            self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
                             str(3 * 10 * 1024))
    def test_spawn_ssh_key_injection(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        # RSA keys are additionally encrypted for the agent's set-password
        # path; assert the key passed in is the one being encrypted.
        def fake_encrypt_text(sshkey, new_pass):
            self.assertEqual("ssh-rsa fake_keydata", sshkey)
            return "fake"
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = (b'\n# The following ssh key was injected by '
                         b'Nova\nssh-rsa fake_keydata\n')
        injected_files = [(b'/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-rsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_ssh_key_injection_non_rsa(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        # Non-RSA keys must never go through ssh_encrypt_text.
        def fake_encrypt_text(sshkey, new_pass):
            raise NotImplementedError("Should not be called")
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = (b'\n# The following ssh key was injected by '
                         b'Nova\nssh-dsa fake_keydata\n')
        injected_files = [(b'/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-dsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_injected_files(self):
        # Test spawning with injected_files.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        injected_files = [(b'/tmp/foo', b'foobar')]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         injected_files=injected_files)
        self.check_vm_params_for_linux()
        self.assertEqual(actual_injected_files, injected_files)
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade(self, mock_get):
        """Spawn succeeds when an agent build record triggers an upgrade."""
        self.flags(use_agent_default=True,
                   group='xenserver')
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade_fails_silently(self, mock_get):
        """A failing agent upgrade records a fault but does not abort spawn."""
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}
        self._test_spawn_fails_silently_with(exception.AgentError,
                method="_plugin_agent_agentupdate", failure="fake_error")
    def test_spawn_with_resetnetwork_alternative_returncode(self):
        """resetnetwork returncode '500' is still treated as success."""
        self.flags(use_agent_default=True,
                   group='xenserver')
        def fake_resetnetwork(self, method, args):
            fake_resetnetwork.called = True
            # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
            return jsonutils.dumps({'returncode': '500',
                                    'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_resetnetwork', fake_resetnetwork)
        fake_resetnetwork.called = False
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.assertTrue(fake_resetnetwork.called)
    def _test_spawn_fails_silently_with(self, expected_exception_cls,
                                        method="_plugin_agent_version",
                                        failure=None, value=None):
        """Spawn with a failing agent call; expect a recorded fault, no abort.

        Stubs the given fake-session agent method to either raise a
        XenAPI.Failure (failure) or return `value`, then asserts that spawn
        completes and an instance fault of expected_exception_cls is logged.
        """
        self.flags(use_agent_default=True,
                   agent_version_timeout=0,
                   group='xenserver')
        def fake_agent_call(self, method, args):
            if failure:
                raise XenAPI.Failure([failure])
            else:
                return value
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       method, fake_agent_call)
        called = {}
        def fake_add_instance_fault(*args, **kwargs):
            called["fake_add_instance_fault"] = args[2]
        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       fake_add_instance_fault)
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        actual_exception = called["fake_add_instance_fault"]
        self.assertIsInstance(actual_exception, expected_exception_cls)
    def test_spawn_fails_silently_with_agent_timeout(self):
        """An agent TIMEOUT failure maps to exception.AgentTimeout."""
        self._test_spawn_fails_silently_with(exception.AgentTimeout,
                                             failure="TIMEOUT:fake")
    def test_spawn_fails_silently_with_agent_not_implemented(self):
        """A NOT IMPLEMENTED failure maps to exception.AgentNotImplemented."""
        self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
                                             failure="NOT IMPLEMENTED:fake")
    def test_spawn_fails_silently_with_agent_error(self):
        """Any other agent failure maps to exception.AgentError."""
        self._test_spawn_fails_silently_with(exception.AgentError,
                                             failure="fake_error")
    def test_spawn_fails_silently_with_agent_bad_return(self):
        """A non-zero agent returncode maps to exception.AgentError."""
        error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
        self._test_spawn_fails_silently_with(exception.AgentError,
                                             value=error)
    def test_spawn_sets_last_dom_id(self):
        """Spawn records the VM's domid in other_config['last_dom_id']."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.assertEqual(self.vm['domid'],
                         self.vm['other_config']['last_dom_id'])
    def test_rescue(self):
        """Rescue re-attaches the original disks (not volumes) to the
        rescue VM at shifted user devices.
        """
        instance = self._create_instance(spawn=False, obj=True)
        xenapi_fake.create_vm(instance['name'], 'Running')
        session = get_session()
        vm_ref = vm_utils.lookup(session, instance['name'])
        swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
        root_vdi_ref = xenapi_fake.create_vdi('root', None)
        eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
        eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
        vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
        xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
        xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
        xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
        # osvol marks a Cinder volume; it must NOT follow into the rescue VM.
        xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
                               other_config={'osvol': True})
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict(
            {'id': IMAGE_VHD,
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        conn.rescue(self.context, instance, [], image_meta, '')
        vm = xenapi_fake.get_record('VM', vm_ref)
        rescue_name = "%s-rescue" % vm["name_label"]
        rescue_ref = vm_utils.lookup(session, rescue_name)
        rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
        vdi_refs = {}
        for vbd_ref in rescue_vm['VBDs']:
            vbd = xenapi_fake.get_record('VBD', vbd_ref)
            vdi_refs[vbd['VDI']] = vbd['userdevice']
        # original root moves to device 1; swap/ephemerals keep their slots
        self.assertEqual('1', vdi_refs[root_vdi_ref])
        self.assertEqual('2', vdi_refs[swap_vdi_ref])
        self.assertEqual('4', vdi_refs[eph1_vdi_ref])
        self.assertEqual('5', vdi_refs[eph2_vdi_ref])
        self.assertNotIn(vol_vdi_ref, vdi_refs)
    def test_rescue_preserve_disk_on_failure(self):
        # test that the original disk is preserved if rescue setup fails
        # bug #1227898
        instance = self._create_instance(obj=True)
        session = get_session()
        image_meta = objects.ImageMeta.from_dict(
            {'id': IMAGE_VHD,
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        vm_ref = vm_utils.lookup(session, instance['name'])
        vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        # raise an error in the spawn setup process and trigger the
        # undo manager logic:
        def fake_start(*args, **kwargs):
            raise test.TestingException('Start Error')
        self.stubs.Set(self.conn._vmops, '_start', fake_start)
        self.assertRaises(test.TestingException, self.conn.rescue,
                          self.context, instance, [], image_meta, '')
        # confirm original disk still exists:
        vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        self.assertEqual(vdi_ref, vdi_ref2)
        self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
    def test_unrescue(self):
        """Unrescue succeeds when a '-rescue' VM exists for the instance."""
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Unrescue expects the original instance to be powered off
        conn.power_off(instance)
        xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
        conn.unrescue(instance, None)
    def test_unrescue_not_in_rescue(self):
        """Unrescuing a non-rescued instance raises InstanceNotInRescueMode."""
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Ensure that it will not unrescue a non-rescued instance.
        self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
                          instance, None)
    def test_finish_revert_migration(self):
        """Driver finish_revert_migration delegates to VMOps."""
        instance = self._create_instance()
        class VMOpsMock(object):
            def __init__(self):
                self.finish_revert_migration_called = False
            def finish_revert_migration(self, context, instance, block_info,
                                        power_on):
                self.finish_revert_migration_called = True
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn._vmops = VMOpsMock()
        conn.finish_revert_migration(self.context, instance, None)
        self.assertTrue(conn._vmops.finish_revert_migration_called)
    def test_reboot_hard(self):
        """A HARD reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "HARD")
    def test_poll_rebooting_instances(self):
        """Polling rebooting instances triggers a compute-API reboot."""
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        instance = self._create_instance()
        instances = [instance]
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.poll_rebooting_instances(60, instances)
    def test_reboot_soft(self):
        """A SOFT reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "SOFT")
    def test_reboot_halted(self):
        """Rebooting a Halted VM starts it — power state becomes Running."""
        session = get_session()
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Halted')
        conn.reboot(self.context, instance, None, "SOFT")
        vm_ref = vm_utils.lookup(session, instance['name'])
        vm = xenapi_fake.get_record('VM', vm_ref)
        self.assertEqual(vm['power_state'], 'Running')
    def test_reboot_unknown_state(self):
        """Rebooting a VM in an unknown power state raises XenAPI.Failure."""
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Unknown')
        self.assertRaises(XenAPI.Failure, conn.reboot, self.context,
                          instance, None, "SOFT")
    def test_reboot_rescued(self):
        """Rebooting a RESCUED instance looks up the rescue VM (check_rescue)."""
        instance = self._create_instance()
        instance['vm_state'] = vm_states.RESCUED
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        real_result = vm_utils.lookup(conn._session, instance['name'])
        with mock.patch.object(vm_utils, 'lookup',
                               return_value=real_result) as mock_lookup:
            conn.reboot(self.context, instance, None, "SOFT")
            # third positional arg True == check_rescue
            mock_lookup.assert_called_once_with(conn._session,
                                                instance['name'], True)
    def test_get_console_output_succeeds(self):
        """Driver get_console_output delegates to VMOps and returns its log."""
        def fake_get_console_output(instance):
            self.assertEqual("instance", instance)
            return "console_log"
        self.stubs.Set(self.conn._vmops, 'get_console_output',
                       fake_get_console_output)
        self.assertEqual(self.conn.get_console_output('context', "instance"),
                         "console_log")
    def _test_maintenance_mode(self, find_host, find_aggregate):
        """Drive host_maintenance_mode with configurable host/aggregate lookup.

        find_host/find_aggregate toggle whether a destination host and a
        host aggregate are found; on success the instance must be
        live-migrated (VM.pool_migrate) and left in MIGRATING task state.
        """
        real_call_xenapi = self.conn._session.call_xenapi
        instance = self._create_instance(spawn=True)
        api_calls = {}
        # Record all the xenapi calls, and return a fake list of hosts
        # for the host.get_all call
        def fake_call_xenapi(method, *args):
            api_calls[method] = args
            if method == 'host.get_all':
                return ['foo', 'bar', 'baz']
            return real_call_xenapi(method, *args)
        self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
        def fake_aggregate_get(context, host, key):
            if find_aggregate:
                return [test_aggregate.fake_aggregate]
            else:
                return []
        self.stub_out('nova.db.aggregate_get_by_host',
                      fake_aggregate_get)
        def fake_host_find(context, session, src, dst):
            if find_host:
                return 'bar'
            else:
                raise exception.NoValidHost("I saw this one coming...")
        self.stubs.Set(host, '_host_find', fake_host_find)
        result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
        self.assertEqual(result, 'on_maintenance')
        # We expect the VM.pool_migrate call to have been called to
        # migrate our instance to the 'bar' host
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        host_ref = "foo"
        expected = (vm_ref, host_ref, {"live": "true"})
        self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
        self.assertEqual(instance['task_state'], task_states.MIGRATING)
    def test_maintenance_mode(self):
        """Maintenance mode succeeds when host and aggregate are found."""
        self._test_maintenance_mode(True, True)
    def test_maintenance_mode_no_host(self):
        """No destination host found -> NoValidHost propagates."""
        self.assertRaises(exception.NoValidHost,
                          self._test_maintenance_mode, False, True)
    def test_maintenance_mode_no_aggregate(self):
        """No aggregate for the host -> NotFound propagates."""
        self.assertRaises(exception.NotFound,
                          self._test_maintenance_mode, True, False)
    def test_uuid_find(self):
        """host._uuid_find maps a templated instance name back to its uuid."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        fake_inst = fake_instance.fake_db_instance(id=123)
        fake_inst2 = fake_instance.fake_db_instance(id=456)
        db.instance_get_all_by_host(self.context, fake_inst['host'],
                                    columns_to_join=None
                                    ).AndReturn([fake_inst, fake_inst2])
        self.mox.ReplayAll()
        expected_name = CONF.instance_name_template % fake_inst['id']
        inst_uuid = host._uuid_find(self.context, fake_inst['host'],
                                    expected_name)
        self.assertEqual(inst_uuid, fake_inst['uuid'])
    def test_per_instance_usage_running(self):
        """Running and paused instances both appear in per-instance usage."""
        instance = self._create_instance(spawn=True)
        flavor = objects.Flavor.get_by_id(self.context, 3)
        expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
                                       'uuid': instance['uuid']}}
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)
        # Paused instances still consume resources:
        self.conn.pause(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)
    def test_per_instance_usage_suspended(self):
        # Suspended instances do not consume memory:
        instance = self._create_instance(spawn=True)
        self.conn.suspend(self.context, instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual({}, actual)
    def test_per_instance_usage_halted(self):
        """Powered-off instances are excluded from per-instance usage."""
        instance = self._create_instance(spawn=True, obj=True)
        self.conn.power_off(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual({}, actual)
    def _create_instance(self, spawn=True, obj=False, **attrs):
        """Creates and spawns a test instance.

        :param spawn: also spawn the instance through the driver.
        :param obj: return the Instance object instead of a primitive.
        :param attrs: overrides for the default instance values.
        """
        instance_values = {
            'uuid': uuidutils.generate_uuid(),
            'display_name': 'host-',
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': IMAGE_MACHINE,
            'kernel_id': IMAGE_KERNEL,
            'ramdisk_id': IMAGE_RAMDISK,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance_values.update(attrs)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self)
        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id,
             'disk_format': 'vhd'})
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        if obj:
            return instance
        return base.obj_to_primitive(instance)
    def test_destroy_clean_up_kernel_and_ramdisk(self):
        """destroy() removes the VM and destroys its kernel and ramdisk."""
        def fake_lookup_kernel_ramdisk(session, vm_ref):
            return "kernel", "ramdisk"
        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)
        def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
            fake_destroy_kernel_ramdisk.called = True
            self.assertEqual("kernel", kernel)
            self.assertEqual("ramdisk", ramdisk)
        fake_destroy_kernel_ramdisk.called = False
        self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
                       fake_destroy_kernel_ramdisk)
        instance = self._create_instance(spawn=True, obj=True)
        network_info = fake_network.fake_get_instance_nw_info(self)
        self.conn.destroy(self.context, instance, network_info)
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        self.assertIsNone(vm_ref)
        self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
    """Unit tests for Diffie-Hellman code."""
    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        # Two independent parties for key exchange and encryption round-trips.
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()

    def test_shared(self):
        # Each side derives the shared secret from the peer's public value;
        # both derivations must agree.
        alice_pub = self.alice.get_public()
        bob_pub = self.bob.get_public()
        alice_shared = self.alice.compute_shared(bob_pub)
        bob_shared = self.bob.compute_shared(alice_pub)
        self.assertEqual(alice_shared, bob_shared)

    def _test_encryption(self, message):
        """Encrypt with one party, decrypt with the other; expect identity."""
        enc = self.alice.encrypt(message)
        # ciphertext must not carry a trailing newline
        self.assertFalse(enc.endswith('\n'))
        dec = self.bob.decrypt(enc)
        self.assertEqual(dec, message)

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        # idiomatic string repetition instead of join-over-comprehension
        self._test_encryption('abcd' * 1024)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
REQUIRES_LOCKING = True
    def setUp(self):
        """Configure fake XenAPI session, fake DB, network and a pending
        migration record for the resize/migrate tests.
        """
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self)
        xenapi_fake.create_network('fake', 'fake_br1')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        # default attribute values shared by every instance in these tests
        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': IMAGE_MACHINE,
            'kernel_id': None,
            'ramdisk_id': None,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)
        fake_processutils.stub_out_processutils_execute(self)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)
        def fake_unpause_and_wait(self, vm_ref, instance, power_on):
            pass
        self.stubs.Set(vmops.VMOps, '_unpause_and_wait',
                       fake_unpause_and_wait)
    def _create_instance(self, **kw):
        """Create an Instance from self.instance_values, overridden by kw."""
        values = self.instance_values.copy()
        values.update(kw)
        instance = objects.Instance(context=self.context, **values)
        instance.flavor = objects.Flavor(root_gb=80,
                                         ephemeral_gb=0)
        instance.create()
        return instance
    def test_migrate_disk_and_power_off(self):
        """Plain migrate_disk_and_power_off of a same-size flavor succeeds."""
        instance = self._create_instance()
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
                                             ephemeral_gb=0)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(conn._session, instance['name'])
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        volume_utils.is_booted_from_volume(conn._session, vm_ref)
        self.mox.ReplayAll()
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
instance = self._create_instance()
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'fake_dest', flavor, None)
    def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
        # 0 GB -> 0 GB is not a resize-down, so the migration must succeed.
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
                                             ephemeral_gb=0)
        instance = self._create_instance(root_gb=0, ephemeral_gb=0)
        instance.flavor.root_gb = 0
        instance.flavor.ephemeral_gb = 0
        xenapi_fake.create_vm(instance['name'], 'Running')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(conn._session, instance['name'])
        # Record the expected boot-from-volume check for this VM.
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        volume_utils.is_booted_from_volume(conn._session, vm_ref)
        self.mox.ReplayAll()
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)
    def _test_revert_migrate(self, power_on):
        """Finish a migration, then revert it.

        Verifies that the VDI resize hook runs, that the VM is started
        only when ``power_on`` is requested, and that
        ``finish_revert_migration`` is invoked on revert.
        """
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        # Flags flipped by the fakes below; asserted at the end.
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False
        # NOTE(review): deliberately shadows the module-level ``context``
        # import; only passed through to finish_revert_migration.
        context = 'fake_context'
        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True
        def fake_vdi_resize(*args, **kwargs):
            self.called = True
        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        # Stubbed out with no recorded behaviour; mox is never replayed for
        # this mock in this test.
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self)
        image_meta = objects.ImageMeta.from_dict(
            {'id': instance['image_ref'], 'disk_format': 'vhd'})
        # Create real fake-XenAPI VDIs so finish_migration has base/cow
        # uuids to work with.
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertTrue(self.called)
        self.assertEqual(self.fake_vm_start_called, power_on)
        conn.finish_revert_migration(context, instance, network_info)
        self.assertTrue(self.fake_finish_revert_migration_called)
def test_revert_migrate_power_on(self):
self._test_revert_migrate(True)
def test_revert_migrate_power_off(self):
self._test_revert_migrate(False)
def _test_finish_migrate(self, power_on):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertTrue(self.called)
self.assertEqual(self.fake_vm_start_called, power_on)
def test_finish_migrate_power_on(self):
self._test_finish_migrate(True)
def test_finish_migrate_power_off(self):
self._test_finish_migrate(False)
def test_finish_migrate_no_local_storage(self):
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = create_instance_with_system_metadata(self.context, values)
instance.flavor.root_gb = 0
instance.flavor.ephemeral_gb = 0
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
# Resize instance would be determined by the compute call
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
    @stub_vm_utils_with_vdi_attached
    def test_migrate_too_many_partitions_no_resize_down(self):
        # A root disk with more than one partition cannot be resized down;
        # the failure must surface as InstanceFaultRollback.
        instance = self._create_instance()
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_partitions(partition):
            # Two partitions -> fails the single-partition safety check.
            return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)
    @stub_vm_utils_with_vdi_attached
    def test_migrate_bad_fs_type_no_resize_down(self):
        # A partition layout rejected by the resize-down safety checks must
        # surface as InstanceFaultRollback.
        instance = self._create_instance()
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_partitions(partition):
            return [(1, 2, 3, "ext2", "", "boot")]
        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)
    def test_migrate_rollback_when_resize_down_fs_fails(self):
        """If migrating the resized VHD fails, the new VDI is destroyed and
        the original VM restored, wrapping the error in
        InstanceFaultRollback.
        """
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # NOTE(review): local shadows the module-level ``vmops`` import.
        vmops = conn._vmops
        self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
        self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
        self.mox.StubOutWithMock(vm_utils, 'resize_disk')
        self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
        self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
        instance = objects.Instance(context=self.context,
                                    auto_disk_config=True,
                                    uuid=uuids.instance)
        instance.obj_reset_changes()
        vm_ref = "vm_ref"
        dest = "dest"
        flavor = "type"
        sr_path = "sr_path"
        # Expected call sequence: shutdown, relabel, look up the old VDI,
        # resize it, then fail while migrating the new VHD ...
        vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
        vmops._apply_orig_vm_name_label(instance, vm_ref)
        old_vdi_ref = "old_ref"
        vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
            (old_vdi_ref, None))
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
                             flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
        vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
                             sr_path, 0).AndRaise(
                                exception.ResizeError(reason="asdf"))
        # ... which triggers cleanup of the new VDI and VM restoration.
        vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
        vmops._restore_orig_vm_and_cleanup_orphan(instance)
        self.mox.ReplayAll()
        with mock.patch.object(instance, 'save') as mock_save:
            self.assertRaises(exception.InstanceFaultRollback,
                              vmops._migrate_disk_resizing_down, self.context,
                              instance, dest, flavor, vm_ref, sr_path)
            self.assertEqual(3, mock_save.call_count)
            # Progress stops at 60% because the migrate step failed.
            self.assertEqual(60.0, instance.progress)
    def test_resize_ensure_vm_is_shutdown_cleanly(self):
        """A running VM that shuts down cleanly needs no hard shutdown."""
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        # clean_shutdown_vm succeeds; hard_shutdown_vm must not be called.
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_forced(self):
        """If clean shutdown fails, a hard shutdown is attempted."""
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_fails(self):
        """If both clean and hard shutdown fail, ResizeError is raised."""
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ResizeError,
                          vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
        """An already-halted VM requires no shutdown calls at all."""
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        # Only the shutdown check is expected; neither shutdown is recorded.
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
    """Tests for the vm_utils.ImageType helper."""

    def test_to_string(self):
        """A type id converts to its corresponding type string."""
        kernel_str = vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL)
        self.assertEqual(kernel_str, vm_utils.ImageType.KERNEL_STR)

    def _assert_role(self, expected_role, image_type_id):
        """Check that get_role maps image_type_id to expected_role."""
        actual_role = vm_utils.ImageType.get_role(image_type_id)
        self.assertEqual(expected_role, actual_role)

    def test_get_image_role_kernel(self):
        self._assert_role('kernel', vm_utils.ImageType.KERNEL)

    def test_get_image_role_ramdisk(self):
        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)

    def test_get_image_role_disk(self):
        self._assert_role('root', vm_utils.ImageType.DISK)

    def test_get_image_role_disk_raw(self):
        self._assert_role('root', vm_utils.ImageType.DISK_RAW)

    def test_get_image_role_disk_vhd(self):
        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
    """Unit tests for vm_utils.determine_disk_image_type."""

    def assert_disk_type(self, image_meta, expected_disk_type):
        """Assert the detected image type for image_meta."""
        detected = vm_utils.determine_disk_image_type(image_meta)
        self.assertEqual(expected_disk_type, detected)

    def _image_meta(self, disk_format):
        """Build an ImageMeta with just a disk_format set."""
        return objects.ImageMeta.from_dict({'disk_format': disk_format})

    def test_machine(self):
        self.assert_disk_type(self._image_meta('ami'),
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type(self._image_meta('raw'),
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type(self._image_meta('vhd'),
                              vm_utils.ImageType.DISK_VHD)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers.
    """
    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        # Session must be stubbed before the driver is constructed.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.instance = fake_instance.fake_db_instance(name='foo')
    def test_host_state(self):
        stats = self.conn.host_state.get_host_stats(False)
        # Values from fake.create_local_srs (ext SR)
        self.assertEqual(stats['disk_total'], 40000)
        self.assertEqual(stats['disk_used'], 20000)
        # Values from fake._plugin_xenhost_host_data
        self.assertEqual(stats['host_memory_total'], 10)
        self.assertEqual(stats['host_memory_overhead'], 20)
        self.assertEqual(stats['host_memory_free'], 30)
        self.assertEqual(stats['host_memory_free_computed'], 40)
        self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
        self.assertEqual(stats['host_cpu_info']['cpu_count'], 4)
        self.assertThat({
            'vendor': 'GenuineIntel',
            'model': 'Intel(R) Xeon(R) CPU           X3430  @ 2.40GHz',
            'topology': {
                'sockets': 1,
                'cores': 4,
                'threads': 1,
            },
            'features': [
                'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
                'cx8', 'apic', 'sep', 'mtrr', 'mca',
                'cmov', 'pat', 'clflush', 'acpi', 'mmx',
                'fxsr', 'sse', 'sse2', 'ss', 'ht',
                'nx', 'constant_tsc', 'nonstop_tsc',
                'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
                'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
                'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
                'ept', 'vpid',
            ]},
            matchers.DictMatches(stats['cpu_model']))
        # No VMs running
        self.assertEqual(stats['vcpus_used'], 0)
    def test_host_state_vcpus_used(self):
        # vcpus_used should track VMs created after the first refresh.
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 0)
        xenapi_fake.create_vm(self.instance['name'], 'Running')
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 4)
    def test_pci_passthrough_devices(self):
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual(len(stats['pci_passthrough_devices']), 2)
    def test_host_state_missing_sr(self):
        # Must trigger construction of 'host_state' property
        # before introducing the stub which raises the error
        hs = self.conn.host_state
        def fake_safe_find_sr(session):
            raise exception.StorageRepositoryNotFound('not there')
        self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
        self.assertRaises(exception.StorageRepositoryNotFound,
                          hs.get_host_stats,
                          refresh=True)
    def _test_host_action(self, method, action, expected=None):
        # Helper for driver calls of the form method('host', action).
        result = method('host', action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)
    def _test_host_action_no_param(self, method, action, expected=None):
        # Helper for driver calls of the form method(action).
        result = method(action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)
    def test_host_reboot(self):
        self._test_host_action_no_param(self.conn.host_power_action, 'reboot')
    def test_host_shutdown(self):
        self._test_host_action_no_param(self.conn.host_power_action,
                                        'shutdown')
    def test_host_startup(self):
        # Powering a host back up is not supported by the driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'startup')
    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')
    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')
    def test_set_enable_host_enable(self):
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action_no_param(self.conn.set_host_enabled,
                                        True, 'enabled')
        service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
                                                    'nova-compute')
        self.assertFalse(service.disabled)
    def test_set_enable_host_disable(self):
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action_no_param(self.conn.set_host_enabled,
                                        False, 'disabled')
        service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
                                                    'nova-compute')
        self.assertTrue(service.disabled)
    def test_get_host_uptime(self):
        result = self.conn.get_host_uptime()
        self.assertEqual(result, 'fake uptime')
    def test_supported_instances_is_included_in_host_state(self):
        stats = self.conn.host_state.get_host_stats(False)
        self.assertIn('supported_instances', stats)
    def test_supported_instances_is_calculated_by_to_supported_instances(self):
        def to_supported_instances(somedata):
            return "SOMERETURNVALUE"
        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
    def test_update_stats_caches_hostname(self):
        """hypervisor_hostname is cached after the first refresh and is not
        replaced when the plugin later reports a different hostname.
        """
        self.mox.StubOutWithMock(host, 'call_xenhost')
        self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
        self.mox.StubOutWithMock(vm_utils, 'list_vms')
        self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
        data = {'disk_total': 0,
                'disk_used': 0,
                'disk_available': 0,
                'supported_instances': 0,
                'host_capabilities': [],
                'host_hostname': 'foo',
                'vcpus_used': 0,
                }
        sr_rec = {
            'physical_size': 0,
            'physical_utilisation': 0,
            'virtual_allocation': 0,
            }
        for i in range(3):
            host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
            vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
            vm_utils.list_vms(self.conn._session).AndReturn([])
            self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
                sr_rec)
            if i == 2:
                # On the third call (the second below) change the hostname
                data = dict(data, host_hostname='bar')
        self.mox.ReplayAll()
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
    """Tests for host.to_supported_instances."""

    def test_default_return_value(self):
        """None input yields an empty capability list."""
        self.assertEqual([], host.to_supported_instances(None))

    def test_return_value(self):
        expected = [(obj_fields.Architecture.X86_64,
                     obj_fields.HVType.XEN, 'xen')]
        self.assertEqual(expected,
                         host.to_supported_instances([u'xen-3.0-x86_64']))

    def test_invalid_values_do_not_break(self):
        """Unparseable entries are skipped, valid ones kept."""
        expected = [(obj_fields.Architecture.X86_64,
                     obj_fields.HVType.XEN, 'xen')]
        self.assertEqual(
            expected,
            host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))

    def test_multiple_values(self):
        expected = [
            (obj_fields.Architecture.X86_64, obj_fields.HVType.XEN, 'xen'),
            (obj_fields.Architecture.I686, obj_fields.HVType.XEN, 'hvm'),
        ]
        self.assertEqual(
            expected,
            host.to_supported_instances([u'xen-3.0-x86_64',
                                         'hvm-3.0-x86_32']))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    """Tests for the auto_disk_config behaviour of _attach_disks."""

    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': IMAGE_MACHINE,
            'kernel_id': IMAGE_KERNEL,
            'ramdisk_id': IMAGE_RAMDISK,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to the partitioning checks under test.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass
        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        """Run _attach_disks and assert whether a root resize happened.

        :param called: expected value of the "was _resize_part_and_fs
                       invoked" marker.
        """
        marker = {"partition_called": False}

        def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
                                    flags):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)
        # NOTE(review): result unused; kept for parity with the original
        # test fixture.
        context.RequestContext(self.user_id, self.project_id)
        session = get_session()
        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id,
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        self.mox.ReplayAll()
        self.conn._vmops._attach_disks(self.context, instance, image_meta,
                vm_ref, instance['name'], vdis, disk_image_type,
                "fake_nw_inf")
        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached
    def test_instance_auto_disk_config_fails_safe_two_partitions(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            # BUGFIX: the second tuple previously read `'ext4' "", ""` —
            # a missing comma made Python concatenate the strings into a
            # malformed 5-tuple.  Each entry is a 6-tuple
            # (num, start, end, fstype, uuid, flags).
            return [(1, 0, 100, 'ext4', "", ""),
                    (2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached
    def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached
    def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 100, 200, 'asdf', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", "boot")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
    """Test generating of local disks, like swap and ephemeral."""
    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': IMAGE_MACHINE,
            'kernel_id': IMAGE_KERNEL,
            'ramdisk_id': IMAGE_RAMDISK,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        self.context = context.RequestContext(self.user_id, self.project_id)
        # Minimal create_vbd replacement backed by the fake XenAPI session.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False, empty=False, unpluggable=True):
            return session.call_xenapi('VBD.create', {'VM': vm_ref,
                                                      'VDI': vdi_ref})
        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
    def assertCalled(self, instance,
                     disk_image_type=vm_utils.ImageType.DISK_VHD):
        # Runs _attach_disks and asserts that the fake installed by the
        # calling test (which sets self.called) was actually invoked.
        # NOTE(review): RequestContext result unused; kept for parity.
        context.RequestContext(self.user_id, self.project_id)
        session = get_session()
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdi_key = 'root'
        if disk_image_type == vm_utils.ImageType.DISK_ISO:
            vdi_key = 'iso'
        vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
        self.called = False
        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id,
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        self.conn._vmops._attach_disks(self.context, instance, image_meta,
                vm_ref, instance['name'], vdis, disk_image_type,
                "fake_nw_inf")
        self.assertTrue(self.called)
    def test_generate_swap(self):
        # Test swap disk generation.
        instance_values = dict(self.instance_values, instance_type_id=5)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        def fake_generate_swap(*args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
        self.assertCalled(instance)
    def test_generate_ephemeral(self):
        # Test ephemeral disk generation.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        def fake_generate_ephemeral(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
        self.assertCalled(instance)
    def test_generate_iso_blank_root_disk(self):
        # Booting from ISO with no kernel/ramdisk must generate a blank
        # root disk rather than an ephemeral one.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance_values.pop('kernel_id')
        instance_values.pop('ramdisk_id')
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        def fake_generate_ephemeral(*args):
            pass
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
        def fake_generate_iso(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
                       fake_generate_iso)
        self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
    # Two fake VMs with their domids and per-device MAC addresses;
    # used to map bandwidth data back to instances.
    FAKE_VMS = {'test1:ref': dict(name_label='test1',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='12',
                                  _vifmap={'0': "a:b:c:d...",
                                           '1': "e:f:12:q..."}),
                'test2:ref': dict(name_label='test2',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='42',
                                  _vifmap={'0': "a:3:c:d...",
                                           '1': "e:f:42:q..."}),
                }
    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Map VIF devices straight from the fake VM records above.
        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']
        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)
    @classmethod
    def _fake_list_vms(cls, session):
        return six.iteritems(cls.FAKE_VMS)
    @staticmethod
    def _fake_fetch_bandwidth_mt(session):
        # Simulates XenServer returning no bandwidth data at all.
        return {}
    @staticmethod
    def _fake_fetch_bandwidth(session):
        # Bandwidth keyed by domid, then by VIF device number.
        return {'42':
                    {'0': {'bw_in': 21024, 'bw_out': 22048},
                     '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12':
                    {'0': {'bw_in': 1024, 'bw_out': 2048},
                     '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }
    def test_get_all_bw_counters(self):
        # One counter dict per (instance, VIF) pair is expected.
        instances = [dict(name='test1', uuid='1-2-3'),
                     dict(name='test2', uuid='4-5-6')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(len(result), 4)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="a:b:c:d...",
                           bw_in=1024,
                           bw_out=2048), result)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="e:f:12:q...",
                           bw_in=31337,
                           bw_out=21212121), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="a:3:c:d...",
                           bw_in=21024,
                           bw_out=22048), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="e:f:42:q...",
                           bw_in=231337,
                           bw_out=221212121), result)
    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_conters returns an empty list when
        no data returned from Xenserver.  c.f. bug #910045.
        """
        instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth_mt)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
# share a lot of code. Consider abstracting common code in a base
# class for firewall driver testing.
#
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
REQUIRES_LOCKING = True
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
    def setUp(self):
        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.user_id = 'mappin'
        self.project_id = 'fake'
        # The firewall fake session captures iptables output into the
        # test case (test_case=self).
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(CONF.network_manager)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Shortcut to the driver's Dom0 iptables firewall implementation.
        self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = [l for l in self._in_rules if not l.startswith('#')]
for rule in in_rules:
if 'nova' not in rule:
self.assertIn(rule, self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
match_rules = [rule for rule in self._out_rules if regex.match(rule)]
self.assertGreater(len(match_rules), 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
match_rules = [rule for rule in self._out_rules if regex.match(rule)]
self.assertGreater(len(match_rules), 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
match_rules = [rule for rule in self._out_rules if regex.match(rule)]
self.assertGreater(len(match_rules), 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self, 1)
from nova.compute import utils as compute_utils # noqa
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
match_rules = [rule for rule in self._out_rules
if regex.match(rule)]
self.assertGreater(len(match_rules), 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
    def test_multinic_iptables(self):
        # Rule accounting: one v4 rule per v4 address and one v6 rule per
        # v6 address, across `networks_count` fake networks, plus two
        # extra v4 rules for the DHCP server set on the first subnet.
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
        network_info = _get_instance_nw_info(self,
                                             networks_count,
                                             ipv4_addr_per_network)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        # Snapshot rule counts before preparing the instance filter so the
        # per-network contribution can be isolated below.
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                         ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instance_info[instance_ref['id']] = (instance_ref,
network_info)
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
# validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
match_rules = [rule for rule in self._out_rules if regex.match(rule)]
self.assertGreater(len(match_rules), 0,
"Rules were not updated properly. "
"The rule for UDP acceptance is missing")
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for testing we find the right SR."""

    def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raise when wrong filter.
        self.flags(sr_matching_filter='yadayadayada', group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        # Ensure the default local-storage is found.
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        # This test is only guaranteed if there is one host in the pool
        self.assertEqual(len(xenapi_fake.get_all('host')), 1)
        host_ref = xenapi_fake.get_all('host')[0]
        pbd_refs = xenapi_fake.get_all('PBD')
        for pbd_ref in pbd_refs:
            pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
            if pbd_rec['host'] != host_ref:
                continue
            sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
            if sr_rec['other_config']['i18n-key'] == 'local-storage':
                local_sr = pbd_rec['SR']
        # NOTE(review): local_sr would be unbound (NameError) if the fake
        # host had no local-storage SR attached — presumably it always does.
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_by_other_criteria(self):
        # Ensure the SR is found when using a different filter.
        self.flags(sr_matching_filter='other-config:my_fake_sr=true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_default(self):
        # Ensure the default SR is found regardless of other-config.
        self.flags(sr_matching_filter='default-sr:true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        pool_ref = session.call_xenapi('pool.get_all')[0]
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         expected)
def _create_service_entries(context, values=None):
    """Create nova-compute service rows for each (zone, hosts) pair.

    ``values`` maps availability-zone name to a list of host names.  The
    default is built inside the function instead of using a mutable
    default argument (shared across calls).  Returns the mapping used.
    """
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    for avail_zone, hosts in six.iteritems(values):
        for service_host in hosts:
            db.service_create(context,
                              {'host': service_host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
    """Unit tests for aggregate operations."""

    def setUp(self):
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(connection_url='http://test_url',
                   connection_username='test_user',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   default_availability_zone='avail_zone1')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.compute = manager.ComputeManager()
        self.api = compute_api.AggregateAPI()
        values = {'name': 'test_aggr',
                  'metadata': {'availability_zone': 'test_zone',
                               pool_states.POOL_FLAG: 'XenAPI'}}
        self.aggr = objects.Aggregate(context=self.context, id=1,
                                      **values)
        # Metadata expected on an aggregate once 'host' became the master.
        self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
                              'master_compute': 'host',
                              'availability_zone': 'fake_zone',
                              pool_states.KEY: pool_states.ACTIVE,
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}

    def test_pool_add_to_aggregate_called_by_driver(self):
        calls = []

        def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_add_to_aggregate)
        self.stubs.Set(self.conn._pool,
                       "add_to_aggregate",
                       pool_add_to_aggregate)
        self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                   slave_info="SLAVEINFO")
        self.assertIn(pool_add_to_aggregate, calls)

    def test_pool_remove_from_aggregate_called_by_driver(self):
        calls = []

        def pool_remove_from_aggregate(context, aggregate, host,
                                       slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_remove_from_aggregate)
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       pool_remove_from_aggregate)
        self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                        slave_info="SLAVEINFO")
        self.assertIn(pool_remove_from_aggregate, calls)

    def test_add_to_aggregate_for_first_host_sets_metadata(self):
        def fake_init_pool(id, name):
            fake_init_pool.called = True
        self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
        aggregate = self._aggregate_setup()
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        result = objects.Aggregate.get_by_id(self.context, aggregate.id)
        self.assertTrue(fake_init_pool.called)
        self.assertThat(self.fake_metadata,
                        matchers.DictMatches(result.metadata))

    def test_join_slave(self):
        # Ensure join_slave gets called when the request gets to master.
        def fake_join_slave(id, compute_uuid, host, url, user, password):
            fake_join_slave.called = True
        self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         dict(compute_uuid='fake_uuid',
                                              url='fake_url',
                                              user='fake_user',
                                              passwd='fake_pass',
                                              xenhost_uuid='fake_uuid'))
        self.assertTrue(fake_join_slave.called)

    def test_add_to_aggregate_first_host(self):
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
        metadata = {'availability_zone': 'fake_zone',
                    pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.CREATED}
        aggregate = objects.Aggregate(context=self.context)
        aggregate.name = 'fake_aggregate'
        aggregate.metadata = dict(metadata)
        aggregate.create()
        aggregate.add_host('host')
        self.assertEqual(["host"], aggregate.hosts)
        self.assertEqual(metadata, aggregate.metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)

    def test_remove_from_aggregate_called(self):
        def fake_remove_from_aggregate(context, aggregate, host):
            fake_remove_from_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       fake_remove_from_aggregate)
        self.conn.remove_from_aggregate(None, None, None)
        self.assertTrue(fake_remove_from_aggregate.called)

    def test_remove_from_empty_aggregate(self):
        result = self._aggregate_setup()
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn._pool.remove_from_aggregate,
                          self.context, result, "test_host")

    def test_remove_slave(self):
        # Ensure eject slave gets called.
        def fake_eject_slave(id, compute_uuid, host_uuid):
            fake_eject_slave.called = True
        self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_slave.called)

    def test_remove_master_solo(self):
        # Ensure metadata are cleared after removal.
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
        aggregate = self._aggregate_setup(metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = objects.Aggregate.get_by_id(self.context, aggregate.id)
        self.assertTrue(fake_clear_pool.called)
        self.assertThat({'availability_zone': 'fake_zone',
                pool_states.POOL_FLAG: 'XenAPI',
                pool_states.KEY: pool_states.ACTIVE},
                matchers.DictMatches(result.metadata))

    def test_remote_master_non_empty_pool(self):
        # Ensure AggregateError is raised if removing the master.
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")

    def _aggregate_setup(self, aggr_name='fake_aggregate',
                         aggr_zone='fake_zone',
                         aggr_state=pool_states.CREATED,
                         hosts=None, metadata=None):
        """Create and return a test aggregate with the given hosts added.

        ``hosts`` defaults to ['host']; None is used instead of a list
        literal to avoid the mutable-default-argument pitfall.
        """
        if hosts is None:
            hosts = ['host']
        aggregate = objects.Aggregate(context=self.context)
        aggregate.name = aggr_name
        aggregate.metadata = {'availability_zone': aggr_zone,
                              pool_states.POOL_FLAG: 'XenAPI',
                              pool_states.KEY: aggr_state,
                              }
        if metadata:
            aggregate.metadata.update(metadata)
        aggregate.create()
        for aggregate_host in hosts:
            aggregate.add_host(aggregate_host)
        return aggregate

    def test_add_host_to_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateActionAdd is raised when adding host while
        aggregate is not ready.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'host')
        self.assertIn('setup in progress', str(ex))

    def test_add_host_to_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateActionAdd is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate deleted', str(ex))

    def test_add_host_to_aggregate_invalid_error_status(self):
        """Ensure InvalidAggregateActionAdd is raised when aggregate is
        in error.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate in error', str(ex))

    def test_remove_host_from_aggregate_error(self):
        # Ensure we can remove a host from an aggregate even if in error.
        values = _create_service_entries(self.context)
        fake_zone = list(values.keys())[0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)
        # let's mock the fact that the aggregate is ready!
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        self.api.update_aggregate_metadata(self.context,
                                           aggr.id,
                                           metadata)
        for aggregate_host in values[fake_zone]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr.id, aggregate_host)
        # let's mock the fact that the aggregate is in error!
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr.id,
                                                       values[fake_zone][0])
        self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts))
        self.assertEqual(expected.metadata[pool_states.KEY],
                         pool_states.ACTIVE)

    def test_remove_host_from_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateActionDelete is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_remove_host_from_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateActionDelete is raised when aggregate is
        changing.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_add_aggregate_host_raise_err(self):
        # Ensure the undo operation works correctly on add.
        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
            raise exception.AggregateError(
                    aggregate_id='', action='', reason='')
        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       fake_driver_add_to_aggregate)
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        self.aggr.metadata = metadata
        self.aggr.hosts = ['fake_host']
        self.assertRaises(exception.AggregateError,
                          self.compute.add_aggregate_host,
                          self.context, host="fake_host",
                          aggregate=self.aggr,
                          slave_info=None)
        # On failure the aggregate is flagged in-error; hosts are kept.
        self.assertEqual(self.aggr.metadata[pool_states.KEY],
                         pool_states.ERROR)
        self.assertEqual(self.aggr.hosts, ['fake_host'])
class MockComputeAPI(object):
    """Records compute RPC-API calls instead of performing them.

    Each call is appended to ``_mock_calls`` as a tuple of the bound
    method followed by its positional arguments.
    """

    def __init__(self):
        self._mock_calls = []

    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        recorded = (self.add_aggregate_host, ctxt, aggregate,
                    host_param, host, slave_info)
        self._mock_calls.append(recorded)

    def remove_aggregate_host(self, ctxt, host, aggregate_id, host_param,
                              slave_info):
        recorded = (self.remove_aggregate_host, ctxt, host, aggregate_id,
                    host_param, slave_info)
        self._mock_calls.append(recorded)
class StubDependencies(object):
    """Stub dependencies for ResourcePool."""

    def __init__(self):
        self.compute_rpcapi = MockComputeAPI()

    def _is_hv_pool(self, *_ignore):
        # Always pretend to be part of a hypervisor pool.
        return True

    def _get_metadata(self, *_ignore):
        # Minimal pool metadata: empty state dict, 'master' as the master.
        metadata = {pool_states.KEY: {},
                    'master_compute': 'master'}
        return metadata

    def _create_slave_info(self, *ignore):
        return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool, use stub dependencies.

    StubDependencies comes first in the MRO so its fake helpers override
    the real ResourcePool implementations.
    """
class HypervisorPoolTestCase(test.NoDBTestCase):
    # Aggregate fixture with 'master' as the pool master; the dict is
    # immediately rebound as an objects.Aggregate below.
    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadata': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: '',
            pool_states.KEY: ''
        }
    }
    fake_aggregate = objects.Aggregate(**fake_aggregate)

    def test_slave_asks_master_to_add_slave_to_pool(self):
        # Adding on a slave must be forwarded to the master via the
        # (mock) compute RPC API's add_aggregate_host.
        slave = ResourcePoolWithStubs()
        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
        self.assertIn(
            (slave.compute_rpcapi.add_aggregate_host,
            "CONTEXT", "slave", jsonutils.to_primitive(self.fake_aggregate),
            "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)

    def test_slave_asks_master_to_remove_slave_from_pool(self):
        # Removal is likewise forwarded via remove_aggregate_host.
        slave = ResourcePoolWithStubs()
        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
        self.assertIn(
            (slave.compute_rpcapi.remove_aggregate_host,
             "CONTEXT", "slave", 98, "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
    """pool.swap_xapi_host() replaces only the host part of a XAPI URL."""

    def test_swapping(self):
        self.assertEqual(
            "http://otherserver:8765/somepath",
            pool.swap_xapi_host(
                "http://someserver:8765/somepath", 'otherserver'))

    def test_no_port(self):
        # Port is optional and must not be invented.
        self.assertEqual(
            "http://otherserver/somepath",
            pool.swap_xapi_host(
                "http://someserver/somepath", 'otherserver'))

    def test_no_path(self):
        # Path is optional too.
        self.assertEqual(
            "http://otherserver",
            pool.swap_xapi_host(
                "http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
    def setUp(self):
        # Live-migration tests run against a fake XenAPI session with the
        # instance DB API stubbed out entirely.
        super(XenAPILiveMigrateTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host')
        db_fakes.stub_out_db_instance_api(self)
        self.context = context.get_admin_context()
    def test_live_migration_calls_vmops(self):
        # The driver delegates live_migration straight to VMOps.live_migrate.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_live_migrate(context, instance_ref, dest, post_method,
                              recover_method, block_migration, migrate_data):
            fake_live_migrate.called = True
        self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
        self.conn.live_migration(None, None, None, None, None)
        self.assertTrue(fake_live_migrate.called)
    def test_pre_live_migration(self):
        # pre_live_migration passes its arguments through to VMOps and
        # returns its result.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(self.conn._vmops, "pre_live_migration") as pre:
            pre.return_value = True
            result = self.conn.pre_live_migration(
                "ctx", "inst", "bdi", "nw", "di", "data")
            self.assertTrue(result)
            pre.assert_called_with("ctx", "inst", "bdi", "nw", "di", "data")
    def test_post_live_migration_at_destination(self):
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        fake_instance = {"name": "name"}
        fake_network_info = "network_info"
        # One shared fake counts calls from all three firewall hooks.
        def fake_fw(instance, network_info):
            self.assertEqual(instance, fake_instance)
            self.assertEqual(network_info, fake_network_info)
            fake_fw.call_count += 1
        def fake_create_kernel_and_ramdisk(context, session, instance,
                                           name_label):
            return "fake-kernel-file", "fake-ramdisk-file"
        fake_fw.call_count = 0
        _vmops = self.conn._vmops
        self.stubs.Set(_vmops.firewall_driver,
                       'setup_basic_filtering', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'prepare_instance_filter', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'apply_instance_filter', fake_fw)
        self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
                       fake_create_kernel_and_ramdisk)
        def fake_get_vm_opaque_ref(instance):
            fake_get_vm_opaque_ref.called = True
        self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
        fake_get_vm_opaque_ref.called = False
        def fake_strip_base_mirror_from_vdis(session, vm_ref):
            fake_strip_base_mirror_from_vdis.called = True
        self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
                       fake_strip_base_mirror_from_vdis)
        fake_strip_base_mirror_from_vdis.called = False
        self.conn.post_live_migration_at_destination(None, fake_instance,
                                                     fake_network_info, None)
        # All three firewall hooks must have fired exactly once each.
        self.assertEqual(fake_fw.call_count, 3)
        self.assertTrue(fake_get_vm_opaque_ref.called)
        self.assertTrue(fake_strip_base_mirror_from_vdis.called)
    def test_check_can_live_migrate_destination_with_block_migration(self):
        # The destination check reports the discovered SR and the migrate
        # data supplied by the fake session.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
        expected = {'block_migration': True,
                    'is_volume_backed': False,
                    'migrate_data': {
                        'migrate_send_data': {'value': 'fake_migrate_data'},
                        'destination_sr_ref': 'asdf'
                    }
                    }
        result = self.conn.check_can_live_migrate_destination(self.context,
                                                              {'host': 'host'},
                                                              {}, {},
                                                              True, False)
        # is_volume_backed is forced so the legacy-dict comparison holds.
        result.is_volume_backed = False
        self.assertEqual(expected, result.to_legacy_dict())
    def test_check_live_migrate_destination_verifies_ip(self):
        # With every PIF stripped of its IP configuration the destination
        # check must raise MigrationError.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        for pif_ref in xenapi_fake.get_all('PIF'):
            pif_rec = xenapi_fake.get_record('PIF', pif_ref)
            pif_rec['IP'] = ''
            pif_rec['IPv6'] = ''
        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)
    def test_check_can_live_migrate_destination_block_migration_fails(self):
        # A session whose migration calls fail must surface MigrationError.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)
    def _add_default_live_migrate_stubs(self, conn):
        """Stub out the VMOps/vm_utils helpers used during live migration.

        The fakes are declared @classmethod so that, once stubbed onto the
        VMOps class, calls through an instance still succeed (the class is
        passed as the first argument in place of the instance).
        """
        @classmethod
        def fake_generate_vdi_map(cls, destination_sr_ref, _vm_ref):
            pass
        @classmethod
        def fake_get_iscsi_srs(cls, destination_sr_ref, _vm_ref):
            return []
        @classmethod
        def fake_get_vm_opaque_ref(cls, instance):
            return "fake_vm"
        def fake_lookup_kernel_ramdisk(session, vm):
            return ("fake_PV_kernel", "fake_PV_ramdisk")
        @classmethod
        def fake_generate_vif_map(cls, vif_uuid_map):
            return {'vif_ref1': 'dest_net_ref'}
        self.stub_out('nova.virt.xenapi.vmops.VMOps._generate_vdi_map',
                      fake_generate_vdi_map)
        self.stub_out('nova.virt.xenapi.vmops.VMOps._get_iscsi_srs',
                      fake_get_iscsi_srs)
        self.stub_out('nova.virt.xenapi.vmops.VMOps._get_vm_opaque_ref',
                      fake_get_vm_opaque_ref)
        self.stub_out('nova.virt.xenapi.vm_utils.lookup_kernel_ramdisk',
                      fake_lookup_kernel_ramdisk)
        self.stub_out('nova.virt.xenapi.vmops.VMOps._generate_vif_network_map',
                      fake_generate_vif_map)
    def test_check_can_live_migrate_source_with_block_migrate(self):
        # Happy path: the source check returns the dest_check_data as-is.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        dest_check_data = objects.XenapiLiveMigrateData(
            block_migration=True, is_volume_backed=False,
            destination_sr_ref=None, migrate_send_data={'key': 'value'})
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data, result)
    def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
        # iSCSI SRs are acceptable when the XSM SR check is relaxed.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        def fake_is_xsm_sr_check_relaxed():
            return True
        self.stubs.Set(self.conn._vmops._session,
                       'is_xsm_sr_check_relaxed',
                       fake_is_xsm_sr_check_relaxed)
        dest_check_data = objects.XenapiLiveMigrateData(
            block_migration=True,
            is_volume_backed=True,
            destination_sr_ref=None,
            migrate_send_data={'key': 'value'})
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data.to_legacy_dict(),
                         result.to_legacy_dict())
    def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
        # iSCSI SRs present but XSM SR check NOT relaxed -> MigrationError.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        def fake_is_xsm_sr_check_relaxed():
            return False
        self.stubs.Set(self.conn._vmops._session,
                       'is_xsm_sr_check_relaxed',
                       fake_is_xsm_sr_check_relaxed)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context, {'host': 'host'},
                          {})
    def test_check_can_live_migrate_source_with_block_migrate_fails(self):
        # A failing session during the source check raises MigrationError.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        dest_check_data = objects.XenapiLiveMigrateData(
            block_migration=True, is_volume_backed=True,
            migrate_send_data={'key': 'value'}, destination_sr_ref=None)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context,
                          {'host': 'host'},
                          dest_check_data)
    @mock.patch.object(objects.AggregateList, 'get_by_host')
    def test_check_can_live_migrate_works(self, mock_get_by_host):
        # Destination check succeeds when the source host appears in the
        # hypervisor_pool aggregate metadata.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        metadata = {'host': 'test_host_uuid'}
        aggregate = objects.Aggregate(metadata=metadata)
        aggregate_list = objects.AggregateList(objects=[aggregate])
        mock_get_by_host.return_value = aggregate_list
        instance = objects.Instance(host='host')
        self.conn.check_can_live_migrate_destination(
            self.context, instance, None, None)
        mock_get_by_host.assert_called_once_with(
            self.context, CONF.host, key='hypervisor_pool')
    @mock.patch.object(objects.AggregateList, 'get_by_host')
    def test_check_can_live_migrate_fails(self, mock_get_by_host):
        # Destination check fails when the source host is NOT found in the
        # hypervisor_pool aggregate metadata.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        metadata = {'dest_other': 'test_host_uuid'}
        aggregate = objects.Aggregate(metadata=metadata)
        aggregate_list = objects.AggregateList(objects=[aggregate])
        mock_get_by_host.return_value = aggregate_list
        instance = objects.Instance(host='host')
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, instance, None, None)
        mock_get_by_host.assert_called_once_with(
            self.context, CONF.host, key='hypervisor_pool')
    def test_live_migration(self):
        # Successful (non-block) migration must invoke the post_method
        # callback.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_lookup_kernel_ramdisk(session, vm_ref):
            return "kernel", "ramdisk"
        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True
        migrate_data = objects.XenapiLiveMigrateData(
            destination_sr_ref="foo",
            migrate_send_data={"bar": "baz"},
            block_migration=False)
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 None, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_on_failure(self):
        # A failing XenAPI call mid-migration must trigger recover_method
        # and re-raise the original error.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)
        def recover_method(context, instance, destination_hostname):
            recover_method.called = True
        migrate_data = objects.XenapiLiveMigrateData(
            destination_sr_ref="foo",
            migrate_send_data={"bar": "baz"},
            block_migration=False)
        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method,
                          None, migrate_data)
        self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
    """Block migration still runs the post-migration callback on success."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(self.conn)

    def post_method(context, instance, destination_hostname,
                    block_migration, migrate_data):
        post_method.called = True

    # pass block_migration = True and migrate data
    migrate_data = objects.XenapiLiveMigrateData(
        destination_sr_ref="foo",
        migrate_send_data={"bar": "baz"},
        block_migration=True)
    self.conn.live_migration(self.conn, None, None, post_method, None,
                             True, migrate_data)
    self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_block_cleans_srs(self):
    """Block migration forgets iSCSI SRs on the source after success."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(self.conn)

    def fake_get_iscsi_srs(context, instance):
        # Report one attached iSCSI SR that must be cleaned up.
        return ['sr_ref']

    self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                   fake_get_iscsi_srs)

    def fake_forget_sr(context, instance):
        fake_forget_sr.called = True

    self.stubs.Set(volume_utils, "forget_sr",
                   fake_forget_sr)

    def post_method(context, instance, destination_hostname,
                    block_migration, migrate_data):
        post_method.called = True

    migrate_data = objects.XenapiLiveMigrateData(
        destination_sr_ref="foo",
        migrate_send_data={"bar": "baz"},
        block_migration=True)
    self.conn.live_migration(self.conn, None, None, post_method, None,
                             True, migrate_data)
    self.assertTrue(post_method.called, "post_method.called")
    self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
    """If migrate_send fails, MigrationError is raised and recovery runs."""
    # FakeSessionForFailedMigrateTests makes the migrate RPCs fail.
    stubs.stubout_session(self.stubs,
                          stubs.FakeSessionForFailedMigrateTests)
    self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(self.conn)

    def recover_method(context, instance, destination_hostname):
        recover_method.called = True

    # pass block_migration = True and migrate data
    migrate_data = objects.XenapiLiveMigrateData(
        destination_sr_ref='foo',
        migrate_send_data={'bar': 'baz'},
        block_migration=True)
    self.assertRaises(exception.MigrationError,
                      self.conn.live_migration, self.conn,
                      None, None, None, recover_method, True, migrate_data)
    self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
    """migrate_send receives the migrate data and generated VDI map intact."""
    fake_vdi_map = object()

    class Session(xenapi_fake.SessionBase):
        # NOTE: `self_` is the session instance; the closed-over `self`
        # is the test case, used for assertions inside the fake call.
        def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                            vdi_map, vif_map, options):
            self.assertEqual({'SOMEDATA': 'SOMEVAL'}, migrate_data)
            self.assertEqual(fake_vdi_map, vdi_map)

    stubs.stubout_session(self.stubs, Session)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(conn)

    def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
        return fake_vdi_map

    self.stubs.Set(conn._vmops, "_generate_vdi_map",
                   fake_generate_vdi_map)

    def dummy_callback(*args, **kwargs):
        pass

    migrate_data = objects.XenapiLiveMigrateData(
        migrate_send_data={'SOMEDATA': 'SOMEVAL'},
        destination_sr_ref='TARGET_SR_OPAQUE_REF',
        block_migration=True)
    conn.live_migration(
        self.context, instance=dict(name='ignore'), dest=None,
        post_method=dummy_callback, recover_method=dummy_callback,
        block_migration="SOMEDATA",
        migrate_data=migrate_data)
def test_live_migrate_pool_migration_xapi_call_parameters(self):
    """Pool (non-block) migration calls VM.pool_migrate with live=true."""
    class Session(xenapi_fake.SessionBase):
        # Raising IOError proves this exact code path was reached.
        def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
            self.assertEqual("fake_ref", host_ref)
            self.assertEqual({"live": "true"}, options)
            raise IOError()

    stubs.stubout_session(self.stubs, Session)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(conn)

    def fake_get_host_opaque_ref(context, destination):
        return "fake_ref"

    self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
                   fake_get_host_opaque_ref)

    def dummy_callback(*args, **kwargs):
        pass

    migrate_data = objects.XenapiLiveMigrateData(
        migrate_send_data={'foo': 'bar'},
        destination_sr_ref='foo',
        block_migration=False)
    self.assertRaises(IOError, conn.live_migration,
                      self.context, instance=dict(name='ignore'), dest=None,
                      post_method=dummy_callback, recover_method=dummy_callback,
                      block_migration=False, migrate_data=migrate_data)
def test_generate_vdi_map(self):
    """_generate_vdi_map maps every source VDI to the destination SR."""
    stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

    vm_ref = "fake_vm_ref"

    def fake_find_sr(_session):
        self.assertEqual(conn._session, _session)
        return "source_sr_ref"

    self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)

    def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
        self.assertEqual(conn._session, _session)
        self.assertEqual(vm_ref, _vm_ref)
        self.assertEqual("source_sr_ref", _sr_ref)
        return ["vdi0", "vdi1"]

    self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
                   fake_get_instance_vdis_for_sr)

    result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)

    # Both VDIs should target the destination SR.
    self.assertEqual({"vdi0": "dest_sr_ref",
                      "vdi1": "dest_sr_ref"}, result)
@mock.patch.object(vmops.VMOps, "_delete_networks_and_bridges")
def test_rollback_live_migration_at_destination(self, mock_delete_network):
    """Rollback tears down networks but must not destroy the instance."""
    stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    network_info = ["fake_vif1"]
    with mock.patch.object(conn, "destroy") as mock_destroy:
        conn.rollback_live_migration_at_destination("context",
            "instance", network_info, {'block_device_mapping': []})
        # destroy() is intentionally skipped during rollback; only the
        # network/bridge cleanup should have happened.
        self.assertFalse(mock_destroy.called)
        self.assertTrue(mock_delete_network.called)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for injecting and updating instance metadata in the xenstore.

    A fake in-memory ``self.xenstore`` dict (with ``persist`` and ``ephem``
    sections) stands in for the real xenstore; the VMOps xenstore helpers
    are stubbed out to read/write that dict so tests can assert on it.
    """

    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # Fake xenstore backing store; 'persist' mirrors the param
        # xenstore, 'ephem' mirrors the live (ephemeral) xenstore.
        self.xenstore = dict(persist={}, ephem={})

        self.called_fake_get_vm_opaque_ref = False

        def fake_get_vm_opaque_ref(inst, instance):
            self.called_fake_get_vm_opaque_ref = True
            if instance["uuid"] == "not_found":
                raise exception.NotFound
            self.assertEqual(instance, {'uuid': 'fake'})
            return 'vm_ref'

        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val

        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]

        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            # Real writes JSON-encode values; mimic that here.
            self.xenstore['ephem'][path] = jsonutils.dumps(value)

        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]

        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)

    def test_inject_instance_metadata(self):
        """User metadata lands in the persistent store with sanitized keys."""
        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                        # system_metadata must be ignored entirely
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}],
                        uuid='fake')
        self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/hi_there': '4',
                'vm-data/user-metadata/hi_t_e_e': '5',
            },
            'ephem': {},
        })

    def test_change_instance_metadata_add(self):
        """An '+' diff entry adds the key to both xenstore sections."""
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
            },
        })

    def test_change_instance_metadata_update(self):
        """A '+' diff for an existing key overwrites its value everywhere."""
        diff = dict(b=['+', 4])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
            },
        })

    def test_change_instance_metadata_delete(self):
        """A '-' diff entry removes the key from both xenstore sections."""
        diff = dict(b=['-'])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
            },
        })

    def test_change_instance_metadata_not_found(self):
        """A missing VM is tolerated: the lookup runs but nothing crashes."""
        instance = {'uuid': 'not_found'}
        self.conn._vmops.change_instance_metadata(instance, "fake_diff")
        self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPIFakeTestCase(test.NoDBTestCase):
    """Unit tests for the fake XenAPI session's record-query matcher."""

    def test_query_matches(self):
        """_query_matches evaluates 'field "k"="v"' expressions correctly."""
        record = {'a': '1', 'b': '2', 'c_d': '3'}

        # Map each query expression to its expected match result. Note the
        # '__' in a query maps to '_' in the record key, and both single-
        # and double-quoted forms are accepted.
        tests = {'field "a"="1"': True,
                 'field "b"="2"': True,
                 'field "b"="4"': False,
                 'not field "b"="4"': True,
                 'field "a"="1" and field "b"="4"': False,
                 'field "a"="1" or field "b"="4"': True,
                 'field "c__d"="3"': True,
                 'field \'b\'=\'2\'': True,
                 }

        # Iterate items() directly rather than keys() plus a per-key lookup.
        for query, expected in tests.items():
            fail_msg = "for test '%s'" % query
            self.assertEqual(xenapi_fake._query_matches(record, query),
                             expected, fail_msg)

    def test_query_bad_format(self):
        """Malformed query strings never match any record."""
        record = {'a': '1', 'b': '2', 'c': '3'}
        tests = ['"a"="1" or "b"="4"',
                 'a=1',
                 ]

        for query in tests:
            fail_msg = "for test '%s'" % query
            self.assertFalse(xenapi_fake._query_matches(record, query),
                             fail_msg)
| 42.70441
| 79
| 0.59866
|
4a027ea0b72263bc588a83c4b35c61e5310a293f
| 1,019
|
py
|
Python
|
tests/structures/heap/binary_heap_test.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | 1
|
2021-06-14T01:20:09.000Z
|
2021-06-14T01:20:09.000Z
|
tests/structures/heap/binary_heap_test.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | null | null | null |
tests/structures/heap/binary_heap_test.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | null | null | null |
from cs.structures import BinaryHeap
class TestBinaryHeap:
    """Behavioral tests for BinaryHeap: max/min ordering, update, repr."""

    @staticmethod
    def test_max_heap() -> None:
        """The default heap pops items largest-first."""
        heap = BinaryHeap[int]()
        for value in (34, 31, 37):
            heap.enqueue(value)
        assert heap.peek() == 37
        assert [heap.pop(), heap.pop(), heap.pop()] == [37, 34, 31]

    @staticmethod
    def test_min_heap() -> None:
        """A negating key yields smallest-first order; update/dequeue reheapify."""
        heap = BinaryHeap[int](key=lambda x: -x)
        for value in (34, 31, 37):
            heap.enqueue(value)
        assert heap.peek() == 31
        assert [heap.pop(), heap.pop(), heap.pop()] == [31, 34, 37]

        for value in (45, 40, 50):
            heap.enqueue(value)
        assert heap.peek() == 40
        heap.update(50, 10)
        assert heap.peek() == 10
        heap.dequeue(10)
        assert heap.peek() == 40

    @staticmethod
    def test_repr() -> None:
        """repr and str both expose the backing list verbatim."""
        heap = BinaryHeap[int]()
        for value in (34, 1, 7):
            heap.enqueue(value)
        assert repr(heap) == str(heap) == "BinaryHeap(_heap=[34, 1, 7])"
| 23.697674
| 66
| 0.496565
|
4a0280a167e5e3c14583f6289e09cac634edd1aa
| 6,267
|
py
|
Python
|
protofuzz/tests/test_gen.py
|
Spellchaser/protofuzz
|
831bb0a75e124ba12104e6b4ee055d7580ba9ef3
|
[
"MIT"
] | null | null | null |
protofuzz/tests/test_gen.py
|
Spellchaser/protofuzz
|
831bb0a75e124ba12104e6b4ee055d7580ba9ef3
|
[
"MIT"
] | null | null | null |
protofuzz/tests/test_gen.py
|
Spellchaser/protofuzz
|
831bb0a75e124ba12104e6b4ee055d7580ba9ef3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from protofuzz import gen
import unittest
class TestGenerators(unittest.TestCase):
    """Tests for protofuzz value generators and their permuters.

    Covers IterValueGenerator, DependentValueGenerator, and the Zip /
    Product permuters, including limits and dependency wiring.
    """

    def test_name(self):
        """Test setting a name"""
        name = 'A Name'
        generator = gen.IterValueGenerator('name', [])
        generator.set_name(name)
        self.assertEqual(generator.name(), name)

    def test_basic_gen(self):
        """Test a basic generator"""
        source_vals = [1, 2, 3, 4]
        numbers = gen.IterValueGenerator('iter', source_vals)
        produced_vals = []
        for x in numbers:
            produced_vals.append(x)
        self.assertEqual(produced_vals, source_vals)

    def test_gen_init(self):
        """Test that we can't get a value from a non-iterated generator"""
        values = gen.IterValueGenerator('iter', [1, 2, 3, 4])
        with self.assertRaises(RuntimeError):
            values.get()

    def test_dependent_values(self):
        """Make sure dependent values are correctly resolved"""
        def is_even(x):
            return x % 2 == 0
        values = gen.IterValueGenerator('name', [1, 2, 3, 4])
        dependent = gen.DependentValueGenerator('depends', target=values,
                                                action=is_even)
        for x in values:
            # The dependent generator must always reflect the target's
            # current value passed through the action.
            generated_val, generated_dependency = values.get(), dependent.get()
            self.assertEqual(generated_dependency, is_even(generated_val))

    def test_repeated_gets(self):
        """Make sure that calling get() twice on a generator does not advance it"""
        def plus_one(x):
            return x + 1
        values = gen.IterValueGenerator('name', [1, 2, 3, 4])
        dependent = gen.DependentValueGenerator('dependent', target=values,
                                                action=plus_one)
        # Request an actual item
        next(iter(values))
        values.get()
        first = dependent.get()
        second = dependent.get()
        self.assertEqual(first, second)

    def test_permuted_generators(self):
        """Test basic Product() permuter"""
        values1 = gen.IterValueGenerator('a', [1, 2])
        values2 = gen.IterValueGenerator('b', [1, 2])
        produced_vals = []
        for x in gen.Product('name', values1, values2):
            # Each yielded item is a tuple of (name, value) pairs; keep
            # only the values for comparison.
            x = tuple(map(lambda e: e[1], x))
            produced_vals.append(x)
        self.assertEqual(produced_vals, [(1, 1), (1, 2), (2, 1), (2, 2)])

    def test_permuted_generators_with_dependent_values(self):
        """Test that Product permuter works with dependent values"""
        def is_even(x):
            return x % 2 == 0
        values1 = gen.IterValueGenerator('a', [1, 2, 3])
        values2 = gen.IterValueGenerator('b', [1, 2, 3])
        values3 = gen.IterValueGenerator('c', [1, 2, 3])
        dependent = gen.DependentValueGenerator('v1', target=values1,
                                                action=is_even)
        for x in gen.Product('name', values1, values2, values3, dependent):
            # Unpack for clarity; only the dependency relation is asserted.
            v1, v2, v3, dep = x
            self.assertEqual(is_even(values1.get()), dependent.get())

    def test_permuted_generators_with_via_make_dep(self):
        """Test creation of dependencies via Permuter.make_dependent()"""
        names = gen.IterValueGenerator('name', ['alice', 'bob'])
        lengths = gen.IterValueGenerator('len', ['one', 'two'])
        permuter = gen.Zip('Permute', names, lengths)
        permuter.make_dependent('len', 'name', len)
        for tuples in permuter:
            values = dict(tuples)
            self.assertEqual(len(values['name']), values['len'])

    def test_zip(self):
        """Test a basic Zip permuter"""
        source_vals = [1, 2, 3, 4]
        vals1 = gen.IterValueGenerator('key', source_vals)
        vals2 = gen.IterValueGenerator('val', source_vals)
        produced_via_zips = []
        for x, y in gen.Zip('name', vals1, vals2):
            produced_via_zips.append((x[1], y[1]))
        expected = list(zip(source_vals, source_vals))
        self.assertEqual(produced_via_zips, expected)

    def test_limited_gen(self):
        """A generator with set_limit() yields only the first `limit` items."""
        source_vals = list(range(4))
        limit = 3
        values = gen.IterValueGenerator('name', source_vals)
        values.set_limit(limit)
        produced_vals = [val for val in values]
        self.assertEqual(source_vals[:limit], produced_vals)

    def test_limited_zip(self):
        """Test limits on a basic Zip iterator"""
        source_vals = [1, 2, 3, 4]
        values = gen.IterValueGenerator('name', source_vals)
        produced_vals = []
        for x in gen.Zip('name', values, limit=len(source_vals)-1):
            produced_vals.append(x[0][1])
        self.assertEqual(source_vals[:-1], produced_vals)

    def test_limited_product(self):
        """Test limits on a Product iterator"""
        source_vals = [1, 2, 3, 4]
        vals1 = gen.IterValueGenerator('key', source_vals)
        vals2 = gen.IterValueGenerator('values', source_vals)
        produced_vals = []
        for v1, v2 in gen.Product('name', vals1, vals2, limit=4):
            produced_vals.append((v1[1], v2[1]))
        self.assertEqual(produced_vals, [(1, 1), (1, 2), (1, 3), (1, 4)])

    def test_dual_permuters(self):
        """Test nested permuters"""
        source_vals = [1, 2]
        vals1 = gen.IterValueGenerator('key', source_vals)
        vals2 = gen.IterValueGenerator('val', source_vals)
        produced_via_zips = []
        produced_via_product = []
        # Nested single-generator Zips should enumerate the same space as
        # one Product over both generators.
        for x in gen.Zip('name', vals1):
            for y in gen.Zip('name', vals2):
                produced_via_zips.append(x+y)
        for x in gen.Product('name', vals1, vals2):
            produced_via_product.append(x)
        self.assertEqual(produced_via_zips, produced_via_product)

    def test_make_dependent(self):
        """make_dependent() ties one field's value to a function of another."""
        source_vals = [1, 2, 3, 4]
        vals1 = gen.IterValueGenerator('key', source_vals)
        vals2 = gen.IterValueGenerator('values', source_vals)

        def increment_by_one(val):
            return val + 1

        permuter = gen.Zip('test', vals1, vals2)
        permuter.make_dependent('key', 'values', increment_by_one)
        for values in permuter:
            res = dict(values)
            self.assertEqual(res['key'], increment_by_one(res['values']))
| 33.875676
| 83
| 0.592788
|
4a0281b4f771b89fe2bcdd9b2f5059f86224bada
| 1,698
|
py
|
Python
|
pydis_site/apps/api/urls.py
|
Hotdogszbg/site
|
8071847742f39258781105bb3cfe19fc8c8c967c
|
[
"MIT"
] | null | null | null |
pydis_site/apps/api/urls.py
|
Hotdogszbg/site
|
8071847742f39258781105bb3cfe19fc8c8c967c
|
[
"MIT"
] | 10
|
2021-03-19T12:46:42.000Z
|
2022-03-12T00:52:11.000Z
|
pydis_site/apps/api/urls.py
|
wookie184/site
|
923cbeae0079b4a542fffda19bf3bce3daf15205
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import HealthcheckView, RulesView
from .viewsets import (
BotSettingViewSet, DeletedMessageViewSet,
DocumentationLinkViewSet, InfractionViewSet,
LogEntryViewSet, NominationViewSet,
OffTopicChannelNameViewSet,
OffensiveMessageViewSet, ReminderViewSet,
RoleViewSet, TagViewSet, UserViewSet
)
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
# Router for all bot-facing REST endpoints, mounted under /bot/.
bot_router = DefaultRouter(trailing_slash=False)
bot_router.register(
    'bot-settings',
    BotSettingViewSet
)
bot_router.register(
    'deleted-messages',
    DeletedMessageViewSet
)
bot_router.register(
    'documentation-links',
    DocumentationLinkViewSet
)
bot_router.register(
    'infractions',
    InfractionViewSet
)
bot_router.register(
    'nominations',
    NominationViewSet
)
bot_router.register(
    'offensive-messages',
    OffensiveMessageViewSet
)
bot_router.register(
    'off-topic-channel-names',
    OffTopicChannelNameViewSet,
    # NOTE(review): `base_name` was renamed to `basename` in DRF 3.9 and
    # removed later — confirm the pinned djangorestframework version
    # before upgrading.
    base_name='offtopicchannelname'
)
bot_router.register(
    'reminders',
    ReminderViewSet
)
bot_router.register(
    'roles',
    RoleViewSet
)
bot_router.register(
    'tags',
    TagViewSet
)
bot_router.register(
    'users',
    UserViewSet
)

app_name = 'api'
urlpatterns = (
    # Build URLs using something like...
    #
    # from django_hosts.resolvers import reverse
    path('bot/', include((bot_router.urls, 'api'), namespace='bot')),
    path('logs', LogEntryViewSet.as_view({'post': 'create'}), name='logs'),
    path('healthcheck', HealthcheckView.as_view(), name='healthcheck'),
    path('rules', RulesView.as_view(), name='rules')
)
| 23.260274
| 75
| 0.733804
|
4a02825cf3a85eb7c833d5f7ae8b5c012fc65b53
| 2,651
|
py
|
Python
|
official/nlp/tasks/utils.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
official/nlp/tasks/utils.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
official/nlp/tasks/utils.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils for tasks."""
from typing import Any, Callable
import orbit
import tensorflow as tf
import tensorflow_hub as hub
def get_encoder_from_hub(hub_model_path: str) -> tf.keras.Model:
  """Gets an encoder from hub.

  Wraps a TF-Hub encoder layer in a Keras model taking the standard BERT
  input dict (word ids, mask, type ids) with dynamic sequence length.

  Args:
    hub_model_path: The path to the tfhub model.

  Returns:
    A tf.keras.Model.
  """
  input_word_ids = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_type_ids')
  hub_layer = hub.KerasLayer(hub_model_path, trainable=True)
  dict_input = dict(
      input_word_ids=input_word_ids,
      input_mask=input_mask,
      input_type_ids=input_type_ids)
  # The hub layer returns a dict of outputs (e.g. pooled/sequence output);
  # the original dead `output_dict = {}` initializer has been removed.
  output_dict = hub_layer(dict_input)
  return tf.keras.Model(inputs=dict_input, outputs=output_dict)
def predict(predict_step_fn: Callable[[Any], Any],
            aggregate_fn: Callable[[Any, Any], Any], dataset: tf.data.Dataset):
  """Runs prediction.

  Args:
    predict_step_fn: A callable such as `def predict_step(inputs)`, where
      `inputs` are input tensors.
    aggregate_fn: A callable such as `def aggregate_fn(state, value)`, where
      `value` is the outputs from `predict_step_fn`.
    dataset: A `tf.data.Dataset` object.

  Returns:
    The aggregated predictions.
  """

  @tf.function
  def predict_step(iterator):
    """Predicts on distributed devices."""
    # Run one step under the current distribution strategy, then unwrap
    # per-replica outputs into local (host) values for aggregation.
    outputs = tf.distribute.get_strategy().run(
        predict_step_fn, args=(next(iterator),))
    return tf.nest.map_structure(
        tf.distribute.get_strategy().experimental_local_results, outputs)

  loop_fn = orbit.utils.create_loop_fn(predict_step)
  # Set `num_steps` to -1 to exhaust the dataset.
  outputs = loop_fn(
      iter(dataset), num_steps=-1, state=None, reduce_fn=aggregate_fn)  # pytype: disable=wrong-arg-types
  return outputs
| 34.428571
| 106
| 0.701622
|
4a02827a5d501fd8f388b5278ebb77819bc41dce
| 25,498
|
py
|
Python
|
pymc3/gp/cov.py
|
ExpectationMax/pymc3
|
7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a
|
[
"Apache-2.0"
] | null | null | null |
pymc3/gp/cov.py
|
ExpectationMax/pymc3
|
7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a
|
[
"Apache-2.0"
] | null | null | null |
pymc3/gp/cov.py
|
ExpectationMax/pymc3
|
7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from functools import reduce
from numbers import Number
from operator import add, mul
import aesara
import aesara.tensor as aet
import numpy as np
from aesara.tensor.sharedvar import TensorSharedVariable
from aesara.tensor.var import TensorConstant, TensorVariable
# Public API: the concrete covariance functions exported by this module.
__all__ = [
    "Constant",
    "WhiteNoise",
    "ExpQuad",
    "RatQuad",
    "Exponential",
    "Matern52",
    "Matern32",
    "Linear",
    "Polynomial",
    "Cosine",
    "Periodic",
    "WarpedInput",
    "Gibbs",
    "Coregion",
    "ScaledCov",
    "Kron",
]
class Covariance:
    r"""
    Base class for all kernels/covariance functions.

    Parameters
    ----------
    input_dim: integer
        The number of input dimensions, or columns of X (or Xs)
        the kernel will operate on.
    active_dims: List of integers
        Indicate which dimension or column of X the covariance
        function operates on.
    """

    def __init__(self, input_dim, active_dims=None):
        self.input_dim = input_dim
        if active_dims is None:
            self.active_dims = np.arange(input_dim)
        else:
            # `np.int` (removed in NumPy 1.24) was simply an alias for the
            # builtin `int`; use the builtin to stay compatible.
            self.active_dims = np.asarray(active_dims, dtype=int)

    def __call__(self, X, Xs=None, diag=False):
        r"""
        Evaluate the kernel/covariance function.

        Parameters
        ----------
        X: The training inputs to the kernel.
        Xs: The optional prediction set of inputs the kernel.
            If Xs is None, Xs = X.
        diag: bool
            Return only the diagonal of the covariance function.
            Default is False.
        """
        if diag:
            return self.diag(X)
        else:
            return self.full(X, Xs)

    def diag(self, X):
        # Subclasses must provide the covariance diagonal.
        raise NotImplementedError

    def full(self, X, Xs):
        # Subclasses must provide the full covariance matrix.
        raise NotImplementedError

    def _slice(self, X, Xs):
        # Restrict inputs to the active columns; warn when input_dim is
        # smaller than the number of provided columns, which usually
        # indicates a misconfigured kernel.
        if self.input_dim != X.shape[-1]:
            warnings.warn(
                f"Only {self.input_dim} column(s) out of {X.shape[-1]} are"
                " being used to compute the covariance function. If this"
                " is not intended, increase 'input_dim' parameter to"
                " the number of columns to use. Ignore otherwise.",
                UserWarning,
            )
        X = aet.as_tensor_variable(X[:, self.active_dims])
        if Xs is not None:
            Xs = aet.as_tensor_variable(Xs[:, self.active_dims])
        return X, Xs

    def __add__(self, other):
        return Add([self, other])

    def __mul__(self, other):
        return Prod([self, other])

    def __radd__(self, other):
        return self.__add__(other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __pow__(self, other):
        # Exponents must be scalar: a 0-d shared variable, a plain number,
        # or anything squeezing to a 0-d array.
        if (
            isinstance(other, aesara.compile.SharedVariable)
            and other.get_value().squeeze().shape == ()
        ):
            other = aet.squeeze(other)
            return Exponentiated(self, other)
        elif isinstance(other, Number):
            return Exponentiated(self, other)
        elif np.asarray(other).squeeze().shape == ():
            other = np.squeeze(other)
            return Exponentiated(self, other)

        raise ValueError("A covariance function can only be exponentiated by a scalar value")

    def __array_wrap__(self, result):
        """
        Required to allow radd/rmul by numpy arrays.
        """
        # NumPy produces an object array of elementwise Add/Prod results;
        # recover the numeric array operand and recombine it once with the
        # shared covariance operand.
        result = np.squeeze(result)
        if len(result.shape) <= 1:
            result = result.reshape(1, 1)
        elif len(result.shape) > 2:
            raise ValueError(
                f"cannot combine a covariance function with array of shape {result.shape}"
            )
        r, c = result.shape
        A = np.zeros((r, c))
        for i in range(r):
            for j in range(c):
                A[i, j] = result[i, j].factor_list[1]
        if isinstance(result[0][0], Add):
            return result[0][0].factor_list[0] + A
        elif isinstance(result[0][0], Prod):
            return result[0][0].factor_list[0] * A
        else:
            raise RuntimeError
class Combination(Covariance):
    """Base class for kernels built by combining other kernels/operands.

    Flattens nested combinations of the same type and normalizes operands
    (covariances, arrays, aesara tensors, scalars) before reduction.
    """

    def __init__(self, input_dim, active_dims=None):
        pass  # placeholder removed below; real __init__ follows

    def __init__(self, factor_list):
        # The combined kernel spans the largest input_dim among its
        # covariance factors (scalar/array factors carry no dimension).
        input_dim = max(
            [factor.input_dim for factor in factor_list if isinstance(factor, Covariance)]
        )
        super().__init__(input_dim=input_dim)
        self.factor_list = []
        for factor in factor_list:
            # Flatten nested Add-of-Add / Prod-of-Prod into one flat list.
            if isinstance(factor, self.__class__):
                self.factor_list.extend(factor.factor_list)
            else:
                self.factor_list.append(factor)

    def merge_factors(self, X, Xs=None, diag=False):
        """Evaluate every factor at (X, Xs), honoring `diag` consistently."""
        factor_list = []
        for factor in self.factor_list:
            # make sure diag=True is handled properly
            if isinstance(factor, Covariance):
                factor_list.append(factor(X, Xs, diag))
            elif isinstance(factor, np.ndarray):
                # 2-d arrays act like full covariance matrices; take their
                # diagonal when only the diagonal is requested.
                if np.ndim(factor) == 2 and diag:
                    factor_list.append(np.diag(factor))
                else:
                    factor_list.append(factor)
            elif isinstance(
                factor,
                (
                    TensorConstant,
                    TensorVariable,
                    TensorSharedVariable,
                ),
            ):
                if factor.ndim == 2 and diag:
                    factor_list.append(aet.diag(factor))
                else:
                    factor_list.append(factor)
            else:
                # Scalars and anything else pass through unchanged.
                factor_list.append(factor)
        return factor_list
class Add(Combination):
    """Sum of covariance functions / operands: k(x, x') = k1 + k2 + ..."""

    def __call__(self, X, Xs=None, diag=False):
        # Fold the evaluated factors together with elementwise addition.
        evaluated = self.merge_factors(X, Xs, diag)
        return reduce(lambda left, right: left + right, evaluated)
class Prod(Combination):
    """Product of covariance functions / operands: k(x, x') = k1 * k2 * ..."""

    def __call__(self, X, Xs=None, diag=False):
        # Fold the evaluated factors together with elementwise multiplication.
        evaluated = self.merge_factors(X, Xs, diag)
        return reduce(lambda left, right: left * right, evaluated)
class Exponentiated(Covariance):
    """A covariance function raised to a scalar power: k(x, x') ** p."""

    def __init__(self, kernel, power):
        self.kernel = kernel
        self.power = power
        # Mirror the wrapped kernel's dimensionality and active columns.
        super().__init__(input_dim=self.kernel.input_dim, active_dims=self.kernel.active_dims)

    def __call__(self, X, Xs=None, diag=False):
        return self.kernel(X, Xs, diag=diag) ** self.power
class Kron(Covariance):
    r"""Form a covariance object that is the kronecker product of other covariances.

    In contrast to standard multiplication, where each covariance is given the
    same inputs X and Xs, kronecker product covariances first split the inputs
    into their respective spaces (inferred from the input_dim of each object)
    before forming their product. Kronecker covariances have a larger
    input dimension than any of its factors since the inputs are the
    concatenated columns of its components.

    Factors must be covariances or their combinations, arrays will not work.

    Generally utilized by the `gp.MarginalKron` and gp.LatentKron`
    implementations.
    """

    def __init__(self, factor_list):
        # Total input_dim is the sum of the factors' dims, since each
        # factor consumes its own slice of the input columns.
        self.input_dims = [factor.input_dim for factor in factor_list]
        input_dim = sum(self.input_dims)
        super().__init__(input_dim=input_dim)
        self.factor_list = factor_list

    def _split(self, X, Xs):
        # Split the concatenated columns back into per-factor sub-inputs.
        indices = np.cumsum(self.input_dims)
        X_split = np.hsplit(X, indices)
        if Xs is not None:
            Xs_split = np.hsplit(Xs, indices)
        else:
            Xs_split = [None] * len(X_split)
        return X_split, Xs_split

    def __call__(self, X, Xs=None, diag=False):
        X_split, Xs_split = self._split(X, Xs)
        covs = [cov(x, xs, diag) for cov, x, xs in zip(self.factor_list, X_split, Xs_split)]
        return reduce(mul, covs)
class Constant(Covariance):
    r"""
    Constant valued covariance function.

    .. math::
       k(x, x') = c
    """

    def __init__(self, c):
        # Constant kernels ignore the inputs; input_dim=1 is a placeholder.
        super().__init__(1, None)
        self.c = c

    def diag(self, X):
        # A length-n vector filled with the constant.
        return aet.alloc(self.c, X.shape[0])

    def full(self, X, Xs=None):
        if Xs is None:
            return aet.alloc(self.c, X.shape[0], X.shape[0])
        else:
            return aet.alloc(self.c, X.shape[0], Xs.shape[0])
class WhiteNoise(Covariance):
    r"""
    White noise covariance function.

    .. math::
       k(x, x') = \sigma^2 \mathrm{I}
    """

    def __init__(self, sigma):
        super().__init__(1, None)
        self.sigma = sigma

    def diag(self, X):
        return aet.alloc(aet.square(self.sigma), X.shape[0])

    def full(self, X, Xs=None):
        if Xs is None:
            # Noise only contributes on the diagonal of the square case.
            return aet.diag(self.diag(X))
        else:
            # Distinct inputs are uncorrelated under white noise.
            return aet.alloc(0.0, X.shape[0], Xs.shape[0])
class Circular(Covariance):
    R"""
    Circular Kernel.

    .. math::
       k_g(x, y) = W_\pi(\operatorname{dist}_{\mathit{geo}}(x, y)),

    with

    .. math::
       W_c(t) = \left(1 + \tau \frac{t}{c}\right)\left(1-\frac{t}{c}\right)^\tau_+

    where :math:`c` is maximum value for :math:`t` and :math:`\tau\ge 4`.
    :math:`\tau` controls for correlation strength, larger :math:`\tau` leads to less smooth functions
    See [1]_ for more explanations and use cases.

    Parameters
    ----------
    period : scalar
        defines the circular interval :math:`[0, \mathit{bound})`
    tau : scalar
        :math:`\tau\ge 4` defines correlation strength, the larger,
        the smaller correlation is. Minimum value is :math:`4`

    References
    ----------
    .. [1] Espéran Padonou, O Roustant, "Polar Gaussian Processes for Predicting on Circular Domains"
        https://hal.archives-ouvertes.fr/hal-01119942v1/document
    """

    def __init__(self, input_dim, period, tau=4, active_dims=None):
        super().__init__(input_dim, active_dims)
        # c is the half-period: the largest possible circular distance.
        self.c = aet.as_tensor_variable(period / 2)
        self.tau = tau

    def dist(self, X, Xs):
        if Xs is None:
            Xs = aet.transpose(X)
        else:
            Xs = aet.transpose(Xs)
        # Wrapped (geodesic) distance on the circle of circumference 2c.
        return aet.abs_((X - Xs + self.c) % (self.c * 2) - self.c)

    def weinland(self, t):
        # Weinland taper W_c(t); the clip implements the (.)_+ truncation.
        return (1 + self.tau * t / self.c) * aet.clip(1 - t / self.c, 0, np.inf) ** self.tau

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        return self.weinland(self.dist(X, Xs))

    def diag(self, X):
        # Zero distance to itself => correlation 1 everywhere.
        return aet.alloc(1.0, X.shape[0])
class Stationary(Covariance):
    r"""
    Base class for stationary kernels/covariance functions.

    Parameters
    ----------
    ls: Lengthscale. If input_dim > 1, a list or array of scalars or PyMC3 random
        variables. If input_dim == 1, a scalar or PyMC3 random variable.
    ls_inv: Inverse lengthscale. 1 / ls. One of ls or ls_inv must be provided.
    """

    def __init__(self, input_dim, ls=None, ls_inv=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        # Exactly one of ls / ls_inv must be given (xor on being None).
        if (ls is None) == (ls_inv is None):
            raise ValueError("Only one of 'ls' or 'ls_inv' must be provided")
        if ls_inv is not None:
            if isinstance(ls_inv, (list, tuple)):
                ls = 1.0 / np.asarray(ls_inv)
            else:
                ls = 1.0 / ls_inv
        self.ls = aet.as_tensor_variable(ls)

    def square_dist(self, X, Xs):
        # Scale inputs by the inverse lengthscale before computing distances.
        X = aet.mul(X, 1.0 / self.ls)
        X2 = aet.sum(aet.square(X), 1)
        if Xs is None:
            Xs, Xs2 = X, X2
        else:
            Xs = aet.mul(Xs, 1.0 / self.ls)
            Xs2 = aet.sum(aet.square(Xs), 1)
        # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, clipped against negative
        # round-off.
        sqd = -2.0 * aet.dot(X, aet.transpose(Xs)) + (
            aet.reshape(X2, (-1, 1)) + aet.reshape(Xs2, (1, -1))
        )
        return aet.clip(sqd, 0.0, np.inf)

    def euclidean_dist(self, X, Xs):
        # Small jitter keeps the gradient of sqrt finite at zero distance.
        return aet.sqrt(self.square_dist(X, Xs) + 1e-12)

    def diag(self, X):
        return aet.alloc(1.0, X.shape[0])

    def full(self, X, Xs=None):
        raise NotImplementedError
class Periodic(Stationary):
    r"""
    The Periodic kernel.

    .. math::
       k(x, x') = \mathrm{exp}\left( -\frac{\mathrm{sin}^2(\pi |x-x'| \frac{1}{T})}{2\ell^2} \right)

    Notes
    -----
    Note that the scaling factor for this kernel is different compared to the more common
    definition (see [1]_). Here, 0.5 is in the exponent instead of the more common value, 2.
    Divide the length-scale by 2 when initializing the kernel to recover the standard definition.

    References
    ----------
    .. [1] David Duvenaud, "The Kernel Cookbook"
       https://www.cs.toronto.edu/~duvenaud/cookbook/
    """

    def __init__(self, input_dim, period, ls=None, ls_inv=None, active_dims=None):
        super().__init__(input_dim, ls, ls_inv, active_dims)
        self.period = period

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        Xs = X if Xs is None else Xs
        # Broadcast pairwise differences over all input pairs.
        f1 = X.dimshuffle(0, "x", 1)
        f2 = Xs.dimshuffle("x", 0, 1)
        angle = np.pi * (f1 - f2) / self.period
        dist = aet.sum(aet.square(aet.sin(angle) / self.ls), 2)
        return aet.exp(-0.5 * dist)
class ExpQuad(Stationary):
    r"""
    The Exponentiated Quadratic kernel. Also refered to as the Squared
    Exponential, or Radial Basis Function kernel.

    .. math::
       k(x, x') = \mathrm{exp}\left[ -\frac{(x - x')^2}{2 \ell^2} \right]
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        r2 = self.square_dist(X, Xs)
        return aet.exp(-0.5 * r2)
class RatQuad(Stationary):
    r"""
    The Rational Quadratic kernel.

    .. math::
       k(x, x') = \left(1 + \frac{(x - x')^2}{2\alpha\ell^2} \right)^{-\alpha}
    """

    def __init__(self, input_dim, alpha, ls=None, ls_inv=None, active_dims=None):
        super().__init__(input_dim, ls, ls_inv, active_dims)
        self.alpha = alpha

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        base = 1.0 + 0.5 * self.square_dist(X, Xs) * (1.0 / self.alpha)
        return aet.power(base, -1.0 * self.alpha)
class Matern52(Stationary):
    r"""
    The Matern kernel with nu = 5/2.

    .. math::
       k(x, x') = \left(1 + \frac{\sqrt{5(x - x')^2}}{\ell} +
                  \frac{5(x-x')^2}{3\ell^2}\right)
                  \mathrm{exp}\left[ - \frac{\sqrt{5(x - x')^2}}{\ell} \right]
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        r = self.euclidean_dist(X, Xs)
        sqrt5 = np.sqrt(5.0)
        polynomial = 1.0 + sqrt5 * r + 5.0 / 3.0 * aet.square(r)
        return polynomial * aet.exp(-1.0 * sqrt5 * r)
class Matern32(Stationary):
    r"""
    The Matern kernel with nu = 3/2.

    .. math::
       k(x, x') = \left(1 + \frac{\sqrt{3(x - x')^2}}{\ell}\right)
                  \mathrm{exp}\left[ - \frac{\sqrt{3(x - x')^2}}{\ell} \right]
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        r = self.euclidean_dist(X, Xs)
        sqrt3 = np.sqrt(3.0)
        return (1.0 + sqrt3 * r) * aet.exp(-sqrt3 * r)
class Matern12(Stationary):
    r"""
    The Matern kernel with nu = 1/2.

    .. math::
       k(x, x') = \mathrm{exp}\left[ -\frac{(x - x')^2}{\ell} \right]
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        return aet.exp(-self.euclidean_dist(X, Xs))
class Exponential(Stationary):
    r"""
    The Exponential kernel.

    .. math::
       k(x, x') = \mathrm{exp}\left[ -\frac{||x - x'||}{2\ell^2} \right]
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        r = self.euclidean_dist(X, Xs)
        return aet.exp(-0.5 * r)
class Cosine(Stationary):
    r"""
    The Cosine kernel.

    .. math::
       k(x, x') = \mathrm{cos}\left( 2 \pi \frac{||x - x'||}{ \ell^2} \right)
    """

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        r = self.euclidean_dist(X, Xs)
        return aet.cos(2.0 * np.pi * r)
class Linear(Covariance):
    r"""
    The Linear kernel.

    .. math::
       k(x, x') = (x - c)(x' - c)
    """

    def __init__(self, input_dim, c, active_dims=None):
        super().__init__(input_dim, active_dims)
        self.c = c

    def _common(self, X, Xs=None):
        # Slice to the active dimensions and center X on c.
        X, Xs = self._slice(X, Xs)
        return X, aet.sub(X, self.c), Xs

    def full(self, X, Xs=None):
        X, Xc, Xs = self._common(X, Xs)
        other = Xc if Xs is None else aet.sub(Xs, self.c)
        return aet.dot(Xc, aet.transpose(other))

    def diag(self, X):
        _, Xc, _ = self._common(X, None)
        return aet.sum(aet.square(Xc), 1)
class Polynomial(Linear):
    r"""
    The Polynomial kernel.

    .. math::
       k(x, x') = [(x - c)(x' - c) + \mathrm{offset}]^{d}
    """

    def __init__(self, input_dim, c, d, offset, active_dims=None):
        super().__init__(input_dim, c, active_dims)
        self.d = d
        self.offset = offset

    def full(self, X, Xs=None):
        # Raise the shifted linear kernel to the polynomial degree.
        return aet.power(super().full(X, Xs) + self.offset, self.d)

    def diag(self, X):
        return aet.power(super().diag(X) + self.offset, self.d)
class WarpedInput(Covariance):
    r"""
    Warp the inputs of any kernel using an arbitrary function
    defined using Aesara.

    .. math::
       k(x, x') = k(w(x), w(x'))

    Parameters
    ----------
    cov_func: Covariance
    warp_func: callable
        Aesara function of X and additional optional arguments.
    args: optional, tuple or list of scalars or PyMC3 variables
        Additional inputs (besides X or Xs) to warp_func.
    """

    def __init__(self, input_dim, cov_func, warp_func, args=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        if not callable(warp_func):
            raise TypeError("warp_func must be callable")
        if not isinstance(cov_func, Covariance):
            raise TypeError("Must be or inherit from the Covariance class")
        self.w = handle_args(warp_func, args)
        self.args = args
        self.cov_func = cov_func

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        warped = self.w(X, self.args)
        if Xs is None:
            return self.cov_func(warped, Xs)
        return self.cov_func(warped, self.w(Xs, self.args))

    def diag(self, X):
        X, _ = self._slice(X, None)
        return self.cov_func(self.w(X, self.args), diag=True)
class Gibbs(Covariance):
    r"""
    The Gibbs kernel. Use an arbitrary lengthscale function defined
    using Aesara. Only tested in one dimension.

    .. math::
       k(x, x') = \sqrt{\frac{2\ell(x)\ell(x')}{\ell^2(x) + \ell^2(x')}}
                  \mathrm{exp}\left[ -\frac{(x - x')^2}
                                           {\ell^2(x) + \ell^2(x')} \right]

    Parameters
    ----------
    lengthscale_func: callable
        Aesara function of X and additional optional arguments.
    args: optional, tuple or list of scalars or PyMC3 variables
        Additional inputs (besides X or Xs) to lengthscale_func.
    """

    def __init__(self, input_dim, lengthscale_func, args=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        # This kernel is only tested in 1D: reject multi-dimensional use,
        # whether expressed via active_dims or via input_dim itself.
        if active_dims is not None:
            if len(active_dims) > 1:
                raise NotImplementedError(("Higher dimensional inputs ", "are untested"))
        else:
            if input_dim != 1:
                raise NotImplementedError(("Higher dimensional inputs ", "are untested"))
        if not callable(lengthscale_func):
            raise TypeError("lengthscale_func must be callable")
        # handle_args normalizes optional extra arguments into *args calls.
        self.lfunc = handle_args(lengthscale_func, args)
        self.args = args

    def square_dist(self, X, Xs=None):
        # Unscaled squared Euclidean distance (no lengthscale division here;
        # the position-dependent lengthscales are applied in full()).
        X2 = aet.sum(aet.square(X), 1)
        if Xs is None:
            sqd = -2.0 * aet.dot(X, aet.transpose(X)) + (
                aet.reshape(X2, (-1, 1)) + aet.reshape(X2, (1, -1))
            )
        else:
            Xs2 = aet.sum(aet.square(Xs), 1)
            sqd = -2.0 * aet.dot(X, aet.transpose(Xs)) + (
                aet.reshape(X2, (-1, 1)) + aet.reshape(Xs2, (1, -1))
            )
        # Clip negative round-off from the expanded-square formula.
        return aet.clip(sqd, 0.0, np.inf)

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        # rx / rz: lengthscale evaluated at the row / column inputs.
        rx = self.lfunc(aet.as_tensor_variable(X), self.args)
        if Xs is None:
            rz = self.lfunc(aet.as_tensor_variable(X), self.args)
            r2 = self.square_dist(X, X)
        else:
            rz = self.lfunc(aet.as_tensor_variable(Xs), self.args)
            r2 = self.square_dist(X, Xs)
        rx2 = aet.reshape(aet.square(rx), (-1, 1))
        rz2 = aet.reshape(aet.square(rz), (1, -1))
        # Gibbs formula: sqrt(2 l(x) l(x') / (l(x)^2 + l(x')^2))
        #                * exp(-r^2 / (l(x)^2 + l(x')^2))
        return aet.sqrt((2.0 * aet.outer(rx, rz)) / (rx2 + rz2)) * aet.exp(-1.0 * r2 / (rx2 + rz2))

    def diag(self, X):
        # k(x, x) = 1 for any lengthscale function.
        return aet.alloc(1.0, X.shape[0])
class ScaledCov(Covariance):
    r"""
    Construct a kernel by multiplying a base kernel with a scaling
    function defined using Aesara. The scaling function is
    non-negative, and can be parameterized.

    .. math::
       k(x, x') = \phi(x) k_{\text{base}}(x, x') \phi(x')

    Parameters
    ----------
    cov_func: Covariance
        Base kernel or covariance function
    scaling_func: callable
        Aesara function of X and additional optional arguments.
    args: optional, tuple or list of scalars or PyMC3 variables
        Additional inputs (besides X or Xs) to lengthscale_func.
    """

    def __init__(self, input_dim, cov_func, scaling_func, args=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        if not callable(scaling_func):
            raise TypeError("scaling_func must be callable")
        if not isinstance(cov_func, Covariance):
            raise TypeError("Must be or inherit from the Covariance class")
        self.cov_func = cov_func
        self.scaling_func = handle_args(scaling_func, args)
        self.args = args

    def diag(self, X):
        X, _ = self._slice(X, None)
        # Diagonal: phi(x)^2 * k(x, x).
        scale = aet.square(aet.flatten(self.scaling_func(X, self.args)))
        return self.cov_func(X, diag=True) * scale

    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        scale_rows = self.scaling_func(X, self.args)
        if Xs is None:
            return aet.outer(scale_rows, scale_rows) * self.cov_func(X)
        scale_cols = self.scaling_func(Xs, self.args)
        return aet.outer(scale_rows, scale_cols) * self.cov_func(X, Xs)
class Coregion(Covariance):
    r"""Covariance function for intrinsic/linear coregionalization models.
    Adapted from GPy http://gpy.readthedocs.io/en/deploy/GPy.kern.src.html#GPy.kern.src.coregionalize.Coregionalize.

    This covariance has the form:

    .. math::
       \mathbf{B} = \mathbf{W}\mathbf{W}^\top + \text{diag}(\kappa)

    and calls must use integers associated with the index of the matrix.
    This allows the api to remain consistent with other covariance objects:

    .. math::
       k(x, x') = \mathbf{B}[x, x'^\top]

    Parameters
    ----------
    W: 2D array of shape (num_outputs, rank)
        a low rank matrix that determines the correlations between
        the different outputs (rows)
    kappa: 1D array of shape (num_outputs, )
        a vector which allows the outputs to behave independently
    B: 2D array of shape (num_outputs, rank)
        the total matrix, exactly one of (W, kappa) and B must be provided

    Notes
    -----
    Exactly one dimension must be active for this kernel. Thus, if
    `input_dim != 1`, then `active_dims` must have a length of one.
    """

    def __init__(self, input_dim, W=None, kappa=None, B=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        # The inputs are integer output-indices, so exactly one active
        # dimension makes sense.
        if len(self.active_dims) != 1:
            raise ValueError("Coregion requires exactly one dimension to be active")
        # Either (W, kappa) together, or B alone, may parameterize the kernel.
        make_B = W is not None or kappa is not None
        if make_B and B is not None:
            raise ValueError("Exactly one of (W, kappa) and B must be provided to Coregion")
        if make_B:
            # B = W W^T + diag(kappa).
            self.W = aet.as_tensor_variable(W)
            self.kappa = aet.as_tensor_variable(kappa)
            self.B = aet.dot(self.W, self.W.T) + aet.diag(self.kappa)
        elif B is not None:
            self.B = aet.as_tensor_variable(B)
        else:
            # Neither parameterization was supplied.
            raise ValueError("Exactly one of (W, kappa) and B must be provided to Coregion")

    def full(self, X, Xs=None):
        # Inputs are (float-encoded) integer indices into B; cast and use
        # advanced indexing to broadcast B[row, col] over all pairs.
        X, Xs = self._slice(X, Xs)
        index = aet.cast(X, "int32")
        if Xs is None:
            index2 = index.T
        else:
            index2 = aet.cast(Xs, "int32").T
        return self.B[index, index2]

    def diag(self, X):
        X, _ = self._slice(X, None)
        index = aet.cast(X, "int32")
        # k(x, x) = B[x, x], i.e. the diagonal of B gathered at the indices.
        return aet.diag(self.B)[index.ravel()]
def handle_args(func, args):
    """Return a wrapper that calls ``func`` with optional extra arguments.

    The returned callable takes ``(x, args)``; when ``args`` is None only
    ``x`` is passed, a non-tuple ``args`` is promoted to a 1-tuple, and a
    tuple is unpacked into positional arguments.
    """

    def wrapped(x, args):
        if args is None:
            return func(x)
        extra = args if isinstance(args, tuple) else (args,)
        return func(x, *extra)

    return wrapped
| 31.057247
| 116
| 0.577418
|
4a0282be2e7a880e598508d913997c3732f833be
| 11,702
|
py
|
Python
|
Blender 2.91/Data/FmdlSplitVertexEncoding.py
|
Hazuki-san/PES_Face_Hair_Modifier
|
e1daa4a6dfb6a9d7158582e6ca57c8fa3b23bdcc
|
[
"MIT"
] | 3
|
2020-12-23T14:43:03.000Z
|
2022-02-06T03:38:48.000Z
|
Blender 2.91/Data/FmdlSplitVertexEncoding.py
|
Hazuki-san/PES_Face_Hair_Modifier
|
e1daa4a6dfb6a9d7158582e6ca57c8fa3b23bdcc
|
[
"MIT"
] | 2
|
2021-01-22T19:13:06.000Z
|
2022-02-10T15:55:49.000Z
|
Blender 2.91/Data/FmdlSplitVertexEncoding.py
|
Hazuki-san/PES_Face_Hair_Modifier
|
e1daa4a6dfb6a9d7158582e6ca57c8fa3b23bdcc
|
[
"MIT"
] | 2
|
2021-01-14T13:51:43.000Z
|
2021-03-28T16:52:01.000Z
|
from . import FmdlFile
#
# FMDL files store mesh geometry as vertices, and faces that are sequences of
# vertices.
#
# Blender, and many other mesh editors, support a richer notion of geometry,
# consisting of vertices, faces, and loops. A vertex is a combination of a
# position and position transformation behavior (in FMDL context: vertex
# position, and bone mapping), and a loop is a vertex as it occurs in a
# particular face, adding rendering information such as normals, tangents,
# colors, and UV coordinates. Faces are a sequence of loops, not a sequence of
# vertices.
#
# If a vertex is rendered in different ways in different faces it occurs in,
# the vertex/face/loop geometry expresses this as a single vertex with multiple
# different loops; the vertex/face geometry would express multiple different
# vertices with identical position and transformation behavior instead. The
# advantage of vertex/face/loop geometry while editing a mesh is that the
# different loops making up a vertex can be edited as a unit, being able to be
# moved and transformed and such as a unit; whereas the vertex/face geometry
# would make it easy to accidentally move only one loop of a vertex, and create
# an inconsistent geometry thereby.
#
# The FMDL format natively stores vertex/face geometry only. This module
# implements a nonstandard FMDL encoding, compatible with native FMDL, that can
# preserve the vertex/loop relation. This information is encoded in the order
# of different vertices in a mesh.
#
# The encoding is defined as follows:
#
# Two vertices in a mesh of an FMDL file are considered *topologically
# equivalent* if they have the same vertex position, bone indices and bone
# weights. The values are compared as byte sequences as they are stored in the
# FMDL vertex buffer.
#
# The *nontopological encoding* of a vertex in an FMDL file is a summary of all
# vertex data stored in the FMDL file other than the three values that determine
# topological equivalence. It consists of the concatenation of the byte
# sequences encoding these vertex data points, in order of increasing data point
# type enum. For currently known FMDL versions, those are (in order) the vertex
# normal, color, UV coordinates in increasing order, and tangent.
#
# If vertices X and Y are topologically equivalent, X is considered less than Y,
# denoted X < Y, if the nontopological encoding of X is strictly less than the
# nontopological order of Y, in lexicographical order. X < Y is not defined when
# X and Y are not topologically equivalent.
#
# The vertex/loop relation is encoded as follows: two vertices X and Y in a mesh
# of an FMDL file are loops of the same vertex if and only if:
# - X and Y are topolotically equivalent; and
# - X < Y; and
# - vertex Y comes immediately after vertex X in the vertex buffer. That is, the
# vertex index of Y is 1 + the vertex index of X.
#
# A mesh consisting of vertex/face/loop geometry can be stored using this
# encoding by first making a list of all vertices and loops in the geometry,
# and computing the FMDL bytewise encoding of each loop; collapsing identical
# loops for a vertex to a single loop; ordering the different loops of a vertex
# in order of increasing nontopological encoding; ordering different vertices
# that happen to be topologically equivalent in order of nonincreasing
# topological encoding, to avoid accidentally combining distinct vertices;
# and then adding these vertices to the FMDL vertex buffer in such a way that
# the loops making up a vertex make a contiguous block respecting this ordering.
#
# FMDL files whose vertex buffers implement vertex/loop structure are marked by
# the `X-FMDL-Extensions: vertex-loop-preservation` extension header.
#
def topologicalKey(encodedVertex, vertexFields):
	"""Return the key under which vertices compare as topologically equivalent.

	Two encoded vertices with equal keys share position bytes and (when bone
	mapping is present) bone mapping bytes.
	"""
	if not vertexFields.hasBoneMapping:
		return encodedVertex.position
	return (encodedVertex.position, tuple(encodedVertex.boneMapping))
def nontopologicalEncoding(encodedVertex, vertexFields):
	"""Concatenate the byte encodings of all non-topological vertex fields.

	Order matters: normal, color, UV channels in ascending order, tangent —
	this defines the ordering used to sort loops of a vertex.
	"""
	parts = bytearray()
	if vertexFields.hasNormal:
		parts += encodedVertex.normal
	if vertexFields.hasColor:
		parts += encodedVertex.color
	for channel in range(4):
		if vertexFields.uvCount > channel:
			parts += encodedVertex.uv[channel]
	if vertexFields.hasTangent:
		parts += encodedVertex.tangent
	return bytes(parts)
def replaceFaceVertices(faces, replacedVertices):
	"""Return new Face objects with vertices substituted per replacedVertices.

	Vertices not present in the replacement map are kept as-is.
	"""
	newFaces = []
	for face in faces:
		vertices = [replacedVertices.get(vertex, vertex) for vertex in face.vertices]
		newFaces.append(FmdlFile.FmdlFile.Face(*vertices))
	return newFaces
#
# Consider all FMDL vertices to be loops of the same vertex when they share a
# position object pointer.
#
def encodeMeshVertexLoopPreservation(mesh):
	"""Reorder (and deduplicate) a mesh's vertices so the vertex/loop relation
	survives in the FMDL vertex-order encoding described at the top of this
	module. Returns a new Mesh; the input mesh is not modified.
	"""
	#
	# Map from topological keys to lists of position objects
	#
	topologicallyEquivalentVertices = {}
	#
	# Map from position objects to lists of encoded vertices
	#
	splitVertices = {}
	for encodedVertex in mesh.vertexEncoding:
		key = topologicalKey(encodedVertex, mesh.vertexFields)
		if encodedVertex.vertex.position not in splitVertices:
			splitVertices[encodedVertex.vertex.position] = []
			if key not in topologicallyEquivalentVertices:
				topologicallyEquivalentVertices[key] = []
			topologicallyEquivalentVertices[key].append(encodedVertex.vertex.position)
		splitVertices[encodedVertex.vertex.position].append(encodedVertex)
	#
	# Sort splitVertices by nontopological encoding, and remove duplicates.
	#
	# replacedVertices records loops collapsed onto an identical earlier loop,
	# so faces can be rewritten to reference the surviving vertex.
	replacedVertices = {}
	for key in splitVertices:
		loops = {}
		for encodedVertex in splitVertices[key]:
			encoding = nontopologicalEncoding(encodedVertex, mesh.vertexFields)
			if encoding in loops:
				replacedVertices[encodedVertex.vertex] = loops[encoding].vertex
			else:
				loops[encoding] = encodedVertex
		splitVertices[key] = [loops[encoding] for encoding in sorted(loops.keys())]
	#
	# Sort topologicallyEquivalentVertices by nontopological encoding of
	# the first element, in descending order.
	#
	# Descending order prevents the decoder from mistaking two distinct
	# vertices that happen to be topologically equivalent for loops of one
	# vertex (the decoder requires strictly ascending encodings within a run).
	for (key, positions) in topologicallyEquivalentVertices.items():
		topologicallyEquivalentVertices[key] = sorted(positions, reverse = True, key = (
			lambda position : nontopologicalEncoding(splitVertices[position][0], mesh.vertexFields)
		))
	# Emit each topological-equivalence group as one contiguous block, with
	# each vertex's loops contiguous and in ascending encoding order.
	encodedVertices = []
	addedTopologicalKeys = set()
	for encodedVertex in mesh.vertexEncoding:
		key = topologicalKey(encodedVertex, mesh.vertexFields)
		if key not in addedTopologicalKeys:
			addedTopologicalKeys.add(key)
			for position in topologicallyEquivalentVertices[key]:
				encodedVertices += splitVertices[position]
	# Build the output mesh with reordered vertices and rewritten faces.
	output = FmdlFile.FmdlFile.Mesh()
	output.boneGroup = mesh.boneGroup
	output.materialInstance = mesh.materialInstance
	output.alphaEnum = mesh.alphaEnum
	output.shadowEnum = mesh.shadowEnum
	output.vertexFields = mesh.vertexFields
	output.vertices = [encodedVertex.vertex for encodedVertex in encodedVertices]
	output.faces = replaceFaceVertices(mesh.faces, replacedVertices)
	output.vertexEncoding = encodedVertices
	output.extensionHeaders = mesh.extensionHeaders.copy()
	return output
def encodeFmdlVertexLoopPreservation(fmdl):
	"""Return a new FmdlFile whose meshes are encoded with the vertex-loop
	ordering scheme, and which advertises the
	'vertex-loop-preservation' extension header. The input is not modified.
	"""
	fmdl.precomputeVertexEncoding()
	output = FmdlFile.FmdlFile()
	output.bones = fmdl.bones
	output.materialInstances = fmdl.materialInstances
	# Encode every mesh, remembering old -> new so mesh groups can be remapped.
	output.meshes = []
	meshMap = {}
	for mesh in fmdl.meshes:
		encodedMesh = encodeMeshVertexLoopPreservation(mesh)
		output.meshes.append(encodedMesh)
		meshMap[mesh] = encodedMesh
	# Two passes over mesh groups: first create all copies, then wire up
	# parent/children links (which may point at groups in either direction).
	output.meshGroups = []
	meshGroupMap = {}
	for meshGroup in fmdl.meshGroups:
		encodedMeshGroup = FmdlFile.FmdlFile.MeshGroup()
		output.meshGroups.append(encodedMeshGroup)
		meshGroupMap[meshGroup] = encodedMeshGroup
	for meshGroup in fmdl.meshGroups:
		encodedMeshGroup = meshGroupMap[meshGroup]
		encodedMeshGroup.name = meshGroup.name
		encodedMeshGroup.boundingBox = meshGroup.boundingBox
		encodedMeshGroup.visible = meshGroup.visible
		if meshGroup.parent == None:
			encodedMeshGroup.parent = None
		else:
			encodedMeshGroup.parent = meshGroupMap[meshGroup.parent]
		encodedMeshGroup.children = []
		for child in meshGroup.children:
			encodedMeshGroup.children.append(meshGroupMap[child])
		encodedMeshGroup.meshes = []
		for mesh in meshGroup.meshes:
			encodedMeshGroup.meshes.append(meshMap[mesh])
	# Copy the extension headers and add the marker for this encoding.
	output.extensionHeaders = {}
	for (key, value) in fmdl.extensionHeaders.items():
		output.extensionHeaders[key] = value[:]
	if 'X-FMDL-Extensions' not in output.extensionHeaders:
		output.extensionHeaders['X-FMDL-Extensions'] = []
	output.extensionHeaders['X-FMDL-Extensions'].append("vertex-loop-preservation")
	return output
def decodeMeshVertexLoopPreservation(mesh):
	"""Rebuild the vertex/loop relation of a mesh encoded by
	encodeMeshVertexLoopPreservation: consecutive, topologically equivalent
	vertices with strictly ascending nontopological encodings are merged into
	loops that share one position object. Returns a new Mesh.
	"""
	vertexEncoding = []
	vertices = []
	replacedVertices = {}
	previousEncodedVertex = None
	for encodedVertex in mesh.vertexEncoding:
		# A vertex is a loop of its predecessor iff they are topologically
		# equivalent AND the encodings strictly ascend (see module header).
		if (
			    previousEncodedVertex != None
			and topologicalKey(encodedVertex, mesh.vertexFields)
			 == topologicalKey(previousEncodedVertex, mesh.vertexFields)
			and nontopologicalEncoding(previousEncodedVertex, mesh.vertexFields)
			  < nontopologicalEncoding(encodedVertex, mesh.vertexFields)
		):
			# Merge: reuse the predecessor's position (and bone mapping)
			# object so all loops of the vertex share them.
			vertex = FmdlFile.FmdlFile.Vertex()
			vertex.position = previousEncodedVertex.vertex.position
			vertex.normal = encodedVertex.vertex.normal
			vertex.tangent = encodedVertex.vertex.tangent
			vertex.color = encodedVertex.vertex.color
			vertex.boneMapping = previousEncodedVertex.vertex.boneMapping
			vertex.uv = encodedVertex.vertex.uv[:]
			
			encoding = FmdlFile.FmdlFile.VertexEncoding()
			encoding.vertex = vertex
			encoding.position = encodedVertex.position
			encoding.normal = encodedVertex.normal
			encoding.tangent = encodedVertex.tangent
			encoding.color = encodedVertex.color
			encoding.boneMapping = encodedVertex.boneMapping
			encoding.uv = encodedVertex.uv[:]
			
			vertexEncoding.append(encoding)
			vertices.append(vertex)
			replacedVertices[encodedVertex.vertex] = vertex
			previousEncodedVertex = encoding
		else:
			# Not a loop continuation: keep the vertex unchanged.
			vertexEncoding.append(encodedVertex)
			vertices.append(encodedVertex.vertex)
			previousEncodedVertex = encodedVertex
	# Build the output mesh with merged vertices and rewritten faces.
	output = FmdlFile.FmdlFile.Mesh()
	output.boneGroup = mesh.boneGroup
	output.materialInstance = mesh.materialInstance
	output.alphaEnum = mesh.alphaEnum
	output.shadowEnum = mesh.shadowEnum
	output.vertexFields = mesh.vertexFields
	output.vertices = vertices
	output.faces = replaceFaceVertices(mesh.faces, replacedVertices)
	output.vertexEncoding = vertexEncoding
	output.extensionHeaders = mesh.extensionHeaders.copy()
	return output
def decodeFmdlVertexLoopPreservation(fmdl):
	"""Return a new FmdlFile with the vertex/loop relation decoded, or the
	input unchanged when the 'vertex-loop-preservation' extension is absent.

	The input is never modified.
	"""
	# NOTE(review): the encoder writes the header under 'X-FMDL-Extensions';
	# the lookup here uses the lowercased key, which suggests FmdlFile
	# normalizes header names on read -- confirm against FmdlFile.
	# Using .get() makes a missing header mean "extension absent" instead of
	# raising KeyError.
	if fmdl.extensionHeaders is None:
		return fmdl
	if "vertex-loop-preservation" not in fmdl.extensionHeaders.get('x-fmdl-extensions', []):
		return fmdl
	output = FmdlFile.FmdlFile()
	output.bones = fmdl.bones
	output.materialInstances = fmdl.materialInstances
	# Decode every mesh, remembering old -> new so mesh groups can be remapped.
	output.meshes = []
	meshMap = {}
	for mesh in fmdl.meshes:
		encodedMesh = decodeMeshVertexLoopPreservation(mesh)
		output.meshes.append(encodedMesh)
		meshMap[mesh] = encodedMesh
	# Two passes over mesh groups: first create all copies, then wire up
	# parent/children links (which may point at groups in either direction).
	output.meshGroups = []
	meshGroupMap = {}
	for meshGroup in fmdl.meshGroups:
		encodedMeshGroup = FmdlFile.FmdlFile.MeshGroup()
		output.meshGroups.append(encodedMeshGroup)
		meshGroupMap[meshGroup] = encodedMeshGroup
	for meshGroup in fmdl.meshGroups:
		encodedMeshGroup = meshGroupMap[meshGroup]
		encodedMeshGroup.name = meshGroup.name
		encodedMeshGroup.boundingBox = meshGroup.boundingBox
		encodedMeshGroup.visible = meshGroup.visible
		if meshGroup.parent is None:
			encodedMeshGroup.parent = None
		else:
			encodedMeshGroup.parent = meshGroupMap[meshGroup.parent]
		encodedMeshGroup.children = []
		for child in meshGroup.children:
			encodedMeshGroup.children.append(meshGroupMap[child])
		encodedMeshGroup.meshes = []
		for mesh in meshGroup.meshes:
			encodedMeshGroup.meshes.append(meshMap[mesh])
	return output
| 39.268456
| 114
| 0.779696
|
4a0282d22aa92a1c45f2e403ece20801fd82d1cc
| 344
|
py
|
Python
|
01_Language/01_Functions/python/date_offset_get.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 3
|
2020-06-28T07:42:51.000Z
|
2021-01-15T10:32:11.000Z
|
01_Language/01_Functions/python/date_offset_get.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 9
|
2021-03-10T22:45:40.000Z
|
2022-02-27T06:53:20.000Z
|
01_Language/01_Functions/python/date_offset_get.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 1
|
2021-01-15T10:51:24.000Z
|
2021-01-15T10:51:24.000Z
|
# coding: utf-8
import pytz
import datetime

if __name__ == '__main__':
    # America/New_York observes daylight saving time, so the UTC offset
    # differs between a winter date (EST) and a summer date (EDT).
    tz = pytz.timezone("America/New_York")
    winter = datetime.datetime.strptime("2010-12-21", "%Y-%m-%d")
    summer = datetime.datetime.strptime("2008-06-21", "%Y-%m-%d")
    # utcoffset() returns a timedelta; total_seconds() converts it to a float.
    print(tz.utcoffset(winter).total_seconds())
    print(tz.utcoffset(summer).total_seconds())
| 28.666667
| 65
| 0.677326
|
4a02845fa4b7814f64a526adb31c98ca979fa391
| 1,358
|
py
|
Python
|
galleryViewer/urls.py
|
amirhossein-bayati/photo-gallery
|
01c18bd139afd1e6fd496a1ebff547daec95b156
|
[
"MIT"
] | 1
|
2021-10-10T07:09:44.000Z
|
2021-10-10T07:09:44.000Z
|
galleryViewer/urls.py
|
amirhossein-bayati/photo-gallery
|
01c18bd139afd1e6fd496a1ebff547daec95b156
|
[
"MIT"
] | null | null | null |
galleryViewer/urls.py
|
amirhossein-bayati/photo-gallery
|
01c18bd139afd1e6fd496a1ebff547daec95b156
|
[
"MIT"
] | null | null | null |
"""galleryViewer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from blog import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
sitemaps = {
'posts': PostSitemap,
}
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls', namespace='blog')),
path('account/', include('Account.urls', namespace='account')),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 33.95
| 103
| 0.705449
|
4a0284b1c4922386303716b34de187705e0290f3
| 14,425
|
py
|
Python
|
vunit/sim_if/rivierapro.py
|
solsjo/vunit
|
0e4dbbe3b59c93eecdef6cd7121ca300e21ee222
|
[
"Artistic-2.0",
"Apache-2.0"
] | null | null | null |
vunit/sim_if/rivierapro.py
|
solsjo/vunit
|
0e4dbbe3b59c93eecdef6cd7121ca300e21ee222
|
[
"Artistic-2.0",
"Apache-2.0"
] | null | null | null |
vunit/sim_if/rivierapro.py
|
solsjo/vunit
|
0e4dbbe3b59c93eecdef6cd7121ca300e21ee222
|
[
"Artistic-2.0",
"Apache-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, Lars Asplund lars.anders.asplund@gmail.com
"""
Interface towards Aldec Riviera Pro
"""
from pathlib import Path
import os
import re
import logging
from ..exceptions import CompileError
from ..ostools import Process, file_exists
from ..vhdl_standard import VHDL
from . import SimulatorInterface, ListOfStringOption, StringOption
from .vsim_simulator_mixin import VsimSimulatorMixin, fix_path
LOGGER = logging.getLogger(__name__)
class RivieraProInterface(VsimSimulatorMixin, SimulatorInterface):
"""
Riviera Pro interface
"""
name = "rivierapro"
supports_gui_flag = True
package_users_depend_on_bodies = True
compile_options = [
ListOfStringOption("rivierapro.vcom_flags"),
ListOfStringOption("rivierapro.vlog_flags"),
]
sim_options = [
ListOfStringOption("rivierapro.vsim_flags"),
ListOfStringOption("rivierapro.vsim_flags.gui"),
ListOfStringOption("rivierapro.init_files.after_load"),
ListOfStringOption("rivierapro.init_files.before_run"),
StringOption("rivierapro.init_file.gui"),
]
    @classmethod
    def from_args(cls, args, output_path, elaborate_only=False, precompiled=None, **kwargs):
        """
        Create new instance from command line arguments object
        """
        # A persistent simulator process cannot be shared across unique
        # simulations or GUI sessions.
        persistent = not (args.unique_sim or args.gui)
        return cls(
            prefix=cls.find_prefix(),
            output_path=output_path,
            persistent=persistent,
            gui=args.gui,
            elaborate_only=elaborate_only,
            precompiled=precompiled,
        )
    @classmethod
    def find_prefix_from_path(cls):
        """
        Find RivieraPro toolchain.

        Must have vsim and vsimsa binaries but no avhdl.exe
        """

        def no_avhdl(path):
            # avhdl.exe marks an Active-HDL install, which also ships vsim;
            # its absence distinguishes a Riviera-PRO install.
            return not file_exists(str(Path(path) / "avhdl.exe"))

        return cls.find_toolchain(["vsim", "vsimsa"], constraints=[no_avhdl])
    @classmethod
    def _get_version(cls):
        """
        Return a VersionConsumer object containing the simulator version.
        """
        # Run 'vcom -version' and let the consumer parse the version string
        # from its output.
        proc = Process([str(Path(cls.find_prefix()) / "vcom"), "-version"], env=cls.get_env())
        consumer = VersionConsumer()
        proc.consume_output(consumer)
        return consumer
@classmethod
def get_osvvm_coverage_api(cls):
"""
Returns simulator name when OSVVM coverage API is supported, None otherwise.
"""
version = cls._get_version()
if version.year is not None:
if (version.year == 2016 and version.month >= 10) or (version.year > 2016):
return cls.name
return None
    @classmethod
    def supports_vhdl_package_generics(cls):
        """
        Returns True when this simulator supports VHDL package generics
        """
        return True
    @staticmethod
    def supports_coverage():
        """
        Returns True when the simulator supports coverage
        """
        return True
    def __init__(  # pylint: disable=too-many-arguments
        self,
        prefix,
        output_path,
        persistent=False,
        gui=False,
        elaborate_only=False,
        precompiled=None,
    ):
        """
        Initialize the Riviera-PRO interface.

        :param prefix: path to the directory containing the simulator binaries
        :param output_path: directory for simulator output artifacts
        :param persistent: keep a single vsim process alive between runs
        :param gui: launch simulations in the GUI
        :param elaborate_only: stop after elaboration without running
        :param precompiled: precompiled library information forwarded to the
                            base classes
        """
        SimulatorInterface.__init__(self, output_path, gui, elaborate_only, precompiled)
        VsimSimulatorMixin.__init__(
            self,
            prefix,
            persistent,
            sim_cfg_file_name=str(Path(output_path) / "library.cfg"),
            elaborate_only=elaborate_only,
            precompiled=precompiled,
        )
        # Ensure a library.cfg exists before any libraries are mapped.
        self._create_library_cfg()
        self._libraries = []
        self._coverage_files = set()
        self._version = self._get_version()
    def add_simulator_specific(self, project):
        """
        Add builtin (global) libraries
        """
        built_in_libraries = self._get_mapped_libraries(self._builtin_library_cfg)

        for library_name in built_in_libraries:
            # A user might shadow a built in library with their own version
            if not project.has_library(library_name):
                project.add_builtin_library(library_name)
def setup_library_mapping(self, project):
"""
Setup library mapping
"""
mapped_libraries = self._get_mapped_libraries(self._sim_cfg_file_name)
for library in project.get_libraries():
self._libraries.append(library)
self.create_library(library.name, library.directory, mapped_libraries)
    def compile_source_file_command(self, source_file):
        """
        Returns the command to compile a single source_file
        """
        # Dispatch on file type: vcom for VHDL, vlog for (System)Verilog.
        if source_file.is_vhdl:
            return self.compile_vhdl_file_command(source_file)
        if source_file.is_any_verilog:
            return self.compile_verilog_file_command(source_file)

        LOGGER.error("Unknown file type: %s", source_file.file_type)
        raise CompileError
def _std_str(self, vhdl_standard):
    """
    Translate a VHDL standard into the Riviera-PRO command-line flag.

    Releases older than 2020.04 spell the VHDL-2019 flag as "-2018".
    """
    if vhdl_standard == VHDL.STD_2019:
        version = self._version
        if version.year is not None:
            older_than_2020_04 = version.year < 2020 or (
                version.year == 2020 and version.month < 4
            )
            if older_than_2020_04:
                return "-2018"
        return "-2019"
    return "-%s" % vhdl_standard
def compile_vhdl_file_command(self, source_file):
    """
    Return the vcom command line that compiles a VHDL source_file.
    """
    args = [
        str(Path(self._prefix) / "vcom"),
        "-quiet",
        "-j",
        str(Path(self._sim_cfg_file_name).parent),
    ]
    # User-supplied flags go between the fixed options and the work library.
    args += source_file.compile_options.get("rivierapro.vcom_flags", [])
    args += [
        self._std_str(source_file.get_vhdl_standard()),
        "-work",
        source_file.library.name,
        source_file.name,
    ]
    return args
def compile_verilog_file_command(self, source_file):
    """
    Return the vlog command line that compiles a (System)Verilog source_file.
    """
    args = [
        str(Path(self._prefix) / "vlog"),
        "-quiet",
        "-lc",
        self._sim_cfg_file_name,
    ]
    if source_file.is_system_verilog:
        args.append("-sv2k12")
    args.extend(source_file.compile_options.get("rivierapro.vlog_flags", []))
    args.extend(["-work", source_file.library.name, source_file.name])
    for library in self._libraries:
        args.extend(["-l", library.name])
    args.extend("+incdir+%s" % include_dir for include_dir in source_file.include_dirs)
    for key, value in source_file.defines.items():
        define = "+define+%s" % key
        if value:
            define += "=%s" % value
        args.append(define)
    return args
def create_library(self, library_name, path, mapped_libraries=None):
    """
    Create library_name at path (when missing) and map it in the config.

    mapped_libraries is the current {name: path} mapping; a library already
    mapped to the same path is not re-mapped.
    """
    if mapped_libraries is None:
        mapped_libraries = {}

    parent_dir = str(Path(path).parent.resolve())
    if not file_exists(parent_dir):
        os.makedirs(parent_dir)

    cfg_dir = str(Path(self._sim_cfg_file_name).parent)
    if not file_exists(path):
        vlib_proc = Process(
            [str(Path(self._prefix) / "vlib"), library_name, path],
            cwd=cfg_dir,
            env=self.get_env(),
        )
        vlib_proc.consume_output(callback=None)

    # Already mapped to this exact location: nothing more to do.
    if library_name in mapped_libraries and mapped_libraries[library_name] == path:
        return

    vmap_proc = Process(
        [str(Path(self._prefix) / "vmap"), library_name, path],
        cwd=cfg_dir,
        env=self.get_env(),
    )
    vmap_proc.consume_output(callback=None)
def _create_library_cfg(self):
    """
    Write an initial library.cfg (pulling in the built-in one) if missing.
    """
    if file_exists(self._sim_cfg_file_name):
        return
    with Path(self._sim_cfg_file_name).open("w", encoding="utf-8") as ofile:
        ofile.write('$INCLUDE = "%s"\n' % self._builtin_library_cfg)
@property
def _builtin_library_cfg(self):
    """Path of the library.cfg shipped with the Riviera-PRO installation."""
    return str(Path(self._prefix).parent / "vlib" / "library.cfg")
# Matches "<library_name> = <path>" lines as printed by vlist.
_library_re = re.compile(r"([a-zA-Z_0-9]+)\s=\s(.*)")

def _get_mapped_libraries(self, library_cfg_file):
    """
    Return {library_name: absolute_path} by running vlist next to the
    given library configuration file.
    """
    output_lines = []
    vlist_proc = Process(
        [str(Path(self._prefix) / "vlist")], cwd=str(Path(library_cfg_file).parent)
    )
    vlist_proc.consume_output(callback=output_lines.append)

    cfg_dir = Path(library_cfg_file).parent
    libraries = {}
    for line in output_lines:
        match = self._library_re.match(line)
        if match is None:
            continue
        name = match.group(1)
        value = match.group(2)
        # vlist paths are relative to the cfg directory; resolve the parent.
        libraries[name] = str((cfg_dir / (Path(value).parent)).resolve())
    return libraries
def _create_load_function(self, test_suite_name, config, output_path):  # pylint: disable=unused-argument
    """
    Create the vunit_load TCL function that runs the vsim command and
    loads the design.
    """
    generic_args = " ".join(
        "-g/%s/%s=%s" % (config.entity_name, name, format_generic(value))
        for name, value in config.generics.items()
    )
    pli_args = " ".join('-pli "%s"' % fix_path(name) for name in config.sim_options.get("pli", []))

    vsim_flags = [
        "-dataset {%s}" % fix_path(str(Path(output_path) / "dataset.asdb")),
        pli_args,
        generic_args,
    ]

    if config.sim_options.get("enable_coverage", False):
        coverage_file_path = str(Path(output_path) / "coverage.acdb")
        self._coverage_files.add(coverage_file_path)
        vsim_flags.append("-acdb_file {%s}" % coverage_file_path)

    vsim_flags.append(self._vsim_extra_args(config))

    if config.sim_options.get("disable_ieee_warnings", False):
        vsim_flags.append("-ieee_nowarn")

    # Add the testbench top-level unit last as coverage is only collected
    # for the top-level unit specified last.
    vsim_flags += ["-lib", config.library_name, config.entity_name]
    if config.architecture_name is not None:
        vsim_flags.append(config.architecture_name)

    tcl = """
proc vunit_load {{}} {{
    # Make the variable 'aldec' visible; otherwise, the Matlab interface
    # is broken because vsim does not find the library aldec_matlab_cosim.
    global aldec
    # Make the variable 'LICENSE_QUEUE' visible (if set); otherwise vsim
    # will not wait for simulation licenses.
    global LICENSE_QUEUE

    set vsim_failed [catch {{
        eval vsim {{{vsim_flags}}}
    }}]

    if {{${{vsim_failed}}}} {{
        return true
    }}

    if {{[_vunit_source_init_files_after_load]}} {{
        return true
    }}

    vhdlassert.break {break_level}
    vhdlassert.break -builtin {break_level}

    return false
}}
""".format(
        vsim_flags=" ".join(vsim_flags), break_level=config.vhdl_assert_stop_level
    )
    return tcl
def _vsim_extra_args(self, config):
    """
    Return extra vsim arguments from sim_options joined into one string.

    In GUI mode the GUI-specific flags override the plain ones when present.
    """
    extra_args = config.sim_options.get("rivierapro.vsim_flags", [])
    if self._gui:
        extra_args = config.sim_options.get("rivierapro.vsim_flags.gui", extra_args)
    return " ".join(extra_args)
@staticmethod
def _create_run_function():
    """
    Create the vunit_run function to run the test bench.
    """
    return """
proc _vunit_run_failure {} {
    catch {
        # tb command can fail when error comes from pli
        echo "Stack trace result from 'bt' command"
        bt
    }
}

proc _vunit_run {} {
    if {[_vunit_source_init_files_before_run]} {
        return true
    }

    proc on_break {} {
        resume
    }
    onbreak {on_break}

    run -all
}

proc _vunit_sim_restart {} {
    restart
}
"""
def merge_coverage(self, file_name, args=None):
    """
    Merge coverage from all test cases into file_name.

    Writes a one-line "acdb merge" TCL script and executes it with vsim
    in batch mode.
    """
    if self._persistent_shell is not None:
        # Teardown to ensure acdb file was written.
        self._persistent_shell.teardown()

    command_parts = ["acdb merge"]
    for coverage_file in self._coverage_files:
        if file_exists(coverage_file):
            command_parts.append(" -i {%s}" % coverage_file.replace("\\", "/"))
        else:
            LOGGER.warning("Missing coverage file: %s", coverage_file)
    if args is not None:
        command_parts.append(" " + " ".join("{%s}" % arg for arg in args))
    command_parts.append(" -o {%s}" % file_name.replace("\\", "/"))

    merge_script_name = Path(self._output_path) / "acdb_merge.tcl"
    with merge_script_name.open("w", encoding="utf-8") as fptr:
        fptr.write("".join(command_parts) + "\n")

    vcover_cmd = [
        str(Path(self._prefix) / "vsim"),
        "-c",
        "-do",
        "source {%s}; quit;" % str(merge_script_name).replace("\\", "/"),
    ]
    print("Merging coverage files into %s..." % file_name)
    vcover_merge_process = Process(vcover_cmd, env=self.get_env())
    vcover_merge_process.consume_output()
    print("Done merging coverage files")
def format_generic(value):
    """
    Quote generic values containing a space so TCL sees them as one token.
    """
    text = str(value)
    return '"%s"' % text if " " in text else text
class VersionConsumer(object):
    """
    Consume version information from tool output, one line at a time.
    """

    # Captures year and month from versions shaped like "2020.04.123".
    _version_re = re.compile(r"(?P<year>\d+)\.(?P<month>\d+)\.\d+")

    def __init__(self):
        # Both stay None until a version line has been seen.
        self.year = None
        self.month = None

    def __call__(self, line):
        """Scan line for a version; record it and return True when found."""
        found = self._version_re.search(line)
        if found is None:
            return None
        self.year = int(found.group("year"))
        self.month = int(found.group("month"))
        return True
| 30.954936
| 109
| 0.599307
|
4a028532823c53aa6fb4e3b2a558267fff859ef1
| 8,339
|
py
|
Python
|
ngrok/client.py
|
ngrok/ngrok-api-python
|
951146e42586e0893d1c23d1b6bad20156c449f9
|
[
"MIT"
] | 2
|
2021-11-24T19:55:35.000Z
|
2022-03-26T05:32:35.000Z
|
ngrok/client.py
|
ngrok/ngrok-api-python
|
951146e42586e0893d1c23d1b6bad20156c449f9
|
[
"MIT"
] | 2
|
2021-09-10T16:17:01.000Z
|
2021-12-07T00:15:01.000Z
|
ngrok/client.py
|
ngrok/ngrok-api-python
|
951146e42586e0893d1c23d1b6bad20156c449f9
|
[
"MIT"
] | 4
|
2021-07-19T06:41:05.000Z
|
2022-03-14T22:48:55.000Z
|
from __future__ import annotations
import collections
import os
from .services import *
class Client(object):
    """Root ngrok API client.

    Holds the shared HTTP client and exposes one sub-client per ngrok API
    resource via read-only properties.
    """

    def __init__(self, api_key: str, base_url: str = "https://api.ngrok.com"):
        # All sub-clients share this single HTTP client instance.
        self.http_client = HTTPClient(api_key, base_url)

    @property
    def abuse_reports(self) -> AbuseReportsClient:
        """Abuse Reports allow you to submit take-down requests for URLs hosted by
        ngrok that violate ngrok's terms of service."""
        return AbuseReportsClient(self)

    @property
    def agent_ingresses(self) -> AgentIngressesClient:
        return AgentIngressesClient(self)

    @property
    def api_keys(self) -> APIKeysClient:
        """API Keys are used to authenticate to the `ngrok
        API <https://ngrok.com/docs/api#authentication>`_. You may use the API itself
        to provision and manage API Keys but you'll need to provision your first API
        key from the `API Keys page <https://dashboard.ngrok.com/api/keys>`_ on your
        ngrok.com dashboard."""
        return APIKeysClient(self)

    @property
    def certificate_authorities(self) -> CertificateAuthoritiesClient:
        """Certificate Authorities are x509 certificates that are used to sign other
        x509 certificates. Attach a Certificate Authority to the Mutual TLS module
        to verify that the TLS certificate presented by a client has been signed by
        this CA. Certificate Authorities are used only for mTLS validation only and
        thus a private key is not included in the resource."""
        return CertificateAuthoritiesClient(self)

    @property
    def credentials(self) -> CredentialsClient:
        """Tunnel Credentials are ngrok agent authtokens. They authorize the ngrok
        agent to connect the ngrok service as your account. They are installed with
        the ``ngrok authtoken`` command or by specifying it in the ``ngrok.yml``
        configuration file with the ``authtoken`` property."""
        return CredentialsClient(self)

    @property
    def endpoint_configurations(self) -> EndpointConfigurationsClient:
        """Endpoint Configurations are a reusable group of modules that encapsulate how
        traffic to a domain or address is handled. Endpoint configurations are only
        applied to Domains and TCP Addresses they have been attached to."""
        return EndpointConfigurationsClient(self)

    @property
    def event_streams(self) -> EventStreamsClient:
        return EventStreamsClient(self)

    @property
    def event_destinations(self) -> EventDestinationsClient:
        return EventDestinationsClient(self)

    @property
    def event_subscriptions(self) -> EventSubscriptionsClient:
        return EventSubscriptionsClient(self)

    @property
    def event_sources(self) -> EventSourcesClient:
        return EventSourcesClient(self)

    @property
    def ip_policies(self) -> IPPoliciesClient:
        """IP Policies are reusable groups of CIDR ranges with an ``allow`` or ``deny``
        action. They can be attached to endpoints via the Endpoint Configuration IP
        Policy module. They can also be used with IP Restrictions to control source
        IP ranges that can start tunnel sessions and connect to the API and dashboard."""
        return IPPoliciesClient(self)

    @property
    def ip_policy_rules(self) -> IPPolicyRulesClient:
        """IP Policy Rules are the IPv4 or IPv6 CIDRs entries that
        make up an IP Policy."""
        return IPPolicyRulesClient(self)

    @property
    def ip_restrictions(self) -> IPRestrictionsClient:
        """An IP restriction is a restriction placed on the CIDRs that are allowed to
        initiate traffic to a specific aspect of your ngrok account. An IP
        restriction has a type which defines the ingress it applies to. IP
        restrictions can be used to enforce the source IPs that can make API
        requests, log in to the dashboard, start ngrok agents, and connect to your
        public-facing endpoints."""
        return IPRestrictionsClient(self)

    @property
    def reserved_addrs(self) -> ReservedAddrsClient:
        """Reserved Addresses are TCP addresses that can be used to listen for traffic.
        TCP address hostnames and ports are assigned by ngrok, they cannot be
        chosen."""
        return ReservedAddrsClient(self)

    @property
    def reserved_domains(self) -> ReservedDomainsClient:
        """Reserved Domains are hostnames that you can listen for traffic on. Domains
        can be used to listen for http, https or tls traffic. You may use a domain
        that you own by creating a CNAME record specified in the returned resource.
        This CNAME record points traffic for that domain to ngrok's edge servers."""
        return ReservedDomainsClient(self)

    @property
    def ssh_certificate_authorities(self) -> SSHCertificateAuthoritiesClient:
        """An SSH Certificate Authority is a pair of an SSH Certificate and its private
        key that can be used to sign other SSH host and user certificates."""
        return SSHCertificateAuthoritiesClient(self)

    @property
    def ssh_credentials(self) -> SSHCredentialsClient:
        """SSH Credentials are SSH public keys that can be used to start SSH tunnels
        via the ngrok SSH tunnel gateway."""
        return SSHCredentialsClient(self)

    @property
    def ssh_host_certificates(self) -> SSHHostCertificatesClient:
        """SSH Host Certificates along with the corresponding private key allows an SSH
        server to assert its authenticity to connecting SSH clients who trust the
        SSH Certificate Authority that was used to sign the certificate."""
        return SSHHostCertificatesClient(self)

    @property
    def ssh_user_certificates(self) -> SSHUserCertificatesClient:
        """SSH User Certificates are presented by SSH clients when connecting to an SSH
        server to authenticate their connection. The SSH server must trust the SSH
        Certificate Authority used to sign the certificate."""
        return SSHUserCertificatesClient(self)

    @property
    def tls_certificates(self) -> TLSCertificatesClient:
        """TLS Certificates are pairs of x509 certificates and their matching private
        key that can be used to terminate TLS traffic. TLS certificates are unused
        until they are attached to a Domain. TLS Certificates may also be
        provisioned by ngrok automatically for domains on which you have enabled
        automated certificate provisioning."""
        return TLSCertificatesClient(self)

    @property
    def tunnel_sessions(self) -> TunnelSessionsClient:
        """Tunnel Sessions represent instances of ngrok agents or SSH reverse tunnel
        sessions that are running and connected to the ngrok service. Each tunnel
        session can include one or more Tunnels."""
        return TunnelSessionsClient(self)

    @property
    def tunnels(self) -> TunnelsClient:
        """Tunnels provide endpoints to access services exposed by a running ngrok
        agent tunnel session or an SSH reverse tunnel session."""
        return TunnelsClient(self)

    @property
    def pointcfg_module(self):
        """Namespace bundling one client per endpoint-configuration module."""
        # BUG FIX: namedtuple() takes the field names as a single sequence
        # (or one space-separated string) in its second argument, NOT as
        # separate positional arguments; the original call raised TypeError.
        ns = collections.namedtuple(
            "Namespace",
            [
                "logging",
                "circuit_breaker",
                "compression",
                "tls_termination",
                "ip_policy",
                "mutual_tls",
                "request_headers",
                "response_headers",
                "oauth",
                "webhook_validation",
                "saml",
                "oidc",
            ],
        )
        return ns(
            logging=EndpointLoggingModuleClient(self),
            circuit_breaker=EndpointCircuitBreakerModuleClient(self),
            compression=EndpointCompressionModuleClient(self),
            tls_termination=EndpointTLSTerminationModuleClient(self),
            ip_policy=EndpointIPPolicyModuleClient(self),
            mutual_tls=EndpointMutualTLSModuleClient(self),
            request_headers=EndpointRequestHeadersModuleClient(self),
            response_headers=EndpointResponseHeadersModuleClient(self),
            oauth=EndpointOAuthModuleClient(self),
            webhook_validation=EndpointWebhookValidationModuleClient(self),
            saml=EndpointSAMLModuleClient(self),
            oidc=EndpointOIDCModuleClient(self),
        )
| 44.356383
| 89
| 0.697925
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.