content
stringlengths 0
894k
| type
stringclasses 2
values |
|---|---|
"""Implements the Projection extension.
https://github.com/stac-extensions/projection
"""
from typing import Any, Dict, Generic, List, Optional, Set, TypeVar, cast
import pystac
from pystac.extensions.hooks import ExtensionHooks
from pystac.extensions.base import (
ExtensionManagementMixin,
PropertiesExtension,
)
# This extension may wrap either an Item or an Asset.
T = TypeVar("T", pystac.Item, pystac.Asset)

# JSON Schema that validates the Projection extension fields.
SCHEMA_URI = "https://stac-extensions.github.io/projection/v1.0.0/schema.json"

# Property keys defined by the Projection extension specification.
EPSG_PROP = "proj:epsg"
WKT2_PROP = "proj:wkt2"
PROJJSON_PROP = "proj:projjson"
GEOM_PROP = "proj:geometry"
BBOX_PROP = "proj:bbox"
CENTROID_PROP = "proj:centroid"
SHAPE_PROP = "proj:shape"
TRANSFORM_PROP = "proj:transform"
class ProjectionExtension(
    Generic[T], PropertiesExtension, ExtensionManagementMixin[pystac.Item]
):
    """An abstract class that can be used to extend either a :class:`pystac.Item`
    or :class:`pystac.Asset` with properties from the Projection extension.

    The Projection extension adds projection information to STAC Items.

    Args:
        item : The item to be extended.

    Attributes:
        item : The Item that is being extended.

    Note:
        Using ProjectionExtension to directly wrap an item will add the 'proj'
        extension ID to the item's stac_extensions.
    """

    def __init__(self, item: pystac.Item) -> None:
        self.item = item

    def apply(
        self,
        epsg: Optional[int],
        wkt2: Optional[str] = None,
        projjson: Optional[Dict[str, Any]] = None,
        geometry: Optional[Dict[str, Any]] = None,
        bbox: Optional[List[float]] = None,
        centroid: Optional[Dict[str, float]] = None,
        shape: Optional[List[int]] = None,
        transform: Optional[List[float]] = None,
    ) -> None:
        """Applies Projection extension properties to the extended Item.

        Args:
            epsg : REQUIRED. EPSG code of the datasource. May be ``None`` if the
                data has no EPSG-representable CRS.
            wkt2 : WKT2 string representing the Coordinate Reference
                System (CRS) that the ``geometry`` and ``bbox`` fields represent
            projjson : PROJJSON dict representing the
                Coordinate Reference System (CRS) that the ``geometry`` and ``bbox``
                fields represent
            geometry : GeoJSON Polygon dict that defines the footprint of
                this Item.
            bbox : Bounding box of the Item in the asset CRS in
                2 or 3 dimensions.
            centroid : A dict with members 'lat' and 'lon' that defines
                coordinates representing the centroid of the item in the asset data CRS.
                Coordinates are defined in latitude and longitude, even if the data
                coordinate system may not use lat/long.
            shape : Number of pixels in Y and X directions for the
                default grid.
            transform : The affine transformation coefficients for
                the default grid
        """
        self.epsg = epsg
        self.wkt2 = wkt2
        self.projjson = projjson
        self.geometry = geometry
        self.bbox = bbox
        self.centroid = centroid
        self.shape = shape
        self.transform = transform

    @property
    def epsg(self) -> Optional[int]:
        """Get or sets the EPSG code of the datasource.

        A Coordinate Reference System (CRS) is the data reference system (sometimes
        called a 'projection') used by the asset data, and can usually be referenced
        using an `EPSG code <http://epsg.io/>`_.

        If the asset data does not have a CRS, such as in the case of non-rectified
        imagery with Ground Control Points, epsg should be set to None.
        It should also be set to null if a CRS exists, but for which there is no valid
        EPSG code.

        Returns:
            int
        """
        return self._get_property(EPSG_PROP, int)

    @epsg.setter
    def epsg(self, v: Optional[int]) -> None:
        # proj:epsg is a required field; a None value must be serialized as an
        # explicit null rather than popped from the properties dict.
        self._set_property(EPSG_PROP, v, pop_if_none=False)

    @property
    def wkt2(self) -> Optional[str]:
        """Get or sets the WKT2 string representing the Coordinate Reference
        System (CRS) that the proj:geometry and proj:bbox fields represent.

        This value is a
        `WKT2 string <http://docs.opengeospatial.org/is/12-063r5/12-063r5.html>`_.

        If the data does not have a CRS, such as in the case of non-rectified imagery
        with Ground Control Points, wkt2 should be set to null. It should also be set
        to null if a CRS exists, but for which a WKT2 string does not exist.

        Returns:
            str
        """
        return self._get_property(WKT2_PROP, str)

    @wkt2.setter
    def wkt2(self, v: Optional[str]) -> None:
        self._set_property(WKT2_PROP, v)

    @property
    def projjson(self) -> Optional[Dict[str, Any]]:
        """Get or sets the PROJJSON dict representing the Coordinate Reference
        System (CRS) that the proj:geometry and proj:bbox fields represent.

        This value is a
        `PROJJSON object <https://proj.org/specifications/projjson.html>`_.

        If the data does not have a CRS, such as in the case of non-rectified imagery
        with Ground Control Points, projjson should be set to null. It should also be
        set to null if a CRS exists, but for which a PROJJSON string does not exist.

        The schema for this object can be found
        `here <https://proj.org/schemas/v0.2/projjson.schema.json>`_.

        Returns:
            dict
        """
        return self._get_property(PROJJSON_PROP, Dict[str, Any])

    @projjson.setter
    def projjson(self, v: Optional[Dict[str, Any]]) -> None:
        self._set_property(PROJJSON_PROP, v)

    @property
    def geometry(self) -> Optional[Dict[str, Any]]:
        """Get or sets a Polygon GeoJSON dict representing the footprint of this item.

        This dict should be formatted according the Polygon object format specified in
        `RFC 7946, sections 3.1.6 <https://tools.ietf.org/html/rfc7946>`_,
        except not necessarily in EPSG:4326 as required by RFC7946. Specified based on
        the ``epsg``, ``projjson`` or ``wkt2`` fields (not necessarily EPSG:4326).
        Ideally, this will be represented by a Polygon with five coordinates, as the
        item in the asset data CRS should be a square aligned to the original CRS grid.

        Returns:
            dict
        """
        return self._get_property(GEOM_PROP, Dict[str, Any])

    @geometry.setter
    def geometry(self, v: Optional[Dict[str, Any]]) -> None:
        self._set_property(GEOM_PROP, v)

    @property
    def bbox(self) -> Optional[List[float]]:
        """Get or sets the bounding box of the assets represented by this item in the
        asset data CRS.

        Specified as 4 or 6 coordinates based on the CRS defined in the ``epsg``,
        ``projjson`` or ``wkt2`` properties. First two numbers are coordinates of the
        lower left corner, followed by coordinates of upper right corner, e.g.,
        [west, south, east, north], [xmin, ymin, xmax, ymax], [left, down, right, up],
        or [west, south, lowest, east, north, highest]. The length of the array
        must be 2*n where n is the number of dimensions.

        Returns:
            List[float]
        """
        return self._get_property(BBOX_PROP, List[float])

    @bbox.setter
    def bbox(self, v: Optional[List[float]]) -> None:
        self._set_property(BBOX_PROP, v)

    @property
    def centroid(self) -> Optional[Dict[str, float]]:
        """Get or sets coordinates representing the centroid of the item in the
        asset data CRS.

        Coordinates are defined in latitude and longitude, even if the data coordinate
        system does not use lat/long.

        Example::

            item.ext.proj.centroid = { 'lat': 0.0, 'lon': 0.0 }

        Returns:
            dict
        """
        return self._get_property(CENTROID_PROP, Dict[str, float])

    @centroid.setter
    def centroid(self, v: Optional[Dict[str, float]]) -> None:
        self._set_property(CENTROID_PROP, v)

    @property
    def shape(self) -> Optional[List[int]]:
        """Get or sets the number of pixels in Y and X directions for the default grid.

        The shape is an array of integers that represents the number of pixels in the
        most common pixel grid used by the item's assets. The number of pixels should
        be specified in Y, X order. If the shape is defined in an item's properties it
        is used as the default shape for all assets that don't have an overriding shape.

        Returns:
            List[int]
        """
        return self._get_property(SHAPE_PROP, List[int])

    @shape.setter
    def shape(self, v: Optional[List[int]]) -> None:
        self._set_property(SHAPE_PROP, v)

    @property
    def transform(self) -> Optional[List[float]]:
        """Get or sets the affine transformation coefficients for the default grid.

        The transform is a linear mapping from pixel coordinate space (Pixel, Line) to
        projection coordinate space (Xp, Yp). It is a 3x3 matrix stored as a flat array
        of 9 elements in row major order. Since the last row is always 0,0,1 it can be
        omitted, in which case only 6 elements are recorded. This mapping can be
        obtained from
        GDAL `GetGeoTransform <https://gdal.org/api/gdaldataset_cpp.html#_CPPv4N11GDALDataset15GetGeoTransformEPd>`_
        or the
        Rasterio `Transform <https://rasterio.readthedocs.io/en/stable/api/rasterio.io.html#rasterio.io.BufferedDatasetWriter.transform>`_.

        Returns:
            List[float]
        """  # noqa: E501
        return self._get_property(TRANSFORM_PROP, List[float])

    @transform.setter
    def transform(self, v: Optional[List[float]]) -> None:
        self._set_property(TRANSFORM_PROP, v)

    @classmethod
    def get_schema_uri(cls) -> str:
        """Return the JSON Schema URI for the Projection extension."""
        return SCHEMA_URI

    @staticmethod
    def ext(obj: T) -> "ProjectionExtension[T]":
        """Extend the given STAC object with properties from the Projection extension.

        Args:
            obj : The Item or Asset to extend.

        Raises:
            pystac.ExtensionTypeError : If ``obj`` is not an Item or Asset.
        """
        if isinstance(obj, pystac.Item):
            return cast(ProjectionExtension[T], ItemProjectionExtension(obj))
        elif isinstance(obj, pystac.Asset):
            return cast(ProjectionExtension[T], AssetProjectionExtension(obj))
        else:
            # Bug fix: the original message said "File extension", a copy-paste
            # from the File extension module; name the correct extension here.
            raise pystac.ExtensionTypeError(
                f"Projection extension does not apply to type {type(obj)}"
            )
class ItemProjectionExtension(ProjectionExtension[pystac.Item]):
    """Projection extension wrapper around a :class:`pystac.Item`.

    Extension fields are read from and written to the item's ``properties`` dict.
    """

    def __init__(self, item: pystac.Item):
        self.item = item
        self.properties = item.properties

    def __repr__(self) -> str:
        return f"<ItemProjectionExtension Item id={self.item.id}>"
class AssetProjectionExtension(ProjectionExtension[pystac.Asset]):
    """Projection extension wrapper around a :class:`pystac.Asset`.

    Extension fields live in the asset's ``properties``; when the asset is owned
    by an Item, the owning item's properties serve as a read fallback.
    """

    def __init__(self, asset: pystac.Asset):
        self.asset_href = asset.href
        self.properties = asset.properties
        # isinstance(None, pystac.Item) is False, so an unset owner is covered too.
        if isinstance(asset.owner, pystac.Item):
            self.additional_read_properties = [asset.owner.properties]

    def __repr__(self) -> str:
        return f"<AssetProjectionExtension Asset href={self.asset_href}>"
class ProjectionExtensionHooks(ExtensionHooks):
    """Registers the Projection extension schema and migration metadata.

    ``prev_extension_ids`` lists the short IDs used by pre-1.0 STAC objects so
    they can be migrated to the full ``SCHEMA_URI`` identifier.
    """

    schema_uri: str = SCHEMA_URI
    # Idiom fix: set literals instead of set([...]) wrappers (flake8 C403).
    prev_extension_ids: Set[str] = {"proj", "projection"}
    stac_object_types: Set[pystac.STACObjectType] = {pystac.STACObjectType.ITEM}
PROJECTION_EXTENSION_HOOKS: ExtensionHooks = ProjectionExtensionHooks()
|
python
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def home_view(request, *args, **kwargs):
    """Render the site home page with an empty template context."""
    context = {}
    return render(request, "home.html", context)
|
python
|
import re
import utils as u
# Load the puzzle input that sits next to this script ("<script>.input.txt").
with open(__file__ + ".input.txt", "r+") as file:
    input_str = file.read()
# One policy line looks like: "<from>-<to> <letter>: <password>".
regex = re.compile(r"(?P<from>\d+)-(?P<to>\d+)\s(?P<letter>\w):\s(?P<password>\w+)")


def is_valid_password(input_str):
    """Part 1 rule: the letter's occurrence count must lie within [from, to]."""
    lo, hi, letter, password = regex.search(input_str).groups()
    occurrences = password.count(letter)
    return int(lo) <= occurrences <= int(hi)


def is_valid_password_for_part_two(input_str):
    """Part 2 rule: exactly one of the 1-based positions from/to holds the letter."""
    lo, hi, letter, password = regex.search(input_str).groups()
    at_first = password[int(lo) - 1] == letter
    at_second = password[int(hi) - 1] == letter
    return at_first != at_second
# part 1 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
# Sanity-check the worked examples from the puzzle statement first.
u.assert_equals(is_valid_password("1-3 a: abcde"), True)
u.assert_equals(is_valid_password("1-3 b: cdefg"), False)
u.assert_equals(is_valid_password("2-9 c: ccccccccc"), True)
# Answer: number of valid lines in the real input (one policy per line).
u.answer_part_1(sum(1 for string in input_str.split("\n") if is_valid_password(string)))
# 347 too low
# 519 OK
# part 2 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
u.assert_equals(is_valid_password_for_part_two("1-3 a: abcde"), True)
u.assert_equals(is_valid_password_for_part_two("1-3 b: cdefg"), False)
u.assert_equals(is_valid_password_for_part_two("2-9 c: ccccccccc"), False)
u.answer_part_2(
    sum(1 for string in input_str.split("\n") if is_valid_password_for_part_two(string))
)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# libfv.py ────────────────────────────────────────────────────────────────
# │                                                                      │
# │  A Python library module that supports read/modification/write of    │
# │  .otf and .ttf font version strings                                  │
# │                                                                      │
# │  Copyright 2018 Christopher Simpkins                                 │
# │  MIT License                                                         │
# │                                                                      │
# │  Source: https://github.com/source-foundry/font-v                    │
# │                                                                      │
# ────────────────────────────────────────────────────────────────────────
from __future__ import unicode_literals
import os
import re
from fontTools import ttLib
from git import Repo
from fontv.utilities import get_git_root_path
class FontVersion(object):
"""
FontVersion is a ttf and otf font version string class that provides support for font version string reads,
reporting, modification, & writes. It provides full support for the OpenFV font versioning specification
(https://github.com/openfv/openfv). Support is provided for instantiation from ttf and otf fonts, as well
as from fontTools.ttLib.ttFont objects (https://github.com/fonttools/fonttools).
The class works on Python "strings". String types indicated below refer to the Python2 unicode type and Python3
string type.
PUBLIC ATTRIBUTES:
contains_metadata: (boolean) boolean for presence of metadata in version string
contains_state: (boolean) boolean for presence of state substring metadata in the version string
contains_status: (boolean) boolean for presence of development/release status substring in the version string
develop_string: (string) The string to use for development builds in the absence of git commit SHA1 string
fontpath: (string) The path to the font file
is_development: (boolean) boolean for presence of development status substring at version_string_parts[1]
is_release: (boolean) boolean for presence of release status status substring at version_string_parts[1]
metadata: (list) A list of metadata substrings in the version string. Either version_string_parts[1:] or empty list
release_string: (string) The string to use for release builds in the absence of git commit SHA1 string
sha1_develop: (string) The string to append to the git SHA1 hash string for development builds
sha1_release: (string) The string to append to the git SHA1 hash string for release builds
state: (string) The state metadata substring
ttf: (fontTools.ttLib.TTFont) for font file
version_string_parts: (list) List that maintains in memory semicolon parsed substrings of font version string
version: (string) The version number substring formatted as "Version X.XXX"
PRIVATE ATTRIBUTES
_nameID_5_dict: (dictionary) {(platformID, platEncID,langID) : fontTools.ttLib.TTFont name record ID 5 object } map
:parameter font: (string) file path to the .otf or .ttf font file OR (ttLib.TTFont) object for appropriate font file
:parameter develop: (string) the string to use for development builds in the absence of git commit SHA1 string
:parameter release: (string) the string to use for release builds in the absence of a git commit SHA1 string
:parameter sha1_develop: (string) the string to append to the git SHA1 hash string for development builds
:parameter sha1_release: (string) the string to append to the git SHA1 hash string for release builds
:raises: fontTools.ttLib.TTLibError if fontpath is not a ttf or otf font
:raises: IndexError if there are no nameID 5 records in the font name table
:raises: IOError if fontpath does not exist
"""
def __init__(
    self,
    font,
    develop="DEV",
    release="RELEASE",
    sha1_develop="-dev",
    sha1_release="-release",
):
    """Instantiate from a font file path or a fontTools.ttLib.TTFont object.

    See the class docstring for full parameter and exception documentation.
    """
    try:
        # assume that it is a ttLib.TTFont object and attempt to call object attributes
        self.fontpath = font.reader.file.name
        # if it does not raise AttributeError, we guessed correctly, can set the ttf attr here
        self.ttf = font
    except AttributeError:
        # if above attempt to call TTFont attribute raises AttributeError (as it would with string file path)
        # then instantiate a ttLib.TTFont object and define the fontpath attribute with the file path string
        self.ttf = ttLib.TTFont(file=font, recalcTimestamp=False)
        self.fontpath = font
    self.develop_string = develop
    self.release_string = release
    self.sha1_develop = sha1_develop
    self.sha1_release = sha1_release
    # name.ID = 5 version string substring data
    self.name_ID5_dict = {}
    self.version_string_parts = (
        []
    )  # list of substring items in version string (; delimited parse to list)
    self.version = ""
    self.state = ""
    self.metadata = []
    # truth test values for version string contents, updated with self._parse() method calls following updates to
    # in memory version string data with methods in this library
    self.contains_metadata = False
    self.contains_state = False
    self.contains_status = False
    self.is_development = False
    self.is_release = False
    # head.fontRevision data. float type
    self.head_fontRevision = 0.0
    # object instantiation method call (truth test values updated in the following method)
    self._read_version_string()
def __eq__(self, otherfont):
    """Return True when both FontVersion objects hold identical version substrings.

    :param otherfont: fontv.libfv.FontVersion object for comparison
    :return: (boolean) True = versions are the same; False otherwise
    """
    # Strict type comparison (not isinstance) so subclasses never compare equal.
    if type(otherfont) is not type(self):
        return False
    return self.version_string_parts == otherfont.version_string_parts
def __ne__(self, otherfont):
    """Return True when the two FontVersion objects differ (inverse of __eq__).

    :param otherfont: fontv.libfv.FontVersion object for comparison
    :return: (boolean) True = versions differ; False = versions are the same
    """
    return not (self == otherfont)
def __str__(self):
    """Return a human readable, multi-line summary of this FontVersion.

    :return: (string)
    """
    summary_lines = [
        "<fontv.libfv.FontVersion> ",
        self.get_name_id5_version_string(),
        "file path: " + self.fontpath,
    ]
    return os.linesep.join(summary_lines)
# TODO: confirm comparisons of version numbers like "Version 1.001", "Version 1.01", "Version 1.1" as not the same
# TODO: before this is released. Will need to be documented as such because this is not obvious behavior
# def __gt__(self, otherfont):
# """
#
# :param otherfont:
#
# :return:
# """
# return self.get_version_number_tuple() > otherfont.get_version_number_tuple()
#
# def __lt__(self, otherfont):
# """
#
# :param otherfont:
#
# :return:
# """
# return self.get_version_number_tuple() < otherfont.get_version_number_tuple()
def _parse(self):
    """Refresh all derived attributes from the version_string_parts list.

    Called on instantiation and after every method that mutates the in-memory
    version string data.

    :return: None
    """
    self._parse_metadata()  # metadata substrings
    self._parse_state()  # [state] substring data
    self._parse_status()  # development / release status indicator
def _read_version_string(self):
    """Read nameID 5 and head.fontRevision records from the font and set attributes.

    Called on instantiation of a FontVersion object.

    :raises: IndexError if the font has no nameID 5 name table records
    :return: None
    """
    # Collect every nameID 5 record, keyed by (platformID, platEncID, langID).
    for record in self.ttf["name"].names:
        if record.nameID == 5:
            recordkey = (record.platformID, record.platEncID, record.langID)
            self.name_ID5_dict[recordkey] = record.toUnicode()
    # At least one nameID 5 record is required to instantiate a FontVersion.
    if not self.name_ID5_dict:
        raise IndexError(
            "Unable to read nameID 5 version records from the font " + self.fontpath
        )
    # Use the first version string the dictionary serves up.
    version_string = next(iter(self.name_ID5_dict.values()))
    # Parse into substrings; first substring is the version number.
    self._parse_version_substrings(version_string)
    self.version = self.version_string_parts[0]
    # head.fontRevision is stored as a float.
    self.head_fontRevision = self.ttf["head"].fontRevision
    # Update derived attributes from the data just read in.
    self._parse()
def _get_repo_commit(self):
    """
    Private method that makes a system git call via the GitPython library and returns a short git commit
    SHA1 hash string for the commit at HEAD using `git rev-list`.
    :return: (string) short git commit SHA1 hash string
    """
    repo = Repo(get_git_root_path(self.fontpath))
    gitpy = repo.git
    # git rev-list --abbrev-commit --max-count=1 --format="%h" HEAD - abbreviated unique sha1 for the repository
    # number of sha1 hex characters determined by git (addresses https://github.com/source-foundry/font-v/issues/2)
    full_git_sha_string = gitpy.rev_list(
        "--abbrev-commit", "--max-count=1", '--format="%h"', "HEAD"
    )
    unicode_full_sha_string = full_git_sha_string
    # rev-list with --format emits two lines ("commit <sha>" then the formatted
    # short sha); take line 2 and strip the quotes added by --format="%h".
    sha_string_list = unicode_full_sha_string.split("\n")
    final_sha_string = sha_string_list[1].replace('"', "")
    return final_sha_string
def _parse_metadata(self):
    """Derive the metadata list and contains_metadata flag from the version string.

    Metadata are defined as everything beyond the first substring of the
    semicolon-parsed version string (see the OpenFV specification,
    https://github.com/openfv/openfv).

    :return: None
    """
    # Everything past the version number substring is metadata; slicing an
    # empty tail yields [] which makes the flag False.
    self.metadata = list(self.version_string_parts[1:])
    self.contains_metadata = len(self.version_string_parts) > 1
def _parse_state(self):
    """Derive state and contains_state from a [ ... ] delimited State substring.

    Per the OpenFV specification (https://github.com/openfv/openfv) the State
    substring, when present, sits at version_string_parts[1].

    :return: None
    """
    # Default to "no state"; overwrite below only when a match is found.
    self.contains_state = False
    self.state = ""
    if len(self.version_string_parts) > 1:
        is_match, matched_state = self._is_state_substring_return_state_match(
            self.version_string_parts[1]
        )
        if is_match:
            self.contains_state = True
            self.state = matched_state
def _parse_status(self):
    """Derive development/release Status flags from version_string_parts[1].

    See the OpenFV specification for the Status substring metadata definition
    (https://github.com/openfv/openfv).

    :return: None
    """
    # Guard clause: a bare version number carries no status metadata.
    if len(self.version_string_parts) <= 1:
        self.contains_status = False
        self.is_development = False
        self.is_release = False
        return
    status_needle = self.version_string_parts[1]
    self.is_development = self._is_development_substring(status_needle)
    self.is_release = self._is_release_substring(status_needle)
    self.contains_status = self.is_development or self.is_release
def _parse_version_substrings(self, version_string):
    """Split a semicolon delimited version string into version_string_parts.

    :param version_string: (string) the semicolon delimited version string to split
    :return: None
    """
    # str.split already returns a single-item list when no ";" is present,
    # so no separate branch is needed for the delimiter-free case.
    self.version_string_parts = version_string.split(";")
    self.version = self.version_string_parts[0]
def _set_state_status_substring(self, state_status_string):
    """
    Private method that sets the State/Status substring in the FontVersion.version_string_parts[1] list position.
    The method preserves Other metadata when present in the version string.
    See OpenFV specification for State/Status substring and Other metadata definition details
    (https://github.com/openfv/openfv)
    :param state_status_string: (string) the string value to insert at the status substring position of the
    self.version_string_parts list
    :return: None
    """
    if len(self.version_string_parts) > 1:
        prestring = self.version_string_parts[1]
        state_response = self._is_state_substring_return_state_match(prestring)
        is_state_substring = state_response[0]
        if (
            self._is_release_substring(prestring)
            or self._is_development_substring(prestring)
            or is_state_substring
        ):
            # directly replace when existing status substring
            self.version_string_parts[1] = state_status_string
        else:
            # if the second item of the substring list is not a status string, save it and all subsequent list items
            # then create a new list with inserted status string value
            # NOTE(review): this branch relies on self.metadata being current,
            # i.e. _parse_metadata() having run since the last mutation
            self.version_string_parts = [
                self.version_string_parts[0]
            ]  # redefine list as list with version number
            self.version_string_parts.append(
                state_status_string
            )  # define the status substring as next item
            for (
                item
            ) in (
                self.metadata
            ):  # iterate through all previous metadata substrings and append to list
                self.version_string_parts.append(item)
    else:
        # if the version string is defined as only a version number substring (i.e. list size = 1),
        # write the new status substring to the list. Nothing else required
        self.version_string_parts.append(state_status_string)
    # update FontVersion truth testing properties based upon the new data
    self._parse()
def _is_development_substring(self, needle):
    """Return True when needle is a Development Status substring per OpenFV.

    A match is either an exact (whitespace-stripped) develop_string or a needle
    ending with the sha1_develop suffix.

    :param needle: (string) test string
    :return: boolean True = is development substring; False otherwise
    """
    if self.develop_string == needle.strip():
        return True
    # Equivalent to checking sha1_develop against the needle's tail slice.
    return needle.endswith(self.sha1_develop)
def _is_release_substring(self, needle):
    """Return True when needle is a Release Status substring per OpenFV.

    A match is either an exact (whitespace-stripped) release_string or a needle
    ending with the sha1_release suffix.

    :param needle: (string) test string
    :return: boolean True = is release substring; False otherwise
    """
    if self.release_string == needle.strip():
        return True
    # Equivalent to checking sha1_release against the needle's tail slice.
    return needle.endswith(self.sha1_release)
def _is_state_substring_return_state_match(self, needle):
    """Test needle for a State substring; return (matched?, state text).

    The state text is the content between the [ and ] delimiters as captured by
    the regex pattern; it is an empty string when there is no match.
    See the OpenFV specification (https://github.com/openfv/openfv).

    :param needle: (string) test string to attempt match for state substring
    :return: (boolean, string)
    """
    state_match = re.match(r"\s?\[([a-zA-Z0-9_\-\.]{1,50})\]", needle)
    if state_match is None:
        return False, ""
    return True, state_match.group(1)
def clear_metadata(self):
    """Remove all metadata, leaving only the version number substring.

    The resulting version string contains ONLY the version number. See the
    OpenFV specification (https://github.com/openfv/openfv).

    :return: None
    """
    # Rebind (rather than mutate in place) to match prior aliasing behavior.
    self.version_string_parts = self.version_string_parts[:1]
    self._parse()
def get_version_number_string(self):
    """Return the version number as an "XXX.XXX" style string.

    A match allows up to three digits on either side of the period, per the
    OpenFV specification (https://github.com/openfv/openfv).

    :return: string; empty string if no version number pattern can be parsed
    """
    number_match = re.search(r"\d{1,3}\.\d{1,3}", self.version)
    return number_match.group(0) if number_match else ""
def get_version_number_tuple(self):
    """Return the version number as a tuple of integers.

    Tuple layout: (major version, minor digit 1, minor digit 2, ...) where each
    decimal position of the minor version contributes one integer item, per the
    OpenFV version number format (https://github.com/openfv/openfv).

    :return: tuple of integers, or None if the version number is inappropriately formatted
    """
    number_match = re.search(r"\d{1,3}\.\d{1,3}", self.version)
    if number_match is None:
        return None
    major_string, minor_string = number_match.group(0).split(".")
    int_items = [int(major_string)]
    # Each character position of the minor version becomes its own integer.
    int_items.extend(int(digit) for digit in minor_string)
    return tuple(int_items)
def get_head_fontrevision_version_number(self):
    """
    Public method that returns the version number that is parsed from the head.fontRevision
    record as a float value.
    :return: float
    """
    return self.head_fontRevision
# TODO: remove this deprecated method (commented out in v0.7.0, deprecation warnings in v0.6.0)
# def get_version_string(self):
# """
# DEPRECATED: Please convert to use of FontVersion.get_name_id5_version_string() method
# """
# warnings.simplefilter('always')
# warnstring = "[WARNING] FontVersion.get_version_string is a deprecated method. Please convert to " \
# "FontVersion.get_name_id5_version_string."
# warnings.warn(warnstring, DeprecationWarning, stacklevel=2)
# return ";".join(self.version_string_parts)
def get_name_id5_version_string(self):
    """
    Public method that returns the full version string as the semicolon delimiter joined contents of the
    FontVersion.version_string_parts Python list.
    :return: string (Python 3) or unicode (Python 2)
    """
    return ";".join(self.version_string_parts)
def get_metadata_list(self):
    """
    Public method that returns a Python list containing metadata substring items generated by splitting the
    string on a semicolon delimiter. Metadata are defined according to the OpenFV specification.
    The version number string (i.e. "Version X.XXX") is not present in this list.
    See OpenFV specification for the version string Metadata definition (https://github.com/openfv/openfv)
    :return: list of string (Python 3) or list of unicode (Python 2)
    """
    # Returns the live list object, not a copy; callers should not mutate it.
    return self.metadata
def get_state_status_substring(self):
    """Return the State and/or Status substring (position 2 of the version string).

    Possible contents per the OpenFV specification
    (https://github.com/openfv/openfv): "DEV", "RELEASE", "[state]-dev",
    "[state]-release".

    :return: string; empty string if this substring is not set in the font
    """
    has_substring = len(self.version_string_parts) > 1 and (
        self.is_development or self.is_release or self.contains_state
    )
    if has_substring:
        return self.version_string_parts[1]
    return ""
def set_state_git_commit_sha1(self, development=False, release=False):
    """
    Public method that adds a git commit sha1 hash label to the font version string at the State metadata
    position as defined by the OpenFV specification. This can be combined with a Development/Release Status
    metadata substring by setting either the development or release parameter to True.  development and
    release are mutually exclusive; ValueError is raised if both are True.  The font source must be under
    git version control: IOError is raised while locating the .git directory otherwise.
    See OpenFV specification for State substring definition details (https://github.com/openfv/openfv)
    :param development: (boolean) False (default) = do not add development status indicator; True = add indicator
    :param release: (boolean) False (default) = do not add release status indicator; True = add indicator
    :raises: IOError when the git repository root cannot be identified using the directory traversal in the
             fontv.utilities.get_git_root_path() function
    :raises: ValueError when calling code sets both development and release parameters to True as these are
             mutually exclusive requests
    :return: None
    """
    # Validate the mutually exclusive flags BEFORE touching the git repo, so
    # callers always get the documented ValueError (previously the repo lookup
    # ran first and could raise IOError instead when not in a git repo).
    if development and release:
        raise ValueError(
            "Cannot set both development parameter and release parameter to a value of True in "
            "fontv.libfv.FontVersion.set_state_git_commit_sha1() method. These are mutually "
            "exclusive."
        )
    hash_substring = "[" + self._get_repo_commit() + "]"
    if development:
        # append the development status label to the hash digest
        hash_substring += self.sha1_develop
    elif release:
        # append the release status label to the hash digest
        hash_substring += self.sha1_release
    self._set_state_status_substring(hash_substring)
def set_development_status(self):
    """Set the in-memory Development Status metadata substring.

    Writes ``FontVersion.develop_string`` into the State/Status position of
    the version string.  See the OpenFV specification for the Status
    substring and Development status definitions
    (https://github.com/openfv/openfv).

    :return: None
    """
    self._set_state_status_substring(self.develop_string)
def set_release_status(self):
    """Set the in-memory Release Status metadata substring.

    Writes ``FontVersion.release_string`` into the State/Status position of
    the version string.  See the OpenFV specification for the Status
    substring and Release status definitions
    (https://github.com/openfv/openfv).

    :return: None
    """
    self._set_state_status_substring(self.release_string)
def set_version_number(self, version_number):
    """Set the version number substring from *version_number*.

    *version_number* should follow the OpenFV "X.XXX" format (see
    https://github.com/openfv/openfv).  ValueError is raised if it cannot be
    cast to float, which is mandatory for the head table fontRevision record.
    Metadata strings in *version_number* are intentionally not permitted.

    :param version_number: (string) version number in X.XXX format where X are integers
    :raises: ValueError when version_number cannot be cast to float
    :return: None
    """
    self.version_string_parts[0] = "Version {}".format(version_number)
    self.version = self.version_string_parts[0]  # "Version X.XXX"
    self.head_fontRevision = float(version_number)  # X.XXX
    self._parse()
def set_version_string(self, version_string):
    """Set the entire version string, including any metadata.

    *version_string* should be formatted per the OpenFV specification
    (https://github.com/openfv/openfv) for the OpenType name table ID 5
    record.  ValueError is raised when the version number portion cannot be
    cast to float (required for the head table fontRevision record).

    :param version_string: (string) version string with semicolon-delimited metadata, if any
    :raises: ValueError when the version number cannot be cast to float
    :return: None
    """
    self._parse_version_substrings(version_string)
    self._parse()
    version_number = self.get_version_number_string()
    self.head_fontRevision = float(version_number)
def write_version_string(self, fontpath=None):
    """Write the in-memory version data to the font binary.

    Updates (1) every OpenType name table ID 5 record with the
    semicolon-joined ``FontVersion.version_string_parts`` and (2) the head
    table fontRevision record with ``FontVersion.head_fontRevision``, then
    saves the font.  The output format (.otf/.ttf) matches the binary the
    FontVersion object was instantiated from; by default the write goes to
    the original instantiation path, overridable via *fontpath*.

    :param fontpath: (string) optional file path for the output font binary
    :return: None
    """
    new_version_string = self.get_name_id5_version_string()
    # Update every name table ID 5 record found in the font.
    for name_record in self.ttf["name"].names:
        if name_record.nameID == 5:
            name_record.string = new_version_string
    # Update the head table fontRevision record.
    self.ttf["head"].fontRevision = self.head_fontRevision
    # Persist the changes to disk.
    target_path = self.fontpath if fontpath is None else fontpath
    self.ttf.save(target_path)
|
python
|
import sys
def test_python_path():
    """Verify that the bestbot workspace directory is on the interpreter path."""
    workspace = '/workspaces/bestbot'
    assert workspace in sys.path
|
python
|
def foo(x=None):
    """Append the string "x" to *x* and return None (list.append returns None).

    The previous signature used a mutable default (``x=[]``), which is a
    single list shared and mutated across all default-argument calls; a
    ``None`` sentinel with a fresh list per call fixes that.

    :param x: optional list to append to; a new list is created when omitted
    :return: None
    """
    if x is None:
        x = []
    return x.append("x")
def bar(x=None):
    """Return the length of *x*, defaulting to an empty list.

    The mutable default (``x=[]``) is replaced with the ``None`` sentinel
    idiom to avoid a shared default list across calls.

    :param x: optional sized container
    :return: int length of x (0 when omitted)
    """
    if x is None:
        x = []
    return len(x)
# Exercise both helpers at import time; the return values are discarded.
foo()
bar()
class Owner(object):
    """Demonstrates classmethods and calling them through an instance."""

    @classmethod
    def cm(cls, arg):
        """Return the class itself; *arg* is ignored."""
        return cls

    @classmethod
    def cm2(cls, arg):
        """Return *arg* unchanged."""
        return arg

    # Normal method
    def m(self):
        """Call cm via the instance, then cm2 via the returned class."""
        klass = self.cm(0)
        return klass.cm2(1)
|
python
|
import hqm
import socket
class HQMBot():
    """Minimal UDP client bot for an HQM server.

    Wraps an ``hqm.HQMClientSession`` and pumps its messages over a UDP
    socket in a blocking loop.  Subclasses override spectate()/action().
    """

    def __init__(self, host, port, team, name):
        # team: team index the bot should join (see dataReceived / TestBot notes)
        self.team = team
        self.host = host
        self.port = port
        # The protocol is datagram-based (UDP).
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # 55 is passed alongside the player name -- presumably a protocol/client
        # version constant; TODO confirm against the hqm module.
        self.session = hqm.HQMClientSession(name, 55)
        # True until the message stream has caught up with the server.
        self.syncing = True

    def run(self):
        """Blocking send/receive loop; Ctrl-C sends an exit message and returns."""
        try:
            while True:
                send = self.session.get_message()
                self.socket.sendto(send, (self.host, self.port))
                data = self.socket.recv(8192)
                self.dataReceived(data)
        except KeyboardInterrupt:
            # Tell the server we are leaving before shutting down.
            send = self.session.get_exit_message()
            self.socket.sendto(send, (self.host, self.port))

    def dataReceived(self, data):
        """Parse one server datagram, manage team membership, and run action()."""
        self.session.parse_message(data)
        if self.session.last_message_num == 0:
            # Message stream has caught up -- sync is complete.
            self.syncing = False
        gamestate = self.session.gamestate
        if not self.syncing and gamestate:
            you = gamestate.you
            you_player = gamestate.players.get(you)
            if you_player["team"] == -1:  # Still spectating
                self.session.join_team(self.team)
                self.spectate()
            else:
                if you_player["team"] != self.team:
                    self.session.join_team(-1)  # Back to spectator so we can switch team
                else:
                    self.session.join_team(None)
                    self.action()  # Let's do stuff

    def spectate(self):
        """Hook called while still spectating; override in subclasses."""
        pass

    def action(self):
        """Hook called once per processed datagram while on the desired team."""
        pass  # Insert your code here
class TestBot(HQMBot):
    """Example bot: categorizes objects each tick and sets some inputs."""

    def action(self):
        """Per-tick hook: sort game objects into pucks/teammates/opponents and
        drive the movement inputs."""
        session = self.session
        # Session contains the current gamestate and some other useful functions such as add_chat
        gamestate = session.gamestate
        # Gamestate contains score, time, and a player and object list
        players = gamestate.players
        # A dictionary of all the players in the server. One of them is you.
        # Each player object is a dictionary with the keys:
        #   name : The player name
        #   i    : The player index
        #   team : The team this player is in (-1 spectating, 0 red, 1 blue)
        #   obj  : The index of the player objects
        you = gamestate.you
        # An index that identifies who this bot is
        objects = gamestate.objects
        # A dictionary of all the objects in the server (both players and pucks).
        # For player objects, you need the player list to determine which object
        # belongs to which player. Each object is a dictionary; run
        # object.calculate_positions() first to populate position data:
        #   type : "PLAYER" or "PUCK"
        #   pos  : the object position, a numpy array with 3 elements
        #   rot  : the object rotation, a numpy 3x3 rotation matrix
        # Player objects additionally get (after calculate_positions()):
        #   stick_pos : stick position, numpy array with 3 elements
        #   stick_rot : stick rotation, numpy 3x3 rotation matrix
        #   head_rot  : head rotation, left (-)/right (+), radians
        #   body_rot  : body rotation, backwards (-)/forwards (+), radians
        pucks = []
        teammates = []
        opponents = []
        # BUG FIX: the original code reset `you = None` here, so the
        # `i == you` self-check below never matched and the bot counted
        # itself among its own teammates.
        for game_object in objects.values():  # renamed from `object` (shadowed builtin)
            game_object.calculate_positions()
            if game_object["type"] == "PUCK":
                pucks.append(game_object)
        you_player = players[gamestate.you]
        you_obj = objects[you_player["obj"]]
        for i, player in players.items():
            if i == you or player["obj"] == -1:  # If the player is you, or a spectator
                continue
            player_obj = objects[player["obj"]]
            if player["team"] == you_player["team"]:
                teammates.append(player_obj)
            else:
                opponents.append(player_obj)
        session.move_lr = 1  # Turn left/right: -1.0 (left), 0, or 1.0 (right)
        session.move_fwbw = 1.0  # Forwards/backwards: -1.0 (backwards), 0, or 1.0 (forwards)
        # Other available inputs:
        # session.stick_x      stick left/right rotation, -pi/2 (left) .. pi/2 (right)
        # session.stick_y      stick up/down rotation, -0.98 (up) .. 0.39 (down)
        # session.stick_angle  stick angle, -1 (clockwise) .. 1 (counter-clockwise);
        #                      the standard client steps it by .25 per mouse-wheel notch
        # session.head_rot     head rotation, -2.74 (left) .. 2.74 (right)
        # session.body_rot     body rotation, -pi/2 (backwards) .. pi/2 (forwards)
        # session.jump / session.crouch / session.shift : key flags
        if gamestate.simstep % 2000 == 500:
            session.add_chat("MigoBot")
|
python
|
import os
import random
check = """<input type="checkbox" id="{}" name="chord" value="{}">
<label for="{}"> {}</label><br>\n"""
def s(note):
    """Build a lowercased HTML id for *note*: first char + "_" + chars 3-5."""
    lowered = note.lower()
    return "{}_{}".format(lowered[0], lowered[2:5])
# Build one checkbox/label pair per chord file found in ./Chords and write
# the generated markup to check.html.
to_print = ""
for note in os.listdir("./Chords"):
    # note[:-4] strips the 4-character file extension for the id/label text.
    to_print += check.format(note[:-4], note, s(note), note[:-4])
with open("check.html", "w") as fi:
    fi.write(to_print)
|
python
|
import unittest
from app import db
from app.crypto.pw_hashing import global_salt_hash, indiv_salt_hash
from app.data_access.db_model.user import User
from app.data_access.user_controller import create_user, user_exists, delete_user, activate_user, \
store_pdf_and_transfer_ticket, find_user, check_idnr, check_dob
from app.data_access.user_controller_errors import UserAlreadyExistsError
class TestUserExists(unittest.TestCase):
    """Tests for user_controller.user_exists."""

    def setUp(self):
        # Fresh schema plus one pre-existing user per test.
        db.create_all()
        self.existing_idnr = "123"
        create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_existing_idnr_then_return_true(self):
        response = user_exists(self.existing_idnr)
        self.assertTrue(response)

    def test_if_not_existing_idnr_then_return_false(self):
        # Renamed from ..._then_return_true: the assertion expects False.
        response = user_exists('non_existent_user')
        self.assertFalse(response)

    def tearDown(self):
        db.drop_all()
class TestCreateUser(unittest.TestCase):
    """Tests for user_controller.create_user."""

    def setUp(self):
        # Fresh schema plus one pre-existing user per test.
        db.create_all()
        self.existing_idnr = "123"
        create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_request_id_same_then_raise_error(self):
        self.assertRaises(UserAlreadyExistsError, create_user, self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_request_id_different_then_raise_error(self):
        self.assertRaises(UserAlreadyExistsError, create_user, self.existing_idnr, '1985-01-01', '000')

    def test_if_idnr_exists_and_dob_same_then_raise_error(self):
        self.assertRaises(UserAlreadyExistsError, create_user, self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_dob_different_then_raise_error(self):
        # These four cases show the idnr alone determines uniqueness.
        self.assertRaises(UserAlreadyExistsError, create_user, self.existing_idnr, '1999-01-01', '789')

    def test_if_new_idnr_then_save_user(self):
        new_idnr = '33602'
        create_user(new_idnr, '1985-01-01', '000')
        self.assertTrue(user_exists(new_idnr))

    def test_if_new_idnr_then_save_correct_attributes(self):
        # idnr is hashed with the global salt; dob with an individual salt.
        new_idnr = '33604'
        dob = '1985-01-01'
        req_id = '000'
        create_user(new_idnr, dob, req_id)
        created_user = find_user(new_idnr)
        self.assertEqual(global_salt_hash().hash(new_idnr), created_user.idnr_hashed)
        self.assertTrue(indiv_salt_hash().verify(dob, created_user.dob_hashed))
        self.assertEqual(req_id, created_user.elster_request_id)
        self.assertFalse(created_user.is_active)

    def test_if_new_idnr_then_return_user_with_correct_attributes(self):
        # Same checks as above, but on the object returned by create_user.
        new_idnr = '33605'
        dob = '1985-01-01'
        req_id = '000'
        created_user = create_user(new_idnr, dob, req_id)
        self.assertEqual(global_salt_hash().hash(new_idnr), created_user.idnr_hashed)
        self.assertTrue(indiv_salt_hash().verify(dob, created_user.dob_hashed))
        self.assertEqual(req_id, created_user.elster_request_id)
        self.assertFalse(created_user.is_active)

    def tearDown(self):
        db.drop_all()
class TestDeleteUser(unittest.TestCase):
    """Tests for user_controller.delete_user."""

    def setUp(self):
        db.create_all()
        create_user('Added_user', '1985-01-01', '123')

    def test_if_user_is_deleted_then_user_is_removed_from_storage(self):
        delete_user('Added_user')
        db.session.rollback()  # Verify changes have actually been written to the database.
        self.assertEqual(0, User.query.count())

    def tearDown(self):
        db.drop_all()
class TestActivateUser(unittest.TestCase):
    """Tests for user_controller.activate_user."""

    def setUp(self):
        db.create_all()
        self.user = create_user('1234', '1985-01-01', '5678')

    def test_activates_user_and_commits_changes(self):
        activate_user('1234', '5678')
        db.session.rollback()  # Verify changes have actually been written to the database.
        # NOTE(review): asserts on the object created in setUp -- assumes
        # activate_user mutates that same session-tracked row; confirm.
        self.assertTrue(self.user.is_active)

    def test_activate_user_returns_an_activated_user(self):
        returned_user = activate_user('1234', '5678')
        self.assertTrue(returned_user.is_active)

    def tearDown(self):
        db.drop_all()
class TestStorePdfAndTransferTicket(unittest.TestCase):
    """Tests for user_controller.store_pdf_and_transfer_ticket."""

    def setUp(self):
        db.create_all()

    def test_pdf_is_set_in_user(self):
        expected_pdf = b'thisisagreatPDFforya'
        user = User('123', '123', '123')
        store_pdf_and_transfer_ticket(user, expected_pdf, 'Passierschein A38')
        # Rollback so only committed state survives the assertion.
        db.session.rollback()
        self.assertEqual(expected_pdf, user.pdf)

    def test_transfer_ticket_is_set_in_user(self):
        expected_transfer_ticket = 'Passierschein A38'
        user = User('123', '123', '123')
        store_pdf_and_transfer_ticket(user, b'pdf', expected_transfer_ticket)
        # Rollback so only committed state survives the assertion.
        db.session.rollback()
        self.assertEqual(expected_transfer_ticket, user.transfer_ticket)

    def tearDown(self):
        db.drop_all()
class TestCheckIdnr(unittest.TestCase):
    """Tests for user_controller.check_idnr (idnr match against stored hash)."""

    def setUp(self):
        db.create_all()
        self.correct_idnr = '1234567890'
        self.existing_user = create_user(self.correct_idnr, '1985-01-01', '000')

    def test_if_idnr_correct_return_true(self):
        self.assertTrue(check_idnr(self.existing_user, self.correct_idnr))

    def test_if_idnr_incorrect_return_false(self):
        self.assertFalse(check_idnr(self.existing_user, 'INCORRECT'))

    def tearDown(self):
        db.drop_all()
class TestCheckDob(unittest.TestCase):
    """Tests for user_controller.check_dob (date-of-birth match against stored hash)."""

    def setUp(self):
        db.create_all()
        self.correct_dob = '1985-01-01'
        self.existing_user = create_user('1234', self.correct_dob, '000')

    def test_if_dob_correct_return_true(self):
        self.assertTrue(check_dob(self.existing_user, self.correct_dob))

    def test_if_dob_incorrect_return_false(self):
        self.assertFalse(check_dob(self.existing_user, 'INCORRECT'))

    def tearDown(self):
        db.drop_all()
|
python
|
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path, re_path

from wrappr_backend.detection.api import urlpatterns as api_urls
# URL configuration: admin, DRF browsable-API auth, and the detection API,
# plus media files served by Django and debug-toolbar routes when DEBUG.
urlpatterns = [
    path('admin/', admin.site.urls),
    # re_path replaces the deprecated django.conf.urls.url alias (removed in
    # Django 4.0); the regex patterns themselves are unchanged.
    re_path(r'^api-auth/', include('rest_framework.urls')),
    re_path(r"^api/", include(api_urls)),
]

urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI_try.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(645, 692)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(644, 691))
Dialog.setMaximumSize(QtCore.QSize(646, 693))
self.comboBox = QtGui.QComboBox(Dialog)
self.comboBox.setEnabled(True)
self.comboBox.setGeometry(QtCore.QRect(10, 10, 211, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
self.comboBox.setMinimumSize(QtCore.QSize(0, 0))
self.comboBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.comboBox.setSizeIncrement(QtCore.QSize(0, 0))
self.comboBox.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.comboBox.setPalette(palette)
font = QtGui.QFont()
self.comboBox.setFont(font)
self.comboBox.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.comboBox.setMouseTracking(False)
self.comboBox.setFocusPolicy(QtCore.Qt.WheelFocus)
self.comboBox.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.comboBox.setUpdatesEnabled(True)
self.comboBox.setVisible(True)
self.comboBox.setAcceptDrops(False)
self.comboBox.setWindowTitle(_fromUtf8(""))
self.comboBox.setWindowIconText(_fromUtf8(""))
self.comboBox.setWindowOpacity(1.0)
self.comboBox.setWindowModified(False)
self.comboBox.setToolTip(_fromUtf8(""))
self.comboBox.setStatusTip(_fromUtf8(""))
self.comboBox.setWhatsThis(_fromUtf8(""))
self.comboBox.setAccessibleName(_fromUtf8(""))
self.comboBox.setAccessibleDescription(_fromUtf8(""))
self.comboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.comboBox.setAutoFillBackground(False)
self.comboBox.setStyleSheet(_fromUtf8(""))
self.comboBox.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.comboBox.setWindowFilePath(_fromUtf8(""))
self.comboBox.setInputMethodHints(QtCore.Qt.ImhNone)
self.comboBox.setEditable(False)
self.comboBox.setMaxVisibleItems(10)
self.comboBox.setMaxCount(2147483647)
self.comboBox.setInsertPolicy(QtGui.QComboBox.InsertAtBottom)
self.comboBox.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContentsOnFirstShow)
self.comboBox.setMinimumContentsLength(0)
self.comboBox.setIconSize(QtCore.QSize(16, 16))
self.comboBox.setAutoCompletion(True)
self.comboBox.setAutoCompletionCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.comboBox.setDuplicatesEnabled(False)
self.comboBox.setFrame(True)
self.comboBox.setModelColumn(0)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.tableWidget = QtGui.QTableWidget(Dialog)
self.tableWidget.setGeometry(QtCore.QRect(250, 10, 381, 671))
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.pushButton_2 = QtGui.QPushButton(Dialog)
self.pushButton_2.setEnabled(True)
self.pushButton_2.setGeometry(QtCore.QRect(120, 610, 111, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy)
self.pushButton_2.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton_2.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton_2.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton_2.setPalette(palette)
font = QtGui.QFont()
self.pushButton_2.setFont(font)
self.pushButton_2.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton_2.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton_2.setUpdatesEnabled(True)
self.pushButton_2.setVisible(True)
self.pushButton_2.setAcceptDrops(False)
self.pushButton_2.setWindowTitle(_fromUtf8(""))
self.pushButton_2.setWindowIconText(_fromUtf8(""))
self.pushButton_2.setWindowOpacity(1.0)
self.pushButton_2.setWindowModified(False)
self.pushButton_2.setToolTip(_fromUtf8(""))
self.pushButton_2.setStatusTip(_fromUtf8(""))
self.pushButton_2.setWhatsThis(_fromUtf8(""))
self.pushButton_2.setAccessibleName(_fromUtf8(""))
self.pushButton_2.setAccessibleDescription(_fromUtf8(""))
self.pushButton_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton_2.setAutoFillBackground(False)
self.pushButton_2.setStyleSheet(_fromUtf8(""))
self.pushButton_2.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton_2.setWindowFilePath(_fromUtf8(""))
self.pushButton_2.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton_2.setIconSize(QtCore.QSize(16, 16))
self.pushButton_2.setShortcut(_fromUtf8(""))
self.pushButton_2.setCheckable(False)
self.pushButton_2.setChecked(False)
self.pushButton_2.setAutoRepeat(False)
self.pushButton_2.setAutoExclusive(False)
self.pushButton_2.setAutoRepeatDelay(300)
self.pushButton_2.setAutoRepeatInterval(100)
self.pushButton_2.setDown(False)
self.pushButton_2.setAutoDefault(True)
self.pushButton_2.setDefault(False)
self.pushButton_2.setFlat(False)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton = QtGui.QPushButton(Dialog)
self.pushButton.setEnabled(True)
self.pushButton.setGeometry(QtCore.QRect(10, 610, 101, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton.setPalette(palette)
font = QtGui.QFont()
self.pushButton.setFont(font)
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton.setMouseTracking(False)
self.pushButton.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton.setUpdatesEnabled(True)
self.pushButton.setVisible(True)
self.pushButton.setAcceptDrops(False)
self.pushButton.setWindowTitle(_fromUtf8(""))
self.pushButton.setWindowIconText(_fromUtf8(""))
self.pushButton.setWindowOpacity(1.0)
self.pushButton.setWindowModified(False)
self.pushButton.setToolTip(_fromUtf8(""))
self.pushButton.setStatusTip(_fromUtf8(""))
self.pushButton.setWhatsThis(_fromUtf8(""))
self.pushButton.setAccessibleName(_fromUtf8(""))
self.pushButton.setAccessibleDescription(_fromUtf8(""))
self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton.setAutoFillBackground(False)
self.pushButton.setStyleSheet(_fromUtf8(""))
self.pushButton.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton.setWindowFilePath(_fromUtf8(""))
self.pushButton.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton.setIconSize(QtCore.QSize(16, 16))
self.pushButton.setShortcut(_fromUtf8(""))
self.pushButton.setCheckable(False)
self.pushButton.setChecked(False)
self.pushButton.setAutoRepeat(False)
self.pushButton.setAutoExclusive(False)
self.pushButton.setAutoRepeatDelay(300)
self.pushButton.setAutoRepeatInterval(100)
self.pushButton.setDown(False)
self.pushButton.setAutoDefault(True)
self.pushButton.setDefault(True)
self.pushButton.setFlat(False)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_3 = QtGui.QPushButton(Dialog)
self.pushButton_3.setEnabled(True)
self.pushButton_3.setGeometry(QtCore.QRect(10, 650, 101, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
self.pushButton_3.setSizePolicy(sizePolicy)
self.pushButton_3.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton_3.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton_3.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton_3.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton_3.setPalette(palette)
font = QtGui.QFont()
self.pushButton_3.setFont(font)
self.pushButton_3.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton_3.setMouseTracking(False)
self.pushButton_3.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton_3.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton_3.setUpdatesEnabled(True)
self.pushButton_3.setVisible(True)
self.pushButton_3.setAcceptDrops(False)
self.pushButton_3.setWindowTitle(_fromUtf8(""))
self.pushButton_3.setWindowIconText(_fromUtf8(""))
self.pushButton_3.setWindowOpacity(1.0)
self.pushButton_3.setWindowModified(False)
self.pushButton_3.setToolTip(_fromUtf8(""))
self.pushButton_3.setStatusTip(_fromUtf8(""))
self.pushButton_3.setWhatsThis(_fromUtf8(""))
self.pushButton_3.setAccessibleName(_fromUtf8(""))
self.pushButton_3.setAccessibleDescription(_fromUtf8(""))
self.pushButton_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton_3.setAutoFillBackground(False)
self.pushButton_3.setStyleSheet(_fromUtf8(""))
self.pushButton_3.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton_3.setWindowFilePath(_fromUtf8(""))
self.pushButton_3.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton_3.setIconSize(QtCore.QSize(16, 16))
self.pushButton_3.setShortcut(_fromUtf8(""))
self.pushButton_3.setCheckable(False)
self.pushButton_3.setChecked(False)
self.pushButton_3.setAutoRepeat(False)
self.pushButton_3.setAutoExclusive(False)
self.pushButton_3.setAutoRepeatDelay(300)
self.pushButton_3.setAutoRepeatInterval(100)
self.pushButton_3.setDown(False)
self.pushButton_3.setAutoDefault(True)
self.pushButton_3.setDefault(False)
self.pushButton_3.setFlat(False)
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_4 = QtGui.QPushButton(Dialog)
self.pushButton_4.setGeometry(QtCore.QRect(120, 650, 111, 31))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.comboBox_2 = QtGui.QComboBox(Dialog)
self.comboBox_2.setEnabled(False)
self.comboBox_2.setGeometry(QtCore.QRect(10, 40, 151, 22))
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.spinBox = QtGui.QSpinBox(Dialog)
self.spinBox.setEnabled(False)
self.spinBox.setGeometry(QtCore.QRect(180, 40, 42, 22))
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.retranslateUi(Dialog)
self.comboBox.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
    """Apply the (pyuic4-generated) translated UI strings.

    NOTE(review): the string literals below are mojibake (UTF-8 Russian text
    decoded with the wrong codec) and are preserved byte-for-byte.  The
    pushButton_2 literal was split across a physical line break in the
    source (a syntax error); the two halves are rejoined here.
    """
    Dialog.setWindowTitle(_translate("Dialog", "ะัะพะณัะฐะผะผะฐ ััะตัะฐ ะดะตัะฐะปะตะน", None))
    self.comboBox.setItemText(0, _translate("Dialog", "50ะะ 4", None))
    self.comboBox.setItemText(1, _translate("Dialog", "50ะะ 6.3", None))
    self.comboBox.setItemText(2, _translate("Dialog", "50ะะ 10", None))
    self.comboBox.setItemText(3, _translate("Dialog", "50ะะ 14", None))
    self.comboBox.setItemText(4, _translate("Dialog", "50ะะ 16", None))
    self.comboBox.setItemText(5, _translate("Dialog", "50ะะ 32", None))
    item = self.tableWidget.horizontalHeaderItem(0)
    item.setText(_translate("Dialog", "ะะตัะฐะปะธ", None))
    item = self.tableWidget.horizontalHeaderItem(1)
    item.setText(_translate("Dialog", "ะกะบะปะฐะด", None))
    item = self.tableWidget.horizontalHeaderItem(2)
    item.setText(_translate("Dialog", "ะขัะตะฑัะตััั", None))
    self.pushButton_2.setText(_translate("Dialog", "ะััะพะด", None))
    self.pushButton.setText(_translate("Dialog", "ะัะฟะพะปะฝะธัั ะทะฐะบะฐะท", None))
    self.pushButton_3.setText(_translate("Dialog", "ะะพะฟะพะปะฝะธัั ัะบะปะฐะด", None))
    self.pushButton_4.setText(_translate("Dialog", "ะะพัะผะพััะตัั ัะบะปะฐะด", None))
if __name__ == "__main__":
    import sys

    # Standalone preview: build the dialog and run the Qt event loop.
    app = QtGui.QApplication(sys.argv)
    Dialog = QtGui.QWidget()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
python
|
from flask import jsonify, request, Response
from jsonschema import RefResolutionError
from sqlalchemy.orm import Session
from flexget.api import APIResource, api
from flexget.api.app import NotFoundError
from flexget.config_schema import resolve_ref, schema_paths
schema_api = api.namespace('schema', description='Config and plugin schemas')
schema_api_list = api.schema_model(
'schema.list',
{'type': 'object', 'properties': {'schemas': {'type': 'array', 'items': {'type': 'object'}}}},
)
def rewrite_ref(identifier: str, base_url: str) -> str:
    """Rewrite an internal '/schema/...' ref identifier into a servable URL.

    The refs in the schemas are arbitrary identifiers rather than real
    network locations; identifiers starting with '/schema/' are rebased onto
    *base_url*, anything else is returned untouched.
    """
    root = base_url if base_url.endswith('/') else base_url + '/'
    if identifier.startswith('/schema/'):
        return root + identifier[1:]
    return identifier
def rewrite_refs(schema, base_url: str):
    """Recursively rewrite every '$ref' in *schema* via rewrite_ref.

    Dicts and lists are rebuilt; any other value passes through unchanged.
    """
    if isinstance(schema, list):
        return [rewrite_refs(item, base_url) for item in schema]
    if not isinstance(schema, dict):
        return schema
    if '$ref' in schema:
        return {'$ref': rewrite_ref(schema['$ref'], base_url)}
    return {key: rewrite_refs(value, base_url) for key, value in schema.items()}
@schema_api.route('/')
class SchemaAllAPI(APIResource):
    """Endpoint returning every registered config/plugin schema."""

    @api.response(200, model=schema_api_list)
    def get(self, session: Session = None) -> Response:
        """ List all schema definitions """
        schemas = []
        for path in schema_paths:
            # Resolve each registered schema and rewrite its internal $refs to
            # point back at this endpoint so clients can follow them.
            schema = rewrite_refs(resolve_ref(path), request.url_root)
            schema['id'] = rewrite_ref(path, request.url_root)
            schemas.append(schema)
        return jsonify({'schemas': schemas})
@schema_api.route('/<path:path>/')
@api.doc(params={'path': 'Path of schema'})
@api.response(NotFoundError)
class SchemaAPI(APIResource):
    """Endpoint returning a single schema definition by its path."""

    @api.response(200, model=schema_api_list)
    def get(self, path: str, session: Session = None) -> Response:
        """ Get schema definition """
        try:
            # The full request path (including query string) doubles as the
            # schema's internal ref identifier.
            schema = resolve_ref(request.full_path)
        except RefResolutionError:
            raise NotFoundError('invalid schema path')
        schema['id'] = request.url
        return jsonify(rewrite_refs(schema, request.url_root))
|
python
|
"""
Compare two integers given as strings.
Example
For a = "12" and b = "13", the output should be
compareIntegers(a, b) = "less";
For a = "875" and b = "799", the output should be
compareIntegers(a, b) = "greater";
For a = "1000" and b = "1000", the output should be
compareIntegers(a, b) = "equal".
"""
def compareIntegers(a, b):
    """Compare two non-negative integers given as canonical decimal strings.

    Returns 'less', 'greater', or 'equal'. A longer decimal string denotes
    the larger number; equal-length strings compare lexicographically.
    """
    if len(a) != len(b):
        return 'greater' if len(a) > len(b) else 'less'
    if a == b:
        return 'equal'
    return 'less' if a < b else 'greater'
def compareIntegers(a, b):
    """Compare two integers given as strings by their numeric value."""
    difference = int(a) - int(b)
    if difference < 0:
        return 'less'
    if difference > 0:
        return 'greater'
    return 'equal'
|
python
|
# This file is part of the faebryk project
# SPDX-License-Identifier: MIT
import faebryk.library.core
import faebryk.library.kicad
import faebryk.library.library
import faebryk.library.traits
|
python
|
from datetime import datetime
from .api import ApiObject
class Trigger(ApiObject):
    """
    Trigger API object.

    https://www.xibbaz.com/documentation/3.4/manual/api/reference/trigger/object
    """
    # Related objects expanded by default when fetching triggers.
    DEFAULT_SELECTS = ('Items', 'Functions', 'Dependencies', 'DiscoveryRule', 'LastEvent', 'Tags')
    # Relations usable when selecting/filtering triggers.
    RELATIONS = ('hosts', 'groups')

    @classmethod
    def _text_field(cls):
        # First positional arg of a classmethod is the class; it was
        # misleadingly named `self` before.
        """Name of the property holding the trigger's human-readable text."""
        return 'description'

    # Property metadata: doc string, Python kind, read-only flag, and the
    # meaning of enumerated integer values where applicable.
    PROPS = dict(
        triggerid = dict(
            doc = "ID of the trigger.",
            id = True,
            readonly = True,
        ),
        description = dict(
            doc = "Name of the trigger.",
        ),
        expression = dict(
            doc = "Reduced trigger expression.",
        ),
        comments = dict(
            doc = "Additional comments to the trigger.",
        ),
        error = dict(
            doc = "Error text if there have been any problems when updating the state of the trigger.",
            readonly = True,
        ),
        flags = dict(
            doc = "Origin of the trigger.",
            kind = int,
            readonly = True,
            vals = {
                0: 'a plain trigger (default)',
                4: 'a discovered trigger',
            },
        ),
        lastchange = dict(
            doc = "Time when the trigger last changed its state.",
            kind = datetime,
            readonly = True,
        ),
        priority = dict(
            doc = "Severity of the trigger.",
            kind = int,
            vals = {
                0: 'not classified (default)',
                1: 'information',
                2: 'warning',
                3: 'average',
                4: 'high',
                5: 'disaster',
            },
        ),
        state = dict(
            doc = "State of the trigger.",
            kind = int,
            readonly = True,
            vals = {
                0: 'trigger state is up to date (default)',
                1: 'current trigger state is unknown',
            },
        ),
        status = dict(
            doc = "Whether the trigger is enabled or disabled.",
            kind = int,
            vals = {
                0: 'enabled (default)',
                1: 'disabled',
            },
        ),
        templateid = dict(
            doc = "ID of the parent template trigger.",
            readonly = True,
        ),
        type = dict(
            doc = "Whether the trigger can generate multiple problem events.",
            kind = int,
            vals = {
                0: 'do not generate multiple events (default)',
                1: 'generate multiple events',
            },
        ),
        url = dict(
            doc = "URL associated with the trigger.",
        ),
        value = dict(
            doc = "Whether the trigger is in OK or problem state.",
            kind = int,
            readonly = True,
            vals = {
                0: 'ok',
                1: 'problem',
            },
        ),
        recovery_mode = dict(
            doc = "OK event generation mode.",
            kind = int,
            vals = {
                0: 'expression (default)',
                1: 'recovery expression',
                2: 'none',
            },
        ),
        recovery_expression = dict(
            doc = "Reduced trigger recovery expression.",
        ),
        correlation_mode = dict(
            doc = "OK event closes.",
            kind = int,
            vals = {
                0: 'all problems (default)',
                1: 'all problems if tag values match',
            },
        ),
        correlation_tag = dict(
            doc = "Tag for matching.",
        ),
        manual_close = dict(
            doc = "Allow manual close.",
            kind = int,
            vals = {
                0: 'no (default)',
                1: 'yes',
            },
        ),
    )
|
python
|
################################################################### #
# Basic plot for two-strain SIR model:
# Bifurcation diagram for one parameter
####################################################################
import sys
import numpy as np
import pylab as plt
from matplotlib.font_manager import FontProperties
from two_strain import *
# Run parameters
run_num = 1  # sys.argv[1]
end_time = 1000*365
output_interval = 365.0  # if not 365., need to adjust strobe interval
step_size = 1.0
sweep_par = "beta[0]"  # e.g., "beta[0]", "a[1]", "alpha[0]"
par_min = 1.0/7.0
par_max = 7.0/7.0
n_points = 40  # number of points in parameter range
n_strobes = 50  # number of years to sample

# Strain parameters, including initial conditions
beta = np.array([5, 5])/7.0
epsilon = 0.1
gamma = np.array([1, 1])/7.0
mu = 1/(10*365.0)
alpha = np.array([1., 1.])
a = np.array([1., 1.])
omega = 2*np.pi/365.
obs_sd = 0.01
NSS = 0.2
NIS = 1e-3
NRS = 0.02
NRI = 0.0
NSI = 1e-3
NSR = 0.02
NIR = 0.0

# Organize and run simulations
SI = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR])
ic = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR, 1-np.sum(SI)])
par_vals = np.linspace(par_min, par_max, n_points)
bif_vals = np.zeros((len(par_vals), n_strobes))
for i in range(len(par_vals)):
    print('Running value %d of %d' % (i+1, len(par_vals)))
    # NOTE: exec of a code-controlled string; sweep_par is a literal set
    # above (never user input), but a typo in it would silently create a
    # new variable instead of updating the intended parameter.
    exec(sweep_par + " = par_vals[i]")
    params = np.array([gamma, mu, alpha, a, omega, beta, epsilon])
    output = run_two_strain(end_time, output_interval, step_size, params, ic)
    I = output[:, 1] + output[:, 6]  # NIS + NIR
    bif_vals[i, :] = I[-n_strobes:]  # keep the last n_strobes annual strobes

# Plot output
plt.plot(par_vals, bif_vals, '.k')
plt.xlabel(sweep_par)
plt.ylabel("NIS + NIR")
plt.xlim([par_min, par_max])
# Bug fix: save BEFORE show(). With non-interactive backends show() blocks
# and the figure is torn down when the window closes, so calling savefig()
# afterwards wrote an empty image.
plt.savefig("bifurcation_" + sweep_par + ".png")
plt.show()
plt.close()
|
python
|
import tweepy , tkinter, datetime, os, sys, random, time, pytz
from keys import *
from tweepy import TweepError
# OAuth 1a setup: consumer app credentials plus the user access token,
# all imported from keys.py above.
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(key,secret)
api = tweepy.API(auth)
random_lyrics = 'Lyrics.txt'  # one lyric line per row; sampled at random below
# NOTE(review): this is a tzinfo object, not a timestamp — the name is
# misleading, and nothing in this chunk actually uses it meaningfully.
time_stamp = pytz.timezone('US/Central')
#Scan the most recent mention and reply with a random lyric line.
def test_bot():
    """Reply to the newest mention with a random line from the lyrics file.

    Side effects: reads ``Lyrics.txt``, posts at most one reply tweet
    (``mentions_timeline(count=1)``), and prints progress to stdout.
    Swallows credential-check failures (best effort) but reports them.
    """
    try:
        api.verify_credentials()
        print('Authentication was successful')
    except Exception:  # was a bare except:; narrowed to Exception so
        print('Error')  # KeyboardInterrupt/SystemExit still propagate
    user = api.me()
    print(user.name + ' ' + 'Succesfully Signing In....')
    mentions = api.mentions_timeline(count = 1)
    # (Removed: `mentions_id = api.get_status` + print — it assigned the
    # uncalled API method itself, so the print carried no information.)
    for tweet in mentions:
        # Use a context manager so the file handle is not leaked every poll.
        with open(random_lyrics, 'r') as filesong:
            lyrics = filesong.readlines()
        song_lines = len(lyrics)
        #Setting Counter
        random_lyric = random.randrange(0, song_lines)
        # Bug fix: in_reply_to_status_id expects the STATUS id, not the
        # author's user id as before.
        tid = tweet.id
        username_of_person = tweet.user.screen_name
        try:
            # Bug fix: the original chained .append(time_stamp) onto the
            # returned Status object, which has no append() and raised
            # AttributeError after every successful reply.
            api.update_status("@" + username_of_person + ' ' + lyrics[random_lyric], in_reply_to_status_id = tid)
            print('Replied to' + ' ' + username_of_person + ' ' + 'with:'+ ' '+lyrics[random_lyric])
        except tweepy.TweepError as e:
            print(e.reason)
# Poll forever, checking for new mentions every 15 seconds
# (the old comment said 10 seconds, which did not match the sleep below).
while True:
    test_bot()
    time.sleep(15)
|
python
|
from mycroft import MycroftSkill, intent_file_handler
class Prepararrefeicoes(MycroftSkill):
    """Mycroft skill answering the 'prepararrefeicoes' (meal-preparation) intent."""
    def __init__(self):
        MycroftSkill.__init__(self)
    @intent_file_handler('prepararrefeicoes.intent')
    def handle_prepararrefeicoes(self, message):
        # Speak the canned response from the skill's prepararrefeicoes dialog file.
        self.speak_dialog('prepararrefeicoes')
def create_skill():
    """Entry point used by the Mycroft skill loader to instantiate this skill."""
    return Prepararrefeicoes()
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ibc/core/commitment/v1/commitment.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from confio import proofs_pb2 as confio_dot_proofs__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="ibc/core/commitment/v1/commitment.proto",
package="ibc.core.commitment.v1",
syntax="proto3",
serialized_options=b"Z9github.com/cosmos/ibc-go/modules/core/23-commitment/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\'ibc/core/commitment/v1/commitment.proto\x12\x16ibc.core.commitment.v1\x1a\x14gogoproto/gogo.proto\x1a\x13\x63onfio/proofs.proto" \n\nMerkleRoot\x12\x0c\n\x04hash\x18\x01 \x01(\x0c:\x04\x88\xa0\x1f\x00"9\n\x0cMerklePrefix\x12)\n\nkey_prefix\x18\x01 \x01(\x0c\x42\x15\xf2\xde\x1f\x11yaml:"key_prefix""9\n\nMerklePath\x12%\n\x08key_path\x18\x01 \x03(\tB\x13\xf2\xde\x1f\x0fyaml:"key_path":\x04\x98\xa0\x1f\x00"5\n\x0bMerkleProof\x12&\n\x06proofs\x18\x01 \x03(\x0b\x32\x16.ics23.CommitmentProofB;Z9github.com/cosmos/ibc-go/modules/core/23-commitment/typesb\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
confio_dot_proofs__pb2.DESCRIPTOR,
],
)
_MERKLEROOT = _descriptor.Descriptor(
name="MerkleRoot",
full_name="ibc.core.commitment.v1.MerkleRoot",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="hash",
full_name="ibc.core.commitment.v1.MerkleRoot.hash",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=110,
serialized_end=142,
)
_MERKLEPREFIX = _descriptor.Descriptor(
name="MerklePrefix",
full_name="ibc.core.commitment.v1.MerklePrefix",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key_prefix",
full_name="ibc.core.commitment.v1.MerklePrefix.key_prefix",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\362\336\037\021yaml:"key_prefix"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=144,
serialized_end=201,
)
_MERKLEPATH = _descriptor.Descriptor(
name="MerklePath",
full_name="ibc.core.commitment.v1.MerklePath",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key_path",
full_name="ibc.core.commitment.v1.MerklePath.key_path",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\362\336\037\017yaml:"key_path"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=203,
serialized_end=260,
)
_MERKLEPROOF = _descriptor.Descriptor(
name="MerkleProof",
full_name="ibc.core.commitment.v1.MerkleProof",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="proofs",
full_name="ibc.core.commitment.v1.MerkleProof.proofs",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=262,
serialized_end=315,
)
_MERKLEPROOF.fields_by_name[
"proofs"
].message_type = confio_dot_proofs__pb2._COMMITMENTPROOF
DESCRIPTOR.message_types_by_name["MerkleRoot"] = _MERKLEROOT
DESCRIPTOR.message_types_by_name["MerklePrefix"] = _MERKLEPREFIX
DESCRIPTOR.message_types_by_name["MerklePath"] = _MERKLEPATH
DESCRIPTOR.message_types_by_name["MerkleProof"] = _MERKLEPROOF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MerkleRoot = _reflection.GeneratedProtocolMessageType(
"MerkleRoot",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEROOT,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerkleRoot)
},
)
_sym_db.RegisterMessage(MerkleRoot)
MerklePrefix = _reflection.GeneratedProtocolMessageType(
"MerklePrefix",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPREFIX,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerklePrefix)
},
)
_sym_db.RegisterMessage(MerklePrefix)
MerklePath = _reflection.GeneratedProtocolMessageType(
"MerklePath",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPATH,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerklePath)
},
)
_sym_db.RegisterMessage(MerklePath)
MerkleProof = _reflection.GeneratedProtocolMessageType(
"MerkleProof",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPROOF,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerkleProof)
},
)
_sym_db.RegisterMessage(MerkleProof)
DESCRIPTOR._options = None
_MERKLEROOT._options = None
_MERKLEPREFIX.fields_by_name["key_prefix"]._options = None
_MERKLEPATH.fields_by_name["key_path"]._options = None
_MERKLEPATH._options = None
# @@protoc_insertion_point(module_scope)
|
python
|
import operator
from typing import Any, Callable, List, Optional, Type, Union
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.relationships import RelationshipProperty
from sqlalchemy.sql import functions
from sqlalchemy.sql.expression import (
BinaryExpression,
BindParameter,
BooleanClauseList,
ClauseElement,
ColumnClause,
False_,
Null,
True_,
and_,
cast,
extract,
false,
literal,
null,
or_,
true,
)
from sqlalchemy.types import Date, Time
from odata_query import ast, exceptions as ex, typing, utils, visitor
from . import functions_ext
class AstToSqlAlchemyClauseVisitor(visitor.NodeVisitor):
    """
    :class:`NodeVisitor` that transforms an :term:`AST` into a SQLAlchemy query
    filter clause.

    Relationships traversed while resolving attributes are collected in
    ``join_relationships``; the caller is responsible for joining them
    onto the final query.

    Args:
        root_model: The root model of the query.
    """

    def __init__(self, root_model: Type[DeclarativeMeta]):
        self.root_model = root_model
        self.join_relationships: List[InstrumentedAttribute] = []

    def visit_Identifier(self, node: ast.Identifier) -> ColumnClause:
        ":meta private:"
        try:
            return getattr(self.root_model, node.name)
        except AttributeError:
            raise ex.InvalidFieldException(node.name)

    def visit_Attribute(self, node: ast.Attribute) -> ColumnClause:
        ":meta private:"
        rel_attr = self.visit(node.owner)
        # Owner is an InstrumentedAttribute, hopefully of a relationship.
        # But we need the model pointed to by the relationship.
        prop_inspect = inspect(rel_attr).property
        if not isinstance(prop_inspect, RelationshipProperty):
            # TODO: new exception:
            raise ValueError(f"Not a relationship: {node.owner}")
        self.join_relationships.append(rel_attr)
        # We'd like to reference the column on the related class:
        owner_cls = prop_inspect.entity.class_
        try:
            return getattr(owner_cls, node.attr)
        except AttributeError:
            raise ex.InvalidFieldException(node.attr)

    def visit_Null(self, node: ast.Null) -> Null:
        ":meta private:"
        return null()

    def visit_Integer(self, node: ast.Integer) -> BindParameter:
        ":meta private:"
        return literal(node.py_val)

    def visit_Float(self, node: ast.Float) -> BindParameter:
        ":meta private:"
        return literal(node.py_val)

    def visit_Boolean(self, node: ast.Boolean) -> Union[True_, False_]:
        ":meta private:"
        if node.val == "true":
            return true()
        else:
            return false()

    def visit_String(self, node: ast.String) -> BindParameter:
        ":meta private:"
        return literal(node.py_val)

    def visit_Date(self, node: ast.Date) -> BindParameter:
        ":meta private:"
        try:
            return literal(node.py_val)
        except ValueError:
            raise ex.ValueException(node.val)

    def visit_DateTime(self, node: ast.DateTime) -> BindParameter:
        ":meta private:"
        try:
            return literal(node.py_val)
        except ValueError:
            raise ex.ValueException(node.val)

    def visit_Time(self, node: ast.Time) -> BindParameter:
        ":meta private:"
        try:
            return literal(node.py_val)
        except ValueError:
            raise ex.ValueException(node.val)

    def visit_Duration(self, node: ast.Duration) -> BindParameter:
        ":meta private:"
        return literal(node.py_val)

    def visit_GUID(self, node: ast.GUID) -> BindParameter:
        ":meta private:"
        return literal(node.val)

    def visit_List(self, node: ast.List) -> list:
        ":meta private:"
        return [self.visit(n) for n in node.val]

    def visit_Add(self, node: ast.Add) -> Callable[[Any, Any], Any]:
        ":meta private:"
        return operator.add

    def visit_Sub(self, node: ast.Sub) -> Callable[[Any, Any], Any]:
        ":meta private:"
        return operator.sub

    def visit_Mult(self, node: ast.Mult) -> Callable[[Any, Any], Any]:
        ":meta private:"
        return operator.mul

    def visit_Div(self, node: ast.Div) -> Callable[[Any, Any], Any]:
        ":meta private:"
        return operator.truediv

    def visit_Mod(self, node: ast.Mod) -> Callable[[Any, Any], Any]:
        ":meta private:"
        return operator.mod

    def visit_BinOp(self, node: ast.BinOp) -> Any:
        ":meta private:"
        left = self.visit(node.left)
        right = self.visit(node.right)
        op = self.visit(node.op)
        return op(left, right)

    def visit_Eq(
        self, node: ast.Eq
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.eq

    def visit_NotEq(
        self, node: ast.NotEq
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.ne

    def visit_Lt(
        self, node: ast.Lt
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.lt

    def visit_LtE(
        self, node: ast.LtE
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.le

    def visit_Gt(
        self, node: ast.Gt
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.gt

    def visit_GtE(
        self, node: ast.GtE
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return operator.ge

    def visit_In(
        self, node: ast.In
    ) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
        ":meta private:"
        return lambda a, b: a.in_(b)

    def visit_Compare(self, node: ast.Compare) -> BinaryExpression:
        ":meta private:"
        left = self.visit(node.left)
        right = self.visit(node.right)
        op = self.visit(node.comparator)
        # If a node is a `relationship` representing a single foreign key,
        # the client meant to compare the foreign key, not the related object.
        # E.g. In "blogpost/author eq 1", left should be "blogpost/author_id"
        left = self._maybe_sub_relationship_with_foreign_key(left)
        right = self._maybe_sub_relationship_with_foreign_key(right)
        return op(left, right)

    def visit_And(
        self, node: ast.And
    ) -> Callable[[ClauseElement, ClauseElement], BooleanClauseList]:
        ":meta private:"
        return and_

    def visit_Or(
        self, node: ast.Or
    ) -> Callable[[ClauseElement, ClauseElement], BooleanClauseList]:
        ":meta private:"
        return or_

    def visit_BoolOp(self, node: ast.BoolOp) -> BooleanClauseList:
        ":meta private:"
        left = self.visit(node.left)
        right = self.visit(node.right)
        op = self.visit(node.op)
        return op(left, right)

    def visit_Not(self, node: ast.Not) -> Callable[[ClauseElement], ClauseElement]:
        ":meta private:"
        return operator.invert

    def visit_UnaryOp(self, node: ast.UnaryOp) -> ClauseElement:
        ":meta private:"
        mod = self.visit(node.op)
        val = self.visit(node.operand)
        try:
            return mod(val)
        except TypeError:
            raise ex.TypeException(node.op.__class__.__name__, val)

    def visit_Call(self, node: ast.Call) -> ClauseElement:
        ":meta private:"
        try:
            handler = getattr(self, "func_" + node.func.name.lower())
        except AttributeError:
            raise ex.UnsupportedFunctionException(node.func.name)
        return handler(*node.args)

    def visit_CollectionLambda(self, node: ast.CollectionLambda) -> ClauseElement:
        ":meta private:"
        owner_prop = self.visit(node.owner)
        collection_model = inspect(owner_prop).property.entity.class_
        if node.lambda_:
            # For the lambda, we want to strip the identifier off, because
            # we will execute this as a subquery in the wanted model's context.
            subq_ast = utils.expression_relative_to_identifier(
                node.lambda_.identifier, node.lambda_.expression
            )
            subq_transformer = self.__class__(collection_model)
            subquery_filter = subq_transformer.visit(subq_ast)
        else:
            subquery_filter = None
        if isinstance(node.operator, ast.Any):
            return owner_prop.any(subquery_filter)
        else:
            # For an ALL query, invert both the filter and the EXISTS:
            if node.lambda_:
                subquery_filter = ~subquery_filter
            return ~owner_prop.any(subquery_filter)

    def func_contains(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
        ":meta private:"
        return self._substr_function(field, substr, "contains")

    def func_startswith(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
        ":meta private:"
        return self._substr_function(field, substr, "startswith")

    def func_endswith(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
        ":meta private:"
        return self._substr_function(field, substr, "endswith")

    def func_length(self, arg: ast._Node) -> functions.Function:
        ":meta private:"
        return functions.char_length(self.visit(arg))

    def func_concat(self, *args: ast._Node) -> functions.Function:
        ":meta private:"
        return functions.concat(*[self.visit(arg) for arg in args])

    def func_indexof(self, first: ast._Node, second: ast._Node) -> functions.Function:
        ":meta private:"
        # TODO: Highly dialect dependent, might want to implement in GenericFunction:
        # Subtract 1 because OData is 0-indexed while SQL is 1-indexed
        return functions_ext.strpos(self.visit(first), self.visit(second)) - 1

    def func_substring(
        self, fullstr: ast._Node, index: ast._Node, nchars: Optional[ast._Node] = None
    ) -> functions.Function:
        ":meta private:"
        # Add 1 because OData is 0-indexed while SQL is 1-indexed
        if nchars:
            return functions_ext.substr(
                self.visit(fullstr),
                self.visit(index) + 1,
                self.visit(nchars),
            )
        else:
            return functions_ext.substr(self.visit(fullstr), self.visit(index) + 1)

    def func_matchespattern(
        self, field: ast._Node, pattern: ast._Node
    ) -> functions.Function:
        ":meta private:"
        identifier = self.visit(field)
        return identifier.regexp_match(self.visit(pattern))

    def func_tolower(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.lower(self.visit(field))

    def func_toupper(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.upper(self.visit(field))

    def func_trim(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.ltrim(functions_ext.rtrim(self.visit(field)))

    def func_date(self, field: ast._Node) -> ClauseElement:
        ":meta private:"
        return cast(self.visit(field), Date)

    # Bug fix (all date-part helpers below): SQLAlchemy's extract() signature
    # is extract(field, expr) with the field NAME STRING first; the arguments
    # were passed in reverse order, producing invalid EXTRACT(<column> FROM 'day').
    def func_day(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("day", self.visit(field))

    def func_hour(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("hour", self.visit(field))

    def func_minute(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("minute", self.visit(field))

    def func_month(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("month", self.visit(field))

    def func_now(self) -> functions.Function:
        ":meta private:"
        return functions.now()

    def func_second(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("second", self.visit(field))

    def func_time(self, field: ast._Node) -> ClauseElement:
        # Annotation fixed: cast() returns a Cast clause, not functions.Function.
        ":meta private:"
        return cast(self.visit(field), Time)

    def func_year(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return extract("year", self.visit(field))

    def func_ceiling(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.ceil(self.visit(field))

    def func_floor(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.floor(self.visit(field))

    def func_round(self, field: ast._Node) -> functions.Function:
        ":meta private:"
        return functions_ext.round(self.visit(field))

    def _substr_function(
        self, field: ast._Node, substr: ast._Node, func: str
    ) -> ClauseElement:
        ":meta private:"
        typing.typecheck(field, (ast.Identifier, ast.String), "field")
        typing.typecheck(substr, ast.String, "substring")
        identifier = self.visit(field)
        substring = self.visit(substr)
        op = getattr(identifier, func)
        return op(substring)

    def _maybe_sub_relationship_with_foreign_key(
        self, elem: ClauseElement
    ) -> ClauseElement:
        """
        If the given ClauseElement is a `relationship` with a single ForeignKey,
        replace it with the `ForeignKey` itself.

        :meta private:
        """
        try:
            prop_inspect = inspect(elem).property
            if isinstance(prop_inspect, RelationshipProperty):
                foreign_key = prop_inspect._calculated_foreign_keys
                if len(foreign_key) == 1:
                    return next(iter(foreign_key))
        except Exception:
            pass
        return elem
|
python
|
'''
*File: domain_restriction.py
*Author: Nicholas Mattei (nicholas.mattei@nicta.com.au)
*Date: March 18, 2014
*
* Copyright (c) 2014, Nicholas Mattei and NICTA
* All rights reserved.
*
* Developed by: Nicholas Mattei
* NICTA
* http://www.nickmattei.net
* http://www.preflib.org
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NICTA nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICTA ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NICTA BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
About
--------------------
This file tests a profile for being single-peaked.
'''
import sys
import copy
import glob
from preflibtools import io
from preflibtools import generate_profiles
# Implementation of the Single-Peaked Consistency Algorithm detailed in
# B. Escoffier, J. Lang, and M. Ozturk, "Single-peaked consistency and its complexity".
# 2008 European Conference on Artificial Intelligence.
#
# Intuitively, this algorithm finds an axis that is single peaked with respect to
# the rmaps that are passed in, or it returns an empty axis (vector) of the candidates.
# This is achieved in time O(|rmaps|*|candmap|).
#
# Note that this algorithm only works for STRICT preferences. If a non-strict
# set of rankmaps is passed in, an error is returned.
def is_single_peaked(rmaps, candmap):
    """Return a single-peaked axis for the profile, or [] if none exists.

    Implements Escoffier, Lang & Ozturk (ECAI 2008): repeatedly peel off the
    candidate(s) ranked last by some voter and pin them to the outer ends of
    the axis under construction. Requires STRICT preferences; exits the
    process if any rankmap is non-strict.
    """
    for current in rmaps:
        if len(current) != len(candmap):
            print("is_single_peaked called with non-strict preferences")
            exit()
    # `orders` is a working copy that shrinks as candidates get placed;
    # `fullorders` keeps the untouched profile for the positional checks.
    orders = order_vectors(rmaps)
    fullorders = order_vectors(rmaps)
    #Build the order...
    leftside = []
    rightside = []
    last_cands = last_set(orders)
    # A single shared last candidate imposes no constraint: park it on the
    # shorter side and keep peeling.
    while (len(last_cands) == 1):
        if len(leftside) < len(rightside):
            leftside.append(last_cands[0])
        else:
            rightside.insert(0,last_cands[0])
        orders = remove_cands(orders, last_cands)
        last_cands = last_set(orders)
    #Only break if we have != 1 last candidate: either we quit, or we put one on each end.
    if len(last_cands) > 2:
        return []
    else:
        leftside.append(last_cands[0])
        rightside.insert(0,last_cands[1])
        orders = remove_cands(orders, last_cands)
    # While there are still unplaced candidates (not removed from every vote)
    while len(orders[0]) > 0:
        last_cands = last_set(orders)
        # Should never have more than 2...
        if len(last_cands) > 2:
            return []
        else:
            # x_i / x_j are the innermost already-placed candidates on each side.
            x_i = leftside[len(leftside)-1]
            x_j = rightside[0]
            #Check Conditions outlined by Lang.
            #If L={X}, Case 3
            if len(last_cands) == 1:
                x = last_cands[0]
                # if x_i < x < x_j => leftside + x
                if any(o.index(x_j) < o.index(x) and o.index(x) < o.index(x_i) for o in fullorders):
                    leftside.append(x)
                # if x_j < x < x_i => x + right
                elif any(o.index(x) < o.index(x_j) and o.index(x_i) < o.index(x) for o in fullorders):
                    rightside.insert(0, x)
                # Otherwise it doesn't matter and we put it either place...
                else:
                    if len(leftside) < len(rightside):
                        leftside.append(x)
                    else:
                        rightside.insert(0,x)
                # Restrict...
                orders = remove_cands(orders, last_cands)
            #if L = {x, y}, Case 2c and 2d
            # if x_i < x < x_j < y ==> left+x and y+right
            # if x_j < x < x_i < y ==> left+y and x+right
            # if both, then contradiction...
            # if x_i < x < y < x_j ==> then this must be axis...
            # if x_j < y < x < x_i ==> then this must be axis...
            elif len(last_cands) == 2:
                C1 = False
                C2 = False
                x = last_cands[0]
                y = last_cands[1]
                x_i = leftside[len(leftside)-1]
                x_j = rightside[0]
                # Iterate over each of the orders and check for the C1 or C2 conditions or D1 or D2... Switch on these..
                for o in fullorders:
                    #Condition D1: the rest of the axis is forced by this voter.
                    if o.index(x_i) > o.index(x) and o.index(x) > o.index(y) and o.index(y) > o.index(x_j):
                        # The axis is the current voter restricted to the remainder
                        temp_order = copy.copy(o)
                        temp_order = remove_cands([temp_order], list(set(leftside + rightside)))[0]
                        temp_order.reverse() ## Note that this reversed is the "increasing order of voter j"
                        social_axis = leftside + temp_order + rightside
                        if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
                            return social_axis
                        else:
                            return []
                    #Condition D2: forced axis, mirror image of D1.
                    if o.index(x_j) > o.index(y) and o.index(y) > o.index(x) and o.index(x) > o.index(x_i):
                        # The axis is the current voter restricted to the remainder
                        temp_order = copy.copy(o)
                        temp_order = remove_cands([temp_order], list(set(leftside + rightside)))[0]
                        social_axis = leftside + temp_order + rightside
                        if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
                            return social_axis
                        else:
                            return []
                    #Condition C1:
                    if o.index(x_i) > o.index(x) and o.index(x) > o.index(x_j) and o.index(x_j) > o.index(y):
                        C1 = True
                    #Condition C2:
                    if o.index(x_j) > o.index(x) and o.index(x) > o.index(x_i) and o.index(x_i) > o.index(y):
                        C2 = True
                    # Short Circuit if we have C1 and C2 at any point...
                    if C1 and C2:
                        return []
                # Processing C1 or C2 if necessary:
                if C1:
                    leftside.append(x)
                    rightside.insert(0,y)
                else: # Do C2 or it doesn't matter...
                    leftside.append(y)
                    rightside.insert(0,x)
                orders = remove_cands(orders, last_cands)
    #Leftside + Rightside must be the social axis
    social_axis = leftside+rightside
    if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
        return social_axis
    else:
        return []
# Helper function to find last place candidates
def last_set(orders):
    """Return the list of distinct candidates ranked last in some order.

    Robustness fix: returns [] for empty/exhausted input. The original fell
    off the end and implicitly returned None, which made callers doing
    ``len(last_set(...))`` crash with a TypeError.
    """
    last_cands = set()
    if len(orders) > 0 and len(orders[0]) > 0:
        for order in orders:
            last_cands.add(order[len(order)-1])
    return list(last_cands)
# Helper function to compute the result of removing (set) of candidates from a list of orders.
def remove_cands(orders, cands_to_remove):
    """Project each order onto the candidates NOT in cands_to_remove.

    Input orders are left untouched; fresh lists are returned. Each removal
    uses list.remove, so a candidate missing from an order raises ValueError
    (matching the strict-profile assumption of the callers).
    """
    projected = []
    for vote in orders:
        reduced = list(vote)
        for cand in cands_to_remove:
            reduced.remove(cand)
        projected.append(reduced)
    return projected
# Helper Function: Given cands --> rank, return a vector of unique vectors in the profile
# that are just the orders of the candidates with index 0 == most prefered.
def order_vectors(rmaps):
    """Convert rankmaps into preference-order lists (index 0 = most preferred)."""
    rank_to_candidate = io.rankmap_convert_rank_to_candidate(rmaps)
    # Sorting each map's rank keys ascending yields best-to-worst candidate order.
    return [[cmap[rank] for rank in sorted(cmap.keys())] for cmap in rank_to_candidate]
# Verify that a profile of strict orders is single peaked w.r.t. the passed axis
def verify_orders_single_peaked_axis_strict(axis, orders):
    """Check whether every strict order in ``orders`` is single-peaked on ``axis``.

    An order is single-peaked when, moving away from the voter's top choice
    (the peak) in either direction along the axis, preference strictly
    decreases.

    Parameters
    ----------
    axis : list
        Candidate ordering (the proposed societal axis).
    orders : list of lists
        Strict preference orders, most preferred candidate first.

    Returns
    -------
    bool
        True when every order is single-peaked with respect to ``axis``.
    """
    # The axis must cover exactly the candidates of a non-empty profile.
    if len(orders) < 1 or len(axis) != len(orders[0]):
        return False
    for corder in orders:
        # The peak splits the axis at the voter's most preferred candidate.
        split = axis.index(corder[0])
        # Left of the peak, reversed so index 0 is the candidate nearest the peak.
        left = axis[:split]
        left.reverse()
        right = axis[split:]
        # The voter's order restricted to the left side must walk away from
        # the peak one position at a time.
        restricted = remove_cands([corder], list(set(axis) - set(left)))
        restricted = restricted[0]
        if len(left) > 0 and not all(restricted[i] == left[i] for i in range(len(left))):
            print("Axis is not compatible with order: " + str(corder))
            return False
        # Same monotonicity requirement on the right side (peak included).
        restricted = remove_cands([corder], list(set(axis) - set(right)))
        restricted = restricted[0]
        if not all(restricted[i] == right[i] for i in range(len(right))):
            print("Axis is not compatible with order: " + str(corder))
            return False
    return True
# Generate a random instance and test it for SP -- Output the axis if it is...
if __name__ == '__main__':
    ncand = 3
    nvoters = 100
    candmap = generate_profiles.gen_cand_map(ncand)
    # Draw a profile that is single-peaked by construction, so the detector
    # should succeed on it.
    rankmaps, rankmapcounts = generate_profiles.gen_single_peaked_impartial_culture_strict(nvoters, candmap)
    io.pp_profile_toscreen(candmap, rankmaps, rankmapcounts)
    social_axis = is_single_peaked(rankmaps, candmap)
    if social_axis != []:
        print("Single Peaked w.r.t " + str(social_axis))
    else:
        print("Not Single Peaked")

    # Test all the SOC's... for fun....
    files = glob.glob("./soc/*.soc")
    total = 0
    totalSP = 0
    for cfile in sorted(files):
        print("Testing: " + str(cfile))
        # Context manager guarantees the handle is closed even when parsing
        # or the single-peakedness test raises (the original leaked it then).
        with open(cfile, "r") as inf:
            candmap, rankmaps, rankmapcounts, numvoters = io.read_election_file(inf)
        total += 1
        social_axis = is_single_peaked(rankmaps, candmap)
        if social_axis != []:
            print("Single Peaked w.r.t " + str(social_axis))
            totalSP += 1
        else:
            print("Not Single Peaked")
    print("Parsed " + str(total) + " SOC files")
    print("Exactly " + str(totalSP) + " were single peaked")
|
python
|
import tensorflow as tf
from storage import run_dir
from train import train
from model import model
from predict import predict

# Build the network and train it.  Bound to a fresh name so the imported
# `model` factory is not shadowed by its own instance.
net = model()
train(net)
|
python
|
import os
import re
import codecs
from setuptools import setup, find_packages
current_path = os.path.abspath(os.path.dirname(__file__))
def read_file(*parts):
    """Read and return a UTF-8 text file located relative to this setup script."""
    path = os.path.join(current_path, *parts)
    with codecs.open(path, 'r', 'utf8') as handle:
        return handle.read()
def get_requirements(*parts):
    """Read a requirements file and return its entries as a list of strings.

    Blank lines and ``#`` comment lines are dropped so empty strings are
    never handed to ``install_requires`` (the original kept them).
    """
    with codecs.open(os.path.join(current_path, *parts), 'r', 'utf8') as reader:
        stripped = (line.strip() for line in reader)
        return [line for line in stripped if line and not line.startswith('#')]
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file.

    Raises RuntimeError when no version assignment is found.
    """
    contents = read_file(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError('Unable to find version string.')
    return match.group(1)
setup(
    name='keras-trans-mask',
    version=find_version('keras_trans_mask', '__init__.py'),
    packages=find_packages(),
    url='https://github.com/CyberZHG/keras-trans-mask',
    license='MIT',
    author='CyberZHG',
    author_email='CyberZHG@users.noreply.github.com',
    description='Transfer masking in Keras',
    long_description=read_file('README.md'),
    long_description_content_type='text/markdown',
    install_requires=get_requirements('requirements.txt'),
    # setuptools expects classifiers as a list of strings; a tuple triggers
    # warnings/errors on some setuptools versions.
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import math
import random
from hashlib import sha256
from pathlib import Path
from typing import Optional, Tuple, Union
import aiohttp
import fsspec
from fsspec.core import url_to_fs
###############################################################################
log = logging.getLogger(__name__)
###############################################################################
MAX_THUMBNAIL_HEIGHT = 540
MAX_THUMBNAIL_WIDTH = 960
def get_media_type(uri: str) -> Optional[str]:
    """
    Look up the IANA media type for the provided URI by its file extension.

    Parameters
    ----------
    uri: str
        The URI to get the IANA media type for.

    Returns
    -------
    mtype: Optional[str]:
        The matching IANA media type, or None when zero or multiple
        matches are found.
    """
    import dask.dataframe as dd

    # Media types retrieved from:
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    media_types = dd.read_csv(
        str(Path(__file__).parent / "resources" / "content-types-*.csv")
    )

    # The extension is everything after the final dot.
    # NOTE(review): the match is case-sensitive — an upper-case extension
    # would not match; confirm whether inputs are normalized upstream.
    suffix = uri.split(".")[-1]

    # Restrict the table to rows whose Name column equals the extension.
    matching = media_types[media_types["Name"] == suffix].compute()

    # A unique match is returned; anything else yields None.
    if len(matching) == 1:
        return matching["Template"].values[0]
    return None
def resource_copy(
    uri: str,
    dst: Optional[Union[str, Path]] = None,
    overwrite: bool = False,
) -> str:
    """
    Copy a resource (local or remote) to a local destination on the machine.

    Parameters
    ----------
    uri: str
        The uri for the resource to copy.
    dst: Optional[Union[str, Path]]
        A specific destination to where the copy should be placed. If None provided
        stores the resource in the current working directory.
    overwrite: bool
        Boolean value indicating whether or not to overwrite a local resource with
        the same name if it already exists.

    Returns
    -------
    saved_path: str
        The path of where the resource ended up getting copied to.

    Raises
    ------
    FileExistsError
        The destination file exists and ``overwrite`` is False.
    """
    # Default destination: the resource's own file name in the CWD.
    if dst is None:
        dst = uri.split("/")[-1]

    # Ensure dst doesn't exist
    dst = Path(dst).resolve()
    if dst.is_dir():
        dst = dst / uri.split("/")[-1]
    if dst.is_file() and not overwrite:
        raise FileExistsError(dst)

    log.info(f"Beginning resource copy from: {uri}")
    try:
        kwargs = {}
        # Set custom timeout for http resources
        if uri.startswith("http"):
            kwargs = {"timeout": aiohttp.ClientTimeout(total=1800)}

        # TODO: Add explicit use of GCS credentials until public read is fixed
        fs, remote_path = url_to_fs(uri, **kwargs)
        fs.get(remote_path, str(dst))
        log.info(f"Completed resource copy from: {uri}")
        log.info(f"Stored resource copy: {dst}")
        return str(dst)
    except Exception:
        log.error(
            f"Something went wrong during resource copy. "
            f"Attempted copy from: '{uri}', resulted in error."
        )
        # Bare ``raise`` re-raises the active exception with its original
        # traceback (idiomatic; ``raise e`` was redundant).
        raise
def split_audio(
    video_read_path: str,
    audio_save_path: str,
    overwrite: bool = False,
) -> Tuple[str, str, str]:
    """
    Split and store the audio from a video file using ffmpeg.

    Parameters
    ----------
    video_read_path: str
        Path to the video to split the audio from.
    audio_save_path: str
        Path to where the audio should be stored.
    overwrite: bool
        Whether to overwrite an existing file at ``audio_save_path``.
        Default: False

    Returns
    -------
    resolved_audio_save_path: str
        Path to where the split audio file was saved.
    ffmpeg_stdout_path: str
        Path to the ffmpeg stdout log file.
    ffmpeg_stderr_path: str
        Path to the ffmpeg stderr log file.

    Raises
    ------
    FileExistsError
        The save path exists and ``overwrite`` is False.
    IsADirectoryError
        The save path points to a directory.
    """
    import ffmpeg

    # Check paths; resolve(strict=True) raises if the video does not exist.
    resolved_video_read_path = Path(video_read_path).resolve(strict=True)
    resolved_audio_save_path = Path(audio_save_path).resolve()
    if resolved_audio_save_path.is_file() and not overwrite:
        raise FileExistsError(resolved_audio_save_path)
    if resolved_audio_save_path.is_dir():
        raise IsADirectoryError(resolved_audio_save_path)
    # Construct ffmpeg dag: mono, 16 kHz, 16-bit PCM WAV output.
    stream = ffmpeg.input(resolved_video_read_path)
    stream = ffmpeg.output(
        stream,
        filename=resolved_audio_save_path,
        format="wav",
        acodec="pcm_s16le",
        ac=1,
        ar="16k",
    )
    # Run dag, capturing ffmpeg's stdout/stderr for the log files below.
    log.debug(f"Beginning audio separation for: {video_read_path}")
    out, err = ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
    log.debug(f"Completed audio separation for: {video_read_path}")
    log.debug(f"Stored audio: {audio_save_path}")
    # Store logs next to the audio file with .out / .err suffixes.
    ffmpeg_stdout_path = resolved_audio_save_path.with_suffix(".out")
    ffmpeg_stderr_path = resolved_audio_save_path.with_suffix(".err")
    with open(ffmpeg_stdout_path, "wb") as write_out:
        write_out.write(out)
    with open(ffmpeg_stderr_path, "wb") as write_err:
        write_err.write(err)
    return (
        str(resolved_audio_save_path),
        str(ffmpeg_stdout_path),
        str(ffmpeg_stderr_path),
    )
def get_static_thumbnail(
    video_path: str, session_content_hash: str, seconds: int = 30
) -> str:
    """
    A function that produces a png thumbnail image from a video file.

    Parameters
    ----------
    video_path: str
        The URL of the video from which the thumbnail will be produced
    session_content_hash: str
        The video content hash. This will be used in the produced image file's name
    seconds: int
        Determines after how many seconds a frame will be selected to produce the
        thumbnail. The default is 30 seconds

    Returns
    -------
    str: cover_name
        The name of the thumbnail file:
        Always session_content_hash + "-static-thumbnail.png"
        (empty string when the reader reports no frames)
    """
    import imageio
    from PIL import Image

    reader = imageio.get_reader(video_path)
    png_path = ""
    # Only produce a thumbnail when the video actually has frames.
    if reader.get_length() > 1:
        png_path = f"{session_content_hash}-static-thumbnail.png"
        image = None
        try:
            # Frame index at the requested timestamp (fps * seconds).
            frame_to_take = math.floor(reader.get_meta_data()["fps"] * seconds)
            image = reader.get_data(frame_to_take)
        except (ValueError, IndexError):
            # Video shorter than `seconds`: fall back to the first frame.
            # A fresh reader is opened because the failed seek can leave the
            # previous one in a bad state.
            reader = imageio.get_reader(video_path)
            image = reader.get_data(0)
        # Downscale only when the frame exceeds the max thumbnail dimensions.
        final_ratio = find_proper_resize_ratio(image.shape[0], image.shape[1])
        if final_ratio < 1:
            image = Image.fromarray(image).resize(
                (
                    math.floor(image.shape[1] * final_ratio),
                    math.floor(image.shape[0] * final_ratio),
                )
            )
        imageio.imwrite(png_path, image)
    return png_path
def get_hover_thumbnail(
    video_path: str,
    session_content_hash: str,
    num_frames: int = 10,
    duration: float = 6.0,
) -> str:
    """
    A function that produces a gif hover thumbnail from an mp4 video file.

    Frames are sampled at random (each frame has a 1% chance of being
    selected), so the output is not deterministic across runs.

    Parameters
    ----------
    video_path: str
        The URL of the video from which the thumbnail will be produced
    session_content_hash: str
        The video content hash. This will be used in the produced image file's name
    num_frames: int
        Determines the number of frames in the thumbnail
    duration: float
        Runtime of the produced GIF.
        Default: 6.0 seconds

    Returns
    -------
    str: cover_name
        The name of the thumbnail file:
        Always session_content_hash + "-hover-thumbnail.gif"
        (empty string when the reader reports no frames)
    """
    import imageio
    import numpy as np
    from PIL import Image

    reader = imageio.get_reader(video_path)
    gif_path = ""
    if reader.get_length() > 1:
        gif_path = f"{session_content_hash}-hover-thumbnail.gif"
        # Get first frame to determine the video dimensions.
        sample = reader.get_data(0)
        height = sample.shape[0]
        width = sample.shape[1]
        final_ratio = find_proper_resize_ratio(height, width)
        # fps is chosen so num_frames play over the requested duration.
        with imageio.get_writer(gif_path, mode="I", fps=(num_frames / duration)) as writer:
            selected_frames = 0
            for frame in reader:
                # 1% chance to use the frame
                if random.random() > 0.99:
                    image = Image.fromarray(frame)
                    if final_ratio < 1:
                        image = image.resize(
                            (
                                math.floor(width * final_ratio),
                                math.floor(height * final_ratio),
                            )
                        )
                    final_image = np.asarray(image).astype(np.uint8)
                    writer.append_data(final_image)
                    selected_frames += 1
                # Stop once enough frames were collected.
                if selected_frames >= num_frames:
                    break
    return gif_path
def find_proper_resize_ratio(height: int, width: int) -> float:
    """
    Return the proper ratio to resize a thumbnail greater than 960 x 540 pixels.

    Parameters
    ----------
    height: int
        The height, in pixels, of the thumbnail to be resized.
    width: int
        The width, in pixels, of the thumbnail to be resized.

    Returns
    -------
    final_ratio: float
        The ratio by which the thumbnail will be resized.
        If the ratio is less than 1, the thumbnail is too large and should be resized
        by a factor of final_ratio.
        If the ratio is greater than or equal to 1, the thumbnail is not too large and
        should not be resized.
    """
    if height > MAX_THUMBNAIL_HEIGHT or width > MAX_THUMBNAIL_WIDTH:
        height_ratio = MAX_THUMBNAIL_HEIGHT / height
        width_ratio = MAX_THUMBNAIL_WIDTH / width
        # Bug fix: take the SMALLER ratio so both dimensions fit within the
        # maximums.  The previous code returned the larger ratio, so an image
        # oversized in only one dimension produced a ratio >= 1 and was never
        # resized by the callers (which check `ratio < 1`).
        return min(height_ratio, width_ratio)
    # Within bounds: any value >= 1 signals "do not resize".
    return 1.0
def hash_file_contents(uri: str, buffer_size: int = 2 ** 16) -> str:
    """
    Return the SHA256 hash of a file's content.

    Parameters
    ----------
    uri: str
        The uri for the file to hash.
    buffer_size: int
        The number of bytes to read at a time.
        Default: 2^16 (64KB)

    Returns
    -------
    hash: str
        The SHA256 hash for the file contents.
    """
    digest = sha256()
    with fsspec.open(uri, "rb") as stream:
        # ``iter`` with a sentinel yields fixed-size chunks until an empty
        # read signals end-of-file.
        for chunk in iter(lambda: stream.read(buffer_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def convert_video_to_mp4(video_filepath: str) -> str:
    """
    Converts a video to an equivalent MP4 file.

    Parameters
    ----------
    video_filepath: str
        The filepath of the video to convert.

    Returns
    -------
    mp4_filepath: str
        The filepath of the converted MP4 video.
    """
    import ffmpeg

    # Same stem, .mp4 extension, next to the original file.
    mp4_filepath = str(Path(video_filepath).with_suffix(".mp4"))
    pipeline = ffmpeg.input(video_filepath).output(mp4_filepath)
    pipeline.overwrite_output().run()
    log.info("Finished converting {} to mp4".format(video_filepath))
    return mp4_filepath
def generate_file_storage_name(file_uri: str, suffix: str) -> str:
    """
    Generate a filename using the hash of the file contents and some provided suffix.

    Parameters
    ----------
    file_uri: str
        The URI to the file to hash.
    suffix: str
        The suffix to append to the hash as a part of the filename.

    Returns
    -------
    dst: str
        The name of the file as it should be on Google Cloud Storage.
    """
    # Renamed from ``hash`` to avoid shadowing the builtin of the same name.
    content_hash = hash_file_contents(file_uri)
    return f"{content_hash}-{suffix}"
|
python
|
from subprocess import call
import glob

# Emit a markdown-style listing: one "###" heading per sample directory,
# followed by two blank lines per image found inside it.
for sample_dir in glob.glob("samples/*"):
    images = sorted(glob.glob(sample_dir + "/*.png"))
    print("")
    print("### " + sample_dir)
    for image in images:
        print("")
        print("")
|
python
|
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
# from kivy.garden
from kivy.uix.textinput import TextInput
import time
from Code.Scripts.predict import predict
from kivy.properties import StringProperty,ColorProperty,NumericProperty,BooleanProperty
from Code.Scripts.Analysis import count_tweets,count_unique_tweets
kivy.require("1.10.0")
loginids = {"yash":"yash","vivek":"vivek","animesh":"animesh"}
# Logo image widget for the home screen; all styling lives in the .kv file.
class HomeLogoImage(Image):
    pass
class FeedbackScreen(Screen):
    """Feedback form screen: four text fields whose contents are written to a timestamped .txt file."""
    # Flags bound in the .kv layout to swap the submit button for a thank-you label.
    button_visible = NumericProperty()
    label_visible = NumericProperty()
    button_disabled = BooleanProperty()
    # Prompt/placeholder text of the four feedback inputs.
    entered_text_nav = StringProperty()
    entered_text_cost = StringProperty()
    entered_text_response = StringProperty()
    entered_text_others = StringProperty()
    def __init__(self, **kwargs):
        super(FeedbackScreen, self).__init__(**kwargs)
        # Same initial prompt in every field; cleared on focus by the
        # clear_text_* handlers below.
        self.entered_text_nav = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_cost = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_response = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_others = "Kindly rate us and let us know your valuable feedback"
        self.label_visible = 0
        self.button_visible = 1
        self.button_disabled = False
    def submit(self,nav,cost,response,others):
        """Persist the four feedback values to a timestamped text file, then lock the form."""
        print("Pressed")
        text = ""
        text += "nav : "+nav
        text += "\ncost : "+cost
        text += "\nresponse : "+response
        text += "\nothers : "+others
        # Unix timestamp (seconds) serves as a unique-enough file name.
        file_name = str(int(time.time()))+".txt"
        # NOTE(review): absolute, user-specific Windows path — breaks on any
        # other machine; confirm whether this should be configurable.
        with open("C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\Feedback\\"+file_name, 'w') as txt_file:
            txt_file.writelines(text)
        # Hide the submit button and show the confirmation label.
        self.label_visible = 1
        self.button_visible = 0
        self.button_disabled = True
    def clear_text_others(self):
        # Clear the placeholder when the user interacts with the field.
        self.entered_text_others = ""
    def clear_text_response(self):
        self.entered_text_response = ""
    def clear_text_cost(self):
        self.entered_text_cost = ""
    def clear_text_nav(self):
        self.entered_text_nav = ""
# Text-input widget used on the feedback screen; styled in the .kv file.
class TextInputFeedbackScreen(TextInput):
    pass
class AnalysisScreen(Screen):
    """Dashboard screen showing tweet-collection statistics as display strings."""
    tweets_collected = StringProperty()
    unique_tweets = StringProperty()
    positives = StringProperty()
    negatives = StringProperty()
    def __init__(self, **kwargs):
        super(AnalysisScreen,self).__init__(**kwargs)
        # All counters start at zero until the user triggers a refresh.
        self.unique_tweets = '0'
        self.tweets_collected = '0'
        self.positives = '0'
        self.negatives = '0'
    def refresh_values(self):
        """Recompute tweet counts from the Analysis module; sentiment tallies are not implemented."""
        print("Refreshed")
        self.tweets_collected = str(count_tweets())
        self.unique_tweets = str(count_unique_tweets())
        # Sentiment counts are placeholders for now.
        self.negatives = 'no data'
        self.positives = 'no data'
        print(self.tweets_collected)
        print(self.unique_tweets)
# Login screen and its widget subclasses; layout and behaviour live in the .kv file.
class LoginScreen(Screen):
    pass
class LoginScreenTextInput(TextInput):
    pass
class LoginLogoImage(Image):
    pass
class LoginScreenLabel(Label):
    pass
class LoginScreenButton(Button):
    # NOTE(review): this __init__ adds nothing beyond the base class and could
    # be removed; kept as-is.
    def __init__(self,**kwargs):
        super(LoginScreenButton,self).__init__(**kwargs)
    def pressed(self,user_id,password):
        # Login handler; the credentials are currently ignored.
        print("pressed")
# About/graph screens and label widgets; all styling lives in the .kv file.
class AboutUsScreenDescriptionLabel(Label):
    pass
class AboutUsScreenLabel(Label):
    pass
class AboutUsScreen(Screen):
    pass
class PopularityGraphScreen(Screen):
    pass
class TopicLabel(Label):
    pass
class WordCloudScreen(Screen):
    """Screen that displays a word-cloud image chosen by the selected politician's name."""
    # Absolute path of the image currently shown (bound in the .kv file).
    path_to_cloud = StringProperty()
    def __init__(self,**kwargs):
        super(WordCloudScreen,self).__init__(**kwargs)
        # NOTE(review): absolute, user-specific Windows paths — confirm these
        # resources exist on target machines.
        self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\dummy_cloud.png"
    def on_select(self,text):
        """Swap the displayed cloud based on the selected name (case-insensitive match)."""
        print(text," selected")
        if "rahul" in text.lower():
            self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\rahul_cloud.png"
        elif "modi" in text.lower():
            self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\modi_cloud.png"
# Home screen and small label/image widgets; styled in the .kv file.
class LabelFeedbackScreen(Label):
    pass
class HomeScreen(Screen):
    pass
class LogoImage(Image):
    pass
class SentimentTestScreen(Screen):
    """Screen where the user types text and sees the predicted sentiment, colour-coded."""
    predicted_sentiment = StringProperty()
    predicted_sentiment_color = ColorProperty()
    def __init__(self, **kwargs):
        super(SentimentTestScreen, self).__init__(**kwargs)
        # Neutral white until a prediction has been made.
        self.predicted_sentiment_color = [1,1,1,1]
    def predict_sentiment(self,input_text):
        """Classify ``input_text``: -1 -> negative (red), 1 -> positive (green), else unclassified."""
        predicted_value = predict(input_text)
        print("For the text input ",input_text,"; result is ",predicted_value)
        if predicted_value == -1:
            self.predicted_sentiment = "negative"
            self.predicted_sentiment_color = [1,0,0.4,1]
        elif predicted_value == 1:
            self.predicted_sentiment = "positive"
            self.predicted_sentiment_color = [0,1,0.6,1]
        else:
            # Colour intentionally left unchanged for unclassifiable input.
            self.predicted_sentiment = "can not classify"
# Remaining label/button/manager widgets; behaviour defined in the .kv file.
class AnalysisScreenLabel(Label):
    pass
class ScreenManagement(ScreenManager):
    pass
class HomeScreenButton(Button):
    pass
class BackButton(Button):
    pass
# The .kv layout is parsed once at import time; build() returns that tree.
kivy_file = Builder.load_file('CampaignAssistantGui.kv')
class MyApp(App):
    def build(self):
        # Root widget tree parsed from CampaignAssistantGui.kv.
        return kivy_file
if __name__ == "__main__":
    MyApp().run()
|
python
|
from typing import Any, AsyncGenerator
from dependency_injector.wiring import Provide, inject
from graphql import GraphQLResolveInfo
from containers import SDContainer
from models import OnPathway
from .subscription_type import subscription
@subscription.source("onPathwayUpdated")
@inject
async def on_pathway_updated_generator(
    _: Any = None,
    info: GraphQLResolveInfo = None,
    pathwayId: int = None,
    includeDischarged: bool = False,
    pub=Provide[SDContainer.pubsub_service]
) -> AsyncGenerator:
    """Yield OnPathway records published on the "on-pathway-updated" topic.

    Only events whose ``pathway_id`` matches ``pathwayId`` are forwarded;
    discharged records are filtered out unless ``includeDischarged`` is True.
    ``pub`` is injected from the service container.
    """
    topic = pub.subscribe("on-pathway-updated")
    async with topic as subscriber:
        async for on_pathway in subscriber:
            # Forward only events belonging to the subscribed pathway.
            if int(on_pathway.pathway_id) == int(pathwayId):
                # Discharged records pass through only when explicitly requested.
                if (
                    (not includeDischarged and
                        not on_pathway.is_discharged)
                    or includeDischarged
                ):
                    yield on_pathway
@subscription.field("onPathwayUpdated")
async def on_pathway_updated_field(
    obj: OnPathway = None,
    info: GraphQLResolveInfo = None,
    pathwayId: int = None,
    includeDischarged: bool = None,
):
    """Field resolver: pass through the OnPathway record emitted by the source generator."""
    return obj
|
python
|
import sys
import tableauserverclient as TSC

# Build request options: filter by file name and project, then sort and page.
rob = TSC.RequestOptions.Builder()
ro = rob.file("==", "MyFile").project("==", "Default")._sort("foo", "desc")._pagesize(50)._build()
# Python 2 print statement replaced with the function form (works on 2 and 3).
print(ro.filters)
|
python
|
# -*- coding: utf-8 -*-
# Detect tissue regions in a whole slide image.
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the MIT License. See LICENSE file in root folder.
#############################################################################
from datetime import datetime
import hashlib
_time = datetime.now()
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = "1.0"
__description__ = {
'name': 'detect_tissue',
'unique_id' : hashlib.md5(str.encode('detect_tissue' + __version__)).hexdigest(),
'version': __version__,
'timestamp': _time.isoformat(),
'input': [None],
'output': [None],
'params': dict()
}
from tinydb import TinyDB, Query
import simplejson as json
import geojson as gjson
import configargparse as opt
import numpy as np
from pathlib import Path
from shapely.affinity import translate
from qpath.base import WSIInfo, MRI
from qpath.annot import Annotation
from qpath.mask import mask_to_external_contours
from qpath.tissue import detect_foreground
from qpath.utils import NumpyJSONEncoder
# minimum object sizes (areas, in px^2) for different magnifications to be considered as "interesting",
# keyed by the magnification rendered as a string
min_obj_size = {'0.3125': 1500, '1.25': 50000, '2.5': 100000, '5.0': 500000}
# Low magnification used for the coarse bounding-box pass.
WORK_MAG_1 = 0.3125
# Higher magnification used for the refined tissue-mask pass.
WORK_MAG_2 = 2.5
def main():
    """Detect tissue in a WSI with a coarse-to-fine two-pass strategy.

    Pass 1 finds a bounding box at low magnification (WORK_MAG_1); pass 2
    re-detects the tissue mask inside that box at WORK_MAG_2.  The result is
    saved as a GeoJSON annotation and registered in a per-slide TinyDB index.
    """
    p = opt.ArgumentParser(description="Detect tissue regions in a whole slide image.")
    p.add_argument("--mri_path", action="store", help="root folder for the multiresolution image (ZARR format)",
                   required=True)
    p.add_argument("--out", action="store",
                   help="JSON file for storing the resulting annotation (will be saved to ../annot/ relative to ZARR path)",
                   required=True)
    p.add_argument("--annotation_name", action="store", help="name of the resulting annotation",
                   default="tissue", required=False)
    p.add_argument("--min_area", action="store", type=int, default=None,
                   help="minimum area of a tissue region", required=False)
    p.add_argument("--he", action="store_true", help="use H&E-specific method for detecting the objects")
    p.add_argument("--track_processing", action="store_true",
                   help="should this action be stored in the <-RUN-detect_tissue.json> file for the slide?")
    args = p.parse_args()
    # --min_area defaults from, or overrides, the WORK_MAG_2 threshold table.
    if args.min_area is None:
        args.min_area = min_obj_size[str(WORK_MAG_2)]
    else:
        min_obj_size[str(WORK_MAG_2)] = args.min_area
    in_path = Path(args.mri_path).expanduser().absolute()
    out_path = (in_path.parent.parent / 'annot').expanduser().absolute()
    # Record run provenance in the module-level description dict.
    __description__['params'] = vars(args)
    __description__['input'] = [str(in_path)]
    __description__['output'] = [str(out_path / args.out)]
    if args.track_processing:
        (out_path.parent / '.run').mkdir(exist_ok=True)
        with open(out_path.parent / '.run' / 'run-detect_tissue.json', 'w') as f:
            json.dump(__description__, f, indent=2)
    # print(__description__)
    wsi = WSIInfo(in_path)
    img_src = MRI(in_path)
    # use a two pass strategy: first detect a bounding box, then zoom-in and
    # detect the final mask
    level = wsi.get_level_for_magnification(WORK_MAG_1)
    img = img_src.get_plane(level=level)
    mask, _ = detect_foreground(img, method='fesi', min_area=min_obj_size[str(WORK_MAG_1)])
    contours = mask_to_external_contours(mask, approx_factor=0.0001)
    # find the bounding box of the contours:
    # NOTE(review): the seeds are img.shape[:2] = (height, width), i.e. xmin
    # is seeded with the height rather than the width — harmless as long as
    # contours exist (min() over contour bounds dominates), but confirm.
    xmin, ymin = img.shape[:2]
    xmax, ymax = 0, 0
    for c in contours:
        minx, miny, maxx, maxy = c.geom.bounds
        xmin = min(xmin, minx)
        ymin = min(ymin, miny)
        xmax = max(xmax, maxx)
        ymax = max(ymax, maxy)
    # some free space around the ROI and rescale to new magnification level:
    f = WORK_MAG_2 / WORK_MAG_1
    xmin = int(f * max(0, xmin - 5))
    ymin = int(f * max(0, ymin - 5))
    xmax = int(f * min(img.shape[1] - 1, xmax + 5))
    ymax = int(f * min(img.shape[0] - 1, ymax + 5))
    # print("ROI @{}x: {},{} -> {},{}".format(WORK_MAG_2, xmin, ymin, xmax, ymax))
    level = wsi.get_level_for_magnification(WORK_MAG_2)
    img = img_src.get_region_px(xmin, ymin,
                                width=xmax - xmin, height=ymax - ymin,
                                level=level, as_type=np.uint8)
    # print("Image size 2: {}x{}".format(img.shape[0], img.shape[1]))
    # Second pass: refined detection inside the ROI, H&E-specific if requested.
    if args.he:
        mask, _ = detect_foreground(img, method='simple-he', min_area=min_obj_size[str(WORK_MAG_2)])
    else:
        mask, _ = detect_foreground(img, method='fesi',
                                    laplace_ker=15, gauss_ker=17, gauss_sigma=25.0,
                                    morph_open_ker=5, morph_open_iter=7, morph_blur=17,
                                    min_area=min_obj_size[str(WORK_MAG_2)])
    contours = mask_to_external_contours(mask,
                                         approx_factor=0.00005,
                                         min_area=min_obj_size[str(WORK_MAG_2)])
    # don't forget to shift detections by (xmin, ymin) to obtain coords in original space for
    # this magnification level...
    for c in contours:
        c.geom = translate(c.geom, xoff=xmin, yoff=ymin)
        c._name = "tissue"
    # ...and get image extent at working magnification
    img_shape = img_src.extent(level)
    annot = Annotation(name=args.annotation_name,
                       image_shape={'height': int(img_shape[1]), 'width': int(img_shape[0])},
                       magnification=WORK_MAG_2)
    annot.add_annotations(contours)
    # get back to native magnification...
    annot.set_magnification(wsi.get_native_magnification())
    # ...and correct the image extent (due to rounding it may be off by a few pixels), since
    # we actually know it:
    img_shape = img_src.extent(0)
    annot._image_shape = dict(width=img_shape[0], height=img_shape[1])
    with open(out_path / args.out , 'w') as f:
        gjson.dump(annot.asGeoJSON(), f, cls=NumpyJSONEncoder)
    # Register/refresh this run in the per-slide annotation index.
    annot_idx = out_path.parent / '.annot_idx.json'
    with TinyDB(annot_idx) as db:
        q = Query()
        r = db.search(q.unique_id == __description__['unique_id'])
        if len(r) == 0:
            # empty DB or no such record
            db.insert({'unique_id' : __description__['unique_id'],
                       'annotator': __description__['name'], 'parameters': __description__['params']})
        else:
            db.update({'annotator': __description__['name'], 'parameters': __description__['params']},
                      q.unique_id == __description__['unique_id'])
    return
##
if __name__ == '__main__':
main()
|
python
|
# Read five numbers, then report the largest/smallest values together with
# every position at which they occur.
numbers = []
posbetter = []
poslower = []
for _ in range(5):
    numbers.append(float(input('Type a number: ')))
# Compute the extremes once; the original called max()/min() on every loop
# iteration and again in the prints (accidental O(n^2)).
highest = max(numbers)
lowest = min(numbers)
for pos, value in enumerate(numbers):
    if value == highest:
        posbetter.append(pos)
    if value == lowest:
        poslower.append(pos)
print(f'The better number typed was {highest} in the position {posbetter}')
print(f'The lower number typed was {lowest} in the position {poslower}')
|
python
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import statsmodels.formula.api as smf
import statsmodels.api as sm
import pickle
#df = pd.read_pickle('sav.txt')
# NOTE(review): despite the .csv suffixes these files are read as pickled
# pandas objects — confirm they were written with to_pickle.
X = pd.read_pickle('Xpart.csv')
Y = pd.read_pickle('Ypart.csv')
# 70/30 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
## Prediction
model = LinearRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
## RMSE
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print('RMSE: ', rmse)
# R^2 score expressed as a percentage, rounded to two decimals.
accu = round((model.score(x_test, y_test)*100), 2)
print('Accuracy: ', accu, '%')# *100 = percentage of model accuracy
## Calculate the new OLS
# statsmodels needs an explicit intercept column added to the design matrix.
x_train = sm.add_constant(x_train)
fit_new = sm.OLS(y_train, x_train).fit()
ols_print = fit_new.summary()
print('new OLS: ', ols_print)
## save
# Persist the RMSE and the fitted OLS model together for later inspection.
with open('summary', 'wb') as f:
    pickle.dump([rmse, fit_new],f)
|
python
|
from numbers import Number
import numpy as np
import sympy
from collections.abc import Mapping
from sympy import Symbol
from sympy.core.relational import Relational
from toy.unit import DIMENSIONLESS, parse_unit_msg
from toy.utils import as_dict, is_numeric
from toy.core.value import Value
base_model = None
class ModelMeta(type):
    """
    Metaclass for all Model subclasses.

    The first class created through this metaclass becomes the module-level
    ``base_model`` sentinel and is built from a plain dict; every subsequent
    subclass is prepared with an :class:`Environment`, whose ``finalize()``
    result supplies the actual class namespace.
    """
    @classmethod
    def __prepare__(mcs, name, bases):
        # Bootstrap: the base model itself uses an ordinary dict namespace.
        if base_model is None:
            return {}
        return Environment()
    def __new__(mcs, name, bases, ns):
        global base_model
        if base_model is None:
            # Remember the first (base) model class.
            base_model = type.__new__(mcs, name, bases, ns)
            return base_model
        else:
            # Subclass: collapse the Environment into a plain namespace dict.
            return type.__new__(mcs, name, bases, ns.finalize())
class Environment(Mapping):
    """
    A mapping that interprets a Model class body during creation.

    Assignments inside the class body are intercepted by ``__setitem__``
    and routed to value, derivative, or bounds declarations; the collected
    state is folded back into a plain namespace by :meth:`finalize`.
    """
    def __init__(self, ns=None, values=None, equations=None, invariants=None):
        # Raw class-body namespace plus the declaration registries.
        self.namespace = as_dict(ns)
        self.values = as_dict(values)
        self.equations = as_dict(equations)
        self.invariants = as_dict(invariants)
        # 't' (time) is always available as a symbol.
        self.symbols = {'t'}
        self.namespace['t'] = Symbol('t')
        self.namespace['values'] = self.values
        self.namespace['equations'] = self.equations
        self.namespace['invariants'] = self.invariants
        # Per-variable numeric bounds collected from `bounds = ...`.
        self.lower = {}
        self.upper = {}
    def __iter__(self):
        yield from self.namespace
    def __getitem__(self, key):
        return self.namespace[key]
    def __len__(self):
        return len(self.namespace)
    def __setitem__(self, k, v):
        # Dispatch on the assigned name: private names pass through,
        # 'D_x' declares a derivative for x, 'bounds' declares bounds,
        # anything else declares a model value.
        if k.startswith('_'):
            self.namespace[k] = v
        elif k.startswith('D_'):
            self.declare_derivative(k[2:], v)
        elif k == 'bounds':
            self.declare_bounds(v)
        else:
            self.declare_value(k, v)
    def _add_symbol(self, name):
        # Register `name` as a real-valued sympy symbol in the namespace.
        self.symbols.add(name)
        self.namespace[name] = Symbol(name, real=True)
    def declare_derivative(self, name, spec):
        """
        Derivative declarations automatically promotes value to a dynamic
        variable. Spec can be a constant, a function, or an expression.
        The underlying value must have been declared first.
        """
        if name not in self.values:
            raise TypeError(f'derivative of unknown variable: D_{name}')
        self.equations[name] = spec
        self._add_symbol('D_' + name)
    def declare_value(self, name, value):
        """
        Value declarations can have many different forms

        ``name = value``:
            plain value, dimensionless, no description
        ``name = value, '[unit] description``:
            value with a unit/description spec string
        """
        unit = DIMENSIONLESS
        msg = ''
        if isinstance(value, tuple):
            value, spec = value
            unit, msg = parse_unit_msg(spec)
        # Wrap raw values so every entry in self.values is a Value instance.
        if not isinstance(value, Value):
            value = Value(name, value, unit=unit, description=msg)
        self.values[name] = value
        self._add_symbol(name)
    def declare_bounds(self, bounds):
        """
        Declare variable bounds (a Relational or a collection of them).
        """
        # Lists are spliced
        if isinstance(bounds, (list, tuple, set)):
            for boundary in bounds:
                self.declare_bounds(boundary)
            return
        # Bound is expressed as an inequality such as x > y
        if isinstance(bounds, Relational):
            self._declare_relational_bound(bounds)
        else:
            raise NotImplementedError(bounds)
    def _declare_relational_bound(self, bound):
        # Classify a single `symbol {op} number` inequality into
        # self.lower / self.upper.
        lhs, rhs = bound.lhs, bound.rhs
        # Auxiliary functions
        gt = lambda x: isinstance(x, (sympy.StrictGreaterThan, sympy.GreaterThan))
        lt = lambda x: isinstance(x, (sympy.StrictLessThan, sympy.LessThan))
        # Normalize "number {op} expr" or "other {op} symbol"
        # NOTE(review): the swap below does NOT invert the comparison
        # direction, so a bound written "5 < x" would be classified as an
        # upper bound.  Sympy usually canonicalizes relations so the symbol
        # already sits on the left, which may mask this — confirm.
        if is_numeric(lhs) or isinstance(rhs, Symbol):
            rhs, lhs = lhs, rhs
        # symbol {op} number
        if isinstance(lhs, Symbol) and is_numeric(rhs):
            if gt(bound):
                self.lower[str(lhs)] = rhs
            elif lt(bound):
                self.upper[str(lhs)] = rhs
            else:
                raise TypeError(f'invalid bound expression: {bound}')
        # Other bound
        else:
            raise NotImplementedError(bound)
    def finalize(self):
        """
        Return a namespace dictionary used to create the Model subclass.
        Registered symbols are stripped; only ordinary members remain.
        """
        symbols = self.symbols
        return {k: v for k, v in self.namespace.items() if k not in symbols}
|
python
|
import cv2
import time
import csv
import os
import picamera
import picamera.array
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# Physical (BOARD) pin numbering; pins 36/33/10/11 carry the four direction signals.
GPIO.setmode(GPIO.BOARD)
# NOTE(review): pins are configured as OUT here but read with GPIO.input()
# in gpio_fun() below — confirm they should not be GPIO.IN.
GPIO.setup(36, GPIO.OUT)
GPIO.setup(33, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
def gpio_fun():
    """Sample the direction pins and encode the active ones as a digit string.

    Returns
    -------
    str
        Concatenation of "1" (left, pin 36), "2" (right, pin 33),
        "3" (back, pin 10), "4" (forward, pin 11) for each high pin;
        "0" is appended when none of the four pins reads high.
        Note the pins are sampled again for the "0" check, so a pin
        changing between reads can yield e.g. "10".
    """
    val = ""
    if GPIO.input(36) == 1:
        # print("Left")
        val+="1"
    if GPIO.input(33) == 1:
        # print("Right")
        val += "2"
    if GPIO.input(10) == 1:
        # print("Back")
        val += "3"
    if GPIO.input(11) == 1:
        # print("Forward")
        val += "4"
    if 1 != (GPIO.input(36) or GPIO.input(33) or GPIO.input(10) or GPIO.input(11)):
        # print("Open")
        val += "0"
    print(val)
    return val
# Capture one camera frame per second and log it with the current GPIO
# direction state for later training.
# NOTE(review): the leading space in './ training_data' (and in the image
# name prefix below) looks accidental but is preserved because downstream
# tooling may depend on the exact paths.
img_dir = './ training_data'
filename = 'training_data.csv'
if not os.path.exists(img_dir):
    os.makedirs(img_dir)
training_start_time = time.time()
start_time = int(time.time())
try:
    with picamera.PiCamera() as camera:
        with picamera.array.PiRGBArray(camera) as stream:
            camera.resolution = (320, 240)
            while True:
                camera.capture(stream, 'bgr', use_video_port=True)
                cv2.imshow("video_frames", stream.array)
                stop_time = int(time.time())
                print(stop_time - start_time)
                # Persist at most one frame per wall-clock second.
                if int(stop_time - start_time) > 0:
                    start_time = stop_time
                    # Capture the timestamp ONCE so the image file name and
                    # the CSV row always agree; the original called
                    # int(time.time()) separately for each and the two calls
                    # could straddle a second boundary, producing a CSV row
                    # that references a non-existent image.
                    ts = int(time.time())
                    print("TIME PER SEC ", ts)
                    print("part2", stop_time - start_time)
                    # Writting the frames
                    cv2.imwrite(img_dir + '/ img-{}.jpg'.format(ts), stream.array)
                    print("image write done")
                    # Writting the csv (the explicit close() inside the
                    # `with` block was redundant and has been removed)
                    row = ['img-{}'.format(ts), gpio_fun()]
                    with open(filename, 'a', newline='') as csvFile:
                        csv.writer(csvFile).writerow(row)
                    # reset the stream before the next capture
                    # NOTE(review): PiRGBArray usually must be truncated
                    # before EVERY capture, not only once per second —
                    # confirm this placement is intentional.
                    stream.seek(0)
                    stream.truncate()
                # It Means Press ESC Key to Exit the Loop
                k = cv2.waitKey(30) & 0xff
                if k == 27:
                    break
except Exception as e:
    print("type err ", e)
cv2.destroyAllWindows()
print("TOTAL TRAINING TIME CAPTURED ", time.time() - training_start_time)
GPIO.cleanup()
|
python
|
# -*- coding: utf-8 -*-
"""Module with utility functions to call the GPT translation API"""
import json
import requests
# ==============================================================================
# CONSTANT DEFINITION
# ==============================================================================
API_EXCEPTIONS = (requests.HTTPError,)
# ==============================================================================
# CLASS AND FUNCTION DEFINITION
# ==============================================================================
class GPTClient:
    """Thin client for the RapidAPI GPT text-generation endpoint."""

    def __init__(self, api_key) -> None:
        # Credentials plus the fixed RapidAPI endpoint configuration.
        self.api_key = api_key
        self.url = "https://gpt-text-generation.p.rapidapi.com/complete"
        self.host = "gpt-text-generation.p.rapidapi.com"

    def format_prompt(self, text, task, input_desc, output_desc, example_in=None, example_out=None):
        """Build a few-shot completion prompt.

        Produces a prompt of the form::

            Correct grammar mistakes.

            Original: Where do you went?
            Standard American English: Where did you go?

            Original: Where is you?
            Standard American English:

        Args:
            text: The input text to transform (newlines are stripped).
            task: The task instruction for GPT, e.g. "Correct grammar mistakes."
            input_desc: Description of the input column.
            output_desc: Description of the output column.
            example_in: Optional example input text (enables the one-shot block).
            example_out: Example of the desired output for ``example_in``.

        Returns:
            The fully formatted prompt string.
        """
        cleaned = text.replace("\n", "")
        pieces = [f"{task}\n\n"]
        if example_in:
            pieces.append(f"{input_desc}: {example_in}\n{output_desc}: {example_out}\n\n")
        pieces.append(f"{input_desc}: {cleaned}\n{output_desc}:")
        return "".join(pieces)

    def generate(
        self,
        text,
        task,
        input_desc,
        output_desc,
        example_in=None,
        example_out=None,
        temperature=0.8,
    ):
        """Send the formatted prompt to the API and return the raw JSON response text.

        Raises:
            requests.HTTPError: when the response does not contain a generation.
        """
        prompt = self.format_prompt(text, task, input_desc, output_desc, example_in, example_out)
        print("Sending prompt:")
        print(prompt)
        payload = json.dumps({"prompt": prompt, "temperature": temperature})
        response = requests.post(
            url=self.url,
            data=payload,
            headers={
                "content-type": "application/json",
                "x-rapidapi-key": self.api_key,
                "x-rapidapi-host": self.host,
            },
        )
        if "generation" in response.text:
            # The body is already a JSON string; return it untouched.
            return response.text
        # Extract & surface the error-related information.
        user_message = (
            "Encountered the following error while sending an API request:"
            + f" Error Code: {response.status_code}"
            + f" Error message: {response.text}"
        )
        raise requests.HTTPError(user_message)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 14:33:34 2019
"""
import os
def walklevel(some_dir, level=1):
    """Like os.walk, but descend at most ``level`` directories below some_dir.

    Yields (root, dirs, files) tuples exactly as os.walk does; deeper
    directories are pruned in place so os.walk never enters them.
    """
    some_dir = some_dir.rstrip(os.path.sep)
    assert os.path.isdir(some_dir)
    base_depth = some_dir.count(os.path.sep)
    for root, dirs, files in os.walk(some_dir):
        yield root, dirs, files
        # Once the depth limit is reached, empty dirs in place to stop descent.
        if root.count(os.path.sep) >= base_depth + level:
            dirs[:] = []
# -*- coding: utf-8 -*-
from os import listdir
from numpy import sqrt
from numpy import log
from numpy import sum
from numpy import dot
from numpy import loadtxt
from numpy import zeros
from numpy import eye
from numpy import maximum
from pandas import DataFrame
from pandas import concat
MY_EPSILON = 1e-20
def buildLabeledDatabaseFromDirectory(directory_location):
    """
    @brief : build a pandas database to perform learning on subtrees.
    @param directory_location : path to folder containing all patients to load in the database
    For each patient, load all descriptors with legends and all probabilistic labels with legend.
    The directory_location folder is expected to contain one folder per patient.
    Each patient folder is expected to contain one folder per acquisition (case).
    Each case is expected to contain the following:
        - subtree_descriptors.txt : containing descriptors for every subtree
        - subtree_label_probabilities.txt : containing all labels probability for every subtree
        - computed_descriptors_meaning.txt : containing all descriptors meaning (legend)
        - label_names.txt : containing all labels meaning (legend)
    Returns (PandasDB, all_features, all_labels).
    """
    differentPatients = listdir(directory_location)
    all_features = set()
    all_labels = set()
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0 (and
    # was O(n^2)); collect per-case frames and concatenate once at the end.
    per_case_frames = []
    for patDir in differentPatients:
        for caseDir in listdir(directory_location + "/" + patDir):
            currLoc2read = directory_location + "/" + patDir + "/" + caseDir
            curr_case_X = loadtxt(currLoc2read + "/subtree_descriptors.txt")
            curr_case_Y = loadtxt(currLoc2read + "/subtree_label_probabilities.txt")
            with open(currLoc2read + "/computed_descriptors_meaning.txt", "r") as descriptorsLegendFile:
                currDescriptorsLegend = descriptorsLegendFile.read().split()
            all_features.update(currDescriptorsLegend)
            curr_dataframe_X = DataFrame(curr_case_X, columns=currDescriptorsLegend)
            # Tag every row with its provenance for later grouping.
            curr_dataframe_X["patient"] = patDir
            curr_dataframe_X["case"] = caseDir
            (_, curr_case_label_names) = loadLabelLegend(currLoc2read + "/label_names.txt")
            all_labels.update(curr_case_label_names)
            curr_dataframe_Y = DataFrame(curr_case_Y, columns=curr_case_label_names)
            per_case_frames.append(concat([curr_dataframe_X, curr_dataframe_Y], axis=1))
    PandasDB = concat(per_case_frames) if per_case_frames else DataFrame()
    return (PandasDB, list(all_features), list(all_labels))
def loadLabelLegend(label_legend_location):
    """Parse a legend file of "<int> : <name>" lines.

    Returns (indexes, names); spaces inside names are replaced by underscores.
    """
    indexes = []
    names = []
    with open(label_legend_location, "r") as legend_file:
        for raw_line in legend_file.read().splitlines():
            parts = raw_line.split(" : ")
            indexes.append(int(parts[0]))
            names.append(parts[1].replace(" ", "_"))
    return (indexes, names)
def loadSubtreeDescriptors(subtree_descriptors_location, subtree_descriptors_meaning_location):
    """Load a subtree descriptor matrix and name its columns from the legend file."""
    raw_matrix = loadtxt(subtree_descriptors_location)
    with open(subtree_descriptors_meaning_location, "r") as legend_file:
        column_names = legend_file.read().split()
    return DataFrame(raw_matrix, columns=column_names)
def computePatientLengthConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
    """
    @brief: Compute the patient length confusion matrix given the location of its prediction and ground truth.
    @param patient_prediction_location : folder containing the prediction data
    @param patient_ground_truth_location : folder containing the ground truth data
    @param labels_names_file : file containing the name of the labels (stored as integer)
    We define the length confusion matrix as the confusion matrix were branches contribute with respect to their length.
    Length is computed based on the branches stored in patient_ground_truth_location.
    The matrix is defined with the following convention:
        - each line correspond to a given prediction class
        - each column correspond to a given ground truth class
    Both folders are assumed to have a particular hierarchy:
        - The folder patient_ground_truth_location:
            * all branches named "branch????.txt"
            * a "branch_labels.txt" file
        -The folder patient_prediction_location:
            * all branches named "branch????.txt"
            * a file "recomputed_labels.txt"
    N.B. It is assumed that the number of branches in both folder are identical and that the files storing labels have the same number lines.
    """
    # Loading: each file holds (branchID, label) couples, one branch per row.
    ground_truth_couple_branchID_labelNb = loadtxt(patient_ground_truth_location + "/branch_labels.txt")
    prediction_couple_branchID_labelNb = loadtxt(patient_prediction_location + "/recomputed_labels.txt")
    (label_index, label_legend) = loadLabelLegend(labels_names_file)
    # Assert that all sizes are correct
    assert (len(prediction_couple_branchID_labelNb) == len(ground_truth_couple_branchID_labelNb))
    # Compute length for all branches (polyline length from the ground-truth
    # branch files; NOTE `sum`/`sqrt` here are the numpy versions imported at
    # module level, hence the axis= keyword below).
    branch_length = []
    for branch_index in prediction_couple_branchID_labelNb[:,0]:
        curr_branch = loadtxt(patient_ground_truth_location + "/branch" + format(int(branch_index), '04d') + ".txt")
        if len(curr_branch) == 0:
            # print("Ignoring empty branch in computePatientLengthConfusionMatrix")
            curr_length = 0.0
        else:
            curr_XYZ = curr_branch[:,0:-1] # Ignore the radius when computing the length
            diff_XYZ = curr_XYZ[1:] - curr_XYZ[0:-1]
            # Euclidean length of each consecutive segment, summed over the branch.
            elementary_distances = sqrt(sum(diff_XYZ * diff_XYZ, axis=1))
            curr_length = sum(elementary_distances)
        branch_length.append(curr_length)
    # Add the unknown label (-1) if not present so unmapped predictions still index:
    if label_index.count(-1) == 0:
        label_index.append(-1)
        label_legend.append("Unknown")
    # compute confusion matrix: rows = predicted class, columns = ground-truth class,
    # each branch contributing its length.
    nb_labels = len(label_legend)
    nb_branches = ground_truth_couple_branchID_labelNb.shape[0]
    resulting_confusion_matrix = zeros((nb_labels, nb_labels))
    for branchID in range(nb_branches):
        int_label_GT = int(ground_truth_couple_branchID_labelNb[branchID,1])
        int_label_pred = int(prediction_couple_branchID_labelNb[branchID,1])
        index_GT = label_index.index(int_label_GT)
        index_pred = label_index.index(int_label_pred)
        resulting_confusion_matrix[index_pred, index_GT] += branch_length[branchID]
    # return the confusion matrix with legend
    return (resulting_confusion_matrix, label_legend)
def computePatientConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
    """
    @brief: Compute the patient confusion matrix given the location of its prediction and ground truth.
    @param patient_prediction_location : folder containing the prediction data
    @param patient_ground_truth_location : folder containing the ground truth data
    @param labels_names_file : file containing the name of the labels (stored as integer)
    The confusion matrix is the length confusion matrix with each column
    normalized, i.e. the repartition (ratio) of predicted labels for a given
    ground-truth label. Convention (same as the length confusion matrix):
        - each line corresponds to a given prediction class
        - each column corresponds to a given ground truth class
    Both folders must follow the hierarchy documented on
    computePatientLengthConfusionMatrix.
    """
    # Start from the length-weighted confusion matrix.
    length_matrix, legend = computePatientLengthConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file)
    # Per-column totals, clamped away from zero to prevent 0-division
    # (`sum`/`maximum` are the numpy versions imported at module level).
    column_totals = maximum(sum(length_matrix, axis=0), MY_EPSILON)
    length_matrix /= column_totals
    return (length_matrix, legend)
def PatientClassificationMetric(confusion_matrix):
    """
    @brief : compute the classification metric on a patient from its confusion matrix
    The metric is the ratio between the length of *badly* annotated branches
    (off-diagonal mass) and the total length of branches.
    @param confusion_matrix : length confusion matrix (as computed in utils.computePatientLengthConfusionMatrix)
    """
    (n_rows, n_cols) = confusion_matrix.shape
    assert n_rows == n_cols  # confusion matrices are square by construction
    # Zero out the diagonal to keep only the misclassified mass
    # (`sum`/`eye` are the numpy versions imported at module level).
    off_diagonal = confusion_matrix * (1 - eye(n_rows))
    return sum(off_diagonal) / sum(confusion_matrix)
def plotConfusionMatrix(confusion_matrix, label_legend):
    """ Plot a length confusion matrix with legend """
    # Import locally so matplotlib is only required when plotting is requested.
    from matplotlib.pyplot import imshow, xticks, yticks, show, figure, xlabel, ylabel, colorbar
    figure()
    imshow(confusion_matrix)
    # Columns are ground-truth labels, rows are predicted labels
    # (same convention as computePatientLengthConfusionMatrix).
    xticks(range(len(label_legend)), label_legend, rotation='vertical')
    xlabel("GT label")
    ylabel("Predicted label")
    yticks(range(len(label_legend)), label_legend)
    colorbar()
    show()  # blocks until the window is closed
def mergeLabelsInDatabase(PandasDB, first_label_to_merge, second_label_to_merge, merged_label_name):
    """Merge two label-probability columns into one.

    The two source columns are dropped and their element-wise sum is appended
    as a new last column named ``merged_label_name``. Returns the new DataFrame.
    """
    assert isinstance(PandasDB, DataFrame)
    summed = DataFrame(
        PandasDB[first_label_to_merge] + PandasDB[second_label_to_merge],
        columns=[merged_label_name],
    )
    remaining = PandasDB.drop(columns=[first_label_to_merge, second_label_to_merge])
    return concat([remaining, summed], axis=1)
def manualFeatureGaussianization(PandasDB):
    """
    Heuristically defined transformation of features used to "Gaussianize" their distributions.
    In practice, sqrt, log(1+x) and identity are used. Columns without a known
    transform are left untouched. Mutates PandasDB in place; returns None.
    """
    assert isinstance(PandasDB, DataFrame)
    log1p_like = lambda value: log(1.0 + value)  # log variant that tolerates zeros
    keep = lambda value: value
    transforms = {
        "length": sqrt,
        "end_points_length": sqrt,
        "average_radius": keep,
        "stdev_radius": keep,
        "average_curvature": keep,
        "stdev_curvature": keep,
        "invariant_moment_1": log1p_like,
        "invariant_moment_2": log1p_like,
        "invariant_moment_3": log1p_like,
        "lambda_1": keep,
        "lambda_2": keep,
        "lambda_3": keep,
        "principal_direction_x": keep,
        "principal_direction_y": keep,
        "principal_direction_z": keep,
        "average_endpoint_direction_x": keep,
        "average_endpoint_direction_y": keep,
        "average_endpoint_direction_z": keep,
    }
    for column in PandasDB.columns:
        if column in transforms:
            PandasDB[column] = PandasDB[column].apply(transforms[column])
def extractFeatures(PandasDB, invariant="rotation_translation"):
    """
    Extract the set of features used for classification, selected by their
    invariance to geometric transformations.
    Admissible invariants are:
        - "rotation_translation" (default): 12 fully invariant features
        - "translation": adds the 6 direction features
        - anything else: additionally adds the 6 barycenter features
    Returns a DataFrame restricted to (and ordered by) the selected features.
    """
    assert isinstance(PandasDB, DataFrame)
    features = [
        "length", "end_points_length", "average_radius", "stdev_radius",
        "average_curvature", "stdev_curvature",
        "invariant_moment_1", "invariant_moment_2", "invariant_moment_3",
        "lambda_1", "lambda_2", "lambda_3",
    ]
    if invariant != "rotation_translation":
        # Direction features: translation-invariant only.
        features += [
            "principal_direction_x", "principal_direction_y", "principal_direction_z",
            "average_endpoint_direction_x", "average_endpoint_direction_y",
            "average_endpoint_direction_z",
        ]
    if invariant not in ("rotation_translation", "translation"):
        # Absolute-position features: no invariance at all.
        features += [
            'baricenter_x', 'baricenter_y', 'baricenter_z',
            'weighted_baricenter_x', 'weighted_baricenter_y', 'weighted_baricenter_z',
        ]
    return DataFrame(PandasDB, columns=features)
def compute_balanced_sample_weight(Y):
    """
    Compute per-example weights that balance training across classes.
    Extends sklearn.utils.class_weight.compute_class_weight to label matrices
    Y that hold class probabilities rather than 1-hot encodings.
    Returns a vector of one weight per row of Y.
    """
    # Total probability mass per class, and overall
    # (`sum`/`dot` are the numpy versions imported at module level).
    per_class_mass = sum(Y, axis=0)
    total_mass = sum(per_class_mass)
    nb_classes = Y.shape[1]
    # Rare classes get proportionally larger weights; MY_EPSILON guards
    # against division by zero for empty classes.
    class_weight = total_mass / (float(nb_classes) * per_class_mass + MY_EPSILON)
    return dot(Y, class_weight)
def sample_weighted_mean_squared_error(Y_true, Y_pred):
    """Mean squared error where each sample is weighted for class balance.

    Weights come from compute_balanced_sample_weight(Y_true); the result is
    the weighted average of the per-sample MSE.
    """
    balance_weights = compute_balanced_sample_weight(Y_true)
    per_sample_mse = ((Y_pred - Y_true) ** 2).mean(axis=1)
    # `sum` is the numpy version imported at module level.
    return sum(per_sample_mse * balance_weights, axis=0) / sum(balance_weights)
def keras_sample_weighted_mean_squared_error(Y_true, Y_pred):
    """Keras-backend version of sample_weighted_mean_squared_error.

    Recomputes the balanced sample weights inside the graph (mirroring
    compute_balanced_sample_weight) and returns the weighted mean of the
    per-sample MSE as a backend tensor, usable as a Keras loss.
    """
    # Imported locally so keras is only required when this loss is used.
    import keras.backend as K
    # Per-class probability mass and its total (same as the numpy version).
    sum_of_proba = K.sum(Y_true, axis=0)
    total = K.sum(sum_of_proba)
    nb_cases = K.shape(Y_true)[0]
    nb_classes = K.shape(Y_true)[1]
    # Rare classes get larger weights; MY_EPSILON avoids division by zero.
    class_weight = total / ( K.cast(nb_classes, dtype='float32') * sum_of_proba + MY_EPSILON)
    # Per-sample weight = Y_true . class_weight, kept as a column vector.
    weights = K.dot(Y_true, K.reshape(class_weight, (nb_classes, -1)))
    sumSquared = K.mean((Y_pred - Y_true)**2, axis=1)
    weightedSum = K.sum(K.reshape(sumSquared, (nb_cases, -1)) * weights)
    res = weightedSum / K.sum(weights)
    return res
|
python
|
import urequests
class InfluxDBClient():
    """Minimal InfluxDB v2 line-protocol writer for MicroPython (urequests)."""

    def __init__(self, url, token, org, bucket):
        self.url = url
        self.token = token
        self.org = org
        # Query-string parameters and auth header for every write request.
        self.params = {"bucket": bucket, "org": org}
        self.headers = {"Authorization": "Token {}".format(token)}
        # Render a dict as ["key=value", ...] pieces (used for both the
        # line protocol and the query string).
        self.conveq = lambda mapping: ["{}={}".format(key, value) for key, value in mapping.items()]

    def write(self, point, measurement, tag=None):
        """POST one line-protocol record; returns the urequests response.

        `point` is the measurement name, `measurement` a dict of field
        key/values, `tag` an optional dict of tag key/values.
        """
        tags = tag if tag else {}
        line = ",".join([str(point)] + self.conveq(tags))
        line = line + " " + ",".join(self.conveq(measurement))
        query = "&".join(self.conveq(self.params))
        return urequests.post(
            self.url + '/api/v2/write?' + query,
            data=line,
            headers=self.headers)
|
python
|
'''
Calculate astrometric motion of star over given time frame using JPL Horizons ephermerides
and SIMBAD proper motion and parallax
'''
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
import csv
import jplephem
import de421
from jplephem.spk import SPK
kernel = SPK.open('de430.bsp')
import numpy as np
import matplotlib.pyplot as pl
import astropy.units as u
from astropy.constants import G
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
from datetime import datetime
import matplotlib.ticker as ticker
import warnings
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
warnings.filterwarnings('ignore', category=UserWarning)
startTime = datetime.now()
def convert_coords(array):
    """Convert an (RA, DEC) string pair (ICRS frame) to radians via astropy."""
    sky_position = SkyCoord(array[0], array[1], frame='icrs')
    return sky_position.ra.rad, sky_position.dec.rad
def proj_RA_DEC(RA, DEC, pm_ra,pm_dec,prlx,epoch,JD):
    """
    Project a star's position over time from proper motion and parallax.
    INPUTS:
        RA -- RA position of star, in radians
        DEC -- DEC position of star, in radians
        pm_ra, pm_dec -- proper motion (radians/yr, as converted by mas2rad in this script)
        prlx -- parallax (radians here; units must simply be consistent with the caller)
        epoch -- epoch (decimal years) to compute the offsets (scalar or monotonically increasing vector)
        JD -- Julian date(s) matching `epoch`, used for the Earth ephemeris
    OUTPUTS:
        prop_DEC -- DEC of each point along the time baseline (proper motion only)
        prop_RA -- RA of each point along the time baseline (proper motion only)
        prop_par_RA -- RA including the parallactic shift
        prop_par_DEC -- DEC including the parallactic shift
    """
    x,y,z = earthPos(JD)  # Earth geocenter position (AU) from the jplephem ephemeris
    # BUG FIX: the original used the module-level global DEC_J2000 here instead
    # of the DEC parameter. Equivalent for the existing call sites (which all
    # pass DEC_J2000) but wrong for any other star.
    prop_DEC = DEC + pm_dec * epoch  # Calculate DEC first here
    # RA proper motion is divided by cos(DEC) to convert to a coordinate shift.
    if np.size(prop_DEC)>1:
        prop_RA = RA + pm_ra * epoch/np.cos(prop_DEC[0]) # rads
    else:
        prop_RA = RA + pm_ra * epoch/np.cos(prop_DEC) # rads
    # Parallactic displacement driven by the Earth's barycentric position.
    prop_par_RA = prop_RA + prlx/np.cos(prop_DEC)*(x*np.sin(prop_RA) - y*np.cos(prop_RA))
    prop_par_DEC = prop_DEC + prlx*(x*np.cos(prop_RA)*np.sin(prop_DEC) + y*np.sin(prop_RA)*np.sin(prop_DEC) - z* np.cos(prop_RA))
    return prop_DEC,prop_RA,prop_par_RA,prop_par_DEC
def earthPos(JD):
    """Earth geocenter position (AU) w.r.t. the solar-system barycenter at JD.

    Uses the DE421 ephemeris: Earth-Moon barycenter minus the Moon's offset
    scaled by the Earth's share of the pair's mass. Returns (x, y, z) arrays.
    """
    AU_KM = 1.49597870700e+8  # one astronomical unit in kilometres
    eph = jplephem.Ephemeris(de421)
    earthmoon_barycenter = eph.position('earthmoon', JD) / AU_KM
    moon_offset = eph.position('moon', JD) / AU_KM
    geocenter = earthmoon_barycenter - moon_offset * eph.earth_share
    return geocenter[0, :], geocenter[1, :], geocenter[2, :]
def mas2rad(mas):
    """Convert milliarcseconds to radians (1000 mas/arcsec, 3600 arcsec/deg)."""
    return mas * 2 * np.pi / (1000 * 3600 * 360)
def rad2mas(rad):
    """Convert radians to milliarcseconds (inverse of mas2rad)."""
    return rad * 360 / (2 * np.pi) * 3600 * 1000
def Sec(theta):
    """Trigonometric secant: 1 / cos(theta)."""
    return 1.0 / np.cos(theta)
# ---------------------------------------------------------------------------
# Per-system configuration and input data
# ---------------------------------------------------------------------------
target = 'HAT-P-7'                    # star name; must match a row of the params file
params = 'TRENDS_ast_params.txt'      # SIMBAD astrometric parameters, comma separated
data_name = target+ '_ast.txt'        # measured astrometry for this target
npoints =25 # number of points that makes up the "tornado path"
## Read in Data
# BUG FIX: np.str / np.float were deprecated aliases of the builtins and were
# removed in NumPy 1.24; use the builtin types directly (identical behavior).
labels = np.loadtxt(params, delimiter=',', dtype=str, usecols=[0])
ICRS = np.loadtxt(params, delimiter=',', dtype=str, usecols=[1,2])
values = np.loadtxt(params, delimiter=',', dtype=float, usecols=[3,4,5,6,7,8])
data = np.loadtxt(data_name, delimiter=',', dtype=float)
a = np.where(np.char.find(labels, target) > -1)  # rows whose label contains the target name
ind = a[0][0]  # first matching row index
# Convert input params into radians
RA_J2000, DEC_J2000 = convert_coords(ICRS[ind,:]) # convert coordinates to radians
values = mas2rad(values)  # SIMBAD values are in mas (or mas/yr); all radians from here on
# Assign SIMBAD parameters individual names (all in radians after mas2rad)
prlx = values[0][0]     # parallax
dprlx = values[0][1]    # parallax uncertainty
pm_ra = values[0][2]    # proper motion in RA
dpm_ra = values[0][3]   # proper-motion uncertainty in RA
pm_dec = values[0][4]   # proper motion in DEC
dpm_dec = values[0][5]  # proper-motion uncertainty in DEC
# Assign measured astrometry parameters individual names
D0_NS = data[0][3] # NS initial positions - mas
D0_EW = data[0][1] # EW initial positions - mas
JD = data[:,0] # full JD
NS = data[:,3] # NS data
EW = data[:,1] # EW data
dEW = data[:,2] # EW error
dNS = data[:,4] # NS error
# Convert JD into decimal year
t = Time(JD,format = 'jd').decimalyear
# Time baseline for the projected "tornado path", padded out to whole years.
tl = np.linspace(np.floor(np.min(t)), np.ceil(np.max(t)), npoints)
JDL = Time(tl,format = 'decimalyear').jd
# Print out system parameters... dont really need this anymore
print("""Astrometric parameters in radians:
Name : {0}
RA_J2000 = {1}
DEC_J2000 = {2}
Prlx = {3} +/- {4}
pm_ra = {5} +/- {6}
pm_dec = {7} +/- {8}
D_NS = {9}
D_EW = {10}
""".format(labels[ind],RA_J2000,DEC_J2000,prlx,dprlx,pm_ra,dpm_ra,pm_dec,dpm_dec,D0_NS,D0_EW))
# position = kernel[0,3].compute(JD)/ 1.49597870700e+8
# print(position)
# Use jplephem with de421 to determine position of Earth geocenter wrt SS Barycenter
# earthPos = np.loadtxt('HAT-P-7.txt', delimiter='\t', dtype=np.float)
# print(earthPos)
# print(earthPos)
# Combine proper motion and parallax with RA and DEC to project star path in time
# Calculate initial positions
# NOTE(review): x0, y0, z0 are never used -- proj_RA_DEC recomputes the Earth
# position internally from its JD argument.
x0,y0,z0=earthPos(JD[0])
_,_,RA_start,DEC_start = proj_RA_DEC(RA_J2000, DEC_J2000, pm_ra,pm_dec,prlx,t[0],JD[0])#,x0,y0,z0)
# Calculate full curves
x,y,z = earthPos(JDL)
prop_DEC,prop_RA,prop_par_RA,prop_par_DEC = proj_RA_DEC(RA_J2000, DEC_J2000, pm_ra,pm_dec,prlx,tl,JDL)#,x,y,z)
# pl.figure()
# pl.plot(prop_DEC,tl)
# pl.show()
# Error Propagation
# Use the first measurement's uncertainties (converted to radians) as the
# positional error terms in the analytic propagation below.
dDEC = mas2rad(dNS[0])
dRA = mas2rad(dEW[0])
# RA Error
# NOTE(review): closed-form first-order error propagation for the RA track;
# the expressions appear machine-generated and are not re-derived here.
sigma_ra_2 = Sec(DEC_J2000 + pm_dec*tl)**2 *(dprlx**2 * (y*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) - x*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_ra**2*prlx**2*tl**2*Sec(DEC_J2000 + pm_dec*tl)**2 * (x*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + y*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_dec**2*prlx**2*tl**2*(np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl))*(-y + pm_ra*tl*x*Sec(DEC_J2000 + pm_dec*tl)) + (x + pm_ra*tl*y*Sec(DEC_J2000 + pm_dec*tl))*\
np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2*np.tan(DEC_J2000 + pm_dec*tl)**2)+\
prlx**2*Sec(DEC_J2000 + pm_dec*tl)**2 *( dRA**2*(x*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + y*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 + dDEC**2*\
(np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl))*(-y + pm_ra*tl*x*Sec(DEC_J2000 + pm_dec*tl)) + (x + pm_ra*tl*y*Sec(DEC_J2000 + pm_dec*tl))*\
np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2*np.tan(DEC_J2000 + pm_dec*tl)**2)
# Combine (in quadrature, in mas) with the smallest measured EW uncertainty.
sigma_ra = np.sqrt(rad2mas(np.sqrt(sigma_ra_2))**2+np.min(dEW)**2)
# DEC Error
sigma_dec_2=dprlx**2 *(np.cos(RA_J2000 + pm_ra* tl* Sec(DEC_J2000 + pm_dec* tl))* (-z + x* np.sin(DEC_J2000 + pm_dec* tl)) + y *np.sin(DEC_J2000 + pm_dec* tl)\
*np.sin(RA_J2000 + pm_ra *tl *Sec(DEC_J2000 + pm_dec* tl)))**2 + dRA**2 *prlx**2 *(y *np.cos(RA_J2000 + pm_ra* tl *Sec(DEC_J2000 + pm_dec* tl))\
*np.sin(DEC_J2000 + pm_dec* tl) + (z* - x *np.sin(DEC_J2000 + pm_dec* tl))*np.sin(RA_J2000 + pm_ra *tl *Sec(DEC_J2000 + pm_dec* tl)))**2\
+dpm_ra**2 *prlx**2* tl**2* (z *Sec(DEC_J2000 + pm_dec* tl) *np.sin(RA_J2000 +pm_ra*tl *Sec(DEC_J2000 + pm_dec*tl)) + \
(y* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))*np.tan(DEC_J2000 + pm_dec*tl))**2\
+dDEC**2*prlx**2*(np.cos(DEC_J2000 +pm_dec*tl)* (x *np.cos(RA_J2000 + pm_ra*tl *Sec(DEC_J2000 + pm_dec*tl)) +y* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))\
+pm_ra*tl* np.tan(DEC_J2000 +pm_dec*tl)* (z* Sec(DEC_J2000 + pm_dec*tl)*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + \
(y* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl))) *np.tan(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_dec**2* prlx**2* tl**2* (np.cos(DEC_J2000 +pm_dec*tl)* (x* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) +y *np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))\
+pm_ra*tl *np.tan(DEC_J2000 + pm_dec*tl)* (z* Sec(DEC_J2000 + pm_dec*tl)* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) + (y* np.cos(RA_J2000 + pm_ra*tl\
*Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl))) *np.tan(DEC_J2000 + pm_dec*tl)))**2
sigma_dec = np.sqrt(rad2mas(np.sqrt(sigma_dec_2))**2+np.min(dNS)**2)
# Total change in RA and DEC in mas
dprop_par_RA_mas = rad2mas(prop_par_RA - RA_start)
dprop_par_DEC_mas = rad2mas(prop_par_DEC - DEC_start)
# dprop_par_RA_mas = ((prop_par_RA-prop_par_RA[0])*u.rad).to(u.arcsec).value *1000
# dprop_par_DEC_mas = ((prop_par_DEC-prop_par_DEC[0])*u.rad).to(u.arcsec).value *1000
# Convert RA and DEC to NS,EW
prop_par_EW_mas=-dprop_par_RA_mas*np.cos(prop_DEC) # EW motion of a background object relative to star (mas)
prop_par_NS_mas=-dprop_par_DEC_mas # NS motion of a background object relative to star (mas)
# Start from initial NS,EW offset (start from the first data point)
NS_vector = D0_NS + prop_par_NS_mas
EW_vector = D0_EW + prop_par_EW_mas
# Shared tick locators/formatters for the epoch (x) axis of both figures.
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(0.2)

# ---------- Figure 1: North/East offsets vs epoch ----------
fig, axarr = pl.subplots(2, sharex=True)
axarr[1].xaxis.set_major_locator(majorLocator)
axarr[1].xaxis.set_major_formatter(majorFormatter)
axarr[1].xaxis.set_minor_locator(minorLocator)
# North offset: model track with 1- and 2-sigma shaded bands, plus the data.
axarr[0].plot(tl, NS_vector, color="black", lw=2, alpha=1)
axarr[0].fill_between(tl, NS_vector, NS_vector + sigma_dec, alpha=0.3, color='black', linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector - sigma_dec, alpha=0.3, color='black', linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector + 2 * sigma_dec, alpha=0.2, color='black', linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector - 2 * sigma_dec, alpha=0.2, color='black', linewidth=0)
axarr[0].errorbar(t, NS, yerr=dNS, fmt=".k")
axarr[0].text(0.8, 0.8, target, fontweight='bold', fontsize=25, horizontalalignment='center', verticalalignment='center', transform=axarr[0].transAxes)
axarr[0].set_ylabel("North offset (mas)", fontweight='bold', fontsize=16)
axarr[0].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[0].locator_params(axis='y', nbins=6)
# East offset panel, same layout.
axarr[1].plot(tl, EW_vector, color="black", lw=2, alpha=1)
axarr[1].fill_between(tl, EW_vector, EW_vector + sigma_ra, alpha=0.3, color='black', linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector - sigma_ra, alpha=0.3, color='black', linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector + 2 * sigma_ra, alpha=0.2, color='black', linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector - 2 * sigma_ra, alpha=0.2, color='black', linewidth=0)
axarr[1].errorbar(t, EW, yerr=dEW, fmt=".k")
axarr[1].set_ylabel("East offset (mas)", fontweight='bold', fontsize=16)
axarr[1].set_xlabel("Epoch (yrs)", fontweight='bold', fontsize=16)
axarr[1].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[1].locator_params(axis='y', nbins=6)
pl.xlim([np.min(tl), np.max(tl)])
fig.tight_layout()
fig.subplots_adjust(hspace=0.001)  # no vertical space between the stacked panels
pl.savefig(target + '_plot_NSEW.png')

# ---------- Figure 2: position angle and separation vs epoch ----------
PA = np.abs((180. / np.pi) * np.arctan2(NS_vector, EW_vector)) + 90  # degrees
SEP = np.sqrt(NS_vector ** 2 + EW_vector ** 2)  # mas
fig, axarr = pl.subplots(2, sharex=True)
axarr[1].xaxis.set_major_locator(majorLocator)
axarr[1].xaxis.set_major_formatter(majorFormatter)
axarr[1].xaxis.set_minor_locator(minorLocator)
axarr[0].plot(tl, PA, color="black", lw=2, alpha=1)
axarr[0].text(0.8, 0.8, target, fontweight='bold', fontsize=25, horizontalalignment='center', verticalalignment='center', transform=axarr[0].transAxes)
# BUG FIX: PA is computed in degrees (180/pi factor above); the old label said "(rad)".
axarr[0].set_ylabel("PA (deg)", fontweight='bold', fontsize=16)
axarr[0].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[0].locator_params(axis='y', nbins=6)
axarr[1].plot(tl, SEP, color="black", lw=2, alpha=1)
axarr[1].set_ylabel("Sep (mas)", fontweight='bold', fontsize=16)
axarr[1].set_xlabel("Epoch (yrs)", fontweight='bold', fontsize=16)
axarr[1].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[1].locator_params(axis='y', nbins=6)
pl.xlim([np.min(tl), np.max(tl)])
fig.tight_layout()
fig.subplots_adjust(hspace=0.001)
pl.savefig(target + '_plot_PASEP.png')

# ---------- Figure 3: on-sky track with the measured positions ----------
pl.figure()
pl.plot(EW_vector, NS_vector, color='black')
# BUG FIX: the error bars were swapped -- x is the East offset (error dEW)
# and y is the North offset (error dNS); the original passed yerr=dEW, xerr=dNS.
pl.errorbar(EW[0], NS[0], yerr=dNS[0], xerr=dEW[0], fmt='ok', markersize=12)
pl.errorbar(EW[1], NS[1], yerr=dNS[1], xerr=dEW[1], fmt='dk', markersize=12)
pl.xlabel('East Offset (mas)')
pl.ylabel('North Offset (mas)')
pl.tight_layout()
pl.savefig('test.png')
|
python
|
# -*- coding: utf-8 -*-
"""
@Project : activationFunction
@Author : Xu-Shan Zhao
@Filename: activationFunction202003121138.py
@IDE : PyCharm
@Time1 : 2020-03-12 11:38:55
@Time2 : 2020/3/12 11:38
@Month1 : 3月
@Month2 : 三月
"""
import torch
import matplotlib.pyplot as plt
x_data = torch.arange(-6, 6, 0.01)
y_tanh = torch.tanh(x_data)
y_sigmoid = torch.sigmoid(x_data)
y_relu = torch.relu(x_data)
y_leakyrelu = torch.nn.functional.leaky_relu(x_data, negative_slope=0.05)
y_prelu = torch.prelu(x_data, weight=torch.tensor(0.25))
y_rrelu = torch.rrelu(x_data, lower=0., upper=1)
plt.ion()
plt.cla()
plt.plot(x_data.data.numpy(), y_tanh.data.numpy(), c='red', label='tanh')
plt.legend()
plt.xlabel('x')
plt.ylabel('tanh(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_sigmoid.data.numpy(), c='red', label='sigmoid')
plt.legend()
plt.xlabel('x')
plt.ylabel('Sigmoid(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_relu.data.numpy(), c='red', label='ReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('ReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_leakyrelu.data.numpy(), c='red', label='Leaky ReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('Leaky ReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_prelu.data.numpy(), c='red', label='PReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('PReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_rrelu.data.numpy(), c='red', label='RReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('RReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_tanh.data.numpy(), c='red', linestyle='-', label='tanh')
plt.plot(x_data.data.numpy(), y_sigmoid.data.numpy(), c='blue', linestyle='-.', label='sigmoid')
plt.plot(x_data.data.numpy(), y_relu.data.numpy(), c='green', linestyle='--', label='ReLU')
plt.plot(x_data.data.numpy(), y_leakyrelu.data.numpy(), c='black', linestyle=':', label='Leaky ReLU')
plt.plot(x_data.data.numpy(), y_prelu.data.numpy(), c='orange', label='PReLU')
plt.plot(x_data.data.numpy(), y_rrelu.data.numpy(), c='gold', label='RReLU')
plt.legend()
plt.ylim(-1.05, 1.5)
plt.xlabel('x')
plt.pause(1)
plt.ioff()
plt.show()
|
python
|
with open('day1_input.txt') as file:
input = file.read()
# Part 1
sum = 0
for i in range(len(input) - 1):
if input[i] == input[i + 1]:
sum += int(input[i])
if input[len(input) - 1] == input[0]:
sum += int(input[0])
print(sum)
# Part 2
sum = 0
forward = len(input) / 2
for i in range(forward):
if input[i] == input[i + forward]:
sum += int(input[i])
for i in range(forward, len(input)):
if input[i] == input[(i + forward) % len(input)]:
sum += int(input[i])
print(sum)
|
python
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from ..utils import get_check
# Module-wide pytest marks: every test here requires Python 3 and belongs
# to the openmetrics transformer metadata suite groupings.
pytestmark = [
    requires_py3,
    pytest.mark.openmetrics,
    pytest.mark.openmetrics_transformers,
    pytest.mark.openmetrics_transformers_metadata,
]
def test_basic(aggregator, datadog_agent, dd_run_check, mock_http_response):
    """A 'metadata'-type metric config with only a label name should parse
    the gitVersion label of kubernetes_build_info as a semver version."""
    mock_http_response(
        """
# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
# TYPE kubernetes_build_info gauge
kubernetes_build_info{buildDate="2016-11-18T23:57:26Z",compiler="gc",gitCommit="3872cb93abf9482d770e651b5fe14667a6fca7e0",gitTreeState="dirty",gitVersion="v1.6.0-alpha.0.680+3872cb93abf948-dirty",goVersion="go1.7.3",major="1",minor="6+",platform="linux/amd64"} 1
    """ # noqa: E501
    )
    # No 'scheme' option: the transformer defaults to semver parsing.
    check = get_check(
        {'metrics': [{'kubernetes_build_info': {'name': 'version', 'type': 'metadata', 'label': 'gitVersion'}}]}
    )
    check.check_id = 'test:instance'
    dd_run_check(check)
    # Expected decomposition of "v1.6.0-alpha.0.680+3872cb93abf948-dirty".
    version_metadata = {
        'version.major': '1',
        'version.minor': '6',
        'version.patch': '0',
        'version.release': 'alpha.0.680',
        'version.build': '3872cb93abf948-dirty',
        'version.raw': 'v1.6.0-alpha.0.680+3872cb93abf948-dirty',
        'version.scheme': 'semver',
    }
    datadog_agent.assert_metadata('test:instance', version_metadata)
    datadog_agent.assert_metadata_count(len(version_metadata))
    # Metadata-type entries submit no metrics, so nothing should be left over.
    aggregator.assert_all_metrics_covered()
def test_options(aggregator, datadog_agent, dd_run_check, mock_http_response):
    """A regex scheme with named groups should extract version parts from
    the gitVersion label and report them under the final_scheme."""
    mock_http_response(
        """
# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
# TYPE kubernetes_build_info gauge
kubernetes_build_info{buildDate="2016-11-18T23:57:26Z",compiler="gc",gitCommit="3872cb93abf9482d770e651b5fe14667a6fca7e0",gitTreeState="dirty",gitVersion="v1.6.0-alpha.0.680+3872cb93abf948-dirty",goVersion="go1.7.3",major="1",minor="6+",platform="linux/amd64"} 1
    """ # noqa: E501
    )
    check = get_check(
        {
            'metrics': [
                {
                    'kubernetes_build_info': {
                        'name': 'version',
                        'type': 'metadata',
                        'label': 'gitVersion',
                        # Custom extraction: regex named groups become the
                        # version parts; reported scheme is 'semver'.
                        'scheme': 'regex',
                        'final_scheme': 'semver',
                        'pattern': 'v(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<fix>\\d+)',
                    }
                }
            ],
        }
    )
    check.check_id = 'test:instance'
    dd_run_check(check)
    # Only the named groups (major/minor/fix) plus raw+scheme are reported.
    version_metadata = {
        'version.major': '1',
        'version.minor': '6',
        'version.fix': '0',
        'version.raw': 'v1.6.0-alpha.0.680+3872cb93abf948-dirty',
        'version.scheme': 'semver',
    }
    datadog_agent.assert_metadata('test:instance', version_metadata)
    datadog_agent.assert_metadata_count(len(version_metadata))
    # Metadata-type entries submit no metrics, so nothing should be left over.
    aggregator.assert_all_metrics_covered()
|
python
|
# CSV to JSON
# 1. run the following from the root of project directory: `cd data/achievements; python3 achievements-gen.py`
# 2. commit and push changes
# 3. website will automatically update
import csv, json
with open("./achievements.csv", encoding='utf-8', mode='r') as fin:
with open("./achievements.json", mode="w") as fout:
csv_data = list(csv.reader(fin))
json_data = []
for (_i, row) in enumerate(csv_data[1:]):
i = _i + 2
try:
# Handle Year columns
if row[0].strip() != "":
try:
int_year = int(row[0])
except:
raise TypeError("Invalid year on Row {}.".format(i))
json_data.append({
"year": int_year,
"competitions": []
})
# Handle Competition columns
if row[1].strip() != "":
if row[2].strip() == "": raise ValueError("Missing Region on Row {}.".format(i))
json_data[-1]["competitions"].append({
"code": row[1],
"region": row[2],
"desc": row[3],
"awards": []
})
# Handle Award columns
json_data[-1]["competitions"][-1]["awards"].append({
"category": row[4],
"title": row[5],
"team": row[6],
"recipients": row[7].split(", ")
})
except:
raise RuntimeError("Error occurred on Row {}. This error may or may not have been due to input data.".format(i))
json_data.reverse()
for year in json_data:
year["competitions"].reverse()
print(json.dumps(json_data, indent=2, sort_keys=True))
fout.write(json.dumps(json_data, indent=2, sort_keys=True))
|
python
|
"""
CODE ADAPTED FROM: https://github.com/sjblim/rmsn_nips_2018
Treatment Effects with RNNs:
Common routines to use across all training scripts
"""
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
import treatments.RMSN.configs
import treatments.RMSN.libs.net_helpers as helpers
from treatments.RMSN.libs.model_rnn import RnnModel
ROOT_FOLDER = treatments.RMSN.configs.ROOT_FOLDER
MODEL_ROOT = treatments.RMSN.configs.MODEL_ROOT
logger = logging.getLogger()
logger.disabled = True
# --------------------------------------------------------------------------
# Training routine
# --------------------------------------------------------------------------
def train(
    net_name,
    expt_name,
    training_dataset,
    validation_dataset,
    dropout_rate,
    memory_multiplier,
    num_epochs,
    minibatch_size,
    learning_rate,
    max_norm,
    use_truncated_bptt,
    num_features,
    num_outputs,
    model_folder,
    hidden_activation,
    output_activation,
    tf_config,
    additonal_info="",
    b_use_state_initialisation=False,
    b_use_seq2seq_feedback=False,
    b_use_seq2seq_training_mode=False,
    adapter_multiplier=0,
    b_use_memory_adapter=False,
):
    """
    Common training routine to all RNN models - seq2seq + standard

    Trains an RnnModel on `training_dataset`, evaluates on
    `validation_dataset` at the end of every epoch, and checkpoints the
    weights with the lowest validation loss seen so far.

    Notes:
    * training_dataset / validation_dataset are dict-of-array maps
      (keys used here: "scaled_inputs", "scaled_outputs", "active_entries",
      plus "initial_states" when b_use_state_initialisation is set).
    * Hidden layer size is memory_multiplier * num_features.
    * additonal_info is a free-text tag echoed in log lines.
      NOTE(review): the parameter name carries the original typo
      ("additonal"); renaming would break keyword callers.
    * If this hyperparameter combination was already trained, the stored
      results are returned immediately and training is skipped.

    Returns the dataframe of hyperparameter results for `net_name`.
    """
    min_epochs = 1  # never checkpoint before this many completed epochs
    tf.reset_default_graph()
    with tf.Graph().as_default(), tf.Session(config=tf_config) as sess:
        tf_data_train = convert_to_tf_dataset(training_dataset)
        tf_data_valid = convert_to_tf_dataset(validation_dataset)
        # Setup default hidden layer size
        hidden_layer_size = int(memory_multiplier * num_features)
        # Memory-adapter width is a multiple of the external state width.
        if b_use_state_initialisation:
            full_state_size = int(training_dataset["initial_states"].shape[-1])
            adapter_size = adapter_multiplier * full_state_size
        else:
            adapter_size = 0
        # Training simulation
        model_parameters = {
            "net_name": net_name,
            "experiment_name": expt_name,
            "training_dataset": tf_data_train,
            "validation_dataset": tf_data_valid,
            "test_dataset": None,
            "dropout_rate": dropout_rate,
            "input_size": num_features,
            "output_size": num_outputs,
            "hidden_layer_size": hidden_layer_size,
            "num_epochs": num_epochs,
            "minibatch_size": minibatch_size,
            "learning_rate": learning_rate,
            "max_norm": max_norm,
            "model_folder": model_folder,
            "hidden_activation": hidden_activation,
            "output_activation": output_activation,
            "backprop_length": 60, # backprop over 60 timesteps for truncated backpropagation through time
            "softmax_size": 0, # not used in this paper, but allows for categorical actions
            "performance_metric": "xentropy" if output_activation == "sigmoid" else "mse",
            "use_seq2seq_feedback": b_use_seq2seq_feedback,
            "use_seq2seq_training_mode": b_use_seq2seq_training_mode,
            "use_memory_adapter": b_use_memory_adapter,
            "memory_adapter_size": adapter_size,
        }
        # Get the right model
        model = RnnModel(model_parameters)
        serialisation_name = model.serialisation_name
        # Skip combinations that already have stored hyperparameter results.
        if helpers.hyperparameter_result_exists(model_folder, net_name, serialisation_name):
            logging.warning("Combination found: skipping {}".format(serialisation_name))
            return helpers.load_hyperparameter_results(model_folder, net_name)
        training_handles = model.get_training_graph(
            use_truncated_bptt=use_truncated_bptt, b_use_state_initialisation=b_use_state_initialisation
        )
        validation_handles = model.get_prediction_graph(
            use_validation_set=True, with_dropout=False, b_use_state_initialisation=b_use_state_initialisation
        )
        # Start optimising
        num_minibatches = int(np.ceil(training_dataset["scaled_inputs"].shape[0] / model_parameters["minibatch_size"]))
        i = 1  # global iteration counter (across epochs)
        epoch_count = 1
        step_count = 1  # minibatch counter within the current epoch
        min_loss = np.inf  # best validation loss seen so far
        with sess.as_default():
            sess.run(tf.global_variables_initializer())
            # Epoch -> validation-loss trajectory (also saved with checkpoints).
            optimisation_summary = pd.Series([])
            # Loop until the training dataset iterator is exhausted.
            while True:
                try:
                    loss, _ = sess.run([training_handles["loss"], training_handles["optimiser"]])
                    # Flog output
                    logging.info(
                        "Epoch {} | iteration = {} of {}, loss = {} | net = {} | info = {}".format(
                            epoch_count, step_count, num_minibatches, loss, model.net_name, additonal_info
                        )
                    )
                    # End of epoch: run a full pass over the validation set.
                    if step_count == num_minibatches:
                        # Reinit dataset
                        sess.run(validation_handles["initializer"])
                        means = []
                        UBs = []
                        LBs = []
                        while True:
                            try:
                                mean, upper_bound, lower_bound = sess.run(
                                    [
                                        validation_handles["mean"],
                                        validation_handles["upper_bound"],
                                        validation_handles["lower_bound"],
                                    ]
                                )
                                means.append(mean)
                                UBs.append(upper_bound)
                                LBs.append(lower_bound)
                            except tf.errors.OutOfRangeError:
                                break
                        means = np.concatenate(means, axis=0)
                        active_entries = validation_dataset["active_entries"]
                        output = validation_dataset["scaled_outputs"]
                        # Masked validation loss over active (non-padded) entries only.
                        if model_parameters["performance_metric"] == "mse":
                            validation_loss = np.sum((means - output) ** 2 * active_entries) / np.sum(active_entries)
                        elif model_parameters["performance_metric"] == "xentropy":
                            _, _, features_size = output.shape
                            partition_idx = features_size
                            # Do binary first
                            validation_loss = np.sum(
                                (
                                    output[:, :, :partition_idx] * -np.log(means[:, :, :partition_idx] + 1e-5)
                                    + (1 - output[:, :, :partition_idx])
                                    * -np.log(1 - means[:, :, :partition_idx] + 1e-5)
                                )
                                * active_entries[:, :, :partition_idx]
                            ) / (np.sum(active_entries[:, :, :partition_idx]))
                        optimisation_summary[epoch_count] = validation_loss
                        # Compute validation loss
                        logging.info(
                            "Epoch {} Summary| Validation loss = {} | net = {} | info = {}".format(
                                epoch_count, validation_loss, model.net_name, additonal_info
                            )
                        )
                        # Diverged: abort training entirely.
                        if np.isnan(validation_loss):
                            logging.warning("NAN Loss found, terminating routine")
                            break
                        # Save model and loss trajectories
                        if validation_loss < min_loss and epoch_count > min_epochs:
                            cp_name = serialisation_name + "_optimal"
                            helpers.save_network(sess, model_folder, cp_name, optimisation_summary)
                            min_loss = validation_loss
                        # Update
                        epoch_count += 1
                        step_count = 0
                    step_count += 1
                    i += 1
                except tf.errors.OutOfRangeError:
                    break
            # Save final
            cp_name = serialisation_name + "_final"
            helpers.save_network(sess, model_folder, cp_name, optimisation_summary)
            helpers.add_hyperparameter_results(optimisation_summary, model_folder, net_name, serialisation_name)
            hyperparam_df = helpers.load_hyperparameter_results(model_folder, net_name)
        logging.info("Terminated at iteration {}".format(i))
        sess.close()
    return hyperparam_df
# --------------------------------------------------------------------------
# Test routine
# --------------------------------------------------------------------------
def test(
    test_dataset,
    tf_config,
    net_name,
    expt_name,
    dropout_rate,
    num_features,
    num_outputs,
    memory_multiplier,
    num_epochs,
    minibatch_size,
    learning_rate,
    max_norm,
    hidden_activation,
    output_activation,
    model_folder,
    b_use_state_initialisation=False,
    b_dump_all_states=False,
    b_mse_by_time=False,
    b_use_seq2seq_feedback=False,
    b_use_seq2seq_training_mode=False,
    adapter_multiplier=0,
    b_use_memory_adapter=False,
):
    """
    Common test routine to all RNN models - seq2seq + standard

    Rebuilds the model with the same hyperparameters used in training,
    restores the "<serialisation_name>_optimal" checkpoint, and runs one
    full prediction pass over `test_dataset`.

    Returns (means, output, mse, states):
    * means  - concatenated predicted means over the whole test set
    * output - the reference "scaled_outputs" from `test_dataset`
    * mse    - masked MSE; per-timestep when b_mse_by_time, else a scalar
    * states - concatenated averaged RNN states (see b_dump_all_states)
    """
    # Start with graph
    tf.reset_default_graph()
    with tf.Session(config=tf_config) as sess:
        tf_data_test = convert_to_tf_dataset(test_dataset)
        # For decoder training with external state inputs
        if b_use_state_initialisation:
            full_state_size = int(test_dataset["initial_states"].shape[-1])
            adapter_size = adapter_multiplier * full_state_size
        else:
            adapter_size = 0
        # Training simulation
        # (test data is wired into all three dataset slots on purpose).
        model_parameters = {
            "net_name": net_name,
            "experiment_name": expt_name,
            "training_dataset": tf_data_test,
            "validation_dataset": tf_data_test,
            "test_dataset": tf_data_test,
            "dropout_rate": dropout_rate,
            "input_size": num_features,
            "output_size": num_outputs,
            "hidden_layer_size": int(memory_multiplier * num_features),
            "num_epochs": num_epochs,
            "minibatch_size": minibatch_size,
            "learning_rate": learning_rate,
            "max_norm": max_norm,
            "model_folder": model_folder,
            "hidden_activation": hidden_activation,
            "output_activation": output_activation,
            "backprop_length": 60,
            # Length for truncated backpropagation over time, matches max time steps here.
            "softmax_size": 0, # not used in this paper, but allows for categorical actions
            "performance_metric": "xentropy" if output_activation == "sigmoid" else "mse",
            "use_seq2seq_feedback": b_use_seq2seq_feedback,
            "use_seq2seq_training_mode": b_use_seq2seq_training_mode,
            "use_memory_adapter": b_use_memory_adapter,
            "memory_adapter_size": adapter_size,
        }
        # Start optimising
        with sess.as_default():
            sess.run(tf.global_variables_initializer())
            # Get the right model
            model = RnnModel(model_parameters)
            handles = model.get_prediction_graph(
                # NOTE(review): None (not True/False) is deliberately passed
                # for treatment_rnn nets - confirm in get_prediction_graph.
                use_validation_set=False if "treatment_rnn" not in net_name else None,
                with_dropout=False,
                b_use_state_initialisation=b_use_state_initialisation,
                b_dump_all_states=b_dump_all_states,
            )
            # Load checkpoint
            serialisation_name = model.serialisation_name
            cp_name = serialisation_name + "_optimal"
            _ = helpers.load_network(sess, model_folder, cp_name)
            # Init
            sess.run(handles["initializer"])
            # Get all the data out in chunks
            means = []
            UBs = []
            LBs = []
            states = []
            while True:
                try:
                    mean, upper_bound, lower_bound, ave_states = sess.run(
                        [handles["mean"], handles["upper_bound"], handles["lower_bound"], handles["ave_states"]]
                    )
                    means.append(mean)
                    UBs.append(upper_bound)
                    LBs.append(lower_bound)
                    states.append(ave_states)
                except tf.errors.OutOfRangeError:
                    break
            means = np.concatenate(means, axis=0)
            states = np.concatenate(states, axis=0)
            # NOTE(review): both branches of these two ternaries are
            # identical, so the treatment_rnn special case is a no-op -
            # possibly a leftover from an older code path.
            active_entries = (
                test_dataset["active_entries"] if net_name != "treatment_rnn" else test_dataset["active_entries"]
            )
            output = test_dataset["scaled_outputs"] if net_name != "treatment_rnn" else test_dataset["scaled_outputs"]
            # prediction_map[net_name] = means
            # output_map[net_name] = output
            # Masked MSE: per-timestep profile, or a single scalar.
            if b_mse_by_time:
                mse = np.sum((means - output) ** 2 * active_entries, axis=0) / np.sum(active_entries, axis=0)
            else:
                mse = np.sum((means - output) ** 2 * active_entries) / np.sum(active_entries)
            # results[net_name] = mse
            # print(net_name, mse)
    sess.close()
    return means, output, mse, states
# --------------------------------------------------------------------------
# Data processing functions
# --------------------------------------------------------------------------
def convert_to_tf_dataset(dataset_map):
    """Wrap the arrays of a dataset map in a ``tf.data.Dataset``.

    Always carries inputs, outputs, active entries and sequence lengths;
    propensity weights and initial states are forwarded only when present
    in ``dataset_map``.
    """
    tensors = {
        "inputs": dataset_map["scaled_inputs"],
        "outputs": dataset_map["scaled_outputs"],
        "active_entries": dataset_map["active_entries"],
        "sequence_lengths": dataset_map["sequence_lengths"],
    }
    # Optional components are passed through unchanged when supplied.
    for optional_key in ("propensity_weights", "initial_states"):
        if optional_key in dataset_map:
            tensors[optional_key] = dataset_map[optional_key]
    return tf.data.Dataset.from_tensor_slices(tensors)
def get_processed_data(dataset, b_predict_actions, b_use_actions_only):
    """Assemble the (inputs, outputs, actions) arrays for RNN training.

    * b_predict_actions: target the treatments themselves (propensity
      networks) instead of the observed outcomes.
    * b_use_actions_only: feed only previous treatments as inputs
      (only meaningful together with b_predict_actions).

    Returns a dict with "scaled_inputs", "scaled_outputs", "actions",
    "sequence_lengths" and "active_entries".
    """
    prev_treatments = dataset["previous_treatments"]
    curr_treatments = dataset["current_treatments"]
    covariates = dataset["current_covariates"]

    # Parcelling INPUTS / ACTIONS
    if b_predict_actions and b_use_actions_only:
        inputs = prev_treatments
    elif b_predict_actions:
        # Uses current covariate, to remove confounding effects between
        # action and current value.
        inputs = np.concatenate([covariates, prev_treatments], axis=2)
    else:
        inputs = np.concatenate([covariates, curr_treatments], axis=2)
    actions = prev_treatments if b_predict_actions else curr_treatments

    # Parcelling OUTPUTS: the treatments themselves when predicting
    # actions, otherwise the observed outcomes.
    outputs = curr_treatments if b_predict_actions else dataset["outputs"]

    return {
        "scaled_inputs": inputs,
        "scaled_outputs": outputs,
        "actions": actions,
        "sequence_lengths": dataset["sequence_lengths"],
        "active_entries": dataset["active_entries"],
    }
|
python
|
#####################################
# ColumnRelationships.py
#####################################
# Description:
# * Map all relationships between columns.
from abc import abstractmethod, ABC
from enum import Enum
from itertools import combinations, product
import numpy as np
from pandas import DataFrame
from sortedcontainers import SortedDict
class ColumnRelationships(object):
    """
    * Immutable object that store relationships between columns.
    Relationships are stored one-directional: {col_a : {col_b : ColumnRelationship}}
    for each pair produced by itertools.combinations over the column order.
    """
    def __init__(self, data):
        """
        Inputs:
        * data: Expecting dataframe of columns.
        """
        self.__relationships = ColumnRelationships.MapRelationships(data)
    ###############
    # Properties:
    ###############
    @property
    def Relationships(self):
        """
        * Nested mapping {col_a : {col_b : ColumnRelationship}}.
        """
        return self.__relationships
    ###############
    # Interface Methods:
    ###############
    def ToDataFrame(self, countinfo = True):
        """
        * Return full symmetric dataframe matrix as representation of object.
        Inputs:
        * countinfo: If True, fills cell with "leftcount_rightcount", else fills with
        full relationship name, ex "one_one".
        """
        return ColumnRelationships.__AsDataFrame(self.__relationships, countinfo)
    @classmethod
    def MapRelationships(cls, data):
        """
        * Map all relationships between columns in passed data.
        Inputs:
        * data: Expecting dataframe of columns.
        Output:
        * Returns lower triangular Dataframe of relationships with columns as dimensions.
        """
        results = SortedDict({col : {} for col in data.columns})
        # combinations() never pairs a column with itself, so the old
        # `comb[0] != comb[1]` guard was dead code and has been dropped.
        for left_col, right_col in combinations(data.columns, 2):
            results[left_col][right_col] = cls.__MapRelationships(data, left_col, right_col)
        return results
    ###############
    # Private Helpers:
    ###############
    @classmethod
    def __MapRelationships(cls, data, col1, col2):
        """
        * Determine if one-to-one/one-to-many/many-to-one/many-to-many relationship exists
        between columns.
        """
        # Unique (col1, col2) pairs; the max group size on either side gives
        # the cardinality of that side of the relationship.
        colset = data[[col1, col2]].drop_duplicates([col1, col2])
        # BUG FIX: `Series[0]` relied on the deprecated positional fallback of
        # label-based indexing (removed in pandas 2.x); use .iloc[0].
        left_max = colset.groupby(col1).count().max().iloc[0]
        right_max = colset.groupby(col2).count().max().iloc[0]
        if left_max == 1:
            enum = RelationshipEnum.ONE_TO_ONE if right_max == 1 else RelationshipEnum.ONE_TO_MANY
        else:
            enum = RelationshipEnum.MANY_TO_ONE if right_max == 1 else RelationshipEnum.MANY_TO_MANY
        return ColumnRelationship(enum, left_max, right_max)
    @staticmethod
    def __AsDataFrame(data, countinfo):
        """
        * Convert stored dictionary into symmetric DataFrame.
        """
        sorted_keys = sorted(list(data.keys()))
        newdata = SortedDict()
        for key in sorted_keys:
            newdata[key] = SortedDict()
            for subkey in sorted_keys:
                if subkey == key:
                    # Diagonal cells mark identity.
                    newdata[key][subkey] = '='
                elif subkey not in data[key]:
                    # Only one orientation of each pair is stored; mirror the
                    # transposed cell through Reverse().
                    rel = data[subkey][key].Reverse()
                    newdata[key][subkey] = rel.CountStr if countinfo else rel.TypeStr
                else:
                    rel = data[key][subkey]
                    newdata[key][subkey] = rel.CountStr if countinfo else rel.TypeStr
        return DataFrame(newdata)
class RelationshipEnum(Enum):
    """Cardinality of a (left column -> right column) relationship."""
    ONE_TO_ONE = 0
    ONE_TO_MANY = 1
    MANY_TO_ONE = 2
    MANY_TO_MANY = 3
class ColumnRelationship:
    """
    * Immutable class representing relationship
    between two columns.
    """
    # printf template producing "<leftcount>_<rightcount>".
    __countstr = '%d_%d'
    __typestrs = { RelationshipEnum.ONE_TO_ONE: "one_one",
                   RelationshipEnum.ONE_TO_MANY : "one_many",
                   RelationshipEnum.MANY_TO_MANY : "many_many",
                   RelationshipEnum.MANY_TO_ONE : "many_one" }
    def __init__(self, enum, leftcount, rightcount):
        """
        Inputs:
        * enum: RelationshipEnum describing the cardinality.
        * leftcount/rightcount: max group-by counts for the left/right column.
        """
        self.__type = enum
        self.__leftcount = leftcount
        self.__rightcount = rightcount
    def __eq__(self, val):
        # BUG FIX: comparing against a non-ColumnRelationship used to raise
        # AttributeError on val.Type; return NotImplemented per protocol.
        if not isinstance(val, ColumnRelationship):
            return NotImplemented
        # Equality intentionally compares only the relationship type,
        # not the counts (original semantics preserved).
        return self.__type == val.Type
    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made this documented-
        # immutable class unhashable; hash is consistent with __eq__.
        return hash(self.__type)
    def __repr__(self):
        return '%s(%r, %r, %r)' % (type(self).__name__, self.__type, self.__leftcount, self.__rightcount)
    def __str__(self):
        """
        * Return CountStr by default.
        """
        return self.CountStr
    #############
    # Properties:
    #############
    @property
    def CountStr(self):
        """
        * Return string of form "<KeyGroupByCount>_<ValueGroupByCount>".
        """
        return self.__countstr % (self.__leftcount, self.__rightcount)
    @property
    def Type(self):
        """
        * Return type enumeration.
        """
        return self.__type
    @property
    def TypeStr(self):
        """
        * Type enumeration in string form (ex: "one_one").
        """
        return ColumnRelationship.__typestrs[self.__type]
    ##############
    # Interface Methods:
    ##############
    def Reverse(self):
        """
        * Return new relationship with reversed characteristics.
        """
        enum = self.__type
        if self.__type == RelationshipEnum.MANY_TO_ONE:
            enum = RelationshipEnum.ONE_TO_MANY
        elif self.__type == RelationshipEnum.ONE_TO_MANY:
            enum = RelationshipEnum.MANY_TO_ONE
        return ColumnRelationship(enum, self.__rightcount, self.__leftcount)
|
python
|
from collections import OrderedDict
from elasticsearch.exceptions import RequestError
from elasticsearch_dsl import Search
import settings
from core.cursor import decode_cursor, get_next_cursor
from core.exceptions import (APIPaginationError, APIQueryParamsError,
APISearchError)
from core.filter import filter_records
from core.group_by import (get_group_by_results,
get_group_by_results_external_ids,
get_group_by_results_transform, group_by_records,
group_by_records_transform, is_transform)
from core.paginate import Paginate
from core.search import check_is_search_query, full_search
from core.sort import sort_records
from core.utils import (get_field, map_filter_params, map_sort_params,
set_number_param)
from core.validate import validate_params
def shared_view(request, fields_dict, index_name, default_sort):
    """Primary function used to search, filter, and aggregate across all five entities.

    Builds an elasticsearch-dsl Search from the request's query params
    (filter / search / sort / group_by / pagination or cursor), executes
    it, and returns an OrderedDict with "meta", "results" and "group_by"
    sections ready for serialization.
    """
    # params
    validate_params(request)
    cursor = request.args.get("cursor")
    filter_params = map_filter_params(request.args.get("filter"))
    group_by = request.args.get("group_by") or request.args.get("group-by")
    page = set_number_param(request, "page", 1)
    # Group-by responses default to a larger page (all buckets), plain
    # listings default to 25.
    per_page = (
        set_number_param(request, "per-page", 25)
        if not group_by
        else set_number_param(request, "per-page", 200)
    )
    search = request.args.get("search")
    sort_params = map_sort_params(request.args.get("sort"))
    s = Search(index=index_name)
    # pagination
    paginate = Paginate(group_by, page, per_page)
    paginate.validate()
    if group_by:
        # Aggregation-only query: no hits needed.
        s = s.extra(size=0)
    else:
        s = s.extra(size=per_page)
    # Cursor-based and page-based pagination are mutually exclusive.
    if cursor and page != 1:
        raise APIPaginationError("Cannot use page parameter with cursor.")
    if cursor and cursor != "*":
        decoded_cursor = decode_cursor(cursor)
        s = s.extra(search_after=decoded_cursor)
    # search
    if search and search != '""':
        s = full_search(index_name, s, search)
    # filter
    if filter_params:
        s = filter_records(fields_dict, filter_params, s)
    # sort
    is_search_query = check_is_search_query(filter_params, search)
    # do not allow sorting by relevance score without search query
    if not is_search_query and sort_params and "relevance_score" in sort_params:
        raise APIQueryParamsError(
            "Must include a search query (such as ?search=example or /filter=display_name.search:example) in order to sort by relevance_score."
        )
    if sort_params:
        s = sort_records(fields_dict, group_by, sort_params, s)
    elif is_search_query and not sort_params and index_name.startswith("works"):
        # Default search ordering for works: relevance, then recency.
        s = s.sort("_score", "publication_date", "id")
    elif is_search_query and not sort_params:
        s = s.sort("_score", "-works_count", "id")
    elif not group_by:
        s = s.sort(*default_sort)
    # group by
    transform = False
    if group_by:
        field = get_field(fields_dict, group_by)
        transform = is_transform(field, index_name, filter_params)
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # DateField OR (RangeField AND param not in {publication_year, level}).
        # DateField therefore has no exemptions - confirm that is intended.
        if (
            type(field).__name__ == "DateField"
            or type(field).__name__ == "RangeField"
            and field.param != "publication_year"
            and field.param != "level"
        ):
            raise APIQueryParamsError("Cannot group by date or number fields.")
        elif field.param == "referenced_works":
            raise APIQueryParamsError(
                "Group by referenced_works is not supported at this time."
            )
        elif field.param == "cited_by" or field.param == "related_to":
            raise APIQueryParamsError("Cannot group cited_by or related_to filters.")
        if transform:
            s = group_by_records_transform(field, index_name, sort_params)
        else:
            s = group_by_records(field, s, sort_params)
    if not group_by:
        try:
            response = s[paginate.start : paginate.end].execute()
        except RequestError as e:
            # Malformed search_after payloads surface as RequestError.
            if "search_after has" in str(e) and "but sort has" in str(e):
                raise APIPaginationError("Cursor value is invalid.")
            else:
                raise APISearchError("Something went wrong.")
        count = s.count()
    else:
        response = s.execute()
        if group_by in settings.EXTERNAL_ID_FIELDS:
            # External-id groupings always report exactly two buckets.
            count = 2
        elif transform:
            count = len(response)
        else:
            count = len(response.aggregations.groupby.buckets)
    result = OrderedDict()
    result["meta"] = {
        "count": count,
        "db_response_time_ms": response.took,
        "page": page if not cursor else None,
        "per_page": 200 if group_by else per_page,
    }
    result["results"] = []
    if cursor:
        result["meta"]["next_cursor"] = get_next_cursor(response)
    if group_by:
        if group_by in settings.EXTERNAL_ID_FIELDS:
            result["group_by"] = get_group_by_results_external_ids(response)
        elif transform:
            result["group_by"] = get_group_by_results_transform(group_by, response)
        else:
            result["group_by"] = get_group_by_results(group_by, response)
    else:
        result["group_by"] = []
        result["results"] = response
    if settings.DEBUG:
        print(s.to_dict())
    return result
|
python
|
from setuptools import setup, find_packages
import sys
import platform
# python version check
python_min_version = (3, 6, 2)
python_min_version_str = '.'.join(map(str, python_min_version))
if sys.version_info < python_min_version:
    print(
        f"You are using Python {platform.python_version()}. At least Python >={python_min_version_str} is required.")
    sys.exit(-1)

setup(
    name='gooogloo',
    version='0.0.1',
    author='sansmoraxz',
    # Discover packages automatically (resolves the old TODO) instead of
    # hand-maintaining a list that drifts when subpackages are added.
    packages=find_packages(include=['gooogloo', 'gooogloo.*']),
    url='https://github.com/sansmoraxz/py-gooogloo',
    license='LICENSE',
    description='Easy google search for python'
)
|
python
|
import pytest
from spacy.lang.ja import Japanese
def test_ja_morphologizer_factory():
    """The morphologizer added to a Japanese pipeline should default to
    cfg["extend"] = True."""
    # Japanese tokenization is backed by sudachipy; skip when unavailable.
    pytest.importorskip("sudachipy")
    nlp = Japanese()
    morphologizer = nlp.add_pipe("morphologizer")
    assert morphologizer.cfg["extend"] is True
|
python
|
from base import CodeTyper, SNIPPETS_ROOT, COMMAND
import panel as pn
# Configure panel so widgets stretch to the available horizontal width.
pn.extension(sizing_mode="stretch_width")
# Serve a CodeTyper walkthrough of the linked-brushing snippet, with the
# install command shown alongside the snippet file.
CodeTyper(
    title="# Cross Filtering with hvPlot, Holoviews and PANEL",
    value=SNIPPETS_ROOT/"holoviews_linked_brushing_app.py",
    command="$ pip install panel holoviews hvplot shapely\n" + COMMAND,
    accent_base_color="#ff286e"
).servable()
|
python
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
'''
Transcoder classes to be used in combination with a Coherence MediaServer,
using GStreamer pipelines for the actually work and feeding the output into
a http response.
'''
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gi.repository import GObject
Gst.init(None)
import os.path
import urllib.request
import urllib.parse
import urllib.error
from twisted.web import resource, server
from twisted.internet import protocol
from coherence import log
import struct
def get_transcoder_name(transcoder):
    """Return the identifying ``name`` attribute of a transcoder
    class or instance."""
    transcoder_name = transcoder.name
    return transcoder_name
class InternalTranscoder(object):
    '''Just a class to inherit from and which we can look
    for upon creating our list of available transcoders.

    Marker base class only: carries no behaviour of its own.'''
class FakeTransformer(Gst.Element, log.LogAble):
    """Pass-through Gst element that buffers the stream until the first
    MP4 box is complete, replaces it with a fake 'ftyp' header, then
    proxies all subsequent buffers unchanged."""
    logCategory = 'faker_datasink'
    _sinkpadtemplate = Gst.PadTemplate.new(
        'sinkpadtemplate',
        Gst.PadDirection.SINK,
        Gst.PadPresence.ALWAYS,
        Gst.Caps.new_any(),
    )
    _srcpadtemplate = Gst.PadTemplate.new(
        'srcpadtemplate',
        Gst.PadDirection.SRC,
        Gst.PadPresence.ALWAYS,
        Gst.Caps.new_any(),
    )

    def __init__(self, destination=None, request=None):
        Gst.Element.__init__(self)
        log.LogAble.__init__(self)
        self.sinkpad = Gst.Pad.new_from_template(self._sinkpadtemplate, 'sink')
        self.srcpad = Gst.Pad.new_from_template(self._srcpadtemplate, 'src')
        self.add_pad(self.sinkpad)
        self.add_pad(self.srcpad)
        self.sinkpad.set_chain_function_full(self.chainfunc)
        # BUG FIX: stream data is binary - initialising with '' (str) made
        # the first bytes concatenation in chainfunc raise TypeError.
        self.buffer = b''
        self.buffer_size = 0
        self.proxy = False
        self.got_new_segment = False
        self.closed = False

    @staticmethod
    def get_fake_header():
        """Return a fake 32-byte MP4 'ftyp' box (4-byte size + 4-byte type
        + 24-byte payload)."""
        # BUG FIX: struct's '4s' format requires bytes in Python 3;
        # passing the str 'ftyp' raised struct.error.
        return (
            struct.pack('>L4s', 32, b'ftyp')
            + b'mp42\x00\x00\x00\x00mp42mp41isomiso2'
        )

    def chainfunc(self, pad, buffer):
        """Accumulate data until the first box is complete, push the fake
        header plus the remainder, then switch to proxy mode."""
        if self.proxy:
            # we are in proxy mode already
            self.srcpad.push(buffer)
            return Gst.FlowReturn.OK
        # NOTE(review): Gst.Buffer exposes extract_dup() rather than a
        # `.data` attribute in GI 1.0 - confirm against the bindings in use.
        self.buffer = self.buffer + buffer.data
        if not self.buffer_size:
            try:
                # First 8 bytes: big-endian box size + 4-char box type.
                self.buffer_size, a_type = struct.unpack(
                    '>L4s', self.buffer[:8]
                )
            except Exception:
                # Not enough data to read the header yet.
                return Gst.FlowReturn.OK
        if len(self.buffer) < self.buffer_size:
            # we need to buffer more
            return Gst.FlowReturn.OK
        buffer = self.buffer[self.buffer_size :]
        fake_header = self.get_fake_header()
        n_buf = Gst.Buffer(fake_header + buffer)
        self.proxy = True
        self.srcpad.push(n_buf)
        return Gst.FlowReturn.OK
GObject.type_register(FakeTransformer)
class DataSink(Gst.Element, log.LogAble):
    """Terminal sink element that forwards the decoded byte stream either
    to a file (``destination``) or to a twisted http ``request`` (buffered
    in ~200 kB chunks), closing/finishing the target on EOS."""
    logCategory = 'transcoder_datasink'
    _sinkpadtemplate = Gst.PadTemplate.new(
        'sinkpadtemplate',
        Gst.PadDirection.SINK,
        Gst.PadPresence.ALWAYS,
        Gst.Caps.new_any(),
    )

    def __init__(self, destination=None, request=None):
        Gst.Element.__init__(self)
        log.LogAble.__init__(self)
        self.sinkpad = Gst.Pad.new_from_template(self._sinkpadtemplate, 'sink')
        self.add_pad(self.sinkpad)
        self.sinkpad.set_chain_function_full(self.chainfunc)
        self.sinkpad.set_event_function_full(self.eventfunc)
        self.destination = destination
        self.request = request
        if self.destination is not None:
            self.destination = open(self.destination, 'wb')
        # BUG FIX: stream data is binary - initialising with '' (str) made
        # the first `self.buffer += buf_data` raise TypeError.
        self.buffer = b''
        self.data_size = 0
        self.got_new_segment = False
        self.closed = False

    def chainfunc(self, pad, inst, buffer):
        """Receive one buffer and route its bytes to the configured target."""
        size = buffer.get_size()
        buf_data = buffer.extract_dup(0, size)
        if not isinstance(buf_data, bytes):
            # BUG FIX: the old code called `buffer.encode('ascii')` (wrong
            # object, no such method) and discarded the result; coerce the
            # extracted payload itself instead.
            buf_data = bytes(buf_data)
        if self.closed:
            return Gst.FlowReturn.OK
        if self.destination is not None:
            self.destination.write(buf_data)
        elif self.request is not None:
            self.buffer += buf_data
            # Flush to the http response in large chunks only.
            if len(self.buffer) > 200000:
                self.request.write(self.buffer)
                self.buffer = b''
        else:
            # BUG FIX: accumulate the extracted bytes; the old `buffer.data`
            # attribute does not exist on Gst.Buffer in GI 1.0.
            self.buffer += buf_data
        self.data_size += size
        return Gst.FlowReturn.OK

    def eventfunc(self, pad, inst, event):
        """Track stream start / EOS: a second stream-start closes the sink,
        EOS flushes and finishes the target."""
        if event.type == Gst.Event.new_stream_start('').type:
            if not self.got_new_segment:
                self.got_new_segment = True
            else:
                self.closed = True
        elif event.type == Gst.Event.new_eos().type:
            if self.destination is not None:
                self.destination.close()
            elif self.request is not None:
                if len(self.buffer) > 0:
                    self.request.write(self.buffer)
                self.request.finish()
        return True
GObject.type_register(DataSink)
class GStreamerPipeline(resource.Resource, log.LogAble):
    """Twisted resource that runs a caller-supplied GStreamer pipeline and
    streams the output of its appsink to every attached HTTP request.

    If the stream has a streamheader (data that must be prepended before
    any payload), it is cached and replayed to late-joining requests.

    NOTE(review): 'new-buffer'/'pull-buffer' and ``buffer.data`` /
    ``buffer.get_caps()`` are GStreamer 0.10-era APIs; confirm they exist
    on the bound Gst version (1.0 uses 'new-sample'/'pull-sample').
    """

    logCategory = 'gstreamer'
    addSlash = True

    def __init__(self, pipeline, content_type):
        # pipeline: parse_launch description; must contain an element
        # named 'enc' whose output is streamed (see parse_pipeline)
        self.pipeline_description = pipeline
        self.contentType = content_type
        self.requests = []
        # if stream has a streamheader (something that has to be prepended
        # before any data), then it will be a tuple of GstBuffers
        self.streamheader = None
        self.parse_pipeline()
        resource.Resource.__init__(self)
        log.LogAble.__init__(self)

    def parse_pipeline(self):
        """Build the pipeline and attach a signal-emitting appsink to 'enc'."""
        self.pipeline = Gst.parse_launch(self.pipeline_description)
        self.appsink = Gst.ElementFactory.make('appsink', 'sink')
        self.appsink.set_property('emit-signals', True)
        self.pipeline.add(self.appsink)
        enc = self.pipeline.get_by_name('enc')
        enc.link(self.appsink)
        self.appsink.connect('new-preroll', self.new_preroll)
        self.appsink.connect('new-buffer', self.new_buffer)
        self.appsink.connect('eos', self.eos)

    def start(self, request=None):
        """Begin playback and tie pipeline lifetime to *request*."""
        self.info(
            f'GStreamerPipeline start {request} {self.pipeline_description}'
        )
        self.requests.append(request)
        self.pipeline.set_state(Gst.State.PLAYING)

        d = request.notifyFinish()
        d.addBoth(self.requestFinished, request)

    def new_preroll(self, appsink):
        """Forward the preroll buffer (and any streamheader) to clients."""
        self.debug('new preroll')
        buffer = appsink.emit('pull-preroll')
        if not self.streamheader:
            # check caps for streamheader buffer
            caps = buffer.get_caps()
            s = caps[0]
            if 'streamheader' in s:
                self.streamheader = s['streamheader']
                self.debug('setting streamheader')
                for r in self.requests:
                    self.debug('writing streamheader')
                    for h in self.streamheader:
                        r.write(h.data)
        for r in self.requests:
            self.debug('writing preroll')
            r.write(buffer.data)

    def new_buffer(self, appsink):
        """Forward one decoded buffer to every attached request."""
        buffer = appsink.emit('pull-buffer')
        if not self.streamheader:
            # check caps for streamheader buffers
            caps = buffer.get_caps()
            s = caps[0]
            if 'streamheader' in s:
                self.streamheader = s['streamheader']
                self.debug('setting streamheader')
                for r in self.requests:
                    self.debug('writing streamheader')
                    for h in self.streamheader:
                        r.write(h.data)
        for r in self.requests:
            r.write(buffer.data)

    def eos(self, appsink):
        """End of stream: finish all requests and tear the pipeline down."""
        self.info('eos')
        for r in self.requests:
            r.finish()
        self.cleanup()

    def getChild(self, name, request):
        # every child path resolves back to this resource
        self.info(f'getChild {name}, {request}')
        return self

    def render_GET(self, request):
        """Attach *request* to the running pipeline, or (re)start it."""
        self.info(f'render GET {request}')
        request.setResponseCode(200)
        if hasattr(self, 'contentType'):
            request.setHeader(b'Content-Type', self.contentType)
        request.write(b'')

        headers = request.getAllHeaders()
        # NOTE(review): twisted header dicts are keyed by bytes — this str
        # lookup probably never matches; the branch is a no-op anyway
        if 'connection' in headers and headers['connection'] == 'close':
            pass
        if self.requests:
            # pipeline already running: replay the header, join the stream
            if self.streamheader:
                self.debug('writing streamheader')
                for h in self.streamheader:
                    request.write(h.data)
            self.requests.append(request)
        else:
            self.parse_pipeline()
            self.start(request)
        return server.NOT_DONE_YET

    def render_HEAD(self, request):
        self.info(f'render HEAD {request}')
        request.setResponseCode(200)
        request.setHeader(b'Content-Type', self.contentType)
        request.write(b'')

    def requestFinished(self, result, request):
        """Drop a finished/aborted request; stop the pipeline when none remain."""
        self.info(f'requestFinished {result}')
        # TODO: we need to find a way to destroy the pipeline here
        # from twisted.internet import reactor
        # reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL)
        self.requests.remove(request)
        if not self.requests:
            self.cleanup()

    def on_message(self, bus, message):
        # NOTE(review): Gst 1.0 spells these Gst.MessageType.ERROR/EOS —
        # confirm these attribute lookups are valid on the bound version
        t = message.type
        print('on_message', t)
        if t == Gst.Message.ERROR:
            # err, debug = message.parse_error()
            # print(f'Error: {err}', debug)
            self.cleanup()
        elif t == Gst.Message.EOS:
            self.cleanup()

    def cleanup(self):
        """Stop the pipeline and reset per-stream state."""
        self.info('pipeline cleanup')
        self.pipeline.set_state(Gst.State.NULL)
        self.requests = []
        self.streamheader = None
class BaseTranscoder(resource.Resource, log.LogAble):
    """Base class for twisted resources that transcode a source uri.

    Subclasses set a class-level ``contentType`` and ``name`` and
    implement :meth:`start` to build and run their pipeline.
    """

    logCategory = 'transcoder'
    addSlash = True
    # fallback when neither the subclass nor the constructor provides one
    contentType = None

    def __init__(self, uri, destination=None, content_type=None):
        if uri[:7] not in ['file://', 'http://']:
            # NOTE(review): this also rewrites https:// uris into file:// —
            # presumably only local paths and http are expected; confirm
            uri = 'file://' + urllib.parse.quote(uri)  # FIXME
        self.uri = uri
        self.destination = destination
        # fix: was `self.contentType = None` unconditionally, which
        # shadowed the class-level contentType of every subclass with
        # None; only override when a content type was explicitly passed
        if content_type is not None:
            self.contentType = content_type
        self.pipeline = None
        resource.Resource.__init__(self)
        log.LogAble.__init__(self)
        self.info(f'uri {uri} {type(uri)}')

    def getChild(self, name, request):
        # every child path resolves back to this resource
        self.info(f'getChild {name}, {request}')
        return self

    def render_GET(self, request):
        """Start the transcode and stream the result to the client."""
        self.info(f'render GET {request}')
        request.setResponseCode(200)
        if self.contentType is not None:
            request.setHeader(b'Content-Type', self.contentType)
        request.write(b'')

        headers = request.getAllHeaders()
        # NOTE(review): twisted header dicts are keyed by bytes — this str
        # lookup probably never matches; the branch is a no-op anyway
        if 'connection' in headers and headers['connection'] == 'close':
            pass
        self.start(request)
        return server.NOT_DONE_YET

    def render_HEAD(self, request):
        self.info(f'render HEAD {request}')
        request.setResponseCode(200)
        request.setHeader(b'Content-Type', self.contentType)
        request.write(b'')

    def requestFinished(self, result):
        self.info(f'requestFinished {result}')
        ''' we need to find a way to destroy the pipeline here
        '''
        # from twisted.internet import reactor
        # reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL)
        GObject.idle_add(self.cleanup)

    def on_message(self, bus, message):
        t = message.type
        print('on_message', t)
        if t == Gst.Message.ERROR:
            # err, debug = message.parse_error()
            # print(f'Error: {err}', debug)
            self.cleanup()
        elif t == Gst.Message.EOS:
            self.cleanup()

    def cleanup(self):
        self.pipeline.set_state(Gst.State.NULL)

    def start(self, request=None):
        '''This method should be sub classed for each
        class which inherits from BaseTranscoder'''
        pass
class PCMTranscoder(BaseTranscoder, InternalTranscoder):
    # raw 16-bit big-endian stereo PCM at 44.1 kHz (LPCM profile)
    contentType = 'audio/L16;rate=44100;channels=2'
    name = 'lpcm'

    def start(self, request=None):
        """Decode the source and stream it as raw 16-bit/44.1 kHz PCM."""
        self.info(f'PCMTranscoder start {request} {self.uri}')
        self.pipeline = Gst.parse_launch(
            f'{self.uri} ! decodebin ! audioconvert name=conv'
        )

        conv = self.pipeline.get_by_name('conv')
        # NOTE(review): 'audio/x-raw-int' caps are GStreamer 0.10 syntax;
        # Gst 1.0 spells this 'audio/x-raw,format=S16BE' — confirm version
        caps = Gst.Caps.from_string(
            'audio/x-raw-int,rate=44100,endianness=4321,'
            + 'channels=2,width=16,depth=16,signed=true'
        )
        # FIXME: UGLY. 'filter' is a python builtin!
        filter = Gst.ElementFactory.make('capsfilter', 'filter')
        filter.set_property('caps', caps)
        self.pipeline.add(filter)
        conv.link(filter)
        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        filter.link(sink)
        self.pipeline.set_state(Gst.State.PLAYING)

        # tear the pipeline down when the client disconnects
        d = request.notifyFinish()
        d.addBoth(self.requestFinished)
class WAVTranscoder(BaseTranscoder, InternalTranscoder):
    """Wrap the decoded source in a WAV container and stream it."""

    contentType = 'audio/x-wav'
    name = 'wav'

    def start(self, request=None):
        """Build decode -> audioconvert -> wavenc and play into a DataSink."""
        self.info(f'start {request}')
        self.pipeline = Gst.parse_launch(
            f'{self.uri} ! decodebin ! audioconvert ! wavenc name=enc'
        )
        encoder = self.pipeline.get_by_name('enc')
        data_sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(data_sink)
        encoder.link(data_sink)
        # bus = self.pipeline.get_bus()
        # bus.connect('message', self.on_message)
        self.pipeline.set_state(Gst.State.PLAYING)
        # stop transcoding when the client goes away
        finished = request.notifyFinish()
        finished.addBoth(self.requestFinished)
class MP3Transcoder(BaseTranscoder, InternalTranscoder):
    """Re-encode the decoded source as MP3 via lame and stream it."""

    contentType = 'audio/mpeg'
    name = 'mp3'

    def start(self, request=None):
        """Build decode -> audioconvert -> lame and play into a DataSink."""
        self.info(f'start {request}')
        self.pipeline = Gst.parse_launch(
            f'{self.uri} ! decodebin ! audioconvert ! lame name=enc'
        )
        encoder = self.pipeline.get_by_name('enc')
        data_sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(data_sink)
        encoder.link(data_sink)
        self.pipeline.set_state(Gst.State.PLAYING)
        # stop transcoding when the client goes away
        finished = request.notifyFinish()
        finished.addBoth(self.requestFinished)
class MP4Transcoder(BaseTranscoder, InternalTranscoder):
    ''' Only works if H264 inside Quicktime/MP4 container is input
    Source has to be a valid uri
    '''
    contentType = 'video/mp4'
    name = 'mp4'

    def start(self, request=None):
        """Remux H264 from a Quicktime/MP4 source into a fresh MP4 stream
        (no re-encode: qtdemux -> h264parse -> mp4mux)."""
        self.info(f'start {request}')
        self.pipeline = Gst.parse_launch(
            f'{self.uri} ! qtdemux name=d ! queue ! h264parse '
            + f'! mp4mux name=mux d. ! queue ! mux.'
        )
        mux = self.pipeline.get_by_name('mux')
        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        mux.link(sink)
        self.pipeline.set_state(Gst.State.PLAYING)

        # tear the pipeline down when the client disconnects
        d = request.notifyFinish()
        d.addBoth(self.requestFinished)
class MP2TSTranscoder(BaseTranscoder, InternalTranscoder):
    # MPEG-2 video + twolame audio in an MPEG transport stream
    contentType = 'video/mpeg'
    name = 'mpegts'

    def start(self, request=None):
        """Transcode the source into an MPEG-TS stream (video + audio mux).

        NOTE(review): 'decodebin2' and 'ffmpegcolorspace' are GStreamer
        0.10 element names ('decodebin'/'videoconvert' in 1.0) — confirm.
        """
        self.info(f'start {request}')
        # FIXME - mpeg2enc
        self.pipeline = Gst.parse_launch(
            f'mpegtsmux name=mux {self.uri} ! decodebin2 name=d ! queue '
            + f'! ffmpegcolorspace ! mpeg2enc ! queue ! mux. d. '
            + f'! queue ! audioconvert ! twolame ! queue ! mux.'
        )
        enc = self.pipeline.get_by_name('mux')
        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        enc.link(sink)
        self.pipeline.set_state(Gst.State.PLAYING)

        # tear the pipeline down when the client disconnects
        d = request.notifyFinish()
        d.addBoth(self.requestFinished)
class ThumbTranscoder(BaseTranscoder, InternalTranscoder):
    '''
    Should create a valid thumbnail according to the DLNA spec

    .. warning:: Neither width nor height must exceed 160px
    '''
    contentType = 'image/jpeg'
    name = 'thumb'

    def start(self, request=None):
        """Scale the source image to 160x160 and encode it as png or jpeg,
        depending on the 'type' request argument (default jpeg)."""
        self.info(f'start {request}')
        '''
        # what we actually want here is a pipeline that calls
        # us when it knows about the size of the original image,
        # and allows us now to adjust the caps-filter with the
        # calculated values for width and height

        new_width = 160
        new_height = 160
        if original_width > 160:
            new_heigth = \
                int(float(original_height) * (160.0/float(original_width)))
            if new_height > 160:
                new_width = \
                    int(float(new_width) * (160.0/float(new_height)))
        elif original_height > 160:
            new_width = \
                int(float(original_width) * (160.0/float(original_height)))
        '''
        try:
            # fix: a missing 'type' key raises KeyError (request.args is a
            # dict), so the jpeg fallback never fired for an absent arg;
            # also renamed to avoid shadowing the builtin `type`
            img_type = request.args['type'][0]
        except (KeyError, IndexError):
            img_type = 'jpeg'
        if img_type == 'png':
            self.pipeline = Gst.parse_launch(
                f'{self.uri} ! decodebin2 ! videoscale '
                + f'! video/x-raw-yuv,width=160,height=160 ! pngenc name=enc'
            )
            self.contentType = 'image/png'
        else:
            self.pipeline = Gst.parse_launch(
                f'{self.uri} ! decodebin2 ! videoscale '
                + f'! video/x-raw-yuv,width=160,height=160 ! jpegenc name=enc'
            )
            self.contentType = 'image/jpeg'
        enc = self.pipeline.get_by_name('enc')
        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        enc.link(sink)
        self.pipeline.set_state(Gst.State.PLAYING)

        # tear the pipeline down when the client disconnects
        d = request.notifyFinish()
        d.addBoth(self.requestFinished)
class GStreamerTranscoder(BaseTranscoder):
    """A generic transcoder driven by a caller-supplied GStreamer pipeline.

    Before calling :meth:`start`, set :attr:`pipeline_description` to a
    parse_launch template containing a ``%s`` placeholder for the source
    uri and an element named ``mux`` to which the sink is attached.
    """

    # parse_launch template; populated by transcoder_class_wrapper
    pipeline_description = None

    def start(self, request=None):
        """Parse the configured pipeline, attach a DataSink and play it."""
        if self.pipeline_description is None:
            raise NotImplementedError(
                'Warning: operation cancelled. You must set a value for '
                + 'GStreamerTranscoder.pipeline_description'
            )
        self.info(f'start {request}')
        self.pipeline = Gst.parse_launch(self.pipeline_description % self.uri)
        muxer = self.pipeline.get_by_name('mux')
        data_sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(data_sink)
        muxer.link(data_sink)
        self.pipeline.set_state(Gst.State.PLAYING)
        # stop transcoding when the client goes away
        finished = request.notifyFinish()
        finished.addBoth(self.requestFinished)
class ExternalProcessProtocol(protocol.ProcessProtocol):
    """Bridges a spawned child process to an ExternalProcessProducer:
    stdout is forwarded to the caller, stderr is printed for debugging."""

    def __init__(self, caller):
        self.caller = caller

    def connectionMade(self):
        print('pp connection made')

    def outReceived(self, data):
        # print(f'outReceived with {len(data):d} bytes!')
        self.caller.write_data(data)

    def errReceived(self, data):
        # print(f'errReceived! with {len(data):d} bytes!')
        print('pp (err):', data.strip())

    def inConnectionLost(self):
        # print('inConnectionLost! stdin is closed! (we probably did it)')
        pass

    def outConnectionLost(self):
        # print('outConnectionLost! The child closed their stdout!')
        pass

    def errConnectionLost(self):
        # print('errConnectionLost! The child closed their stderr.')
        pass

    def processEnded(self, status_object):
        # NOTE(review): exitCode can be None when the child is killed by a
        # signal, which would make this ':d' format raise — confirm
        print(f'processEnded, status {status_object.value.exitCode:d}')
        print('processEnded quitting')
        self.caller.ended = True
        # an empty chunk triggers the close path in write_data
        self.caller.write_data('')
class ExternalProcessProducer(object):
    """Twisted push producer that streams a child process's stdout into an
    HTTP request. The process is spawned lazily on the first
    resumeProducing call."""

    logCategory = 'externalprocess'

    def __init__(self, pipeline, request):
        # pipeline: full command line, already %-formatted with the uri
        self.pipeline = pipeline
        self.request = request
        self.process = None
        self.written = 0  # total bytes forwarded so far
        self.data = ''  # NOTE(review): never read anywhere — dead state?
        self.ended = False
        request.registerProducer(self, 0)

    def write_data(self, data):
        if data:
            # print(f'write {len(data):d} bytes of data')
            self.written += len(data)
            # this .write will spin the reactor, calling .doWrite and then
            # .resumeProducing again, so be prepared for a re-entrant call
            self.request.write(data)
        if self.request and self.ended:
            print('closing')
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None

    def resumeProducing(self):
        # print('resumeProducing', self.request)
        if not self.request:
            return
        if self.process is None:
            # spawn the child on first pull; argv[0] becomes the bare name
            argv = self.pipeline.split()
            executable = argv[0]
            argv[0] = os.path.basename(argv[0])
            from twisted.internet import reactor
            self.process = reactor.spawnProcess(
                ExternalProcessProtocol(self), executable, argv, {}
            )

    def pauseProducing(self):
        pass

    def stopProducing(self):
        # NOTE(review): if called before the process was spawned,
        # self.process is None and loseConnection would raise — confirm
        print('stopProducing', self.request)
        self.request.unregisterProducer()
        self.process.loseConnection()
        self.request.finish()
        self.request = None
class ExternalProcessPipeline(resource.Resource, log.LogAble):
    """Twisted resource that streams the stdout of an external process.

    Subclasses must set ``pipeline_description`` (a command line with a
    ``%s`` placeholder for the uri) and may set ``contentType``.
    """

    logCategory = 'externalprocess'
    addSlash = False
    pipeline_description = None
    contentType = None

    def __init__(self, uri):
        self.uri = uri
        resource.Resource.__init__(self)
        log.LogAble.__init__(self)

    def getChildWithDefault(self, path, request):
        # every child path maps back to this resource
        return self

    def render(self, request):
        """Spawn the external pipeline and stream its output to *request*."""
        print('ExternalProcessPipeline render')
        if self.pipeline_description is None:
            raise NotImplementedError(
                'Warning: operation cancelled. You must set a value for '
                + 'ExternalProcessPipeline.pipeline_description'
            )
        if self.contentType is not None:
            request.setHeader(b'Content-Type', self.contentType)
        # the producer registers itself with the request and starts the
        # child process on the first resumeProducing call
        ExternalProcessProducer(self.pipeline_description % self.uri, request)
        return server.NOT_DONE_YET
def transcoder_class_wrapper(klass, content_type, pipeline):
    """Return a factory that builds *klass* instances pre-configured with
    the given content type and pipeline description."""
    def create_object(uri):
        # instantiate first, then attach the per-transcoder configuration
        instance = klass(uri)
        instance.contentType = content_type
        instance.pipeline_description = pipeline
        return instance
    return create_object
class TranscoderManager(log.LogAble):
    '''
    Singleton class which holds information about all available transcoders.

    They are put into a transcoders dict with their id as the key.

    We collect all internal transcoders by searching for all subclasses of
    InternalTranscoder, the class will be the value.

    Transcoders defined in the config are parsed and stored as a dict in the
    transcoders dict.

    In the config, a transcoder description has to look like this:

    *** preliminary, will be extended and
        might even change without further notice ***

    .. code-block:: xml

        <transcoder>
          <pipeline>%s ...</pipeline> <!-- we need a %s here to insert the
                                           source uri (or can we have all the
                                           times pipelines we can prepend with
                                           a '%s !') and an element named mux
                                           where we can attach our sink -->
          <type>gstreamer</type> <!-- could be gstreamer or process -->
          <name>mpegts</name>
          <target>video/mpeg</target>
          <fourth_field> <!-- value for the 4th field of the
                              protocolInfo phalanx, default is
                              '*' -->
        </transcoder>
    '''

    logCategory = 'transcoder_manager'
    _instance_ = None  # Singleton

    def __new__(cls, *args, **kwargs):
        '''Creates the singleton.'''
        if cls._instance_ is None:
            obj = super(TranscoderManager, cls).__new__(cls)
            if 'coherence' in kwargs:
                obj.coherence = kwargs['coherence']
            cls._instance_ = obj
        return cls._instance_

    def __init__(self, coherence=None):
        '''
        Initializes the class :class:`TranscoderManager`.

        It should be called at least once with the main
        :class:`~coherence.base.Coherence` class passed as an argument,
        so we have access to the config.

        NOTE(review): __init__ runs on every TranscoderManager() call and
        rebuilds self.transcoders even though __new__ returns the
        singleton — confirm this re-initialization is intended.
        '''
        log.LogAble.__init__(self)
        self.transcoders = {}
        # built-in transcoders: every subclass of InternalTranscoder
        for transcoder in InternalTranscoder.__subclasses__():
            self.transcoders[get_transcoder_name(transcoder)] = transcoder
        if coherence is not None:
            self.coherence = coherence
            try:
                transcoders_from_config = self.coherence.config['transcoder']
                # a single <transcoder> entry parses as a bare dict
                if isinstance(transcoders_from_config, dict):
                    transcoders_from_config = [transcoders_from_config]
            except KeyError:
                transcoders_from_config = []
            for transcoder in transcoders_from_config:
                # FIXME: is anyone checking if all keys are given ?
                pipeline = transcoder['pipeline']
                if '%s' not in pipeline:
                    self.warning(
                        "Can't create transcoder %r:"
                        + " missing placehoder '%%s' in 'pipeline'",
                        transcoder,
                    )
                    continue

                try:
                    transcoder_name = transcoder['name']  # .decode('ascii')
                except UnicodeEncodeError:
                    self.warning(
                        "Can't create transcoder %r:"
                        + " the 'name' contains non-ascii letters",
                        transcoder,
                    )
                    continue

                transcoder_type = transcoder['type'].lower()

                # wrap the generic class with the per-config target/pipeline
                if transcoder_type == 'gstreamer':
                    wrapped = transcoder_class_wrapper(
                        GStreamerTranscoder,
                        transcoder['target'],
                        transcoder['pipeline'],
                    )
                elif transcoder_type == 'process':
                    wrapped = transcoder_class_wrapper(
                        ExternalProcessPipeline,
                        transcoder['target'],
                        transcoder['pipeline'],
                    )
                else:
                    self.warning(f'unknown transcoder type {transcoder_type}')
                    continue

                self.transcoders[transcoder_name] = wrapped

        # FIXME reduce that to info later
        self.warning(f'available transcoders {self.transcoders}')

    def select(self, name, uri, backend=None):
        """Instantiate the transcoder registered under *name* for *uri*.

        Raises KeyError when no transcoder with that name is registered.
        """
        # FIXME:why do we specify the name when trying to get it?
        if backend is not None:
            ''' try to find a transcoder provided by the backend
                and return that here,
                if there isn't one continue with the ones
                provided by the config or the internal ones
            '''
            pass
        transcoder = self.transcoders[name](uri)
        return transcoder
|
python
|
"""Remove the IPC message queue named in the shared params file."""
import posix_ipc

import utils

params = utils.read_params()

try:
    posix_ipc.unlink_message_queue(params["MESSAGE_QUEUE_NAME"])
    s = "message queue %s removed" % params["MESSAGE_QUEUE_NAME"]
    print(s)
# fix: was a bare `except:` which also swallowed KeyError/KeyboardInterrupt;
# only a queue that does not exist means "nothing to clean"
except posix_ipc.ExistentialError:
    print("queue doesn't need cleanup")

print("\nAll clean!")
|
python
|
import keywords as kw
import pca_tsne as pt
import math
import re
import numpy as np
from collections import Counter
from sklearn.cluster import KMeans
def conventional_kmeans(data, tfidf, kmeans_size_keywords, k):
    """Cluster the TF-IDF matrix of ``data.setting_value`` into *k* groups.

    Prints SSE/SSD diagnostics and per-cluster top keywords, appends
    ``[size, keywords]`` pairs to *kmeans_size_keywords* (mutated in
    place), plots a t-SNE/PCA projection, and returns the cluster labels.
    """
    matrix = tfidf.fit_transform(data.setting_value)
    fit = KMeans(n_clusters=k, random_state=20).fit(matrix)
    means_clusters = fit.predict(matrix)
    distances = fit.transform(matrix)
    # fix: the sample count was hard-coded as 2702 (the size of one
    # particular dataset); derive it from the matrix so the statistics
    # stay correct for any input
    n_samples = matrix.shape[0]
    # SSE: sum of each sample's distance to its own cluster center
    sse = 0
    for i, cluster in enumerate(means_clusters):
        sse = sse + distances[i][cluster]
    print("\nSSE = {}".format(sse))
    # SSD: standard deviation of those distances around the mean
    ssd = 0
    for i, cluster in enumerate(means_clusters):
        ssd = ssd + math.pow(distances[i][cluster] - (sse / n_samples), 2)
    ssd = math.sqrt(ssd / n_samples)
    print("\nSSD = {}".format(ssd))
    sizes = np.bincount(means_clusters)
    top_keywords = kw.get_top_keywords(matrix, means_clusters, tfidf.get_feature_names(), 10)
    # top_keywords is a formatted string; pull each cluster's line out by
    # its 1-based index
    index = 1
    for size in sizes:
        regex = "{}(.*)".format(index)
        cluster_keywords = re.search(regex, top_keywords).group(1)
        print("Cluster {}".format(index))
        print(cluster_keywords)
        kmeans_size_keywords.append([size, cluster_keywords])
        index += 1
    print("\nClusters Size")
    print(sizes)
    pt.plot_tsne_pca(matrix, means_clusters)
    return means_clusters
def iteractive_kmeans(data, tfidf, clusters_size_keywords, t):
    """Iteratively cluster the data, peel off every cluster of <= 50
    elements, and repeat on the remainder until no data is left.

    Peeled clusters are appended to *clusters_size_keywords* as
    ``[size, keywords, rows]``. Prints SSE/SSD diagnostics at the end.

    NOTE(review): the 2702 in the SSD formula is a hard-coded sample
    count — confirm it matches the dataset this is run on.
    NOTE(review): `list(counts.values()).index(min_size)` picks the FIRST
    cluster with that size; when two clusters tie in size the wrong one
    may be removed — verify.
    """
    sse = 0
    dists_array = []  # per-removed-element distances, reused for SSD
    while (data.size > 0):
        print("Applying TFIDF...\n")
        matrix = tfidf.fit_transform(data.setting_value)
        # grow k (1, 3, 5, ...) until at least one small cluster appears
        k = 1
        found = False
        while (found == False):
            print("Clustering with k = {}...".format(k))
            fit = KMeans(n_clusters=k, random_state=20).fit(matrix)
            means_clusters = fit.predict(matrix)
            distances = fit.transform(matrix)
            cluster_size = np.bincount(means_clusters)
            print("Clusters sizes = {}".format(cluster_size))
            min_sizes = sorted(i for i in cluster_size if i <= 50)
            print("Min cluster sizes = {}\n".format(min_sizes))
            if min_sizes:
                rows_removal = []
                counts = Counter(means_clusters)
                print("Counter occurrences = {}\n".format(counts))
                for min_size in min_sizes:
                    # map the size back to its cluster label, then forget
                    # that label so ties resolve to the next occurrence
                    min_element = list(counts.keys())[list(counts.values()).index(min_size)]
                    print("Current min_element = {}".format(min_element))
                    del counts[min_element]
                    print("Removed min_element {} from counter {}\n".format(min_element, counts))
                    print("Removing smallest cluster elements = {} with occurrences = {}".format(min_element, min_size))
                    print("Getting element indexes...")
                    min_element_positions = [index for index, value in enumerate(means_clusters) if value == min_element]
                    rows_removal.extend(min_element_positions)
                    min_size_position = list(cluster_size).index(min_size) + 1
                    print("Getting cluster {} size and top keywords...".format(min_size_position))
                    top_keywords = kw.get_top_keywords(matrix, means_clusters, tfidf.get_feature_names(), 10)
                    regex = "{}(.*)".format(min_size_position)
                    min_cluster_keywords = re.search(regex, top_keywords).group(1)
                    print("Top keywords are {}\n".format(min_cluster_keywords))
                    clusters_size_keywords.append([min_size, min_cluster_keywords, data.iloc[min_element_positions]])
                print("Being removed {} elements...".format(len(rows_removal)))
                print("Old data size = {}".format(data.index))
                # drop the peeled rows and restart clustering on the rest
                data = data.drop(data.index[rows_removal]).reset_index(drop=True)
                print("New data size = {}\n".format(data.index))
                for position in rows_removal:
                    print("Adding sse of element {} of cluster {}".format(position, means_clusters[position]))
                    sse = sse + distances[position][means_clusters[position]]
                    dists_array.append(distances[position][means_clusters[position]])
                print("Current sse = {}".format(sse))
                found = True
            else:
                print("k = {} failed\n".format(k))
                k = k + 2
    ssd = 0
    for dist in dists_array:
        ssd = ssd + math.pow(dist - (sse/2702),2)
    ssd = math.sqrt(ssd/2702)
    print("Total ssd = {}".format(ssd))
|
python
|
from yunionclient.common import base
class CdnDomain(base.ResourceBase):
    """Resource wrapper for a single CDN domain record."""
    pass
class CdnDomainManager(base.StandaloneManager):
    """Standalone API manager for ``cdn_domain`` resources."""
    resource_class = CdnDomain
    keyword = 'cdn_domain'
    keyword_plural = 'cdn_domains'
    # columns shown in normal listings
    _columns = ["ID", "Name", "Status", "Cloudaccount_id", "External_id", "Cname", "Origins", "ServiceType", "Area"]
    # extra columns for admin listings (none)
    _admin_columns = []
|
python
|
import simplejson
import urllib2
import feedparser
import logging
from datetime import timedelta
from django.http import Http404, HttpResponse
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.shortcuts import render_to_response
from django.core.cache import cache
from molly.utils.views import BaseView
from molly.utils.breadcrumbs import NullBreadcrumb
logger = logging.getLogger(__name__)
class IndexView(BaseView):
    """Desktop landing page: cached blog feed plus social-media links.

    NOTE(review): this module is Python 2 (`except Exception, e` below) —
    keep it on a py2 interpreter or port the syntax.
    """

    def get_metadata(self, request):
        # keep the landing page out of the site search index
        return {
            'exclude_from_search': True}

    breadcrumb = NullBreadcrumb

    def initial_context(self, request):
        # NOTE(review): getattr without a default raises AttributeError
        # when blog_rss_url is missing from the config — confirm intended
        return {
            'blog_feed': self._cache(self._get_blog_feed, 'blog',
                                     args=[getattr(self.conf,
                                                   'blog_rss_url')], timeout=300),
            'blog_url': getattr(self.conf, 'blog_url', None),
            'facebook_url': getattr(self.conf, 'facebook_url', None),
            'twitter_username': getattr(self.conf, 'twitter_username', None),
            'twitter_widget_id': getattr(self.conf, 'twitter_widget_id', None),
        }

    def handle_GET(self, request, context):
        # Can't render fragment
        if 'fragment' in self.FORMATS: del self.FORMATS['fragment']
        return self.render(request, context, 'desktop/index',
                           expires=timedelta(days=1))

    def _cache(self, f, key, args=None, kwargs=None, timeout=None):
        """Memoize f(*args, **kwargs) in the Django cache under a
        site-local key."""
        key = '.'.join(['molly', self.conf.local_name, key])
        value = cache.get(key)
        if value is None:
            value = f(*(args or ()), **(kwargs or {}))
            cache.set(key, value, timeout)
        return value

    def _get_blog_feed(self, url):
        """Fetch and parse the blog RSS feed; None on failure or no url."""
        if not url:
            return None
        try:
            return feedparser.parse(url)
        except Exception, e:
            logger.warn("Failed to fetch blog feed.", exc_info=True)
            return None
|
python
|
"""Record R's temp directory in a file and write a test plot there."""
from rpy import r
import os.path

# find out where the temp directory is
tempdir = r.tempdir()

# write its name into a file; the context manager guarantees the handle
# is closed even if the write fails (was open/write/close)
with open('tempdir', 'w') as f:
    f.write(tempdir)

# put something there..
r.postscript(os.path.join(tempdir, "foo.ps"))
r.plot(1, 1)
r.dev_off()
|
python
|
import os
from math import ceil
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from app.aws_s3 import S3
from app.mongo import MongoDB
# FastAPI application wired to MongoDB (search/metadata) and S3 (thumbnails).
API = FastAPI(
    title='DocDB DS API',
    version="1.0.0",
    docs_url='/',
)
# attach the shared clients to the app object so route handlers can reach them
API.db = MongoDB()
API.s3 = S3()
# allow any origin/method/header — this API is consumed cross-origin
API.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)
@API.get("/version")
async def version():
    """Return the API version string declared on the FastAPI app."""
    return API.version
@API.post("/search")
async def search(query: str, page_number: int = 0, results_per_page: int = 100):
    """Full-text search with pagination.

    Returns the requested page of matches plus the total match count and
    the number of pages at the current page size.
    """
    offset = page_number * results_per_page
    matches = list(API.db.search(query)[offset:offset + results_per_page])
    total = API.db.count({"$text": {"$search": query}})
    page_count = ceil(total / results_per_page)
    return {"Pages": page_count, "Count": total, "Response": matches}
@API.get("/lookup/{file_id}")
async def lookup(file_id: str):
    """ Returns everything for a single match

    Example: https://ds.humanrightsfirstdocdb.dev/lookup/76737668329

    {'Response': {'box_id': String,
                  'name': String,
                  'summary': String,
                  'path': String,
                  'url': String,
                  'tags': Array of Strings,
                  'text': String}}

    Response is null when no document matches the box_id.
    """
    return {"Response": API.db.find_one({"box_id": file_id})}
@API.get("/thumbnail/{file_id}")
async def thumbnail(file_id: str):
    """ Returns the jpg thumbnail for a single document.
    Falls back to the S3 bucket on a local cache miss, and to a
    default image when the thumbnail cannot be obtained at all.
    """
    file_name = f"{file_id}.jpg"
    file_path = f"app/thumbnails/{file_name}"
    if not os.path.exists(file_path):
        # cache miss: try to fetch from S3 into the local cache dir
        API.s3.download("docdb-thumbnails", file_name, file_path)
    if os.path.exists(file_path):
        # fix: "image/jpg" is not a registered MIME type; the correct
        # media type for JPEG is "image/jpeg"
        return FileResponse(file_path, media_type="image/jpeg")
    else:
        return FileResponse("app/thumbnails/default.jpg", media_type="image/jpeg")
@API.get("/raw_text/{file_id}")
async def raw_text(file_id: str):
    """Dump the stored plain text of a document to a local file and serve it.

    NOTE(review): assumes the box_id exists — `find_one` returning None
    would raise TypeError on the subscript; confirm upstream validation.
    NOTE(review): file["name"] comes from the database; if it can contain
    path separators this write is open to path traversal — verify.
    """
    file = API.db.find_one({"box_id": file_id})
    file_name = file["name"].replace(".pdf", ".txt")
    file_path = f"app/text-files/{file_name}"
    with open(file_path, "w") as f:
        f.write(file["text"])
    return FileResponse(file_path, media_type="text/plain")
@API.post("/add_tag")
async def add_tag(file_id: str = Form(...), tag: str = Form(...)):
    """ Adds a custom tag to a document (form-encoded body). """
    API.db.push_list({"box_id": file_id}, "tags", tag)
    return {'Result': 'Success', "file_id": file_id, "tag": tag}
@API.delete("/remove_tag")
async def remove_tag(file_id: str, tag: str):
    """ Removes a tag from a document (query parameters, unlike add_tag). """
    API.db.pull_list({"box_id": file_id}, "tags", tag)
    return {'Result': 'Success', "file_id": file_id, "tag": tag}
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""" Analyse des efforts statiques dans un treillis
(assemblage de barres et de pivots)
Le problรจme est traitรฉ en 2D (dans un plan)
Pierre Haessig โ Mars 2013
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
### 1) Building the truss ###

# Truss points
N_piv = 5
# NOTE(review): the string literals in this script look mojibake-garbled
# ('ร ' where an accented French character should be) — the original file
# was UTF-8 French; confirm and re-encode the messages.
print('treillis ร {} pivots (dont 2 pivots de fixation)'.format(N_piv))

# Choice of the anchor (support) points:
iP_A = 0  # first point of the truss
iP_B = 1  # 2nd point
#iP_B = N_piv-1 # last point

# Dimensions of one elementary cell of the truss
dx = 1.
dy = 2.
h = 0.  # amplitude of the height variations
def courbe_y(x, haut):
    """Shaping curve of the truss: height at abscissa *x*.

    The top chord (``haut=True``) is offset upward by one cell height
    ``dy``; the bottom chord follows the bare curve.
    """
    longueur = (N_piv-1)*dx  # total length of the truss
    base = h*x*(longueur-x)/(longueur/2)**2  # parabola
    # alternative shapes kept from the original:
    #   h*np.sin(2*np.pi/longueur*x)                  sine (1 period)
    #   h*np.sqrt((longueur/2)**2 - (x-longueur/2)**2) half-circle
    return base + dy if haut else base
# end courbe_y()
# Compute the points:
iy = 0
ix = 0
# List of the truss points:
pivots = []
for i in range(N_piv):
    # Compute the coordinates:
    x = ix*dx
    y = courbe_y(x, iy==1)
    pivots.append((x,y))
    # Increment the counters (alternate bottom/top chord):
    iy = (iy+1) % 2
    ix = ix + 1

# Conversion to integer positions
#pivots = [(int(x),int(y)) for x,y in pivots]

# Coordinates of the anchor points:
P_A = pivots[iP_A]
P_B = pivots[iP_B]
print('points d\'accroche: {!s} et {!s}'.format(P_A, P_B))

# Build the bars:
barres = []
# Connections inside the truss
for i, Pi in enumerate(pivots[:-2]):
    # Each point connects to the next 2:
    barres.append((Pi,pivots[i+1]))
    barres.append((Pi,pivots[i+2]))
# Bar between the last two points:
barres.append((pivots[-2], pivots[-1]))

## Hyperstaticity issues:
# If a bar exists between P_A and P_B, remove it (it is hyperstatic)
barres = [(P1,P2) for (P1,P2) in barres
          if (P1,P2) != (P_A,P_B) and (P1,P2) != (P_B,P_A)]

if len(barres) > (N_piv-2)*2:
    print('Il y a une barre en trop:')
    guess = 1
    # NOTE(review): raw_input is Python 2 only — confirm the target
    # interpreter (use input() on Python 3)
    ind = raw_input('barre ร enlever ({} par dรฉfaut) > '.format(guess))
    if ind.strip() == '':
        ind = int(guess)
    else:
        ind = int(ind)
    print('barre enlevรฉe : {!s}'.format(barres.pop(ind)))

N_bar = len(barres)
print('treillis ร {} barres'.format(N_bar))

### Direction vectors of the bars:
barres_arr = np.array(barres) # shape is (2N, 2, 2)
# "AB = OB - OA":
barre_dir = barres_arr[:,1,:] - barres_arr[:,0,:]
# Length of the bars:
barre_l = np.sqrt((barre_dir**2).sum(axis = 1))
# normalization to unit vectors:
barre_dir = barre_dir/barre_l.reshape(-1,1)

### Incidence matrix:
Inc_mat = np.zeros((N_piv, len(barres)), dtype=int)
for j, bj in enumerate(barres):
    P1, P2 = bj
    # Note the sign convention:
    i1 = pivots.index(P1)
    Inc_mat[i1,j] = -1  # bar bj "leaves" P1
    i2 = pivots.index(P2)
    Inc_mat[i2,j] = +1  # bar bj "arrives at" P2
print('Matrice d\'incidence:')
print(str(Inc_mat).replace('0','.'))

# Remove the rows corresponding to the anchor pivots
# NOTE(review): range(...).remove(...) only works on Python 2 where
# range returns a list — wrap in list() for Python 3
piv_ind = range(N_piv)
piv_ind.remove(iP_A)
piv_ind.remove(iP_B)
# Reduced incidence matrix:
Inc_mat_red = Inc_mat[piv_ind, :]

### Build the linear system to invert:
# 1) Matrix A
Ax = Inc_mat_red*barre_dir[:,0]
Ay = Inc_mat_red*barre_dir[:,1]
# or: Ax = np.dot(Inc_mat_red,np.diag(barre_dir[:,0]))
A = np.vstack((Ax, Ay))
# Image of the matrix:
# plt.imshow(A, interpolation='nearest')

# 2) Vector b: external force applied to each pivot:
F_ext = np.zeros((N_piv, 2))
# Load on the last pivot:
F_ext[-1] = (0., -1)  # downward force
## Gravity on all pivots:
#F_ext[:,1] = -1/N_piv
## Load on the keystone:
#F_ext[N_piv//2] = (0,-1)
print('Force extรฉrieure (en cartรฉsien) appliquรฉe ร chaque pivot:')
print(F_ext.T)
# Stack the x and y components:
b_ext = np.hstack((F_ext[piv_ind,0], F_ext[piv_ind,1]))

# 3) Solve: traction force on the bars
trac_barres = np.linalg.solve(A,b_ext)
print('Effort de traction sur chaque barre:')
print(trac_barres.round(2))
trac_max = np.max(np.abs(trac_barres))
print(' -> effort max (en val. absolue) : {:.1f}'.format(trac_max))

# Forces on the anchor points (from the truss and from outside):
resul_A = -np.inner(Inc_mat[iP_A,:]*trac_barres, barre_dir.T) + F_ext[iP_A]
resul_B = -np.inner(Inc_mat[iP_B,:]*trac_barres, barre_dir.T) + F_ext[iP_B]
print('action du treillis sur pt A: {!s} ({:.2f})'.format(resul_A, np.linalg.norm(resul_A,2)))
print('action du treillis sur pt B: {!s} ({:.2f})'.format(resul_B, np.linalg.norm(resul_B,2)))
### Tracรฉ du treillis #########################################################
fig = plt.figure('efforts treillis', figsize=(12,5))
ax = fig.add_subplot(111, title='efforts sur le treillis '
'({:d} pivots, {:d} barres)'.format(N_piv, N_bar))
# รchelle pour le tracรฉ des forces
F_scale = 0.4*barre_l.mean()/trac_max
# Couleur des efforts:
F_color = (0,0.8,0) # vert
# Colormap pour colorer les barres selon l'effort subit: rouge-bleu
col_list = [(0.9,0,0.0), (0.7,0.7,0.7), (0,0,0.9)]
rb = mpl.colors.LinearSegmentedColormap.from_list('red-blue', col_list)
cm = cm_rb
#cm = plt.cm.coolwarm_r
# TODO: essayer de tracer les forces avec des FancyArrowPatch:
# a = mpl.patches.FancyArrowPatch((0,0), (1,1), arrowstyle='->, head_width=5,head_length=10')
# ax.add_patch(a)
# Tracรฉ des barres et des efforts
for j, bj in enumerate(barres):
# Coordonnรฉes des 2 pivots d'accroche:
(x1,y1), (x2, y2) = bj
# direction :
uj = barre_dir[j]
# effort de traction sur la barre bj:
trac = trac_barres[j]
color = cm(trac/(2*trac_max)+0.5)
plt.plot((x1, x2), (y1, y2), '-', color = color, lw=4, zorder=1)
# Tracรฉ des efforts barre -> pivot1 et pivot 2
plt.arrow(x1, y1, +trac*uj[0]*F_scale, +trac*uj[1]*F_scale,
zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=F_color)
plt.arrow(x2, y2, -trac*uj[0]*F_scale, -trac*uj[1]*F_scale,
zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=F_color)
# end for each barre
# Couleur des pivots
piv_color = (1.,1.,1.) # blanc
piv_color_AB = (1.,1.,0.5) # jaune clair
piv_alpha = 1 # opaque
# Tracรฉ des pivots
for i, Pi in enumerate(pivots):
# Tracรฉ du pivot:
marker = 'D' if Pi in (P_A,P_B) else 'o' # marqueur Diamond 'D' ou disque 'o'
color = piv_color_AB if Pi in (P_A,P_B) else piv_color
plt.plot(Pi[0], Pi[1], marker, ms=8, c=color, alpha = piv_alpha, zorder=3)
# Force extรฉrieur s'appliquant sur le pivot
Fi = F_ext[i]
if Fi.any():
plt.arrow(Pi[0], Pi[1], Fi[0]*F_scale, Fi[1]*F_scale,
zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=(1,0,0))
if Pi in (P_A,P_B):
F_soutien = -resul_A if Pi==P_A else -resul_B
plt.arrow(Pi[0], Pi[1], F_soutien[0]*F_scale, F_soutien[1]*F_scale,
zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=(1,1,0))
# end for each pivot
# Limites du tracรฉ
plt.xlim(min([x for (x,y) in pivots]) - dx*1,
max([x for (x,y) in pivots]) + dx*1)
plt.ylim(min([y for (x,y) in pivots]) - dy*.3,
max([y for (x,y) in pivots]) + dy*.3)
# Couleur de fond:
ax.patch.set_fc((0.9,)*3)
ax.set_aspect('equal')
plt.grid(False)
fig.tight_layout()
plt.show()
|
python
|
import logging
from datetime import datetime
from enum import Enum
from typing import Optional

import ntplib
from scapy.layers.ntp import NTPHeader

from ntp_raw import RawNTP
# Calendar years encoded in the transmit timestamp to tag CP3 packet types.
_PCK_1_YEAR = 1995
_PCK_2_YEAR = 2000


class CP3Mode(Enum):
    """
    In CP3 an NTP package can be marked as a type 1, or 2 package or as
    nothing. This class represents the different types.
    """
    # Plain int values. The original definitions carried trailing commas
    # (``NONE = 0,``) which silently turned every member value into a
    # one-element tuple (e.g. ``NONE.value == (0,)``) — almost certainly
    # unintended.
    NONE = 0
    PCK_1 = 1
    PCK_2 = 2

    @staticmethod
    def from_year(year: int) -> "CP3Mode":
        """Map a transmit-timestamp year to the CP3 packet type it encodes."""
        if year == _PCK_1_YEAR:
            return CP3Mode.PCK_1
        elif year == _PCK_2_YEAR:
            return CP3Mode.PCK_2
        else:
            return CP3Mode.NONE
class CP3Package(RawNTP):
    """RawNTP subclass with helpers to embed and extract CP3-specific data."""

    def __init__(self, ntp_pck: Optional[NTPHeader] = None,
                 log: Optional[logging.Logger] = None):
        """
        A child of RawNTP which adds functionality in order to extract and
        insert CP3 specific data into and from a NTPRaw package.

        Fresh defaults are created per call: the previous signature default
        ``ntp_pck=NTPHeader()`` was evaluated once at definition time, so
        every instance constructed without an explicit packet shared (and
        mutated) the same NTPHeader object.
        """
        if ntp_pck is None:
            ntp_pck = NTPHeader()
        if log is None:
            log = logging.getLogger('CP3Package-Logger')
        super().__init__(ntp_pck)
        self.log: logging.Logger = log

    def _extract_transmit_year(self) -> int:
        """Return the calendar year encoded in the transmit timestamp."""
        year = datetime.fromtimestamp(ntplib.ntp_to_system_time(self.ntp().sent)).year
        return year

    def get_cp3_mode(self) -> CP3Mode:
        """Classify this packet by its transmit-timestamp year."""
        transmit_year = self._extract_transmit_year()
        if transmit_year is None or transmit_year == 0:
            return CP3Mode.NONE
        return CP3Mode.from_year(transmit_year)

    def extract_payload(self) -> str:
        """Return the hidden payload: the first 32 characters of the origin
        and receive timestamp fields (bit-string fields per RawNTP —
        TODO confirm)."""
        return self.origin_timestamp()[0:32] + self.receive_timestamp()[0:32]

    def add_payload(self, payload_bits):
        """Hide *payload_bits* in the upper halves of the origin and
        receive timestamps (32 characters in each field)."""
        self.set_origin_timestamp(payload_bits[0:32] + self.origin_timestamp()[32:64])
        self.set_receive_timestamp(payload_bits[32:64] + self.receive_timestamp()[32:64])

    def set_cp3_mode_1(self):
        """Mark this packet as a CP3 type-1 packet."""
        self._set_cp3_mode(_PCK_1_YEAR)

    def set_cp3_mode_2(self):
        """Mark this packet as a CP3 type-2 packet."""
        self._set_cp3_mode(_PCK_2_YEAR)

    def _set_cp3_mode(self, year: int):
        """Rewrite the transmit timestamp so that its calendar year is *year*,
        keeping the rest of the timestamp unchanged."""
        ntp = self.ntp()
        time = ntplib.system_to_ntp_time(datetime.fromtimestamp(ntplib.ntp_to_system_time(ntp.sent))
                                         .replace(year=year).timestamp())
        ntp.sent = time
        raw = RawNTP(ntp)
        self.set_transmit_timestamp(raw.transmit_timestamp())
|
python
|
from helusers.oidc import ApiTokenAuthentication as HelApiTokenAuth
from django.conf import settings
class ApiTokenAuthentication(HelApiTokenAuth):
    """helusers JWT authentication that also flags strong authentication.

    After the normal token authentication succeeds, the token's ``amr``
    (Authentication Methods References) claim is compared against
    ``settings.STRONG_AUTH_PROVIDERS`` and the result is persisted on the
    user as ``has_strong_auth``.

    The redundant pass-through ``__init__`` of the original implementation
    has been removed; it only forwarded to ``super().__init__``.
    """

    def authenticate(self, request):
        """Authenticate *request*; return ``(user, auth)`` or ``None``.

        Returns ``None`` when no JWT is present so that other configured
        authentication classes can run.
        """
        jwt_value = self.get_jwt_value(request)
        if jwt_value is None:
            return None
        # Decode the token here to inspect the amr claim; the parent class
        # performs its own validation inside authenticate() below.
        payload = self.decode_jwt(jwt_value)
        # NOTE(review): assumes the parent authenticate() never returns
        # None once a JWT is present — confirm against helusers.
        user, auth = super(ApiTokenAuthentication, self).authenticate(request)
        # amr (Authentication Methods References) should contain the used
        # auth provider name, e.g. suomifi
        user.has_strong_auth = payload.get('amr') in settings.STRONG_AUTH_PROVIDERS
        user.save()
        return (user, auth)
|
python
|
from inquire_sql_backend.semantics.embeddings.vector_models import VECTOR_EMBEDDERS
def vector_embed_sentence(sent, tokenized=False, model="default"):
    """Embed a single sentence with the embedder registered under *model*."""
    embedder = VECTOR_EMBEDDERS[model]
    return embedder(sent, batch=False, tokenized=tokenized)
def vector_embed_sentence_batch(sent, tokenized=False, model="default"):
    """Embed a batch of sentences with the embedder registered under *model*."""
    embedder = VECTOR_EMBEDDERS[model]
    return embedder(sent, batch=True, tokenized=tokenized)
|
python
|
import torch.nn as nn
import torch
from .conv2d_repeat import Conv2dRepeat
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with 1-pixel padding and no bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual (identity or projected) skip."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.activ = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when the shape changes, identity otherwise.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.activ(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += identity
        return self.activ(y)
class ResNetCifar(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three residual stages, width multiplier.

    NOTE(review): the final linear layer is sized ``64 * width``, i.e. it
    assumes ``block.expansion == 1`` — confirm when using expanding blocks.
    """

    def __init__(self, block, layers, width=1, num_classes=1000, args=None):
        super(ResNetCifar, self).__init__()
        self.inplanes = 16
        self.args = args
        self.width = width
        # 3x3 stride-1 stem keeps the input resolution unchanged.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.activ = nn.ReLU(inplace=True)
        # Three residual stages; the last two halve the resolution.
        self.layer1 = self._make_layer(block, 16 * width, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 32 * width, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64 * width, layers[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        self.fc = nn.Linear(64 * width, num_classes)
        # He init for convolutions; norm layers start as gamma=1, beta=0.01.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0.01)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; the first block may
        downsample and/or change the channel count via a 1x1 projection."""
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_channels
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.activ(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        x = self.avgpool(x)
        return self.fc(x.view(x.size(0), -1))
class ResNet(nn.Module):
    """ImageNet-style ResNet backbone with a configurable width multiplier."""

    def __init__(self, block, layers, width=1, num_classes=1000, args=None):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.args = args
        self.width = width
        # Stem: 7x7 stride-2 conv followed by a stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.activ = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; all but the first halve the resolution.
        self.layer1 = self._make_layer(block, 64 * self.width, layers[0])
        self.layer2 = self._make_layer(block, 128 * self.width, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256 * self.width, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512 * self.width, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        self.fc = nn.Linear(512 * block.expansion * self.width, num_classes)
        # He init for convolutions; norm layers start as gamma=1, beta=0.01.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0.01)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; the first block may
        downsample and/or change the channel count via a 1x1 projection."""
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_channels
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.maxpool(self.activ(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = self.avgpool(x)
        return self.fc(x.view(x.size(0), -1))
def resnet(model_name, num_classes=1000, args=None):
    """Build a width-scaled ResNet-18; the width multiplier is parsed from
    the trailing "_<k>" suffix of *model_name* (e.g. "resnet_2" -> 2)."""
    width = int(model_name.rsplit('_', 1)[-1])
    return ResNet(BasicBlock, [2, 2, 2, 2], width=width,
                  num_classes=num_classes, args=args)
def resnetcifar(model_name, num_classes=10, args=None):
    """Build a width-scaled CIFAR ResNet; the width multiplier is parsed
    from the trailing "_<k>" suffix of *model_name*."""
    width = int(model_name.rsplit('_', 1)[-1])
    return ResNetCifar(BasicBlock, [2, 2, 2], width=width,
                       num_classes=num_classes, args=args)
|
python
|
from flask import Blueprint

# Blueprint under which the socket-client handlers are registered.
socket_client = Blueprint('socket_client', __name__)
# Imported after the blueprint exists — presumably `events` registers its
# handlers against `socket_client`; verify before reordering.
from . import events
|
python
|
from rest_framework import serializers
from puzzle.models import Offer
class OfferSerializer(serializers.ModelSerializer):
    """Serializer for Offer objects, exposing the author's username."""

    # Read-only convenience field resolved via author -> user -> username.
    author_name = serializers.CharField(
        source="author.user.username", read_only=True)

    class Meta:
        model = Offer
        fields = [
            "id",
            "author_name",
            "created",
            "updated",
            "trade",
            "note",
        ]

    def create(self, validated_data):
        """Create and return a new Offer from the validated payload."""
        return Offer.objects.create(**validated_data)
|
python
|
"""Collection of PBM-based click simulators."""
from typing import Optional
from typing import Tuple
import torch as _torch
from pytorchltr.utils import mask_padded_values as _mask_padded_values
_SIM_RETURN_TYPE = Tuple[_torch.LongTensor, _torch.FloatTensor]
def simulate_pbm(rankings: _torch.LongTensor, ys: _torch.LongTensor,
                 n: _torch.LongTensor, relevance_probs: _torch.FloatTensor,
                 cutoff: Optional[int] = None,
                 eta: float = 1.0) -> _SIM_RETURN_TYPE:
    """Simulates clicks according to a position-biased user model.

    A click happens when a document is both observed (position-dependent
    probability) and judged relevant (label-dependent probability).

    Args:
        rankings: A tensor of size (batch_size, list_size) of rankings.
        ys: A tensor of size (batch_size, list_size) of relevance labels.
        n: A tensor of size (batch_size) indicating the nr docs per query.
        relevance_probs: A tensor indexed by relevance label, where the entry
            at index "i" indicates the probability of clicking a document
            with relevance label "i" (given that it is observed).
        cutoff: The maximum list size to simulate.
        eta: The severity of position bias (0.0 = no bias)

    Returns:
        A tuple of two tensors of size (batch_size, list_size), where the first
        indicates the clicks with 0.0 and 1.0 and the second indicates the
        propensity of observing each document.
    """
    # Cutoff at n for observation probabilities.
    if cutoff is not None:
        n = _torch.min(_torch.ones_like(n) * cutoff, n)
    # Compute position-biased observation probabilities.
    # NOTE(review): `ranks` starts at 1, so position i (0-based) gets
    # observation probability 1 / (i + 2) ** eta — the top position is
    # observed with probability (1/2) ** eta, not 1.0. Confirm this is the
    # intended propensity model.
    ranks = 1.0 + _torch.arange(
        rankings.shape[1], device=rankings.device, dtype=_torch.float)
    obs_probs = 1.0 / (1.0 + ranks) ** eta
    obs_probs = _torch.repeat_interleave(
        obs_probs[None, :], rankings.shape[0], dim=0)
    # Zero out the observation probability of padded documents.
    obs_probs = _mask_padded_values(obs_probs, n, mask_value=0.0, mutate=True)
    # Compute relevance labels at every rank.
    ranked_ys = _torch.gather(ys, 1, rankings)
    # Compute click probabilities (given observed).
    relevance_probs = _torch.repeat_interleave(
        relevance_probs[None, :], rankings.shape[0], dim=0)
    click_probs = _torch.gather(relevance_probs, 1, ranked_ys)
    # Sample clicks from bernoulli distribution with probabilities.
    clicks = _torch.bernoulli(click_probs * obs_probs)
    # Invert back to regular ranking.
    invert_ranking = _torch.argsort(rankings, dim=1)
    # Return click realization and propensities.
    return (
        _torch.gather(clicks, 1, invert_ranking).to(dtype=_torch.long),
        _torch.gather(obs_probs, 1, invert_ranking)
    )
def simulate_perfect(rankings: _torch.LongTensor, ys: _torch.LongTensor,
                     n: _torch.LongTensor,
                     cutoff: Optional[int] = None) -> _SIM_RETURN_TYPE:
    """Simulates clicks according to a perfect user model.

    Args:
        rankings: A tensor of size (batch_size, list_size) of rankings.
        ys: A tensor of size (batch_size, list_size) of relevance labels.
        n: A tensor of size (batch_size) indicating the nr docs per query.
        cutoff: The maximum list size to simulate.

    Returns:
        A tuple of two tensors of size (batch_size, list_size), where the first
        indicates the clicks with 0.0 and 1.0 and the second indicates the
        propensity of observing each document.
    """
    # torch.tensor replaces the legacy _torch.FloatTensor(data, device=...)
    # call: the legacy constructor rejects non-CPU devices, so this crashed
    # whenever `rankings` lived on a CUDA device.
    rel_probs = _torch.tensor(
        [0.0, 0.2, 0.4, 0.8, 1.0],
        dtype=_torch.float, device=rankings.device)
    # eta=0.0 disables position bias: observation is uniform over positions.
    return simulate_pbm(rankings, ys, n, rel_probs, cutoff, 0.0)
def simulate_position(rankings: _torch.LongTensor, ys: _torch.LongTensor,
                      n: _torch.LongTensor, cutoff: Optional[int] = None,
                      eta: float = 1.0) -> _SIM_RETURN_TYPE:
    """Simulates clicks according to a binary position-biased user model.

    Args:
        rankings: A tensor of size (batch_size, list_size) of rankings.
        ys: A tensor of size (batch_size, list_size) of relevance labels.
        n: A tensor of size (batch_size) indicating the nr docs per query.
        cutoff: The maximum list size to simulate.
        eta: The severity of position bias (0.0 = no bias)

    Returns:
        A tuple of two tensors of size (batch_size, list_size), where the first
        indicates the clicks with 0.0 and 1.0 and the second indicates the
        propensity of observing each document.
    """
    # torch.tensor replaces the legacy _torch.FloatTensor(data, device=...)
    # call, which rejects non-CPU devices (crashed for CUDA rankings).
    rel_probs = _torch.tensor(
        [0.1, 0.1, 0.1, 1.0, 1.0],
        dtype=_torch.float, device=rankings.device)
    return simulate_pbm(rankings, ys, n, rel_probs, cutoff, eta)
def simulate_nearrandom(rankings: _torch.LongTensor, ys: _torch.LongTensor,
                        n: _torch.LongTensor, cutoff: Optional[int] = None,
                        eta: float = 1.0) -> _SIM_RETURN_TYPE:
    """Simulates clicks according to a near-random user model.

    Args:
        rankings: A tensor of size (batch_size, list_size) of rankings.
        ys: A tensor of size (batch_size, list_size) of relevance labels.
        n: A tensor of size (batch_size) indicating the nr docs per query.
        cutoff: The maximum list size to simulate.
        eta: The severity of position bias (0.0 = no bias)

    Returns:
        A tuple of two tensors of size (batch_size, list_size), where the first
        indicates the clicks with 0.0 and 1.0 and the second indicates the
        propensity of observing each document.
    """
    # torch.tensor replaces the legacy _torch.FloatTensor(data, device=...)
    # call, which rejects non-CPU devices (crashed for CUDA rankings).
    rel_probs = _torch.tensor(
        [0.4, 0.45, 0.5, 0.55, 0.6],
        dtype=_torch.float, device=rankings.device)
    return simulate_pbm(rankings, ys, n, rel_probs, cutoff, eta)
|
python
|
import ila
import riscv_um
def genVlg():
    """Load the unprivileged RISC-V instruction ASTs and emit Verilog."""
    riscv = riscv_um.riscvModel()
    riscv.loadUnprivNxtFromDir('unpriv_asts')
    riscv.model.generateVerilog('RISC-V-VLG.v')


if __name__ == '__main__':
    genVlg()
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.network.manager import AllocateVIPs70Mixin
from nailgun.network.manager import AssignIPs61Mixin
from nailgun.network.manager import AssignIPs70Mixin
from nailgun.network.manager import AssignIPsLegacyMixin
from nailgun.network.manager import NetworkManager
class NovaNetworkManager(NetworkManager):
    """Network manager for nova-network based clusters."""

    @classmethod
    def create_nova_network_config(cls, cluster):
        """Create and persist a NovaNetworkConfig for *cluster*.

        The config is seeded with the defaults found in the release's
        "nova_network" metadata; metadata keys without a matching attribute
        are silently ignored.

        :param cluster: cluster DB object
        :returns: the new (flushed, not yet committed) NovaNetworkConfig
        """
        nova_net_config = NovaNetworkConfig(cluster_id=cluster.id)
        meta = cluster.release.networks_metadata["nova_network"]["config"]
        # items() instead of the Python-2-only iteritems() keeps this code
        # working on both Python 2 and Python 3.
        for key, value in meta.items():
            if hasattr(nova_net_config, key):
                setattr(nova_net_config, key, value)
        db().add(nova_net_config)
        db().flush()
        return nova_net_config

    @classmethod
    def generate_vlan_ids_list(cls, data, cluster, ng):
        """Return the list of VLAN ids for network group *ng*.

        For the "fixed" network the range is derived from the networking
        parameters (vlan start + amount); otherwise the group's single
        vlan_start is returned, or an empty list if it is unset.

        :param data: request data with "networking_parameters"
        :param cluster: cluster DB object (unused, kept for interface parity)
        :param ng: network group dict
        :rtype: list of int
        """
        if ng["name"] == "fixed":
            netw_params = data.get("networking_parameters", {})
            start = netw_params.get("fixed_networks_vlan_start")
            amount = netw_params.get("fixed_networks_amount")
            if start and amount:
                # list(...) guarantees a list return type on Python 3 too
                # (range() already returned a list on Python 2).
                return list(range(int(start), int(start) + int(amount)))
        if ng.get("vlan_start") is None:
            return []
        return [int(ng.get("vlan_start"))]
# Nova-network manager using the legacy IP assignment strategy.
class NovaNetworkManagerLegacy(AssignIPsLegacyMixin, NovaNetworkManager):
    pass
# Nova-network manager using the 6.1 IP assignment strategy.
class NovaNetworkManager61(AssignIPs61Mixin, NovaNetworkManager):
    pass
# Nova-network manager for 7.0: adds VIP allocation and 7.0 IP assignment.
class NovaNetworkManager70(
    AllocateVIPs70Mixin, AssignIPs70Mixin, NovaNetworkManager
):
    @classmethod
    def build_role_to_network_group_mapping(cls, *_):
        """Not needed: the default net role to network mapping is always used.

        All positional arguments are accepted and ignored so the signature
        stays compatible with the mixin interface.

        :return: Empty network role to network map
        :rtype: dict
        """
        return {}

    @classmethod
    def get_network_group_for_role(cls, network_role, _):
        """Returns network group to which network role is associated.

        The default network group from the network role description is
        returned; the second (mapping) argument is ignored.

        :param network_role: Network role dict
        :type network_role: dict
        :return: Network group name
        :rtype: str
        """
        return network_role['default_mapping']
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#import libraries
import string
import ast
from itertools import islice
import csv
from nltk.tokenize import RegexpTokenizer
def createTsvFile_Search1(listUrl_Movies3):
    """Preprocess every article_<i>.tsv from 'HW3 ADM/tsv/' into
    'HW3 ADM/tsv_correct/'.

    Each field is tokenized (punctuation and other symbols dropped),
    lower-cased, and de-duplicated — fields become unordered token sets,
    which is what the first (conjunctive) search engine needs.

    :param listUrl_Movies3: list of movie URLs; one tsv file per movie.
    """
    tokenizer = RegexpTokenizer(r'\w+')
    # The original loop iterated over an undefined global `totalMovies`
    # while this parameter went unused; derive the count from the argument
    # instead — TODO confirm the files are numbered 0..len-1.
    for index in range(len(listUrl_Movies3)):
        print(index)  # progress indicator
        file = "article_{}.tsv".format(index)
        # newline='' is the documented way to open files for the csv module.
        with open("HW3 ADM/tsv/" + file, "r") as tsvfile, \
                open("HW3 ADM/tsv_correct/" + file, "w", newline='') as outfile:
            tsvreader = csv.reader(tsvfile, delimiter="\t")
            tsvwriter = csv.writer(outfile, delimiter="\t")
            for row in tsvreader:
                for i in range(len(row)):
                    # Keep word characters only, dropping punctuation.
                    tokens = tokenizer.tokenize(row[i])
                    # Remove duplicates case-insensitively.
                    row[i] = list(set(map(str.lower, tokens)))
                tsvwriter.writerow(row)
def createTsvFile_Search2(listUrl_Movies3):
    """Preprocess every article_<i>.tsv from 'HW3 ADM/tsv/' into
    'HW3 ADM/tsv_correct2/'.

    Like createTsvFile_Search1, but duplicate tokens are KEPT (only
    lower-casing and tokenization are applied) — term frequencies matter
    for the second search engine.

    :param listUrl_Movies3: list of movie URLs; one tsv file per movie.
    """
    tokenizer = RegexpTokenizer(r'\w+')
    # The original loop iterated over an undefined global `totalMovies`
    # while this parameter went unused; derive the count from the argument
    # instead — TODO confirm the files are numbered 0..len-1.
    for index in range(len(listUrl_Movies3)):
        print(index)  # progress indicator
        file = "article_{}.tsv".format(index)
        # newline='' is the documented way to open files for the csv module.
        with open("HW3 ADM/tsv/" + file, "r") as tsvfile, \
                open("HW3 ADM/tsv_correct2/" + file, "w", newline='') as outfile:
            tsvreader = csv.reader(tsvfile, delimiter="\t")
            tsvwriter = csv.writer(outfile, delimiter="\t")
            for row in tsvreader:
                for i in range(len(row)):
                    # Keep word characters only, dropping punctuation.
                    tokens = tokenizer.tokenize(row[i])
                    # Lower-case but keep duplicates (term frequency).
                    row[i] = list(map(str.lower, tokens))
                tsvwriter.writerow(row)
|
python
|
from django import template
from classytags.core import Options
from classytags.helpers import AsTag
from classytags.arguments import Argument
from ..models import CallToActionRepository
class GetCallToAction(AsTag):
    """Template tag {% get_call_to_action "code" [as varname] %}.

    Renders the CallToActionRepository entry matching *code*, or yields an
    empty string when no such entry exists.
    """

    name = 'get_call_to_action'
    options = Options(
        Argument('code', required=True),
        'as',
        Argument('varname', required=False, resolve=False)
    )

    def get_value(self, context, code):
        """Return the rendered call-to-action for *code* ('' if missing)."""
        try:
            cta = CallToActionRepository.objects.get(code=code)
            return cta.rendered()
        except CallToActionRepository.DoesNotExist:
            return ''
# Module-level template library; registering the tag here lets Django
# templates use it after {% load %}-ing this tag module.
register = template.Library()
register.tag(GetCallToAction)
|
python
|
import json
def get_list():
    """Return the whole item -> language mapping stored in config.json.

    The file is opened with an explicit UTF-8 encoding (JSON is UTF-8 by
    specification) instead of relying on the platform default, which can
    break on non-ASCII language data.
    """
    with open("config.json", "r", encoding="utf-8") as f_obj:
        f_json = json.load(f_obj)
    return f_json
def get_lang(item):
    """Return the language code configured for *item* in config.json."""
    config = get_list()
    return config[item]
def set_lang(item, lang):
    """Set *item*'s language to *lang* and persist it to config.json.

    Returns True on success, or False when *lang* is not listed in
    list_lang.json. Files are read/written with an explicit UTF-8
    encoding (JSON is UTF-8 by specification).
    """
    with open("list_lang.json", "r", encoding="utf-8") as f_obj:
        list_lang = json.load(f_obj)
    if lang not in list_lang:
        return False
    f_json = get_list()
    f_json[item] = lang
    with open("config.json", "w", encoding="utf-8") as f_obj:
        json.dump(f_json, f_obj)
    return True
# Manual smoke test: dump the whole config, read two entries, then switch
# My_Language (set_lang prints False if "zh-CN" is absent from list_lang.json).
if __name__ == "__main__":
    print(get_list())
    print(get_lang("My_Language"))
    print(get_lang("Obj_Language"))
    print(set_lang("My_Language", "zh-CN"))
|
python
|
import os
import os.path as osp
import copy
import yaml
import numpy as np
from ast import literal_eval
from utils.collections import AttrDict
__C = AttrDict()
cfg = __C
# ---------------------------------------------------------------------------- #
# MISC options
# ---------------------------------------------------------------------------- #
# Device for training or testing
# E.g., 'cuda' for using GPU, 'cpu' for using CPU
__C.DEVICE = 'cuda'
# Number of GPUs to use (applies to both training and testing)
__C.NUM_GPUS = 1
# Pixel mean values (BGR order) as a list
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# Pixel std values (BGR order) as a list
__C.PIXEL_STDS = np.array([[[1.0, 1.0, 1.0]]])
# Directory for saving checkpoints and loggers
__C.CKPT = 'ckpts/mscoco_humanparts/e2e_hier_rcnn_R-50-FPN_1x/'
# Display the log per iteration
__C.DISPLAY_ITER = 20
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# A very small number that's used many times
__C.EPS = 1e-14
# Convert image to BGR format (for Caffe2 models), in range 0-255
__C.TO_BGR255 = True
# ---------------------------------------------------------------------------- #
# Model options
# ---------------------------------------------------------------------------- #
__C.MODEL = AttrDict()
# The type of model to use
# The string must match a function in the modeling.model_builder module
# (e.g., 'generalized_rcnn', 'retinanet', ...)
__C.MODEL.TYPE = 'generalized_rcnn'
# FPN is enabled if True
__C.MODEL.FPN_ON = False
# Indicates the model makes semantic segmentation predictions (as in Semantic Segmentation)
__C.MODEL.SEMSEG_ON = False
# RPN is enabled if True
# Default is True, if RPN_ON = False means that only training the backbone
__C.MODEL.RPN_ON = True
# The meaning of FASTER_RCNN depends on the context (training vs. inference):
# 1) During training, FASTER_ON = True means that end-to-end training will be
# used to jointly train the RPN subnetwork and the Fast R-CNN subnetwork
# (Faster R-CNN = RPN + Fast R-CNN).
# 2) During inference, FASTER_ON = True means that the model's RPN subnetwork
# will be used to generate proposals rather than relying on precomputed
# proposals. Note that FASTER_ON = True can be used at inference time even
# if the Faster R-CNN model was trained with stagewise training (which
# consists of alternating between RPN and Fast R-CNN training in a way that
# finally leads to a single network).
__C.MODEL.FASTER_ON = False
# Indicates the model uses Cascade R-CNN
__C.MODEL.CASCADE_ON = False
# Indicates the model makes instance mask predictions (as in Mask R-CNN)
__C.MODEL.MASK_ON = False
# Indicates the model makes part bbox predictions (as in Hier R-CNN)
__C.MODEL.HIER_ON = False # TODO
# Type of batch normalization, default: 'freeze'
# E.g., 'normal', 'freeze', 'sync', ...
__C.MODEL.BATCH_NORM = 'freeze'
# Number of classes in the dataset; must be set
# E.g., 81 for COCO (80 foreground + 1 background)
__C.MODEL.NUM_CLASSES = -1
# Swap model conv1 weight, for pet/rcnn we use BGR input channel (cv2), for pet/cls we use RGB channel,
# for caffe/caffe2 model using BGR channel. Thus if we use pet pretrain weights set 'True', else if use
# caffe or caffe2 weights set 'False'.
__C.MODEL.CONV1_RGB2BGR = True
# ---------------------------------------------------------------------------- #
# Solver options
# Note: all solver options are used exactly as specified; the implication is
# that if you switch from training on 1 GPU to N GPUs, you MUST adjust the
# solver configuration accordingly. We suggest using gradual warmup and the
# linear learning rate scaling rule as described in
# "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour" Goyal et al.
# https://arxiv.org/abs/1706.02677
# ---------------------------------------------------------------------------- #
__C.SOLVER = AttrDict()
# Type of the optimizer
# E.g., 'SGD', 'RMSPROP', 'ADAM' ...
__C.SOLVER.OPTIMIZER = 'SGD'
# Base learning rate for the specified schedule
__C.SOLVER.BASE_LR = 0.001
# Maximum number of max iterations
__C.SOLVER.MAX_ITER = 90000
# Momentum to use with SGD
__C.SOLVER.MOMENTUM = 0.9
# L2 regularization hyperparameter
__C.SOLVER.WEIGHT_DECAY = 0.0005
# L2 regularization hyperparameter for GroupNorm's parameters
__C.SOLVER.WEIGHT_DECAY_GN = 0.0
# Whether to double the learning rate for bias
__C.SOLVER.BIAS_DOUBLE_LR = True
# Whether to have weight decay on bias as well
__C.SOLVER.BIAS_WEIGHT_DECAY = False
# Multiple learning rate for fine-tuning
# Random initial layer learning rate is LR_MULTIPLE * BASE_LR
__C.SOLVER.LR_MULTIPLE = 1.0 # TODO
# Warm up to SOLVER.BASE_LR over this number of SGD iterations
__C.SOLVER.WARM_UP_ITERS = 500
# Start the warm up from SOLVER.BASE_LR * SOLVER.WARM_UP_FACTOR
__C.SOLVER.WARM_UP_FACTOR = 1.0 / 10.0
# WARM_UP_METHOD can be either 'CONSTANT' or 'LINEAR' (i.e., gradual)
__C.SOLVER.WARM_UP_METHOD = 'LINEAR'
# Schedule type (see functions in utils.lr_policy for options)
# E.g., 'POLY', 'STEP', 'COSINE', ...
__C.SOLVER.LR_POLICY = 'STEP'
# For 'POLY', the power in poly to drop LR
__C.SOLVER.LR_POW = 0.9
# For 'STEP', Non-uniform step iterations
__C.SOLVER.STEPS = [60000, 80000]
# For 'STEP', the current LR is multiplied by SOLVER.GAMMA at each step
__C.SOLVER.GAMMA = 0.1
# Suppress logging of changes to LR unless the relative change exceeds this
# threshold (prevents linear warm up from spamming the training log)
__C.SOLVER.LOG_LR_CHANGE_THRESHOLD = 1.1
# Snapshot (model checkpoint) period
__C.SOLVER.SNAPSHOT_ITERS = 10000
# -----------------------------------------------------------------------------
# DataLoader options
# -----------------------------------------------------------------------------
__C.DATALOADER = AttrDict()
# Type of training sampler, default: 'DistributedSampler'
# E.g., 'DistributedSampler', 'RepeatFactorTrainingSampler', ...
__C.DATALOADER.SAMPLER_TRAIN = "DistributedSampler"
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
__C.DATALOADER.ASPECT_RATIO_GROUPING = True
# if True, the dataloader will filter out images that have no associated
# annotations at train time.
__C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True # TODO
# ---------------------------------------------------------------------------- #
# RepeatFactorTrainingSampler options
# ---------------------------------------------------------------------------- #
__C.DATALOADER.RFTSAMPLER = AttrDict()
# parameters for RepeatFactorTrainingSampler
# rep_times = max(MIN_REPEAT_TIMES, min(MAX_REPEAT_TIMES, math.pow((REPEAT_THRESHOLD / cat_freq),POW)))
__C.DATALOADER.RFTSAMPLER.REPEAT_THRESHOLD = 0.001
__C.DATALOADER.RFTSAMPLER.POW = 0.5
__C.DATALOADER.RFTSAMPLER.MAX_REPEAT_TIMES = 10000.0
__C.DATALOADER.RFTSAMPLER.MIN_REPEAT_TIMES = 1.0
# ---------------------------------------------------------------------------- #
# Training options
# ---------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
# Initialize network with weights from this .pkl file
__C.TRAIN.WEIGHTS = ''
# Type of training data augmentation, default: 'none'
# E.g., 'none', 'random_crop', ...
__C.TRAIN.PREPROCESS_TYPE = 'none'
# Datasets to train on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, the model is trained on their union
__C.TRAIN.DATASETS = ()
# Scales to use during training
# Each scale is the pixel size of an image's shortest side
# If multiple scales are listed, then one is selected uniformly at random for
# each training image (i.e., scale jitter data augmentation)
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Number of Python threads to use for the data loader during training
__C.TRAIN.LOADER_THREADS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
__C.TRAIN.SIZE_DIVISIBILITY = 32
# Mini-batch size for training
# This is global, so if we have 8 GPUs and BATCH_SIZE = 16, each GPU will
# see 2 images per batch
__C.TRAIN.BATCH_SIZE = 16
# Freeze the backbone architecture during training if set to True
__C.TRAIN.FREEZE_CONV_BODY = False
# Training will resume from the latest snapshot (model checkpoint) found in the
# output directory
__C.TRAIN.AUTO_RESUME = True
# Image ColorJitter Augmentation
__C.TRAIN.BRIGHTNESS = 0.0
__C.TRAIN.CONTRAST = 0.0
__C.TRAIN.SATURATION = 0.0
__C.TRAIN.HUE = 0.0
# Left right mapping for flipping training
__C.TRAIN.LEFT_RIGHT = ()
# ---------------------------------------------------------------------------- #
# Random Crop options
# ---------------------------------------------------------------------------- #
__C.TRAIN.RANDOM_CROP = AttrDict()
# image will resize to min_size * num, num
# If only set one number, real_ratio =1, else real_ratio will random choose from it.
__C.TRAIN.RANDOM_CROP.SCALE_RATIOS = (0.8, 1.2)
# PAD_PIXEL for gap in small picture when random crop. eg.
# If len < 3, real pad_pixel will convert to PIXEL_MEANS, and make it to int by round.
__C.TRAIN.RANDOM_CROP.PAD_PIXEL = ()
# the scale of random crop, if img_size < scale, padding the gap use PAD_PIXEL.
# shape: [H, W], must be divided by SIZE_DIVISIBILITY, default: ([640, 640], )
__C.TRAIN.RANDOM_CROP.CROP_SCALES = ([640, 640], )
# IOU_TH for crop object.
__C.TRAIN.RANDOM_CROP.IOU_THS = (0.9, 0.7, 0.5, 0.3, 0.1)
# Type of instance box for random crop, default: 'horizontal'
# E.g., "horizontal", "oriented"
__C.TRAIN.RANDOM_CROP.BOX_TYPE = "horizontal"
# ---------------------------------------------------------------------------- #
# Inference ('test') options
# ---------------------------------------------------------------------------- #
__C.TEST = AttrDict()
# Initialize network with weights from this .pkl file
__C.TEST.WEIGHTS = ''
# Number of Python threads to use for the data loader during testing
__C.TEST.LOADER_THREADS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
__C.TEST.SIZE_DIVISIBILITY = 32
# Datasets to test on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, testing is performed on each one sequentially
__C.TEST.DATASETS = ()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALE = 600
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Number of images in each GPU for testing
__C.TEST.IMS_PER_GPU = 1
# If True, force resize the image to [H, W].
__C.TEST.FORCE_TEST_SCALE = [-1, -1]
# ---------------------------------------------------------------------------- #
# Soft NMS
# ---------------------------------------------------------------------------- #
__C.TEST.SOFT_NMS = AttrDict()
# Use soft NMS instead of standard NMS if set to True
__C.TEST.SOFT_NMS.ENABLED = False
# See soft NMS paper for definition of these options
__C.TEST.SOFT_NMS.METHOD = 'linear'
__C.TEST.SOFT_NMS.SIGMA = 0.5
# For the soft NMS overlap threshold, we simply use TEST.NMS
# ---------------------------------------------------------------------------- #
# Bounding box voting (from the Multi-Region CNN paper)
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_VOTE = AttrDict()
# Use box voting if set to True
__C.TEST.BBOX_VOTE.ENABLED = False
# We use TEST.NMS threshold for the NMS step. VOTE_TH overlap threshold
# is used to select voting boxes (IoU >= VOTE_TH) for each box that survives NMS
__C.TEST.BBOX_VOTE.VOTE_TH = 0.8
# The method used to combine scores when doing bounding box voting
# Valid options include ('ID', 'AVG', 'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM')
__C.TEST.BBOX_VOTE.SCORING_METHOD = 'ID'
# Hyperparameter used by the scoring method (it has different meanings for
# different methods)
__C.TEST.BBOX_VOTE.SCORING_METHOD_BETA = 1.0
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_AUG = AttrDict()
# Enable test-time augmentation for bounding box detection if True
__C.TEST.BBOX_AUG.ENABLED = False
# Horizontal flip at the original scale (id transform)
__C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
__C.TEST.BBOX_AUG.SCALES = ()
# Max pixel size of the longer side
__C.TEST.BBOX_AUG.MAX_SIZE = 4000
# ---------------------------------------------------------------------------- #
# Test-time augmentations for mask detection
# ---------------------------------------------------------------------------- #
__C.TEST.MASK_AUG = AttrDict()
# Enable test-time augmentation for instance mask detection if True
__C.TEST.MASK_AUG.ENABLED = False
# Heuristic used to combine mask predictions
# SOFT prefix indicates that the computation is performed on soft masks
# Valid options: ('SOFT_AVG', 'SOFT_MAX', 'LOGIT_AVG')
__C.TEST.MASK_AUG.HEUR = 'SOFT_AVG'
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
__C.BACKBONE = AttrDict()
# The backbone conv body to use
__C.BACKBONE.CONV_BODY = 'resnet'
# The eps of batch_norm layer
__C.BACKBONE.BN_EPS = 1e-5
# ---------------------------------------------------------------------------- #
# HRNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.HRNET = AttrDict()
# Network initial width
__C.BACKBONE.HRNET.WIDTH = 18
# Use a (2 * 2) kernels avg_pooling layer in downsampling block.
__C.BACKBONE.HRNET.AVG_DOWN = False
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.HRNET.USE_SE = False
# Use a global feature in each stage
__C.BACKBONE.HRNET.USE_GLOBAL = False
# Use group normalization
__C.BACKBONE.HRNET.USE_GN = False
# Use a aligned module in each block
__C.BACKBONE.HRNET.USE_ALIGN = False
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.HRNET.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.HRNET.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V1 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV1 = AttrDict()
# The number of layers in each block
__C.BACKBONE.MV1.LAYERS = (2, 2, 6, 2)
# The initial width of each block
__C.BACKBONE.MV1.NUM_CHANNELS = [32, 64, 128, 256, 512, 1024]
# Kernel size of depth-wise separable convolution layers
__C.BACKBONE.MV1.KERNEL = 3
# Network widen factor
__C.BACKBONE.MV1.WIDEN_FACTOR = 1.0
# C5 stage dilation
__C.BACKBONE.MV1.C5_DILATION = 1
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.MV1.USE_SE = False
# Use dropblock in C4 and C5
__C.BACKBONE.MV1.USE_DP = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.MV1.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V2 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV2 = AttrDict()
# Network widen factor
__C.BACKBONE.MV2.WIDEN_FACTOR = 1.0
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.MV2.USE_SE = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.MV2.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V3 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV3 = AttrDict()
# Network setting of MobileNet V3
__C.BACKBONE.MV3.SETTING = 'large'
# Network widen factor
__C.BACKBONE.MV3.WIDEN_FACTOR = 1.0
# Se module mid channel base, if True use innerplanes, False use inplanes
__C.BACKBONE.MV3.SE_REDUCE_MID = True
# Se module mid channel divisible. This param is to fit tf-official implementation
__C.BACKBONE.MV3.SE_DIVISIBLE = False
# Use conv bias in head. This param is to fit tf-official implementation
__C.BACKBONE.MV3.HEAD_USE_BIAS = False
# Force using residual. This param is to fit tf-official implementation
__C.BACKBONE.MV3.FORCE_RESIDUAL = False
# Sync block act to se module. This param is to fit tf-official implementation
__C.BACKBONE.MV3.SYNC_SE_ACT = True
# Use Conv2dSamePadding to replace Conv2d for fitting tf-original implementation
__C.BACKBONE.MV3.SAME_PAD = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.MV3.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# ResNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.RESNET = AttrDict()
# The number of layers in each block
# (2, 2, 2, 2) for resnet18 with basicblock
# (3, 4, 6, 3) for resnet34 with basicblock
# (3, 4, 6, 3) for resnet50
# (3, 4, 23, 3) for resnet101
# (3, 8, 36, 3) for resnet152
__C.BACKBONE.RESNET.LAYERS = (3, 4, 6, 3)
# Network initial width
__C.BACKBONE.RESNET.WIDTH = 64
# Use bottleneck block, False for basicblock
__C.BACKBONE.RESNET.BOTTLENECK = True
# Place the stride 2 conv on the 3x3 filter.
# True for resnet-b
__C.BACKBONE.RESNET.STRIDE_3X3 = False
# Use a three (3 * 3) kernels head; False for (7 * 7) kernels head.
# True for resnet-c
__C.BACKBONE.RESNET.USE_3x3x3HEAD = False
# Use a (2 * 2) kernels avg_pooling layer in downsampling block.
# True for resnet-d
__C.BACKBONE.RESNET.AVG_DOWN = False
# Use group normalization
__C.BACKBONE.RESNET.USE_GN = False
# Use attentive normalization
# when it is True means use an_bn (an with bn)
# when it is True and USE_GN is True means use an_gn (an with gn)
__C.BACKBONE.RESNET.USE_AN = False
# Use weight standardization
__C.BACKBONE.RESNET.USE_WS = False
# Use a aligned module in each block
__C.BACKBONE.RESNET.USE_ALIGN = False
# Type of context module in each block
# 'se' for se, 'gcb' for gcb
__C.BACKBONE.RESNET.STAGE_WITH_CONTEXT = ('none', 'none', 'none', 'none')
# Context module innerplanes ratio
__C.BACKBONE.RESNET.CTX_RATIO = 0.0625
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.RESNET.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Apply dilation in stage "c5"
__C.BACKBONE.RESNET.C5_DILATION = 1
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.RESNET.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# ResNeXt options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.RESNEXT = AttrDict()
# The number of layers in each block
# (3, 4, 6, 3) for resnext50
# (3, 4, 23, 3) for resnext101
# (3, 8, 36, 3) for resnext152
__C.BACKBONE.RESNEXT.LAYERS = (3, 4, 6, 3)
# Cardinality (groups) of convolution layers
__C.BACKBONE.RESNEXT.C = 32
# Network initial width of each (conv) group
__C.BACKBONE.RESNEXT.WIDTH = 4
# Use a three (3 * 3) kernels head; False for (7 * 7) kernels head.
# True for resnext-c
__C.BACKBONE.RESNEXT.USE_3x3x3HEAD = False
# Use a (2 * 2) kernels avg_pooling layer in downsampling block.
# True for resnext-d
__C.BACKBONE.RESNEXT.AVG_DOWN = False
# Use group normalization
__C.BACKBONE.RESNEXT.USE_GN = False
# Use weight standardization
__C.BACKBONE.RESNEXT.USE_WS = False
# Use a aligned module in each block
__C.BACKBONE.RESNEXT.USE_ALIGN = False
# Type of context module in each block
# 'se' for se, 'gcb' for gcb
__C.BACKBONE.RESNEXT.STAGE_WITH_CONTEXT = ('none', 'none', 'none', 'none')
# Context module innerplanes ratio
__C.BACKBONE.RESNEXT.CTX_RATIO = 0.0625
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.RESNEXT.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Apply dilation in stage "c5"
__C.BACKBONE.RESNEXT.C5_DILATION = 1
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.RESNEXT.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# VoVNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.VOV = AttrDict()
# The number of layers in each block
# (1, 1, 1, 1) for vovnet27_slim
# (1, 1, 2, 2) for vovnet39
# (1, 1, 4, 3) for vovnet57
__C.BACKBONE.VOV.LAYERS = (1, 1, 2, 2)
# Network initial width
__C.BACKBONE.VOV.WIDTH = 64
# Number conv layers for each block
__C.BACKBONE.VOV.NUM_CONV = 5
# Dimension of 3x3 conv for each block
# (64, 80, 96, 112) for vovnet27_slim
# (128, 160, 192, 224) for vovnet39/vovnet57
__C.BACKBONE.VOV.STAGE_DIMS = (128, 160, 192, 224)
# Dimension of 1x1 conv concat for each block
# (128, 256, 384, 512) for vovnet27_slim
# (256, 512, 768, 1024) for vovnet39/vovnet57
__C.BACKBONE.VOV.CONCAT_DIMS = (256, 512, 768, 1024)
# Use group normalization
__C.BACKBONE.VOV.USE_GN = False
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.VOV.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means not fixed. The first conv and bn are fixed
# by default.
__C.BACKBONE.VOV.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
__C.FPN = AttrDict()
# The Body of FPN to use
# (e.g., "fpn", "hrfpn")
__C.FPN.BODY = "fpn"
# Use C5 or P5 to generate P6
__C.FPN.USE_C5 = True
# Channel dimension of the FPN feature levels
__C.FPN.DIM = 256
# FPN may be used for just RPN, just object detection, or both
# E.g., "conv2"-like level
__C.FPN.LOWEST_BACKBONE_LVL = 2
# E.g., "conv5"-like level
__C.FPN.HIGHEST_BACKBONE_LVL = 5
# Use FPN for RoI transform for object detection if True
__C.FPN.MULTILEVEL_ROIS = True
# Hyperparameters for the RoI-to-FPN level mapping heuristic
__C.FPN.ROI_CANONICAL_SCALE = 224 # s0 # TODO
__C.FPN.ROI_CANONICAL_LEVEL = 4 # k0: where s0 maps to # TODO
# Coarsest level of the FPN pyramid
__C.FPN.ROI_MAX_LEVEL = 5
# Finest level of the FPN pyramid
__C.FPN.ROI_MIN_LEVEL = 2
# Use FPN for RPN if True
__C.FPN.MULTILEVEL_RPN = True
# Coarsest level of the FPN pyramid
__C.FPN.RPN_MAX_LEVEL = 6
# Finest level of the FPN pyramid
__C.FPN.RPN_MIN_LEVEL = 2
# Use extra FPN levels, as done in the RetinaNet paper
__C.FPN.EXTRA_CONV_LEVELS = False
# Use FPN Lite (dwconv) to replace standard FPN
__C.FPN.USE_LITE = False
# Use BatchNorm in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_BN = False
# Use GroupNorm in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_GN = False
# Use Weight Standardization in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_WS = False
# ---------------------------------------------------------------------------- #
# FPN hrfpn body options
# ---------------------------------------------------------------------------- #
__C.FPN.HRFPN = AttrDict()
# Channel dimension of the HRFPN feature levels
__C.FPN.HRFPN.DIM = 256
# Pooling type in HRFPN for down-sampling
__C.FPN.HRFPN.POOLING_TYPE = 'AVG'
# Number of extra pooling layer in HRFPN for down-sampling
__C.FPN.HRFPN.NUM_EXTRA_POOLING = 1
# Use HRFPN Lite (dwconv) to replace standard HRFPN
__C.FPN.HRFPN.USE_LITE = False
# Use BatchNorm in the HRFPN-specific layers
__C.FPN.HRFPN.USE_BN = False
# Use GroupNorm in the HRFPN-specific layers
__C.FPN.HRFPN.USE_GN = False
# ---------------------------------------------------------------------------- #
# Semantic Segmentation options ("SEMSEG" means Semantic Segmentation)
# ---------------------------------------------------------------------------- #
__C.SEMSEG = AttrDict()
# The head of Semantic R-CNN to use
__C.SEMSEG.ROI_SEMSEG_HEAD = 'fused_head'
# Output module of Semantic R-CNN head
__C.SEMSEG.ROI_SEMSEG_OUTPUT = 'semseg_output'
# Multi-task loss weight for Semantic
__C.SEMSEG.SEMSEG_LOSS_WEIGHT = 0.2
# The ignore label
__C.SEMSEG.SEMSEG_IGNORE_LABEL = 255
# The number of Semantic
__C.SEMSEG.SEMSEG_NUM_CLASSES = 183
# ---------------------------------------------------------------------------- #
# Semantic R-CNN semantic head options
# ---------------------------------------------------------------------------- #
__C.SEMSEG.SEMSEG_HEAD = AttrDict()
# (p2, p3, p4, p5), 2 means resize all stages like p3
__C.SEMSEG.SEMSEG_HEAD.FUSION_LEVEL = 2
# Number of Conv layers in the semantic head
__C.SEMSEG.SEMSEG_HEAD.NUM_CONVS = 4
# Number of input feature stage in the semantic head
__C.SEMSEG.SEMSEG_HEAD.NUM_IN_STAGE = 5
# Hidden Conv layer dimension
__C.SEMSEG.SEMSEG_HEAD.CONV_DIM = 256
# Use BatchNorm in the semantic head
__C.SEMSEG.SEMSEG_HEAD.USE_BN = False
# Use GroupNorm in the semantic head
__C.SEMSEG.SEMSEG_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
__C.RPN = AttrDict()
# Indicates the model's computation terminates with the production of RPN
# proposals (i.e., it outputs proposals ONLY, no actual object detections)
__C.RPN.RPN_ONLY = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
__C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
__C.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
__C.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
__C.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
__C.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
__C.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
__C.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
__C.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
__C.RPN.PRE_NMS_TOP_N_TRAIN = 12000
__C.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
__C.RPN.POST_NMS_TOP_N_TRAIN = 2000
__C.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
__C.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
__C.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
__C.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
__C.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Apply the post NMS per batch (default) or per image during training
# (default is True to be consistent with Detectron, see Issue #672)
__C.RPN.FPN_POST_NMS_PER_BATCH = True
# Custom rpn head, empty to use default conv or separable conv
__C.RPN.RPN_HEAD = "SingleConvRPNHead" # TODO
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
__C.RPN.SMOOTH_L1_BETA = 1.0 / 9
# ---------------------------------------------------------------------------- #
# Fast R-CNN options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN = AttrDict()
# The head of Fast R-CNN to use
# (e.g., "roi_2mlp_head", "roi_convx_head")
__C.FAST_RCNN.ROI_BOX_HEAD = "roi_2mlp_head"
# Output module of Fast R-CNN head
__C.FAST_RCNN.ROI_BOX_OUTPUT = "box_output"
# RoI transformation function (e.g., ROIPool or ROIAlign or ROIAlignV2)
__C.FAST_RCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Number of grid sampling points in ROIAlign (usually use 2)
# Only applies to ROIAlign
__C.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transform output resolution
# Note: some models may have constraints on what they can use, e.g. they use
# pretrained FC layers like in VGG16, and will ignore this option
__C.FAST_RCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.FAST_RCNN.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.FAST_RCNN.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
__C.FAST_RCNN.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 2 * 8 = 8192
__C.FAST_RCNN.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
__C.FAST_RCNN.POSITIVE_FRACTION = 0.25
# Use a class agnostic bounding box regressor instead of the default per-class
# regressor
__C.FAST_RCNN.CLS_AGNOSTIC_BBOX_REG = False
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
__C.FAST_RCNN.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.FAST_RCNN.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
__C.FAST_RCNN.DETECTIONS_PER_IMG = 100
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
__C.FAST_RCNN.SMOOTH_L1_BETA = 1
# Classifier branch switch
__C.FAST_RCNN.CLS_ON = True
# Box regression branch switch
__C.FAST_RCNN.REG_ON = True
# ---------------------------------------------------------------------------- #
# Fast R-CNN mlp head options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN.MLP_HEAD = AttrDict()
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.MLP_HEAD.MLP_DIM = 1024
# Use BatchNorm in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_BN = False
# Use GroupNorm in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_GN = False
# Use Weight Standardization in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Fast R-CNN convfc head options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN.CONVFC_HEAD = AttrDict()
# Dilation
__C.FAST_RCNN.CONVFC_HEAD.DILATION = 1
# Hidden Conv layer dimension when using Convs for the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.NUM_STACKED_CONVS = 4
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.MLP_DIM = 1024
# Use Fast R-CNN Lite (dwconv) to replace standard Fast R-CNN
__C.FAST_RCNN.CONVFC_HEAD.USE_LITE = False
# Use BatchNorm in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_BN = False
# Use GroupNorm in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_GN = False
# Use Weight Standardization in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Cascade R-CNN options
# ---------------------------------------------------------------------------- #
__C.CASCADE_RCNN = AttrDict()
# The head of Cascade R-CNN to use
# (e.g., "roi_2mlp_head", "roi_convx_head")
__C.CASCADE_RCNN.ROI_BOX_HEAD = "roi_2mlp_head"
# Output module of Cascade R-CNN head
__C.CASCADE_RCNN.ROI_BOX_OUTPUT = "box_output"
# Number stages of Cascade R-CNN to use
__C.CASCADE_RCNN.NUM_STAGE = 3
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.CASCADE_RCNN.FG_IOU_THRESHOLD = [0.5, 0.6, 0.7]
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.CASCADE_RCNN.BG_IOU_THRESHOLD = [0.5, 0.6, 0.7]
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
__C.CASCADE_RCNN.BBOX_REG_WEIGHTS = ((10., 10., 5., 5.), (20., 20., 10., 10.),
(30., 30., 15., 15.))
# Weights for cascade stages
__C.CASCADE_RCNN.STAGE_WEIGHTS = (1.0, 0.5, 0.25)
# Stage id for testing
__C.CASCADE_RCNN.TEST_STAGE = 3
# Use ensemble results for testing
__C.CASCADE_RCNN.TEST_ENSEMBLE = True
# ---------------------------------------------------------------------------- #
# Mask R-CNN options ("MRCNN" means Mask R-CNN)
# ---------------------------------------------------------------------------- #
__C.MRCNN = AttrDict()
# The head of Mask R-CNN to use
# (e.g., "roi_convx_head")
__C.MRCNN.ROI_MASK_HEAD = "roi_convx_head"
# Output module of Mask R-CNN head
__C.MRCNN.ROI_MASK_OUTPUT = "mask_deconv_output"
# RoI transformation function and associated options
__C.MRCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Mask roi size per image (roi_batch_size = roi_size_per_img * img_per_gpu when using across-sample strategy)
__C.MRCNN.ROI_SIZE_PER_IMG = -1
# Sample the positive box across batch per GPU
__C.MRCNN.ACROSS_SAMPLE = False
# RoI strides for Mask R-CNN head to use
__C.MRCNN.ROI_STRIDES = []
# Number of grid sampling points in ROIAlign (usually use 2)
# Only applies to ROIAlign
__C.MRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transformation function (e.g., ROIPool or ROIAlign)
__C.MRCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Resolution of mask predictions
__C.MRCNN.RESOLUTION = (28, 28)
# Whether or not resize and translate masks to the input image.
__C.MRCNN.POSTPROCESS_MASKS = False # TODO
__C.MRCNN.POSTPROCESS_MASKS_THRESHOLD = 0.5 # TODO
# Multi-task loss weight to use for Mask R-CNN head
__C.MRCNN.LOSS_WEIGHT = 1.0
# Use Mask IoU for mask head
__C.MRCNN.MASKIOU_ON = False
# ---------------------------------------------------------------------------- #
# Mask R-CNN convx head options
# ---------------------------------------------------------------------------- #
__C.MRCNN.CONVX_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.MRCNN.CONVX_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.MRCNN.CONVX_HEAD.NUM_STACKED_CONVS = 4
# Use dilated convolution in the mask head
__C.MRCNN.CONVX_HEAD.DILATION = 1
# Use Mask R-CNN Lite (dwconv) to replace standard Mask R-CNN
__C.MRCNN.CONVX_HEAD.USE_LITE = False
# Use BatchNorm in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_BN = False
# Use GroupNorm in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_GN = False
# Use Weight Standardization in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Mask IoU options
# ---------------------------------------------------------------------------- #
__C.MRCNN.MASKIOU = AttrDict()
# The head of Mask IoU to use
# (e.g., "convx_head")
__C.MRCNN.MASKIOU.MASKIOU_HEAD = "convx_head"
# Output module of Mask IoU head
__C.MRCNN.MASKIOU.MASKIOU_OUTPUT = "linear_output"
# Hidden Conv layer dimension of Mask IoU head
__C.MRCNN.MASKIOU.CONV_DIM = 256
# Hidden MLP layer dimension of Mask IoU head
__C.MRCNN.MASKIOU.MLP_DIM = 1024
# Loss weight for Mask IoU head
__C.MRCNN.MASKIOU.LOSS_WEIGHT = 1.0
# ---------------------------------------------------------------------------- #
# hier R-CNN options ("HRCNN" = Mask R-CNN with Hier support)
# ---------------------------------------------------------------------------- #
__C.HRCNN = AttrDict()
# The head of hier R-CNN to use
# (e.g., "roi_convx_head")
__C.HRCNN.ROI_HIER_HEAD = "roi_convx_head"
# Output module of hier R-CNN head
__C.HRCNN.ROI_HIER_OUTPUT = "hier_output"
# RoI transformation function and associated options
__C.HRCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Sample the positive box across batch per GPU ### TODO
__C.HRCNN.ACROSS_SAMPLE = False
# Hier roi size per image (roi_batch_size = roi_size_per_img * img_per_gpu when using across-sample strategy)
__C.HRCNN.ROI_SIZE_PER_IMG = -1
# RoI strides for Hier R-CNN head to use
__C.HRCNN.ROI_STRIDES = []
# Number of grid sampling points in RoIAlign (usually use 2)
# Only applies to RoIAlign
__C.HRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transformation function (e.g., RoIPool or RoIAlign)
__C.HRCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.HRCNN.FG_IOU_THRESHOLD = 0.7
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.HRCNN.BG_IOU_THRESHOLD = 0.7
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
__C.HRCNN.INFERENCE_TH = 0.05
# NMS threshold used in Hier
__C.HRCNN.NMS_TH = 0.6
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
__C.HRCNN.PRE_NMS_TOP_N = 1000
# Number of detections per image
__C.HRCNN.DETECTIONS_PER_IMG = 100
# Number of hier in the dataset
__C.HRCNN.NUM_CLASSES = -1
# Focal loss parameter: alpha
__C.HRCNN.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
__C.HRCNN.LOSS_GAMMA = 2.0
# Multi-task loss weight to use for hier head
__C.HRCNN.LOSS_WEIGHT = 1.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
__C.HRCNN.PRIOR_PROB = 0.01
# Loc loss type, it can be 'iou', 'liou' and 'giou'
__C.HRCNN.LOC_LOSS_TYPE = 'giou'
# Normalizing the regression targets with FPN strides
__C.HRCNN.NORM_REG_TARGETS = True
# Positioning centerness on the regress branch.
__C.HRCNN.CENTERNESS_ON_REG = True
# Use center sample in the hier head
__C.HRCNN.CENTER_SAMPLE = True
# Center sample radius in the hier head
__C.HRCNN.POS_RADIUS = 1.5
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
__C.HRCNN.OUTPUT_NUM_CONVS = 2
# Hidden Conv layer dimension
__C.HRCNN.OUTPUT_CONV_DIM = 256
# Use hier output Lite (dwconv) to replace standard hier output
__C.HRCNN.OUTPUT_USE_LITE = False
# Use BatchNorm in the hier output
__C.HRCNN.OUTPUT_USE_BN = False
# Use GroupNorm in the hier output
__C.HRCNN.OUTPUT_USE_GN = True
# Use dcn in the last layer of towers
__C.HRCNN.OUTPUT_USE_DCN = False
# Eval hier
__C.HRCNN.EVAL_HIER = True
# Hier cls score threshold; boxes with score > HIER_TH are considered for hier inference
__C.HRCNN.HIER_TH = 0.2
# Limit hands and feet
__C.HRCNN.LIMIT_TYPE = 'hand_and_foot'
# ---------------------------------------------------------------------------- #
# hier R-CNN convx head options
# ---------------------------------------------------------------------------- #
__C.HRCNN.CONVX_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.HRCNN.CONVX_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.HRCNN.CONVX_HEAD.NUM_STACKED_CONVS = 4
# Use dilated convolution in the mask head
__C.HRCNN.CONVX_HEAD.DILATION = 1
# Use hier R-CNN Lite (dwconv) to replace standard hier R-CNN
__C.HRCNN.CONVX_HEAD.USE_LITE = False
# Use BatchNorm in the hier R-CNN convx head
__C.HRCNN.CONVX_HEAD.USE_BN = False
# Use GroupNorm in the hier R-CNN convx head
__C.HRCNN.CONVX_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# hier R-CNN gce head options
# ---------------------------------------------------------------------------- #
__C.HRCNN.GCE_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.HRCNN.GCE_HEAD.CONV_DIM = 512
# Dimension for ASPPV3
__C.HRCNN.GCE_HEAD.ASPPV3_DIM = 256
# Dilation for ASPPV3
__C.HRCNN.GCE_HEAD.ASPPV3_DILATION = (6, 12, 18)
# Number of stacked Conv layers in GCE head before ASPPV3
__C.HRCNN.GCE_HEAD.NUM_CONVS_BEFORE_ASPPV3 = 0
# Number of stacked Conv layers in GCE head after ASPPV3
__C.HRCNN.GCE_HEAD.NUM_CONVS_AFTER_ASPPV3 = 0
# Use NonLocal in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_NL = False
# Reduction ration of nonlocal
__C.HRCNN.GCE_HEAD.NL_RATIO = 1.0
# Use BatchNorm in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_BN = False
# Use GroupNorm in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# Visualization options
# ---------------------------------------------------------------------------- #
__C.VIS = AttrDict()
# Dump detection visualizations
__C.VIS.ENABLED = False
# Score threshold for visualization
__C.VIS.VIS_TH = 0.9
# ---------------------------------------------------------------------------- #
# Show box options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_BOX = AttrDict()
# Visualizing detection bboxes
__C.VIS.SHOW_BOX.ENABLED = True
# Visualization color scheme
# 'green', 'category' or 'instance'
__C.VIS.SHOW_BOX.COLOR_SCHEME = 'green'
# Color map, 'COCO81', 'VOC21', 'ADE151', 'LIP20', 'MHP59'
__C.VIS.SHOW_BOX.COLORMAP = 'COCO81'
# Border thick
__C.VIS.SHOW_BOX.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Show class options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_CLASS = AttrDict()
# Visualizing detection classes
__C.VIS.SHOW_CLASS.ENABLED = True
# Default: gray
__C.VIS.SHOW_CLASS.COLOR = (218, 227, 218)
# Font scale of class string
__C.VIS.SHOW_CLASS.FONT_SCALE = 0.45
# ---------------------------------------------------------------------------- #
# Show segmentation options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_SEGMS = AttrDict()
# Visualizing detection classes
__C.VIS.SHOW_SEGMS.ENABLED = True
# Whether show mask
__C.VIS.SHOW_SEGMS.SHOW_MASK = True
# False = (255, 255, 255) = white
__C.VIS.SHOW_SEGMS.MASK_COLOR_FOLLOW_BOX = True
# Mask alpha
__C.VIS.SHOW_SEGMS.MASK_ALPHA = 0.4
# Whether show border
__C.VIS.SHOW_SEGMS.SHOW_BORDER = True
# Border color, (255, 255, 255) for white, (0, 0, 0) for black
__C.VIS.SHOW_SEGMS.BORDER_COLOR = (255, 255, 255)
# Border thick
__C.VIS.SHOW_SEGMS.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Show hier options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_HIER = AttrDict()
# Visualizing detection classes
__C.VIS.SHOW_HIER.ENABLED = True
# Border thick
__C.VIS.SHOW_HIER.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
# ---------------------------------------------------------------------------- #
_DEPCRECATED_KEYS = set()
# ---------------------------------------------------------------------------- #
# Renamed options
# If you rename a config option, record the mapping from the old name to the new
# name in the dictionary below. Optionally, if the type also changed, you can
# make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
# ---------------------------------------------------------------------------- #
_RENAMED_KEYS = {
'EXAMPLE.RENAMED.KEY': 'EXAMPLE.KEY', # Dummy example to follow
'PIXEL_MEAN': 'PIXEL_MEANS',
'PIXEL_STD': 'PIXEL_STDS',
}
def assert_and_infer_cfg(make_immutable=True):
    """Finalize the global config.

    Call this in your script after you have finished setting all cfg values
    that are necessary (e.g., merging a config from a file, merging command
    line config options, etc.). By default the global cfg is then marked
    immutable to prevent changing the global cfg settings during script
    execution (which can lead to hard to debug errors or code that's harder
    to understand than is necessary).
    """
    if not make_immutable:
        return
    cfg.immutable(True)
def merge_cfg_from_file(cfg_filename):
    """Load a yaml config file and merge it into the global config.

    Args:
        cfg_filename: path to a yaml file whose keys mirror the global cfg.
    """
    with open(cfg_filename, 'r') as f:
        # safe_load parses plain yaml only; yaml.load without an explicit
        # Loader can construct arbitrary Python objects and is deprecated
        # (and a warning) since PyYAML 5.1.
        yaml_cfg = AttrDict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
def merge_cfg_from_list(cfg_list):
    """Merge config keys, values in a list (e.g., from command line) into the
    global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
    """
    assert len(cfg_list) % 2 == 0
    keys, raw_values = cfg_list[0::2], cfg_list[1::2]
    for full_key, raw_value in zip(keys, raw_values):
        # Silently drop deprecated keys; renamed keys are a hard error with
        # a message telling the user the new name.
        if _key_is_deprecated(full_key):
            continue
        if _key_is_renamed(full_key):
            _raise_key_rename_error(full_key)
        parts = full_key.split('.')
        node = __C
        for part in parts[:-1]:
            assert part in node, 'Non-existent key: {}'.format(full_key)
            node = node[part]
        leaf = parts[-1]
        assert leaf in node, 'Non-existent key: {}'.format(full_key)
        decoded = _decode_cfg_value(raw_value)
        node[leaf] = _check_and_coerce_cfg_value_type(
            decoded, node[leaf], leaf, full_key
        )
def _merge_a_into_b(a, b, stack=None):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Args:
        a: AttrDict of override values.
        b: AttrDict that is merged into (mutated in place).
        stack: list of ancestor key names used to build the full dotted key
            for error messages during recursion; None at the top level.

    Raises:
        KeyError: if a contains a key that does not already exist in b.
    """
    assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
    assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
    for k, v_ in a.items():
        full_key = '.'.join(stack) + '.' + k if stack is not None else k
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('Non-existent config key: {}'.format(full_key))
        # Deep copy so that merged values never alias objects held by `a`.
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
        # Recursively merge dicts. (The original `try/except BaseException:
        # raise` wrapper around the recursion was a no-op and was removed.)
        if isinstance(v, AttrDict):
            _merge_a_into_b(v, b[k], stack=[k] if stack is None else stack + [k])
        else:
            b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, str):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
def _key_is_deprecated(full_key):
    """Return True if `full_key` is listed as a removed config option."""
    return full_key in _DEPCRECATED_KEYS
def _key_is_renamed(full_key):
    """Return True if `full_key` is an old option name recorded in _RENAMED_KEYS."""
    return full_key in _RENAMED_KEYS
def _raise_key_rename_error(full_key):
    """Raise a KeyError telling the user the new name of a renamed config key,
    plus optional migration instructions when the rename entry is a tuple.
    """
    renamed = _RENAMED_KEYS[full_key]
    msg = ''
    if isinstance(renamed, tuple):
        # Tuple form: (new_key, "how to edit your config").
        msg = ' Note: ' + renamed[1]
        renamed = renamed[0]
    raise KeyError(
        'Key {} was renamed to {}; please update your config.{}'.
        format(full_key, renamed, msg)
    )
|
python
|
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
def rules_clojure_dependencies():
    """Fetch the Clojure runtime jars from Maven Central.

    Pins (version + sha256): clojure 1.10.1, spec.alpha 0.2.176,
    core.specs.alpha 0.2.44 — spec.alpha and core.specs.alpha are the two
    runtime dependencies of the Clojure jar itself.
    """
    jvm_maven_import_external(
        name = "org_clojure",
        artifact = "org.clojure:clojure:1.10.1",
        artifact_sha256 = "d4f6f991fd9ed2a59e7ea4779010b3b069a2b905f3463136c42201106b4ad21a",
        server_urls = ["https://repo1.maven.org/maven2/"],
    )
    jvm_maven_import_external(
        name = "org_clojure_spec_alpha",
        artifact = "org.clojure:spec.alpha:0.2.176",
        artifact_sha256 = "fc4e96ecff34ddd2ab7fd050e74ae1379342ee09daa6028da52024c5de836cc4",
        server_urls = ["https://repo1.maven.org/maven2/"],
    )
    jvm_maven_import_external(
        name = "org_clojure_core_specs_alpha",
        artifact = "org.clojure:core.specs.alpha:0.2.44",
        artifact_sha256 = "3b1ec4d6f0e8e41bf76842709083beb3b56adf3c82f9a4f174c3da74774b381c",
        server_urls = ["https://repo1.maven.org/maven2/"],
    )
def rules_clojure_toolchains():
    """Register the rules_clojure toolchain with Bazel."""
    native.register_toolchains("@rules_clojure//:clojure_toolchain")
|
python
|
from django.apps import AppConfig
class ApacheKafkaConfig(AppConfig):
    """Django application configuration for the `apache_kafka` app."""
    name = 'apache_kafka'
|
python
|
import click
from neobox.cmd.list import list_
from neobox.cmd.clear_cache import clear_cache
from neobox.cmd.login import login
from neobox.cmd.logout import logout
from neobox.cmd.search import search
from neobox.cmd.play import play
from neobox.cmd.pause import pause
from neobox.cmd.stop import stop
@click.group()
def neobox():
    """ neobox ๆฏไธไธช็ฝๆไบ้ณไน็ๅฝไปค่กๅฎขๆท็ซฏ
    """
    # NOTE(review): the docstring above is click's user-facing --help text
    # (it reads, roughly, "neobox is a NetEase Cloud Music command line
    # client", though the stored bytes appear mojibake'd). It is left
    # byte-identical because changing it changes the CLI's output.
    pass
# Register every subcommand on the top-level `neobox` click group.
neobox.add_command(login)
neobox.add_command(logout)
neobox.add_command(list_)
neobox.add_command(search)
neobox.add_command(play)
neobox.add_command(pause)
neobox.add_command(stop)
neobox.add_command(clear_cache)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author : yasin
# @time : 18-12-28 ไธๅ9:28
# @File : config.py
import logging.config
# Configure logging from the adjacent logger.conf (path is relative to the CWD).
logging.config.fileConfig("logger.conf")
logger = logging.getLogger("novel-update-monitor-account")
# HTTP server bind address and port.
bindIp = '0.0.0.0'
bindPort = 12126
# NOTE(review): hard-coded credentials committed to source; should live in
# environment variables or an untracked config file.
token = 'VqmB965wOEBrPLNoMkHCfIOpxF0WWFM6'
# Endpoint returning a WeChat access_token. ("Ulr" typo kept: other modules
# may import this name.)
getTokenUlr = 'https://gitlab.net.cn/wechat/token?type=access_token&secret=c4ca4238a0b923820dcc509a6f75849b'
# WeChat template-message send endpoint; the access_token is appended at call time.
baseNotifyUrl = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token='
# "Silent mode": presumably suppress notifications between the start and end
# hours below ("slient" typo kept for compatibility) — TODO confirm usage.
slientMode = True
slientModeStartTime = 23
slientModeEndTime = 7
# Pending notifications awaiting delivery.
notificationQueue = []
# WeChat template-message payload skeleton; the empty "value" fields are
# filled in at send time. Some literals below are mojibake from a historic
# encoding mix-up (including embedded line breaks); they are runtime data
# and are preserved byte-for-byte.
notice = {
    "touser": "oQHU46Djs5O3yhsTmYGvDz_Hi0vo",
    "template_id": "oKa0UsZ6xvSlnFChlGGdMMH1O_yq2l91G-sIQPRg2BI",
    "url": "",
    "topcolor": "#FF0000",
    "data": {
        "first": {
            "value": "ๆจ่ฎข้
็ๅฐ่ฏดๆดๆฐๅฆ๏ผ",
            "color": "#173177"
        },
        "novelName": {
            "value": "",
            "color": "#173177"
        },
        "sectionName": {
            "value": "",
            "color": "#173177"
        },
        "updateTime": {
            "value": "",
            "color": "#173177"
        },
        "remark": {
            "value": "็นๅป่ฏฆๆ
็ซๅป้
่ฏปๆๆฐ็ซ ่โโโ",
            "color": "#173177"
        }
    }
}
|
python
|
"""User profile model"""
# Django
from django.db import models
# Utilities
from mydea.utils.models import MyDeaModel
class Profile(MyDeaModel):
    """Profile model.

    A profile holds a user's data; it is linked one-to-one with the auth user.
    """
    # Owning user; deleting the user cascades to the profile.
    user = models.OneToOneField('users.User', on_delete=models.CASCADE)

    def __str__(self):
        """Return user's str representation."""
        return str(self.user)
|
python
|
#----------------------------------------------------------------------
# Deep learning for classification for contrast CT;
# Transfer learning using Google Inception V3;
#-------------------------------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import glob
import tensorflow
from tensorflow import keras
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array, load_img, ImageDataGenerator
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications import ResNet152
from tensorflow.keras.applications import ResNet101
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.applications import ResNet101V2
from tensorflow.keras.applications import ResNet152V2
# ----------------------------------------------------------------------------------
# transfer learning CNN model
# ----------------------------------------------------------------------------------
def TLNet(resnet, input_shape, activation):
    """Build a transfer-learning classifier on a frozen ResNetV2 backbone.

    Args:
        resnet: backbone name, one of 'ResNet50V2', 'ResNet101V2',
            'ResNet152V2'.
        input_shape: input image shape, e.g. (H, W, 3).
        activation: activation of the final 1-unit head (e.g. 'sigmoid').

    Returns:
        A keras Model: frozen ImageNet backbone -> GlobalAveragePooling2D
        -> Dense(1000, relu) -> Dense(1, activation).

    Raises:
        ValueError: if `resnet` is not one of the supported backbone names.
    """
    backbones = {
        'ResNet50V2': ResNet50V2,
        'ResNet101V2': ResNet101V2,
        'ResNet152V2': ResNet152V2,
    }
    if resnet not in backbones:
        # Previously an unknown name fell through the if/elif chain and
        # crashed later with a NameError on `base_model`; fail fast instead.
        raise ValueError(
            "Unsupported resnet '{}'; expected one of {}".format(
                resnet, sorted(backbones)))
    base_model = backbones[resnet](
        weights='imagenet',
        include_top=False,
        input_shape=input_shape,
        pooling=None
    )
    # Freeze the backbone: only the new classification head is trained.
    base_model.trainable = False
    ### create top model
    inputs = Input(shape=input_shape)
    # training=False keeps BatchNorm layers in inference mode even when the
    # surrounding model is trained.
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1000, activation='relu')(x)
    outputs = Dense(1, activation=activation)(x)
    model = Model(inputs, outputs)
    return model
|
python
|
import os
from time import time
import krgram.tl.protocol
import krgram.tl.protocol.auth
from krgram.client.crypto import TLEncryptor
from krgram.client.errors import SecurityError
from krgram.mtproto.connection import MTProtoAbridgedConnection
from krgram.mtproto.dcs import DataCenters
from krgram.mtproto.errors import UnexpectedResponseError
from krgram.mtproto.message import PlainMsg
from krgram.mtproto.msg_extra import MsgId
from krgram.mtproto.servers_pk import TelegramServersPublicKeys
from krgram.tl.base import *
from krgram.tl.core_types.native_extends import TL_int128, TL_int256
from krgram.tl.stream import TLBytesStream
from krgram.utils.cryptohash import Hash, Crypto
from krgram.utils.math import factorize
class AuthKey:
    """An MTProto authorization key together with its 8-byte key id.

    The key id is the low 8 bytes of SHA1(key data); a null key (data=None)
    gets an all-zero id.
    """
    def __init__(self, data):
        # Raw key material, or None for the null ("no auth key yet") key.
        self.data = data
        self.key_id = None
        self._calculate_id()

    def get_id(self):
        """Return the 8-byte key id."""
        return self.key_id

    def _calculate_id(self):
        # key id = last 8 bytes of SHA1(key data); all zeros for the null key.
        if self.data is not None:
            self.key_id = Hash.sha1(self.data)[-8:]
        else:
            self.key_id = Bytes('\x00'*8)
class Authorizer:
    """Performs the MTProto auth-key (Diffie-Hellman) handshake with a
    Telegram data center.

    After a successful run(), the negotiated key, the initial server salt and
    the measured server/client clock difference are available via getters.
    """
    # Sentinel "no key" value.
    _NULL_AUTH_KEY = AuthKey(None)

    def __init__(self, dc, test_mode=False, connection=None):
        self.dc = dc
        self.test_mode = test_mode
        # Optional pre-opened connection; when None, run() opens (and later
        # closes) its own connection.
        self._connection = connection
        self._server_salt = None
        self._server_time_diff = -1
        self._auth_key = None

    def get_auth_key(self):
        """Return the negotiated AuthKey (None before run())."""
        return self._auth_key

    def get_server_salt(self):
        """Return the server salt derived from the handshake nonces."""
        return self._server_salt

    def get_server_time_diff(self):
        """Return server_time - local_time measured during the handshake."""
        return self._server_time_diff

    def run(self):
        """Execute the full handshake: req_pq -> req_DH_params ->
        set_client_DH_params, storing the key, salt and time diff on success.

        Raises:
            SecurityError: if a nonce check fails or no advertised server
                public-key fingerprint is known.
            UnexpectedResponseError: if the server answers with an unexpected
                TL object type.
        """
        conn = self._connection
        autoclose = False
        if conn is None:
            conn = self._init_connection(self.dc, self.test_mode)
            autoclose = True
        # Step 1: req_pq with a fresh 128-bit client nonce.
        raw_nonce = os.urandom(16)
        obj = krgram.tl.protocol.auth.req_pq(nonce=raw_nonce)
        nonce = obj.nonce
        resp_tl_obj = self._send_plain_req(conn, obj)
        if resp_tl_obj.ID != krgram.tl.protocol.auth.resPQ.ID:
            raise UnexpectedResponseError("Expected a resPQ object")
        if resp_tl_obj.nonce != obj.nonce:
            raise SecurityError("nonce != (server)nonce")
        server_nonce, pub_srvs_fingerprints = resp_tl_obj.server_nonce, resp_tl_obj.server_public_key_fingerprints
        # Pick the first advertised fingerprint we hold a public key for.
        curr_key = None
        for f in pub_srvs_fingerprints:
            cpk = TelegramServersPublicKeys().get_key_by_fingerprint(f)
            if cpk is not None:
                curr_key = cpk
                break
        if curr_key is None:
            # Fix: previously a missing match fell through and crashed later
            # in rsa_encrypt with curr_key=None.
            raise SecurityError("No known server public key matches the advertised fingerprints")
        # Step 2: factor pq, build p_q_inner_data, RSA-encrypt and send it.
        pq = resp_tl_obj.pq
        p, q = factorize(pq)
        p, q = (p, q) if p < q else (q, p)
        new_nonce = os.urandom(32)
        obj = krgram.tl.protocol.auth.p_q_inner_data(p=p,
                                                     q=q,
                                                     pq=pq,
                                                     server_nonce=server_nonce,
                                                     nonce=nonce,
                                                     new_nonce=new_nonce)
        pq_inner_data_serialized = obj.serialize()
        data_with_hash = Hash.sha1(pq_inner_data_serialized) + pq_inner_data_serialized
        # Pad to the 255-byte RSA plaintext block size.
        if len(data_with_hash) < 255:
            data_with_hash += Bytes('\0' * (255 - len(data_with_hash)))
        enc_data = Crypto.rsa_encrypt(data_with_hash, curr_key)
        obj = krgram.tl.protocol.auth.req_DH_params(nonce=nonce,
                                                    p=p,
                                                    q=q,
                                                    server_nonce=server_nonce,
                                                    public_key_fingerprint=curr_key.fingerprint,
                                                    encrypted_data=enc_data)
        resp_tl_obj = self._send_plain_req(conn, obj)
        if resp_tl_obj.nonce != nonce or resp_tl_obj.server_nonce != server_nonce:
            raise SecurityError()
        # Derive the server salt (first 8 bytes of new_nonce XOR server_nonce)
        # and decrypt the server DH answer.
        server_nonce_raw = TL_int128(server_nonce).serialize()
        new_nonce_raw = TL_int256(new_nonce).serialize()
        server_salt = new_nonce_raw[:8] ^ server_nonce_raw[:8]
        server_dh = TLEncryptor.decrypt_server_dh(new_nonce_raw, server_nonce_raw, resp_tl_obj.encrypted_answer)
        # TODO: check hash
        # answer_hash = server_dh.data_hash
        answer = server_dh.data
        id_answer_class = Bytes(answer[:4]).to_int(False, False)
        register_class = TLRegister.get_func_type(id_answer_class)
        if register_class is None or register_class.ID != krgram.tl.protocol.auth.server_DH_inner_data.ID:
            raise UnexpectedResponseError("Unexpected response type from server")
        tlstream = TLBytesStream(answer)
        resp_tl_obj = krgram.tl.protocol.auth.server_DH_inner_data().deserialize_from(tlstream)
        g_a = resp_tl_obj.g_a
        server_time_diff = resp_tl_obj.server_time - int(time())
        # Step 3: pick client secret b, send g_b encrypted with the temporary
        # AES key, and expect dh_gen_ok.
        b_raw = os.urandom(256)
        b = Bytes(b_raw).to_int()
        g = resp_tl_obj.g
        dh_prime = resp_tl_obj.dh_prime
        g_b = pow(g, b, dh_prime)
        retry_id = 0
        data = krgram.tl.protocol.auth.client_DH_inner_data(nonce=nonce,
                                                            server_nonce=server_nonce,
                                                            retry_id=retry_id,
                                                            g_b=g_b).serialize()
        enc_data = TLEncryptor.encrypt_client_dh(data, server_dh.aes_key_iv)
        obj = krgram.tl.protocol.auth.set_client_DH_params(nonce=nonce,
                                                           server_nonce=server_nonce,
                                                           encrypted_data=enc_data)
        resp_tl_obj = self._send_plain_req(conn, obj)
        if resp_tl_obj.ID != krgram.tl.protocol.auth.dh_gen_ok.ID:
            raise Exception("DH generation not successful")
        # auth_key = g_a ^ b mod dh_prime, stored as 256 raw bytes.
        auth_key = pow(g_a, b, dh_prime)
        auth_key = Bytes.from_int(auth_key, 256)
        if autoclose:
            conn.close()
        auth_key = AuthKey(auth_key)
        self._server_salt = TLBaseSerializer.deserialize_long(server_salt)
        self._auth_key = auth_key
        self._server_time_diff = server_time_diff

    def _init_connection(self, dc, test_mode):
        """Open an abridged MTProto connection to `dc` (default: DC 1)."""
        if dc is None:
            dc = DataCenters.get_default().get_datacenter(1)
        conn = MTProtoAbridgedConnection()
        ip = dc.production_ip if not test_mode else dc.test_ip
        try:
            conn.open(ip, 443)
            return conn
        except Exception as exc:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; chain the original cause for debugging.
            raise Exception("Cannot open a connection on %s:%d" % (ip, 443)) from exc

    def _send_plain_req(self, conn, req):
        """Send a TLFunction as a plain (unencrypted) message and return the
        parsed response content."""
        if not isinstance(req, TLFunction):
            raise TypeError("obj must be an TLFunction instance")
        msg_id = MsgId()()
        raw_msg = PlainMsg(msg_id, req)
        conn.send_message(raw_msg)
        resp_msg = PlainMsg(0, None)
        conn.read_message_to(resp_msg)
        return resp_msg.content
|
python
|
import sqlalchemy
from pydantic import BaseModel
from .model import Base
class Category(BaseModel):
    """API schema for a category: a display name and its color."""
    name: str
    color: str
class CategoryTable(Base):
    """SQLAlchemy ORM mapping for the `category` table."""
    __tablename__ = "category"
    # The category name doubles as the primary key.
    name = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    color = sqlalchemy.Column(sqlalchemy.String)

    def __init__(self, category: Category):
        """Build a row from a pydantic `Category` schema object."""
        self.name = category.name
        self.color = category.color

    def __repr__(self):
        return f"Category<{self.name}, {self.color}>"
|
python
|
#!/usr/bin/env python3
import sys
import gzip
import ast
import json
class Dumper:
    """Write records to per-table gzip'd JSON-lines files.

    Only one output file is kept open at a time: switching to a new table
    name closes everything previously open.
    """
    def __init__(self, path):
        # Output prefix; it is joined to the file name by plain string
        # concatenation, so it is assumed to end with a path separator --
        # TODO confirm callers pass e.g. "out/".
        self.path = path
        # table name -> open gzip file handle
        self.open_file = {}

    def close_all(self):
        #close previous file(s)
        for name in self.open_file:
            self.open_file[name].close()
        self.open_file = {}

    def dump(self, filename, data):
        """Append one already-serialized line to <path><filename>.jsonl.gz."""
        if filename not in self.open_file:
            self.close_all()
            f = gzip.open(self.path+filename+'.jsonl.gz', 'wt')
            self.open_file[filename] = f
        else:
            f = self.open_file[filename]
        f.write(data+'\n')

    def readlineq(self, f):
        # Read line and quit if no more data
        line = f.readline()
        if line == '':
            # EOF: flush all open outputs and terminate the whole script.
            self.close_all()
            sys.exit(0)
        else:
            return line
# Global dumper; argv[2] is the output directory prefix (argv[1], consumed
# below, is the gzip'd SQL dump to read).
dp = Dumper(sys.argv[2])
def get_value_tuples(line):
    """Parse the VALUES payload of an INSERT statement into Python data.

    SQL NULLs are mapped to None; the result is whatever literal_eval
    produces, typically a tuple of row tuples.
    """
    payload = line.partition(' VALUES ')[-1].strip()
    payload = payload.replace('NULL', 'None')
    if payload[-1] == ';':
        payload = payload[:-1]
    return ast.literal_eval(payload)
def generate_json_line(columns, data, noiter=False):
    """Serialize one row to a JSON object string.

    Args:
        columns: column names, in table order.
        data: row values aligned with `columns`; with noiter=True, a single
            scalar stored under the first (only) column.
        noiter: set for single-column tables whose VALUES parse to a scalar.

    Returns:
        A JSON string (non-ASCII characters are emitted as-is, not escaped).
    """
    if noiter:
        row = {columns[0]: data}
    else:
        # Comprehension over enumerate replaces the old range(len(...)) loop
        # while keeping its strict indexing: fewer data values than columns
        # is still an IndexError.
        row = {col: data[i] for i, col in enumerate(columns)}
    return json.dumps(row, ensure_ascii=False)
# Stream the gzip'd MySQL dump (argv[1]) and re-emit each table's rows as
# gzip'd JSON lines via `dp`. The script terminates inside readlineq
# (sys.exit at EOF), which is why the outer loop has no exit condition.
with gzip.open(sys.argv[1], 'rt') as f:
    # look for the beginning of the table definition
    while True:
        while True:
            line = dp.readlineq(f)
            if line.startswith('CREATE TABLE'): break #untill
        table = line.split('`')[1] # name of the table
        # get names and types of columns
        columns = []
        while True:
            line = dp.readlineq(f)
            if line.startswith(' `'):
                columns.append(line.split('`')[1]) # = line.split('`')[2].split(' ')[1]
            else: break
        # look for the beginning of the data
        while True:
            line = dp.readlineq(f)
            if line.startswith('INSERT INTO'): break
        while line.startswith('INSERT INTO'):
            if line.split('`')[1] == table: # check if the INSERT is for the correct table
                data = get_value_tuples(line)
                if isinstance(data, str) or isinstance(data, int) or isinstance(data, float):
                    # Case of a table with a single value
                    dp.dump(table, generate_json_line(columns, data, noiter=True))
                else:
                    for i in data:
                        dp.dump(table, generate_json_line(columns, i))
            line = dp.readlineq(f)
|
python
|
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gym
import numpy as np
from smarts.core.controllers import ActionSpaceType
# The space of the adapted action: [throttle, brake, steering] with throttle
# and brake in [0, 1] and steering in [-1, 1].
gym_space: gym.Space = gym.spaces.Box(
    low=np.array([0.0, 0.0, -1.0]),
    high=np.array([1.0, 1.0, 1.0]),
    dtype=np.float32,
)
# This adapter requires SMARTS to ensure that the agent is provided a "continuous"
# controller, that is, a controller that allows for actions in the form of an array:
# [throttle, brake, steering].
required_interface = {"action": ActionSpaceType.Continuous}
def adapt(action: np.ndarray) -> np.ndarray:
    """Identity adapter for continuous-controller actions.

    The action is expected to already be a valid continuous controller
    command of the form [throttle, brake, steering], where throttle and
    brake are floats in [0, 1] and steering is a float in [-1, 1]; it is
    passed through unchanged.

    Args:
        action (numpy.ndarray): The action to adapt.

    Returns:
        np.ndarray: The same action that was passed in.
    """
    return action
|
python
|
import os
class Config:
    """Application-wide configuration constants for the VocView web app."""
    title = 'VocView'
    # URL root of this web application. This gets set in the before_first_request function.
    url_root = None
    # Subdirectory of base URL. Example, the '/corveg' part of 'vocabs.tern.org.au/corveg'
    SUB_URL = ''
    # Path of the application's directory.
    APP_DIR = os.path.dirname(os.path.realpath(__file__))
    # Vocabulary sources config. file.
    VOCAB_SOURCES = 'vocabs.yaml'
    # Rule-based reasoner (disabled by default).
    reasoner = False
    # -- Triplestore ---------------------------------------------------------------------------------------------------
    #
    # Options:
    #
    # - memory
    #   - No persistence, load in triples on instance start-up (slow start-up time). Graph is required to be kept in
    #     memory during application's lifetime. Not recommended due to slow start-up.
    #   - Difficulty: easy
    #
    # - pickle
    #   - Persistent store by saving a binary (pickle) copy of the Python rdflib.Graph object to disk. Graph is
    #     required to be in memory during application's lifetime. Fast start-up time and fast performance, uses
    #     significantly more memory than Sleepycat. Exact same as the memory method except it persists between
    #     application restarts.
    #   - Difficulty: easy
    #
    # - sleepycat
    #   - Persistent store by storing the triples in the now defunct Sleepycat's Berkeley DB store. Requires external
    #     libraries to be installed on the system before using. Does not require to have the whole triplestore in
    #     memory. Performance is slightly slower than the pickle method (maybe around 10-20%) but uses much less memory.
    #     For each request, only the required triples are loaded into the application's memory.
    #   - Difficulty: intermediate
    triplestore_type = 'pickle'
    # The time which the persistent store is valid before re-harvesting from its sources
    store_hours = 0
    store_minutes = 10
    # Triplestore disk paths (relative to the application directory).
    _triplestore_name_pickle = 'triplestore.p'
    triplestore_path_pickle = os.path.join(APP_DIR, _triplestore_name_pickle)
    _triplestore_name_sleepy_cat = 'triplestore'
    triplestore_path_sleepy_cat = os.path.join(APP_DIR, _triplestore_name_sleepy_cat)
|
python
|
# NOTE(review): the original snippet was syntactically broken (a dangling
# `if(` with no condition, plus a stray `a = list1`). Reconstructed to the
# evident intent of the exercise: count the strings whose first and last
# characters match ('ara' and 'cabc' here, so it prints 2).
list1 = ['car', 'ara', 'cabc']
c = 0
for i in list1:
    if len(i) >= 2 and i[0] == i[-1]:
        c = c + 1
print(c)
|
python
|
import pyeccodes.accessors as _
def load(h):
    """Register GRIB1 grid-definition keys for a triangular grid built from
    diamonds (presumably the GME icosahedral grid, given the icosahedron-pole
    keys -- TODO confirm against the GRIB table referenced below).
    """
    h.add(_.Unsigned('n2', 2))
    h.add(_.Unsigned('n3', 2))
    # nd: number of diamonds tiling the sphere; also exposed as Nj.
    h.add(_.Unsigned('nd', 3))
    h.alias('numberOfDiamonds', 'nd')
    h.alias('Nj', 'nd')
    h.add(_.Unsigned('Ni', 3))
    h.add(_.Codeflag('numberingOrderOfDiamonds', 1, "grib1/grid.192.78.3.9.table"))
    h.add(_.Signed('latitudeOfIcosahedronPole', 4))
    h.add(_.Unsigned('longitudeOfIcosahedronPole', 4))
    h.add(_.Unsigned('longitudeOfFirstDiamondCenterLine', 4))
    h.add(_.Unsigned('reservedOctet', 1))
    h.add(_.Codeflag('scanningModeForOneDiamond', 1, "grib1/grid.192.78.3.10.table"))
    # Total grid points: nd diamonds of (Ni+1)^2 points each.
    h.add(_.Transient('numberOfPoints', ((_.Get('nd') * (_.Get('Ni') + 1)) * (_.Get('Ni') + 1))))
    h.alias('numberOfDataPoints', 'numberOfPoints')
    h.add(_.Number_of_values('numberOfValues', _.Get('values'), _.Get('bitsPerValue'), _.Get('numberOfDataPoints'), _.Get('bitmapPresent'), _.Get('bitmap'), _.Get('numberOfCodedValues')))
|
python
|
import struct
from io import BytesIO
# Little-endian struct pack/unpack shorthands: pN packs an int into an
# N-bit byte string, uN unpacks the first (only) value back out.
p8 = lambda x:struct.pack("<B", x)
u8 = lambda x:struct.unpack("<B", x)[0]
p16 = lambda x:struct.pack("<H", x)
u16 = lambda x:struct.unpack("<H", x)[0]
p32 = lambda x:struct.pack("<I", x)
u32 = lambda x:struct.unpack("<I", x)[0]
p64 = lambda x:struct.pack("<Q", x)
u64 = lambda x:struct.unpack("<Q", x)[0]
def align(addr, alignment=0x1000):
    """Round `addr` up to the next multiple of `alignment` (a power of two),
    truncating the result to 64 bits.
    """
    # Mask with the low log2(alignment) bits cleared, limited to 64 bits.
    keep_mask = ((1 << 64) - 1) & -alignment
    return (addr + alignment - 1) & keep_mask
def struct2str(s):
    """Return the bytes of `s`, read back through a BytesIO round-trip."""
    stream = BytesIO(s)
    return stream.read()
|
python
|
from sacnn.core.we import get_word_to_vector
# Load the word-embedding lookup once at import time; WORD_DIMENSION is the
# size of each embedding vector.
word_to_vector, WORD_DIMENSION = get_word_to_vector()
|
python
|
from __future__ import annotations
import collections
import copy
import json
import logging
import operator
import os
from typing import Any, Dict, List, Optional, Tuple
import joblib
from poker_ai import utils
from poker_ai.poker.card import Card
from poker_ai.poker.engine import PokerEngine
from poker_ai.games.short_deck.player import ShortDeckPokerPlayer
from poker_ai.poker.pot import Pot
from poker_ai.poker.table import PokerTable
logger = logging.getLogger("poker_ai.games.short_deck.state")
# Maps betting stage name -> {card-combination tuple -> cluster id}, used for
# information-set abstraction.
InfoSetLookupTable = Dict[str, Dict[Tuple[int, ...], str]]
def new_game(
    n_players: int, card_info_lut: Optional[InfoSetLookupTable] = None, **kwargs
) -> ShortDeckPokerState:
    """
    Create a new game of short deck poker.
    ...
    Parameters
    ----------
    n_players : int
        Number of players.
    card_info_lut : InfoSetLookupTable, optional
        Card information cluster lookup table. When provided (and non-empty)
        it is reused instead of reloading the huge table from disk.
    Returns
    -------
    state : ShortDeckPokerState
        Current state of the game
    """
    # The default was previously a mutable `{}`; None avoids sharing one dict
    # object across calls while behaving identically (both are falsy).
    pot = Pot()
    players = [
        ShortDeckPokerPlayer(player_i=player_i, initial_chips=10000, pot=pot)
        for player_i in range(n_players)
    ]
    if card_info_lut:
        # Don't reload massive files, it takes ages.
        state = ShortDeckPokerState(
            players=players,
            load_card_lut=False,
            **kwargs
        )
        state.card_info_lut = card_info_lut
    else:
        # Load massive files.
        state = ShortDeckPokerState(
            players=players,
            **kwargs
        )
    return state
class ShortDeckPokerState:
"""The state of a Short Deck Poker game at some given point in time.
The class is immutable and new state can be instanciated from once an
action is applied via the `ShortDeckPokerState.new_state` method.
"""
    def __init__(
        self,
        players: List[ShortDeckPokerPlayer],
        small_blind: int = 50,
        big_blind: int = 100,
        lut_path: str = ".",
        pickle_dir: bool = False,
        load_card_lut: bool = True,
    ):
        """Initialise state.

        Parameters
        ----------
        players : List[ShortDeckPokerPlayer]
            The players (at least two); all are expected to share one Pot.
        small_blind, big_blind : int
            Blind sizes in chips.
        lut_path : str
            Where to find the card-information lookup table on disk.
        pickle_dir : bool
            Legacy flag: lut_path points at per-stage pickle files.
        load_card_lut : bool
            Set False to skip the (slow) lut load, e.g. when the caller
            supplies a lut afterwards.

        Raises
        ------
        ValueError
            If fewer than two players are provided.
        """
        n_players = len(players)
        if n_players <= 1:
            raise ValueError(
                f"At least 2 players must be provided but only {n_players} "
                f"were provided."
            )
        self._pickle_dir = pickle_dir
        if load_card_lut:
            self.card_info_lut = self.load_card_lut(lut_path, self._pickle_dir)
        else:
            self.card_info_lut = {}
        # Get a reference of the pot from the first player.
        self._table = PokerTable(
            players=players, pot=players[0].pot, include_ranks=[10, 11, 12, 13, 14]
        )
        # Get a reference of the initial number of chips for the payout.
        self._initial_n_chips = players[0].n_chips
        self.small_blind = small_blind
        self.big_blind = big_blind
        self._poker_engine = PokerEngine(
            table=self._table, small_blind=small_blind, big_blind=big_blind
        )
        # Reset the pot, assign betting order to players (might need to remove
        # this), assign blinds to the players.
        self._poker_engine.round_setup()
        # Deal private cards to players.
        self._table.dealer.deal_private_cards(self._table.players)
        # Store the actions as they come in here, keyed by betting stage.
        self._history: Dict[str, List[str]] = collections.defaultdict(list)
        self._betting_stage = "pre_flop"
        # Ordinal of each betting stage, useful for ordering comparisons.
        self._betting_stage_to_round: Dict[str, int] = {
            "pre_flop": 0,
            "flop": 1,
            "turn": 2,
            "river": 3,
            "show_down": 4,
        }
        # Rotate the big and small blind to the final positions for the pre
        # flop round only.
        player_i_order: List[int] = [p_i for p_i in range(n_players)]
        self.players[0].is_small_blind = True
        self.players[1].is_big_blind = True
        self.players[-1].is_dealer = True
        self._player_i_lut: Dict[str, List[int]] = {
            "pre_flop": player_i_order[2:] + player_i_order[:2],
            "flop": player_i_order,
            "turn": player_i_order,
            "river": player_i_order,
            "show_down": player_i_order,
            "terminal": player_i_order,
        }
        # Number of folded players skipped since the last real action; these
        # are recorded into the history as "skip" entries.
        self._skip_counter = 0
        self._first_move_of_current_round = True
        self._reset_betting_round_state()
        for player in self.players:
            player.is_turn = False
        self.current_player.is_turn = True
    def __repr__(self):
        """Return a helpful description of object in strings and debugger."""
        # player_i is the index of the player whose turn it currently is.
        return f"<ShortDeckPokerState player_i={self.player_i} betting_stage={self._betting_stage}>"
    def apply_action(self, action_str: Optional[str]) -> ShortDeckPokerState:
        """Create a new state after applying an action.

        Parameters
        ----------
        action_str : str or None
            The description of the action the current player is making. Can be
            any of {"fold, "call", "raise"}, the latter two only being possible
            if the agent hasn't folded already. None is only legal when the
            current player has already folded (a "skip").

        Returns
        -------
        new_state : ShortDeckPokerState
            A poker state instance that represents the game in the next
            timestep, after the action has been applied.

        Raises
        ------
        ValueError
            If `action_str` is not currently legal.
        """
        if action_str not in self.legal_actions:
            raise ValueError(
                f"Action '{action_str}' not in legal actions: " f"{self.legal_actions}"
            )
        # Deep copy the parts of state that are needed that must be immutable
        # from state to state. The (huge) card lut is detached first so the
        # deepcopy doesn't clone it, then re-attached and shared by reference.
        lut = self.card_info_lut
        self.card_info_lut = {}
        new_state = copy.deepcopy(self)
        new_state.card_info_lut = self.card_info_lut = lut
        # An action has been made, so alas we are not in the first move of the
        # current betting round.
        new_state._first_move_of_current_round = False
        if action_str is None:
            # Assert active player has folded already.
            assert (
                not new_state.current_player.is_active
            ), "Active player cannot do nothing!"
        elif action_str == "call":
            action = new_state.current_player.call(players=new_state.players)
            logger.debug("calling")
        elif action_str == "fold":
            action = new_state.current_player.fold()
        elif action_str == "raise":
            # Raise size: one big blind pre-flop/flop, doubled on turn/river,
            # on top of whatever is needed to call.
            bet_n_chips = new_state.big_blind
            if new_state._betting_stage in {"turn", "river"}:
                bet_n_chips *= 2
            biggest_bet = max(p.n_bet_chips for p in new_state.players)
            n_chips_to_call = biggest_bet - new_state.current_player.n_bet_chips
            raise_n_chips = bet_n_chips + n_chips_to_call
            logger.debug(f"betting {raise_n_chips} n chips")
            action = new_state.current_player.raise_to(n_chips=raise_n_chips)
            new_state._n_raises += 1
        else:
            raise ValueError(
                f"Expected action to be derived from class Action, but found "
                f"type {type(action)}."
            )
        # Update the new state: flush accumulated skips into the history,
        # then record this action.
        skip_actions = ["skip" for _ in range(new_state._skip_counter)]
        new_state._history[new_state.betting_stage] += skip_actions
        new_state._history[new_state.betting_stage].append(str(action))
        new_state._n_actions += 1
        new_state._skip_counter = 0
        # Player has made move, increment the player that is next.
        while True:
            new_state._move_to_next_player()
            # If we have finished betting, (i.e: All players have put the
            # same amount of chips in), then increment the stage of
            # betting.
            finished_betting = not new_state._poker_engine.more_betting_needed
            if finished_betting and new_state.all_players_have_actioned:
                # We have done atleast one full round of betting, increment
                # stage of the game.
                new_state._increment_stage()
                new_state._reset_betting_round_state()
                new_state._first_move_of_current_round = True
            if not new_state.current_player.is_active:
                # Folded players are skipped; count them so their "skip"
                # entries land in the history on the next real action.
                new_state._skip_counter += 1
                assert not new_state.current_player.is_active
            elif new_state.current_player.is_active:
                if new_state._poker_engine.n_players_with_moves == 1:
                    # No players left.
                    new_state._betting_stage = "terminal"
                    if not new_state._table.community_cards:
                        new_state._poker_engine.table.dealer.deal_flop(new_state._table)
                # Now check if the game is terminal.
                if new_state._betting_stage in {"terminal", "show_down"}:
                    # Distribute winnings.
                    new_state._poker_engine.compute_winners()
                break
        for player in new_state.players:
            player.is_turn = False
        new_state.current_player.is_turn = True
        return new_state
@staticmethod
def load_card_lut(
lut_path: str = ".",
pickle_dir: bool = False
) -> Dict[str, Dict[Tuple[int, ...], str]]:
"""
Load card information lookup table.
...
Parameters
----------
lut_path : str
Path to lookupkup table.
pickle_dir : bool
Whether the lut_path is a path to pickle files or not. Pickle files
are deprecated for the lut.
Returns
-------
cad_info_lut : InfoSetLookupTable
Card information cluster lookup table.
"""
if pickle_dir:
logger.info("Loading card information lut in deprecated way")
file_names = [
"preflop_lossless.pkl",
"flop_lossy_2.pkl",
"turn_lossy_2.pkl",
"river_lossy_2.pkl",
]
betting_stages = ["pre_flop", "flop", "turn", "river"]
card_info_lut: Dict[str, Dict[Tuple[int, ...], str]] = {}
for file_name, betting_stage in zip(file_names, betting_stages):
file_path = os.path.join(lut_path, file_name)
if not os.path.isfile(file_path):
raise ValueError(
f"File path not found {file_path}. Ensure lut_path is "
f"set to directory containing pickle files"
)
with open(file_path, "rb") as fp:
card_info_lut[betting_stage] = joblib.load(fp)
elif lut_path:
logger.info(f"Loading card from single file at path: {lut_path}")
card_info_lut = joblib.load(lut_path + '/card_info_lut.joblib')
else:
card_info_lut = {}
return card_info_lut
def _move_to_next_player(self):
"""Ensure state points to next valid active player."""
self._player_i_index += 1
if self._player_i_index >= len(self.players):
self._player_i_index = 0
    def _reset_betting_round_state(self):
        """Reset the state related to counting types of actions.

        Called at the start of a betting round: zeroes the per-round action
        counters and advances the player index past inactive seats.
        """
        self._all_players_have_made_action = False
        self._n_actions = 0
        self._n_raises = 0
        self._player_i_index = 0
        # Snapshot how many players are still in for round-completion checks.
        self._n_players_started_round = self._poker_engine.n_active_players
        # Skip folded/inactive seats, counting each skip.
        # NOTE(review): unlike _move_to_next_player this loop does not wrap
        # the index; presumably an active player always exists before the end
        # of the stage's player order -- confirm.
        while not self.current_player.is_active:
            self._skip_counter += 1
            self._player_i_index += 1
def _increment_stage(self):
"""Once betting has finished, increment the stage of the poker game."""
# Progress the stage of the game.
if self._betting_stage == "pre_flop":
# Progress from private cards to the flop.
self._betting_stage = "flop"
self._poker_engine.table.dealer.deal_flop(self._table)
elif self._betting_stage == "flop":
# Progress from flop to turn.
self._betting_stage = "turn"
self._poker_engine.table.dealer.deal_turn(self._table)
elif self._betting_stage == "turn":
# Progress from turn to river.
self._betting_stage = "river"
self._poker_engine.table.dealer.deal_river(self._table)
elif self._betting_stage == "river":
# Progress to the showdown.
self._betting_stage = "show_down"
elif self._betting_stage in {"show_down", "terminal"}:
pass
else:
raise ValueError(f"Unknown betting_stage: {self._betting_stage}")
    @property
    def community_cards(self) -> List[Card]:
        """Return all shared/public cards dealt to the table so far."""
        return self._table.community_cards
    @property
    def private_hands(self) -> Dict[ShortDeckPokerPlayer, List[Card]]:
        """Return each player's private (hole) cards, keyed by player."""
        return {p: p.cards for p in self.players}
    @property
    def initial_regret(self) -> Dict[str, float]:
        """Returns the default (all-zero) regret for this state's legal actions."""
        # NOTE(review): values are int 0 despite the Dict[str, float]
        # annotation -- numerically harmless, but confirm no caller relies
        # on float type.
        return {action: 0 for action in self.legal_actions}
    @property
    def initial_strategy(self) -> Dict[str, float]:
        """Returns the default (all-zero) strategy for this state's legal actions."""
        # NOTE(review): int zeros vs. the float annotation, same as
        # initial_regret -- confirm callers normalise these values.
        return {action: 0 for action in self.legal_actions}
    @property
    def betting_stage(self) -> str:
        """Return the current betting stage (e.g. "pre_flop", "flop", ...)."""
        return self._betting_stage
    @property
    def all_players_have_actioned(self) -> bool:
        """Return whether all players have made at least one action this round."""
        return self._n_actions >= self._n_players_started_round
    @property
    def n_players_started_round(self) -> int:
        """Return the number of players that started the current betting round."""
        return self._n_players_started_round
@property
def player_i(self) -> int:
"""Get the index of the players turn it is."""
return self._player_i_lut[self._betting_stage][self._player_i_index]
@player_i.setter
def player_i(self, _: Any):
"""Raise an error if player_i is set."""
raise ValueError(f"The player_i property should not be set.")
@property
def betting_round(self) -> int:
"""Betting stagee in integer form."""
try:
betting_round = self._betting_stage_to_round[self._betting_stage]
except KeyError:
raise ValueError(
f"Attemped to get betting round for stage "
f"{self._betting_stage} but was not supported in the lut with "
f"keys: {list(self._betting_stage_to_round.keys())}"
)
return betting_round
    @property
    def info_set(self) -> str:
        """Get the information set for the current player.

        The info set is a compact JSON string of the player's card cluster
        (from the card info lut) and the per-stage action history.
        """
        # Hole cards first, then board cards, each sorted high-to-low by
        # evaluation rank so equivalent hands produce the same lookup key.
        cards = sorted(
            self.current_player.cards,
            key=operator.attrgetter("eval_card"),
            reverse=True,
        )
        cards += sorted(
            self._table.community_cards,
            key=operator.attrgetter("eval_card"),
            reverse=True,
        )
        # Deprecated pickle luts are keyed by eval_card ints; the single-file
        # lut is keyed by the card objects themselves.
        if self._pickle_dir:
            lookup_cards = tuple([card.eval_card for card in cards])
        else:
            lookup_cards = tuple(cards)
        try:
            cards_cluster = self.card_info_lut[self._betting_stage][lookup_cards]
        except KeyError:
            # Terminal/show-down states legitimately have no lut entry;
            # anything else indicates a mis-loaded lut.
            if self.betting_stage not in {"terminal", "show_down"}:
                raise ValueError("You should have these cards in your lut.")
            return "default info set, please ensure you load it correctly"
        # Convert history from a dict of lists to a list of dicts as I'm
        # paranoid about JSON's lack of care with insertion order.
        info_set_dict = {
            "cards_cluster": cards_cluster,
            "history": [
                {betting_stage: [str(action) for action in actions]}
                for betting_stage, actions in self._history.items()
            ],
        }
        return json.dumps(
            info_set_dict, separators=(",", ":"), cls=utils.io.NumpyJSONEncoder
        )
@property
def payout(self) -> Dict[int, int]:
"""Return player index to payout number of chips dictionary."""
n_chips_delta = dict()
for player_i, player in enumerate(self.players):
n_chips_delta[player_i] = player.n_chips - self._initial_n_chips
return n_chips_delta
    @property
    def is_terminal(self) -> bool:
        """Returns whether this state is terminal or not.

        The state is terminal once all rounds of betting are complete and we
        are at the show down stage of the game or if all players have folded.
        """
        return self._betting_stage in {"show_down", "terminal"}
    @property
    def players(self) -> List[ShortDeckPokerPlayer]:
        """Returns the players seated at the table."""
        return self._table.players
    @property
    def current_player(self) -> ShortDeckPokerPlayer:
        """Returns a reference to the player that makes a move for this state."""
        return self._table.players[self.player_i]
    @property
    def legal_actions(self) -> List[Optional[str]]:
        """Return the actions that are legal for this game state.

        An inactive (folded / all-in) player gets the single pseudo-action
        ``None``.
        """
        actions: List[Optional[str]] = []
        if self.current_player.is_active:
            actions += ["fold", "call"]
            if self._n_raises < 3:
                # In limit hold'em we can only bet/raise if there have been
                # less than three raises in this round of betting, or if there
                # are two players playing.
                # NOTE(review): the "two players playing" exception mentioned
                # above is not implemented by this condition -- confirm
                # whether that is intentional.
                actions += ["raise"]
        else:
            actions += [None]
        return actions
|
python
|
import json
class SwearWords(object):
    """Censor banned words from text by masking them with a symbol.

    The ban list is read once from ``data.json``; the file is expected to
    hold an object with a ``word`` key listing the banned words.
    """

    def __init__(self):
        # Close the file deterministically (the original leaked the handle).
        with open('data.json') as fp:
            self.data = json.load(fp)

    def filter_words(self, text, symbol="*"):
        """Return *text* with each banned word replaced by repeated *symbol*.

        Matching is exact per whitespace-separated token; no case folding or
        punctuation stripping is performed. Runs of whitespace collapse to a
        single space in the output (a consequence of split/join).
        """
        words = text.split()
        for i, word in enumerate(words):
            if word in self.data['word']:
                words[i] = symbol * len(word)
        return " ".join(words)
|
python
|
import RPi.GPIO as GPIO

# Pin assignments use the physical BOARD numbering scheme.
led_pin = 29
button_pin = 40
buzzer_pin = 31

GPIO.setmode(GPIO.BOARD)
# Pull the button input low so an unpressed button reads LOW.
GPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(led_pin, GPIO.OUT)
GPIO.setup(buzzer_pin, GPIO.OUT)

try:
    # Mirror the button state onto the LED and buzzer forever.
    while True:
        pressed = GPIO.input(button_pin) == GPIO.HIGH
        GPIO.output(led_pin, pressed)
        GPIO.output(buzzer_pin, pressed)
finally:
    # Release the pins on exit (e.g. Ctrl-C) so they are not left driven.
    GPIO.cleanup()
|
python
|
"""
MIT License
Copyright (c) 2021 isaa-ctaylor
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord.ext import commands
import asyncio
from typing import Union
class Economy(commands.Cog):
    """Per-user wallet/bank economy commands backed by the ``userdata`` table."""

    def __init__(self, bot):
        self.bot = bot

    async def _register_member(self, member_id, starting_value):
        """Create (or re-enable) an economy account for ``member_id``.

        Raises:
            NameError: if the member already has an active account.

        Returns:
            The member's ``(wallet, bank, eco_enabled)`` record.
        """
        async with self.bot.db.pool.acquire() as con:
            data = await con.fetch("SELECT eco_enabled FROM userdata WHERE user_id = $1", member_id)
        if data:
            if data[0]["eco_enabled"]:
                raise NameError
            async with self.bot.db.pool.acquire() as con:
                # BUGFIX: the original UPDATE had no WHERE clause, so it reset
                # every account in the table instead of just this member's.
                await con.execute("UPDATE userdata SET eco_enabled = $1, wallet = $2, bank = $2 WHERE user_id = $3", True, starting_value, member_id)
        else:
            async with self.bot.db.pool.acquire() as con:
                # BUGFIX: on conflict the original set bank = $1 (the user id);
                # it should be the starting value ($2).
                await con.execute("INSERT INTO userdata(user_id, wallet, bank, eco_enabled) values($1, $2, $2, $3) ON CONFLICT (user_id) DO UPDATE SET wallet = $2, bank = $2, eco_enabled = $3 WHERE userdata.user_id = $1", member_id, starting_value, True)
        async with self.bot.db.pool.acquire() as con:
            return (await con.fetch("SELECT wallet, bank, eco_enabled FROM userdata WHERE user_id = $1", member_id))[0]

    async def _check_registered(self, member_id):
        """Return True if ``member_id`` has an active economy account."""
        async with self.bot.db.pool.acquire() as con:
            data = await con.fetch("SELECT eco_enabled from userdata WHERE user_id = $1", member_id)
        return bool(data and dict(data[0])["eco_enabled"])

    @commands.command(name="register")
    async def _register(self, ctx):
        """Open a bank account (100 starting coins) for the invoking user."""
        try:
            await self._register_member(ctx.author.id, 100)
            embed = discord.Embed(
                title="Done!", description="I have set up a bank account for you!", colour=self.bot.good_embed_colour)
            return await ctx.reply(embed=embed, mention_author=False)
        except NameError:
            embed = discord.Embed(
                title="Error!", description="You already have an account!")
            return await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="unregister")
    async def _unregister(self, ctx):
        """Disable the author's account and zero their balances."""
        if await self._check_registered(ctx.author.id):
            async with self.bot.db.pool.acquire() as con:
                await con.execute("UPDATE userdata SET eco_enabled = $1, wallet = $2, bank = $2 WHERE user_id = $3", False, 0, ctx.author.id)
            embed = discord.Embed(
                title="Done!", description=f"Sad to see you go! If you want to come back, use the `{ctx.prefix}register` command.", colour=self.bot.good_embed_colour)
        else:
            embed = discord.Embed(
                title="Error!", description=f"You don't have an account! Use the `{ctx.prefix}register` command to make an account", colour=self.bot.bad_embed_colour)
        await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="balance", aliases=["bal"])
    async def _balance(self, ctx, *, member: discord.Member = None):
        """Show a member's wallet/bank balance (defaults to the author).

        Auto-registers the author if they have no active account; for other
        members an error is shown instead.
        """
        member = member or ctx.author
        async with self.bot.db.pool.acquire() as con:
            data = await con.fetch("SELECT wallet, bank, eco_enabled FROM userdata WHERE user_id = $1", member.id)
        if data:
            data = dict(data[0])
        else:
            if member.id != ctx.author.id:
                embed = discord.Embed(
                    title="Error!", description="That person doesn't have an account!", colour=self.bot.bad_embed_colour)
                return await ctx.reply(embed=embed, mention_author=False)
            else:
                data = dict(await self._register_member(member.id, 100))
                embed = discord.Embed(
                    description="You didnt have an account, so I made one for you", colour=self.bot.good_embed_colour)
                await ctx.reply(embed=embed, mention_author=False)
        if not data["eco_enabled"]:
            if member.id != ctx.author.id:
                embed = discord.Embed(
                    title="Error!", description="That person doesn't have an account!", colour=self.bot.bad_embed_colour)
                return await ctx.reply(embed=embed, mention_author=False)
            else:
                data = dict(await self._register_member(member.id, 100))
                embed = discord.Embed(
                    description="You didnt have an account, so I made one for you", colour=self.bot.good_embed_colour)
                await ctx.reply(embed=embed, mention_author=False)
        desc_string = f"**`Wallet:`** {data['wallet'] or 0}\n**`Bank:`** {data['bank'] or '0'}\n**`Total:`** {(data['wallet'] or 0) + (data['bank'] or 0)}"
        embed = discord.Embed(title=f"{member.name}'s balance",
                              description=desc_string, colour=self.bot.neutral_embed_colour)
        # NOTE(review): member.avatar can be None for default avatars; this
        # mirrors the original behaviour and would raise in that case.
        embed.set_thumbnail(url=str(member.avatar.url))
        await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="withdraw", aliases=["with"])
    async def _withdraw(self, ctx, *, amount: Union[int, str]):
        """Move coins from bank to wallet; ``all`` withdraws everything."""
        if await self._check_registered(ctx.author.id):
            if isinstance(amount, str):
                if amount.lower() == "all":
                    async with self.bot.db.pool.acquire() as con:
                        data = dict((await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id))[0])
                        newwallet = data["wallet"] + data["bank"]
                        # BUGFIX: scope the UPDATE to this user (the original
                        # had no WHERE clause and emptied every bank).
                        await con.execute("UPDATE userdata SET wallet = $1, bank = $2 WHERE user_id = $3", newwallet, 0, ctx.author.id)
                    await self._balance(ctx)
                else:
                    embed = discord.Embed(title="Error!", description=f"`{amount}` isn't an amount I can withdraw!", colour=self.bot.bad_embed_colour)
                    await ctx.reply(embed=embed, mention_author=False)
            elif isinstance(amount, int):
                async with self.bot.db.pool.acquire() as con:
                    data = dict((await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id))[0])
                    if data["bank"] < amount:
                        embed = discord.Embed(title="Error!", description="You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
                        return await ctx.reply(embed=embed, mention_author=False)
                    # BUGFIX: scope the UPDATE to this user (no WHERE before).
                    await con.execute("UPDATE userdata SET wallet = $1, bank = $2 WHERE user_id = $3", data["wallet"] + amount, data["bank"] - amount, ctx.author.id)
                await self._balance(ctx)
        else:
            embed = discord.Embed(title="Error!", description=f"You dont have an account! Use the `{ctx.prefix}register` command to make one!", colour=self.bot.bad_embed_colour)
            await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="deposit", aliases=["dep"])
    async def _deposit(self, ctx, *, amount: Union[int, str]):
        """Move coins from wallet to bank; ``all`` deposits everything."""
        if await self._check_registered(ctx.author.id):
            if isinstance(amount, str):
                if amount.lower() == "all":
                    async with self.bot.db.pool.acquire() as con:
                        data = dict((await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id))[0])
                        newbank = data["wallet"] + data["bank"]
                        # BUGFIX: scope the UPDATE to this user (no WHERE before).
                        await con.execute("UPDATE userdata SET wallet = $1, bank = $2 WHERE user_id = $3", 0, newbank, ctx.author.id)
                    await self._balance(ctx)
                else:
                    # BUGFIX: the message said "withdraw" in the deposit command.
                    embed = discord.Embed(title="Error!", description=f"`{amount}` isn't an amount I can deposit!", colour=self.bot.bad_embed_colour)
                    await ctx.reply(embed=embed, mention_author=False)
            elif isinstance(amount, int):
                async with self.bot.db.pool.acquire() as con:
                    data = dict((await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id))[0])
                    if data["wallet"] < amount:
                        embed = discord.Embed(title="Error!", description="You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
                        return await ctx.reply(embed=embed, mention_author=False)
                    # BUGFIX: scope the UPDATE to this user (no WHERE before).
                    await con.execute("UPDATE userdata SET wallet = $1, bank = $2 WHERE user_id = $3", data["wallet"] - amount, data["bank"] + amount, ctx.author.id)
                await self._balance(ctx)
        else:
            embed = discord.Embed(title="Error!", description=f"You dont have an account! Use the `{ctx.prefix}register` command to make one!", colour=self.bot.bad_embed_colour)
            await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="transfer", aliases=["pay"])
    async def _transfer(self, ctx, member: discord.Member, amount: int):
        """Give coins from the author's wallet to another member's wallet."""
        if await self._check_registered(ctx.author.id) and await self._check_registered(member.id):
            if ctx.author.id == member.id and ctx.author.id != self.bot.owner_id:
                return await ctx.error("You can't give yourself money.", reply=True)
            if amount < 0:
                return await ctx.error("Invalid amount.", reply=True)
            async with self.bot.db.pool.acquire() as con:
                authordata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
                memberdata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", member.id)
                if authordata and memberdata:
                    authordata = dict(authordata[0])
                    memberdata = dict(memberdata[0])
                    if amount > authordata["wallet"] and ctx.author.id != self.bot.owner_id:
                        embed = discord.Embed(title="Error!", description="You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
                    else:
                        newauthorwallet = authordata["wallet"]
                        # The bot owner can mint coins: their wallet is not debited.
                        if ctx.author.id != self.bot.owner_id:
                            newauthorwallet = authordata["wallet"] - amount
                        newmemberwallet = memberdata["wallet"] + amount
                        await con.execute("UPDATE userdata SET wallet = $1 WHERE user_id = $2", newauthorwallet, ctx.author.id)
                        await con.execute("UPDATE userdata SET wallet = $1 WHERE user_id = $2", newmemberwallet, member.id)
                        # BUGFIX: "payed" -> "paid" in the success message.
                        embed = discord.Embed(title="Done!", description=f"You successfully paid {member.mention} `{amount}` coins!", colour=self.bot.good_embed_colour)
                    return await ctx.reply(embed=embed, mention_author=False)
        else:
            async with self.bot.db.pool.acquire() as con:
                authordata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
            if not authordata:
                embed = discord.Embed(title="Error!", description="You do not have an account!", colour=self.bot.bad_embed_colour)
            else:
                embed = discord.Embed(title="Error!", description=f"{member.mention} does not have an account!", colour=self.bot.bad_embed_colour)
            await ctx.reply(embed=embed, mention_author=False)

    @commands.command(name="rob", aliases=["steal"])
    async def _rob(self, ctx, member: discord.Member):
        """Rob another member."""
        # TODO: robbery mechanics not yet implemented.
        pass
def setup(bot):
    """Entry point used by ``bot.load_extension`` to register this cog."""
    bot.add_cog(Economy(bot))
|
python
|
"""
The Importer feature sets up the ability to work with cuneiform text(s)
one-on-one, whether it is the Code of Hammurabi, a collection of texts such as
ARM01, or whatever your research desires.
This cdli_corpus module is for working with text files having already been read
by file_importer. The file_lines required by CDLICorpus are taken from prior
use of FileImport(text_file).read_file().
e.g.:
# FileImport takes a txt file and reads it; this becomes file_lines.
text_path = os.path.join('texts', 'ARM01_texts.txt')
f_i = FileImport(text_path)
f_i.read_file()
ARM01 = f_i.file_lines
# CDLICorpus takes file_lines and uses it to work:
cdli = CDLICorpus()
cdli.parse_file(ARM01)
cdli.print_catalog()
The output of CDLICorpus will be able to further utilized by the feature
ATFConverter and its subsequent classes: Tokenizer, ATFConverter, Lemmatizer,
and PPrint.
"""
import re
__author__ = ['Andrew Deloucas <ADeloucas@g.harvard.com>']
__license__ = 'MIT License. See LICENSE.'
class CDLICorpus(object):
    """
    Takes file_lines, prepares and organizes data.
    """
    def __init__(self):
        """
        Initialize empty chunk and catalog containers.
        """
        # Raw chunks of lines, one list per text.
        self.chunks = []
        # Pnum -> parsed text data; see parse_file for the schema.
        self.catalog = {}

    def parse_file(self, file_lines):
        """
        Parses lines of file into a dictionary of texts.
        :param file_lines: file_importer.file_lines
        :return: Each text as the form:
            Pnum: {'metadata': List of lines of metadata,
                   'pnum': P-number,
                   'edition': Bibliographic edition,
                   'raw_text': Raw lines of ATF text,
                   'transliteration': lines of transliteration,
                   'normalization': lines of normalization (if present),
                   'translation': lines of translation (if present)}
        """
        # Guard: nothing to parse (the original indexed file_lines[0] and
        # raised IndexError on an empty input).
        if not file_lines:
            return
        # separate the file into chunks of text
        chunks, chunk = [], []
        # check to see what format the corpus is in, we assume that the headers
        # are the same for all texts in the file... (maybe not safe?)
        if re.match('Primary publication:', file_lines[0]):
            header = re.compile('Primary publication:')
        else:
            header = re.compile(r'&?P\d{6}')
        for line in file_lines:
            if header.match(line):
                # A new text begins: flush the previous chunk.
                if len(chunk) > 0:  # pylint: disable=len-as-condition
                    chunks.append(chunk)
                chunk = [line]
            else:
                if len(line) > 0:  # pylint: disable=len-as-condition
                    chunk.append(line)
        chunks.append(chunk)
        self.chunks = chunks
        # create a rich catalog from the chunks
        re_translit = re.compile(r'(\d+\'?\.) ?(.*)')
        re_normaliz = re.compile(r'(#tr\.ts:) ?(.*)')
        re_translat = re.compile(r'(#tr\.en:) ?(.*)')
        for chunk in self.chunks:
            text = chunk
            if chunk[0].startswith('Primary publication:'):
                # we've got full metadata, add additional parsing later
                metadata = chunk[:25]
                text = chunk[26:]
            else:  # no metadata
                metadata = []
            # '&P000001 = Edition' -> pnum 'P000001', edition 'Edition'.
            pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()
            edition = text[0].split('=')[1].lstrip()
            text = text[3:]
            translit = []
            normaliz = []
            translat = []
            for line in text:
                if re.match(r'\d+\'?\.', line):
                    translit.append(re_translit.match(line).groups()[1])
                if line.startswith('#tr.ts:'):
                    normaliz.append(re_normaliz.match(line).groups()[1])
                if line.startswith('#tr.en:'):
                    translat.append(re_translat.match(line).groups()[1])
            self.catalog[pnum] = {'metadata': metadata,
                                  'pnum': pnum,
                                  'edition': edition,
                                  'raw_text': text,
                                  'transliteration': translit,
                                  'normalization': normaliz,
                                  'translation': translat}

    def toc(self):
        """
        Returns a rich list of texts in the catalog.
        """
        return [
            f"Pnum: {key}, Edition: {self.catalog[key]['edition']}, "
            f"length: {len(self.catalog[key]['transliteration'])} line(s)"
            for key in sorted(self.catalog.keys())]

    def list_pnums(self):
        """
        Lists all Pnums in the catalog.
        """
        return sorted([key for key in self.catalog])

    def list_editions(self):
        """
        Lists all text editions in the catalog.
        """
        return sorted([self.catalog[key]['edition'] for key in self.catalog])

    def print_catalog(self, catalog_filter=None):
        """
        Prints out a catalog of all the texts in the corpus. Can be filtered by
        passing a list of keys you want present in the texts.
        :param: catalog_filter = If you wish to sort the list, use the keys
            pnum, edition, metadata, transliteration, normalization, or
            translation.
        """
        # BUGFIX: avoid the shared mutable default argument ([]); None is the
        # safe sentinel and preserves the old behaviour for all callers.
        catalog_filter = catalog_filter or []
        keys = sorted(self.catalog.keys())
        if len(catalog_filter) > 0:  # pylint: disable=len-as-condition
            valid = []
            for key in keys:
                for f in catalog_filter:
                    if len(self.catalog[key][f]) > 0:  # pylint: disable=len-as-condition
                        valid.append(key)
            keys = valid
        for key in keys:
            print(f"Pnum: {self.catalog[key]['pnum']}")
            print(f"Edition: {self.catalog[key]['edition']}")
            print(f"Metadata: {len(self.catalog[key]['metadata']) > 0}")
            print(f"Transliteration: {len(self.catalog[key]['transliteration']) > 0}")
            print(f"Normalization: {len(self.catalog[key]['normalization']) > 0}")
            print(f"Translation: {len(self.catalog[key]['translation']) > 0}")
            print()
print()
|
python
|
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
import asyncio

import pylru
from aiorpcx import run_in_thread

from electrumx.lib.hash import Base58Error, hash_to_hex_str
class ChainState(object):
    '''Used as an interface by servers to request information about
    blocks, transaction history, UTXOs and the mempool.
    '''

    def __init__(self, env, daemon, bp, notifications):
        self._env = env
        self._daemon = daemon
        self._bp = bp
        # hashX -> history list, invalidated by _notify on touched hashXs.
        self._history_cache = pylru.lrucache(256)
        # External interface pass-throughs for session.py
        self.force_chain_reorg = self._bp.force_chain_reorg
        self.tx_branch_and_root = self._bp.merkle.branch_and_root
        self.read_headers = self._bp.read_headers
        # Cache maintenance
        notifications.add_callback(self._notify)

    async def _notify(self, height, touched):
        '''Invalidate our history cache for touched hashXs.'''
        hc = self._history_cache
        for hashX in set(hc).intersection(touched):
            del hc[hashX]

    async def broadcast_transaction(self, raw_tx):
        '''Relay a raw transaction (hex) via the daemon.'''
        return await self._daemon.sendrawtransaction([raw_tx])

    async def daemon_request(self, method, args=()):
        '''Forward an arbitrary RPC request to the daemon.'''
        return await getattr(self._daemon, method)(*args)

    def db_height(self):
        '''Height of the last block flushed to the DB.'''
        return self._bp.db_height

    def get_info(self):
        '''Chain state info for LocalRPC and logs.'''
        return {
            'daemon': self._daemon.logged_url(),
            'daemon_height': self._daemon.cached_height(),
            'db_height': self.db_height(),
        }

    async def get_history(self, hashX):
        '''Get history asynchronously to reduce latency.'''
        def job():
            # History DoS limit. Each element of history is about 99
            # bytes when encoded as JSON. This limits resource usage
            # on bloated history requests, and uses a smaller divisor
            # so large requests are logged before refusing them.
            limit = self._env.max_send // 97
            return list(self._bp.get_history(hashX, limit=limit))

        hc = self._history_cache
        if hashX not in hc:
            hc[hashX] = await run_in_thread(job)
        return hc[hashX]

    async def get_utxos(self, hashX):
        '''Get UTXOs asynchronously to reduce latency.'''
        def job():
            return list(self._bp.get_utxos(hashX, limit=None))

        return await run_in_thread(job)

    def header_branch_and_root(self, length, height):
        '''Merkle branch and root for the header at *height* in a chain of
        *length* headers.'''
        return self._bp.header_mc.branch_and_root(length, height)

    def processing_new_block(self):
        '''Return True if we're processing a new block.'''
        return self._daemon.cached_height() > self.db_height()

    def raw_header(self, height):
        '''Return the binary header at the given height.'''
        header, n = self._bp.read_headers(height, 1)
        if n != 1:
            raise IndexError(f'height {height:,d} out of range')
        return header

    def set_daemon_url(self, daemon_url):
        '''Point the daemon at new URL(s); returns the sanitised URL.'''
        self._daemon.set_urls(self._env.coin.daemon_urls(daemon_url))
        return self._daemon.logged_url()

    async def query(self, args, limit):
        '''Answer a LocalRPC query: resolve each arg (script hex or address)
        to a hashX and report its history, UTXOs and balance.'''
        coin = self._env.coin
        db = self._bp
        lines = []

        def arg_to_hashX(arg):
            # Try hex script first; fall through to address decoding.
            try:
                script = bytes.fromhex(arg)
                lines.append(f'Script: {arg}')
                return coin.hashX_from_script(script)
            except ValueError:
                pass
            try:
                hashX = coin.address_to_hashX(arg)
                lines.append(f'Address: {arg}')
                return hashX
            except Base58Error:
                # BUGFIX: "Ingoring" -> "Ignoring" in the diagnostic.
                print(f'Ignoring unknown arg: {arg}')
                return None

        for arg in args:
            hashX = arg_to_hashX(arg)
            if not hashX:
                continue
            n = None
            for n, (tx_hash, height) in enumerate(
                    db.get_history(hashX, limit), start=1):
                lines.append(f'History #{n:,d}: height {height:,d} '
                             f'tx_hash {hash_to_hex_str(tx_hash)}')
            if n is None:
                lines.append('No history found')
            n = None
            for n, utxo in enumerate(db.get_utxos(hashX, limit), start=1):
                lines.append(f'UTXO #{n:,d}: tx_hash '
                             f'{hash_to_hex_str(utxo.tx_hash)} '
                             f'tx_pos {utxo.tx_pos:,d} height '
                             f'{utxo.height:,d} value {utxo.value:,d}')
            if n is None:
                lines.append('No UTXOs found')
            balance = db.get_balance(hashX)
            lines.append(f'Balance: {coin.decimal_value(balance):,f} '
                         f'{coin.SHORTNAME}')
        return lines
|
python
|
import sys
from secret import FLAG, REGISTER, TAPS
# Sanity-check the secret material at import time: flag format, a 16-element
# register and exactly 5 tap positions.
assert FLAG.startswith('flag')
assert len(REGISTER) == 16
assert len(TAPS) == 5
class LFSR:
    """Fibonacci-style linear feedback shift register over single bits."""

    def __init__(self, register, taps):
        # register: current state; index 0 is shifted out first.
        # taps: indices whose XOR forms the feedback bit.
        self.register = register
        self.taps = taps

    def next(self):
        """Shift the register once and return the emitted bit."""
        emitted = self.register[0]
        feedback = 0
        for tap in self.taps:
            feedback ^= self.register[tap]
        self.register = self.register[1:] + [feedback]
        return emitted
def encrypt():
    """Encrypt FLAG byte-by-byte, XORing each bit with the LFSR keystream."""
    ciphertext = []
    for byte in FLAG.encode():
        out_byte = 0
        # MSB-first: rebuild the byte one keystream-XORed bit at a time.
        for bit in '{:08b}'.format(byte):
            out_byte = (out_byte << 1) + (int(bit) ^ lfsr.next())
        ciphertext.append(out_byte)
    return bytes(ciphertext)
if __name__ == '__main__':
    # Single global keystream: repeated 'flag' requests continue the stream,
    # they do not restart it.
    lfsr = LFSR(REGISTER, TAPS)
    while True:
        # Minimal menu-driven loop for the challenge server.
        print('> flag')
        print('> server.py')
        print('> exit')
        cmd = input('> Command: ')
        if cmd == 'exit':
            sys.exit()
        elif cmd == 'flag':
            print(encrypt().hex())
        elif cmd == 'server.py':
            print(open('./server.py', 'r').read())
        else:
            print('Bad hacker')
|
python
|
# -*- coding: utf-8 -*-
# External import
import pytest
from numpy import array, pi
from os.path import join
from multiprocessing import cpu_count
# Pyleecan import
from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin
from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.Output import Output
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
from Tests import save_validation_path as save_path
@pytest.mark.long
@pytest.mark.validation
@pytest.mark.FEMM
@pytest.mark.MeshSol
def test_Magnetic_FEMM_sym():
    """Validation of a polar SIPMSM with surface magnet
    Linear lamination material

    From publication
    Lubin, S. Mezani, and A. Rezzoug,
    "2-D Exact Analytical Model for Surface-Mounted Permanent-Magnet Motors
    with Semi-Closed Slots,"
    IEEE Trans. Magn., vol. 47, no. 2, pp. 479-492, 2011.
    Test compute the Flux in FEMM, with and without symmetry
    and with MANATEE semi-analytical subdomain model
    """
    SPMSM_003 = load(join(DATA_DIR, "Machine", "SPMSM_003.json"))
    simu = Simu1(name="EM_SPMSM_FL_002", machine=SPMSM_003)
    # Definition of the enforced output of the electrical module
    N0 = 3000
    # Enforced three-phase stator currents, one row per time step.
    Is = ImportMatrixVal(
        value=array(
            [
                [6.97244193e-06, 2.25353053e02, -2.25353060e02],
                [-2.60215295e02, 1.30107654e02, 1.30107642e02],
                [-6.97244208e-06, -2.25353053e02, 2.25353060e02],
                [2.60215295e02, -1.30107654e02, -1.30107642e02],
            ]
        )
    )
    time = ImportGenVectLin(start=0, stop=0.015, num=4, endpoint=True)
    # Angular discretization of the airgap.
    Na_tot = 1024
    simu.input = InputCurrent(
        Is=Is,
        Ir=None,  # No winding on the rotor
        N0=N0,
        angle_rotor=None,  # Will be computed
        time=time,
        Na_tot=Na_tot,
        angle_rotor_initial=0.5216 + pi,
    )
    # Definition of the magnetic simulation (no symmetry)
    simu.mag = MagFEMM(
        type_BH_stator=2,
        type_BH_rotor=2,
        is_periodicity_a=False,
        is_get_mesh=True,
        nb_worker=cpu_count(),
    )
    # Only the magnetic module is run; no force or structural computation.
    simu.force = None
    simu.struct = None
    # Copy the simu and activate the symmetry
    assert SPMSM_003.comp_periodicity() == (1, True, 1, True)
    simu_sym = Simu1(init_dict=simu.as_dict())
    simu_sym.mag.is_periodicity_a = True
    out = Output(simu=simu_sym)
    out.post.legend_name = "1/2 symmetry"
    out.post.line_color = "r--"
    simu_sym.run()
    # Plot the mesh and several field contours to the validation save path.
    out.mag.meshsolution.plot_mesh(
        save_path=join(save_path, "EM_SPMSM_FL_002_mesh.png"), is_show_fig=False
    )
    out.mag.meshsolution.plot_mesh(
        group_names="stator core",
        save_path=join(save_path, "EM_SPMSM_FL_002_mesh_stator.png"),
        is_show_fig=False,
    )
    out.mag.meshsolution.plot_mesh(
        group_names=["stator core", "/", "airgap", "stator winding"],
        save_path=join(save_path, "EM_SPMSM_FL_002_mesh_stator_interface.png"),
        is_show_fig=False,
    )
    out.mag.meshsolution.plot_contour(
        label="\mu",
        save_path=join(save_path, "EM_SPMSM_FL_002_mu.png"),
        is_show_fig=False,
    )
    out.mag.meshsolution.plot_contour(
        label="B", save_path=join(save_path, "EM_SPMSM_FL_002_B.png"), is_show_fig=False
    )
    out.mag.meshsolution.plot_contour(
        label="H", save_path=join(save_path, "EM_SPMSM_FL_002_H.png"), is_show_fig=False
    )
    out.mag.meshsolution.plot_contour(
        label="H",
        group_names="stator core",
        save_path=join(save_path, "EM_SPMSM_FL_002_H_stator.png"),
        is_show_fig=False,
    )
    return out
# To run it without pytest (runs the full FEMM validation directly).
if __name__ == "__main__":
    out = test_Magnetic_FEMM_sym()
|
python
|
#!/usr/bin/env python3
"""SERVICE YET TO BE IMPLEMENTED. THIS FILE IS JUST A PLACEHOLDER."""
# Placeholder entry point: prints a notice until the real service is written.
print("Sorry! This service has not yet been implemented\n(will you be the one to take care of it?\n --- RIGHT NOW THIS FILE IS JUST AN HANDY PLACEHOLDER ---")
|
python
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for keras_utils.py."""
import collections
import tensorflow as tf
from tensorflow_federated.python.learning.reconstruction import keras_utils
from tensorflow_federated.python.learning.reconstruction import model as model_lib
def _create_input_spec():
    """Batch spec: flattened 28x28 float images (x) and integer labels (y)."""
    Batch = collections.namedtuple('Batch', ['x', 'y'])
    return Batch(
        x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
        y=tf.TensorSpec(shape=[None, 1], dtype=tf.int32))
def _create_keras_model():
    """Tiny Keras model: reshape to 784 features, then one 10-unit Dense."""
    return tf.keras.Sequential([
        tf.keras.layers.Reshape(target_shape=[784], input_shape=(28 * 28,)),
        tf.keras.layers.Dense(10),
    ])
class KerasUtilsTest(tf.test.TestCase):
  """Tests for `keras_utils.from_keras_model` and `keras_utils.MeanLossMetric`."""

  def test_from_keras_model_succeeds(self):
    """A well-formed model/spec pair wraps without error."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers,
        local_layers=[],
        input_spec=input_spec)

  def test_from_keras_model_fails_bad_input_spec(self):
    """A spec without a label element is rejected with a ValueError."""
    keras_model = _create_keras_model()
    # Only `x`, no `y`: from_keras_model requires a two-element spec.
    input_spec = collections.namedtuple('Batch', ['x'])(
        x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32))
    with self.assertRaisesRegex(ValueError, 'input_spec'):
      keras_utils.from_keras_model(
          keras_model=keras_model,
          global_layers=keras_model.layers,
          local_layers=[],
          input_spec=input_spec)

  def test_from_keras_model_fails_compiled(self):
    """Pre-compiled Keras models are rejected."""
    keras_model = _create_keras_model()
    keras_model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.1))
    input_spec = _create_input_spec()
    with self.assertRaisesRegex(ValueError, 'compiled'):
      keras_utils.from_keras_model(
          keras_model=keras_model,
          global_layers=keras_model.layers,
          local_layers=[],
          input_spec=input_spec)

  def test_from_keras_model_fails_missing_variables(self):
    """Ensures failure if global/local layers are missing variables."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    # Dropping the last layer from global_layers leaves its variables
    # unaccounted for in either partition.
    with self.assertRaisesRegex(ValueError, 'variables'):
      keras_utils.from_keras_model(
          keras_model=keras_model,
          global_layers=keras_model.layers[:-1],
          local_layers=[],
          input_spec=input_spec)

  def test_from_keras_model_succeeds_from_set(self):
    """Layer collections may be sets, not just lists."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=set(keras_model.layers),
        local_layers=set(),
        input_spec=input_spec)

  def test_from_keras_model_properties(self):
    """All variables land in the global partition when local_layers is empty."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    recon_model = keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers,
        local_layers=[],
        input_spec=input_spec)
    # Global trainable/non_trainable should include all the variables, and
    # local should be empty.
    self.assertEqual(recon_model.global_trainable_variables,
                     keras_model.trainable_variables)
    self.assertEqual(recon_model.global_non_trainable_variables,
                     keras_model.non_trainable_variables)
    self.assertEmpty(recon_model.local_trainable_variables)
    self.assertEmpty(recon_model.local_non_trainable_variables)
    self.assertEqual(input_spec, recon_model.input_spec)

  def test_from_keras_model_local_layers_properties(self):
    """Splitting the final Dense layer into local_layers partitions variables."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    recon_model = keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers[:-1],  # Last Dense layer is local.
        local_layers=keras_model.layers[-1:],
        input_spec=input_spec)
    # Expect last two variables, the weights and bias for the final Dense layer,
    # to be local trainable, and the rest global.
    self.assertEqual(recon_model.global_trainable_variables,
                     keras_model.trainable_variables[:-2])
    self.assertEqual(recon_model.global_non_trainable_variables,
                     keras_model.non_trainable_variables)
    self.assertEqual(recon_model.local_trainable_variables,
                     keras_model.trainable_variables[-2:])
    self.assertEmpty(recon_model.local_non_trainable_variables)
    self.assertEqual(input_spec, recon_model.input_spec)

  def test_from_keras_model_forward_pass(self):
    """forward_pass reports num_examples and labels for namedtuple batches."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    recon_model = keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers[:-1],
        local_layers=keras_model.layers[-1:],
        input_spec=input_spec)
    batch_input = collections.namedtuple('Batch', ['x', 'y'])(
        x=tf.ones(shape=[10, 784], dtype=tf.float32),
        y=tf.zeros(shape=[10, 1], dtype=tf.int32))
    batch_output = recon_model.forward_pass(batch_input)
    self.assertIsInstance(batch_output, model_lib.BatchOutput)
    self.assertEqual(batch_output.num_examples, 10)
    self.assertAllEqual(batch_output.labels,
                        tf.zeros(shape=[10, 1], dtype=tf.int32))
    # Change num_examples and labels.
    batch_input = collections.namedtuple('Batch', ['x', 'y'])(
        x=tf.zeros(shape=[5, 784], dtype=tf.float32),
        y=tf.ones(shape=[5, 1], dtype=tf.int32))
    batch_output = recon_model.forward_pass(batch_input)
    self.assertIsInstance(batch_output, model_lib.BatchOutput)
    self.assertEqual(batch_output.num_examples, 5)
    self.assertAllEqual(batch_output.labels,
                        tf.ones(shape=[5, 1], dtype=tf.int32))

  def test_from_keras_model_forward_pass_list_input(self):
    """Forward pass still works with a 2-element list batch input."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    recon_model = keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers[:-1],
        local_layers=keras_model.layers[-1:],
        input_spec=input_spec)
    batch_input = [
        tf.ones(shape=[10, 784], dtype=tf.float32),
        tf.zeros(shape=[10, 1], dtype=tf.int32)
    ]
    batch_output = recon_model.forward_pass(batch_input)
    self.assertIsInstance(batch_output, model_lib.BatchOutput)
    self.assertEqual(batch_output.num_examples, 10)
    self.assertAllEqual(batch_output.labels,
                        tf.zeros(shape=[10, 1], dtype=tf.int32))

  def test_from_keras_model_forward_pass_fails_bad_input_keys(self):
    """Batches whose field names differ from the spec raise KeyError."""
    keras_model = _create_keras_model()
    input_spec = _create_input_spec()
    recon_model = keras_utils.from_keras_model(
        keras_model=keras_model,
        global_layers=keras_model.layers,
        local_layers=[],
        input_spec=input_spec)
    batch_input = collections.namedtuple('Batch', ['a', 'b'])(
        a=tf.ones(shape=[10, 784], dtype=tf.float32),
        b=tf.zeros(shape=[10, 1], dtype=tf.int32))
    with self.assertRaisesRegex(KeyError, 'keys'):
      recon_model.forward_pass(batch_input)

  def test_mean_loss_metric_from_keras_loss(self):
    """Metric result equals the wrapped Keras loss on a single batch."""
    mse_loss = tf.keras.losses.MeanSquaredError()
    mse_metric = keras_utils.MeanLossMetric(mse_loss)
    y_true = tf.ones([10, 1], dtype=tf.float32)
    y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
    mse_metric.update_state(y_true, y_pred)
    self.assertEqual(mse_loss(y_true, y_pred), mse_metric.result())

  def test_mean_loss_metric_multiple_weighted_batches(self):
    """Result is the example-weighted mean across update_state calls."""
    mse_loss = tf.keras.losses.MeanSquaredError()
    mse_metric = keras_utils.MeanLossMetric(mse_loss)
    y_true = tf.ones([10, 1], dtype=tf.float32)
    y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
    mse_metric.update_state(y_true, y_pred)
    y_true = tf.ones([40, 1], dtype=tf.float32)
    y_pred = tf.ones([40, 1], dtype=tf.float32)
    mse_metric.update_state(y_true, y_pred)
    # Final weighted loss is (10 * 0.5^2 + 40 * 0.0) / 50
    self.assertEqual(mse_metric.result(), 0.05)

  def test_mean_loss_metric_from_fn(self):
    """Ensures the mean loss metric also works with a callable."""
    def mse_loss(y_true, y_pred):
      return tf.reduce_mean(tf.square(y_true - y_pred))
    mse_metric = keras_utils.MeanLossMetric(mse_loss)
    y_true = tf.ones([10, 1], dtype=tf.float32)
    y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
    mse_metric.update_state(y_true, y_pred)
    self.assertEqual(mse_loss(y_true, y_pred), mse_metric.result())

  def test_recreate_mean_loss_from_keras_loss(self):
    """Ensures we can create a metric from config, as is done in aggregation."""
    mse_loss = tf.keras.losses.MeanSquaredError()
    mse_metric = keras_utils.MeanLossMetric(mse_loss)
    recreated_mse_metric = type(mse_metric).from_config(mse_metric.get_config())
    y_true = tf.ones([10, 1], dtype=tf.float32)
    y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
    mse_metric.update_state(y_true, y_pred)
    recreated_mse_metric.update_state(y_true, y_pred)
    self.assertEqual(recreated_mse_metric.result(), mse_metric.result())

  def test_recreate_mean_loss_from_fn(self):
    """Round-trips a function-backed metric through get_config/from_config."""
    def mse_loss(y_true, y_pred):
      return tf.reduce_mean(tf.square(y_true - y_pred))
    mse_metric = keras_utils.MeanLossMetric(mse_loss)
    recreated_mse_metric = type(mse_metric).from_config(mse_metric.get_config())
    y_true = tf.ones([10, 1], dtype=tf.float32)
    y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
    mse_metric.update_state(y_true, y_pred)
    recreated_mse_metric.update_state(y_true, y_pred)
    self.assertEqual(recreated_mse_metric.result(), mse_metric.result())
if __name__ == '__main__':
  # Discovers and runs all TF test cases defined in this module.
  tf.test.main()
|
python
|
# Generated by Django 2.1.3 on 2018-11-09 05:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.3 (2018-11-09). Creates the
    # TajweedInformation table used to record tajweed (Quranic recitation
    # rule) annotations against a stored recording. Schema-only; do not
    # hand-edit beyond the generated intent.

    dependencies = [
        ('restapi', '0010_annotatedrecording_recitation_mode'),
    ]

    operations = [
        migrations.CreateModel(
            name='TajweedInformation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # session_id / recording_id are plain char keys, presumably
                # referencing rows in other services — no FK constraint here.
                ('session_id', models.CharField(blank=True, max_length=32)),
                ('recording_id', models.CharField(max_length=32)),
                ('platform', models.CharField(default='web', max_length=32)),
                # Single character the annotation refers to, located by
                # letter_position within the recited text.
                ('letter', models.CharField(max_length=1)),
                ('letter_position', models.IntegerField(default=0)),
                ('degree', models.CharField(choices=[('jali', 'Jali'), ('khafi', 'Khafi')], default='jali', max_length=32)),
                ('category', models.CharField(choices=[('madd', 'Prolongation'), ('tafkheem', 'Fattening'), ('tarqeeq', 'Thinning'), ('makharij', 'Emission'), ('noon', 'Noon'), ('meem', 'Meem'), ('qalqala', 'Echo'), ('other', 'Other')], default='madd', max_length=32)),
            ],
        ),
    ]
|
python
|
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Generic,
Iterable,
Type,
TypeVar,
cast,
overload,
)
if TYPE_CHECKING: # pragma: no cover
from .connection import Connection
from .field import BaseField
from .model import Model
from .utils.lazy_list import LazyList
_REF = TypeVar("_REF", bound="Model")
_THROUGH = TypeVar("_THROUGH", bound="Model")
class ManyToMany(Generic[_REF, _THROUGH]):
    """Descriptor that exposes a many-to-many relation through a middle table.

    Args:
        here (str): Name of the field on the declaring model.
        here_ref (str): ``"model.field"`` path on the middle table that
            points back at ``here``.
        other_ref (str): ``"model.field"`` path on the middle table that
            points at the far table.
        other (str): ``"model.field"`` path on the far table referenced by
            the middle table.

    Note: Although unnecessary, it is strongly recommended to declare
    ForeignKeys on the middle table for both references; behaviour is
    otherwise unspecified.

    Example:
        ```
        class User(Model):
            username = VarChar(32).field()
            primary_key = (username,)
            games = ManyToMany["Game", "Player"](  # typehints are optional
                "username",          # column on this table referenced below
                "players.username",  # middle-table column -> User.username
                "players.gameid",    # middle-table column -> Game.gameid
                "games.gameid",      # far-table column referenced above
            )

        circuit = await User.fetch(username="Circuit")
        circuits_games = await circuit.games.fetchmany()
        ```
        Accessing the attribute on an *instance* yields a bound helper with
        ``fetchmany`` / ``count`` / ``add`` / ``remove`` / ``clear``.
    """

    __slots__: Iterable[str] = (
        "_here",
        "_here_ref",
        "_other_ref",
        "_other",
        "_attribute_name",
    )

    # Populated by Model.__init_subclass__ with the attribute name this
    # descriptor was assigned to on the owning model.
    _attribute_name: str

    def __init__(
        self, here: str, here_ref: str, other_ref: str, other: str
    ) -> None:
        self._here = here
        self._here_ref = here_ref
        self._other_ref = other_ref
        self._other = other

    @overload
    def __get__(
        self, inst: Model, cls: Type[Model]
    ) -> _RealManyToMany[_REF, _THROUGH]:
        ...

    @overload
    def __get__(
        self, inst: None, cls: Type[Model]
    ) -> ManyToMany[_REF, _THROUGH]:
        ...

    def __get__(
        self, inst: Model | None, cls: type[Model]
    ) -> ManyToMany[_REF, _THROUGH] | _RealManyToMany[_REF, _THROUGH]:
        if inst is None:
            # Class-level access: hand back the raw descriptor.
            return self
        bound = self._generate_mtm(inst)
        # Shadow the descriptor on the instance so later accesses skip the
        # descriptor protocol and reuse the resolved helper directly.
        setattr(inst, self._attribute_name, bound)
        return bound

    def _generate_mtm(self, inst: Model) -> _RealManyToMany[_REF, _THROUGH]:
        # One-line factory kept separate so subclasses can hook resolution.
        return _RealManyToMany(self, inst)
class _RealManyToMany(Generic[_REF, _THROUGH]):
    """Instance-bound resolver for a `ManyToMany` descriptor.

    Created lazily by `ManyToMany.__get__`. Resolves the descriptor's
    string references ("model.field") into concrete model classes and
    fields once, then exposes query helpers over the relation.
    """

    __slots__: Iterable[str] = (
        "orig",
        "model",
        "field",
        "mm_model",
        "mm_h_field",
        "mm_o_field",
        "ot_model",
        "ot_field",
    )

    def __init__(
        self, orig: ManyToMany[_REF, _THROUGH], model_inst: Model
    ) -> None:
        """Resolve all of `orig`'s string references against `model_inst.database`.

        Args:
            orig: The ManyToMany descriptor being bound.
            model_inst: The model instance this relation is read from.
        """
        # NOTE: all these casts are ugly, but truthfully
        # there isn't a better way to do this. You can't
        # actually check that these are Models and Fields
        # without creating circular imports (since model.py
        # imports this file)
        self.orig = orig
        # "middle.field" for both sides of the middle ("through") table.
        mm_h_model, _mm_h_field = self.orig._here_ref.split(".")
        mm_o_model, _mm_o_field = self.orig._other_ref.split(".")
        # Both middle references must name the same table.
        assert mm_h_model == mm_o_model
        mm_model = cast(
            "Type[Model]", getattr(model_inst.database, mm_h_model)
        )
        mm_h_field = cast(
            "BaseField[Any, Any, Any]", getattr(mm_model, _mm_h_field)
        )
        mm_o_field = cast(
            "BaseField[Any, Any, Any]", getattr(mm_model, _mm_o_field)
        )
        # The far ("other") table and the field the middle table points at.
        _ot_model, _ot_field = self.orig._other.split(".")
        ot_model = cast("Type[Model]", getattr(model_inst.database, _ot_model))
        ot_field = cast(
            "BaseField[Any, Any, Any]", getattr(ot_model, _ot_field)
        )
        self.model = model_inst
        self.field = cast(
            "BaseField[Any, Any, Any]",
            getattr(model_inst.__class__, self.orig._here),
        )
        self.mm_model = mm_model
        self.mm_h_field = mm_h_field
        self.mm_o_field = mm_o_field
        self.ot_model = ot_model
        self.ot_field = ot_field

    def __getattr__(self, name: str) -> Any:
        # Anything not defined here falls through to the descriptor, so the
        # bound helper can still be used like the original ManyToMany.
        return getattr(self.orig, name)

    async def fetchmany(
        self, con: Connection | None = None
    ) -> LazyList[dict[str, Any], Model]:
        """Fetch all rows from the final table that belong to this instance.

        Returns:
            LazyList[dict, Model]: A lazy-list of returned Models.
        """
        # SELECT ot WHERE EXISTS (SELECT mm WHERE mm.h = this AND mm.o = ot).
        return (
            await self.ot_model.fetch_query(con=con)
            .where(
                self.mm_model.fetch_query()
                .where(
                    self.mm_h_field.eq(
                        self.model._raw_values[self.field.name]
                    ),
                    self.mm_o_field.eq(self.ot_field),
                )
                .exists()
            )
            .fetchmany()
        )

    async def count(self, con: Connection | None = None) -> int:
        """Returns the count.

        Warning: To be efficient, this returns the count of *middle* models,
        which may differ from the number of final models if you did not use
        ForeignKeys properly.

        Returns:
            int: The count.
        """
        return (
            await self.mm_model.fetch_query(con=con)
            .where(self.mm_h_field.eq(self.model._raw_values[self.field.name]))
            .count()
        )

    async def clear(
        self, con: Connection | None = None
    ) -> LazyList[dict[str, Any], Model]:
        """Remove all instances of the other model from this instance.

        Both of these lines do the same thing:
        ```
        deleted_players = await user.games.clear()
        deleted_players = await Player.delete_query().where(
            username=user.name
        ).execute()
        ```

        Returns:
            LazyList[dict, _REF]: A lazy-list of deleted through models (in
            the example, it would be a list of Player).
        """
        # Only middle-table rows are deleted; far-table rows are untouched.
        return (
            await self.mm_model.delete_query(con=con)
            .where(self.mm_h_field.eq(self.model._raw_values[self.field.name]))
            .execute()
        )

    async def add(self, other: Model, con: Connection | None = None) -> Model:
        """Add one or more models to this ManyToMany.

        Each of these lines does the exact same thing:
        ```
        player = await user.games.add(game)
        # OR
        player = await games.users.add(user)
        # OR
        player = await Player(username=user.name, gameid=game.id).create()
        ```

        Returns:
            Model: The reference model that lines this model and the other
            model. In the example, the return would be a Player.
        """
        # Insert a middle-table row linking this instance to `other`.
        values = {
            self.mm_h_field.name: self.model._raw_values[self.field.name],
            self.mm_o_field.name: other._raw_values[self.ot_field.name],
        }
        return await self.mm_model(**values).create(con=con)

    async def remove(
        self, other: Model, con: Connection | None = None
    ) -> LazyList[dict[str, Any], Model]:
        """Remove one or models from this ManyToMany.

        Each of these lines does the exact same thing:
        ```
        deleted_players = await user.games.remove(game)
        # OR
        deleted_players = await games.user.remove(user)
        # OR
        deleted_players = await Player.delete_query().where(
            username=user.name, gameid=game.id
        ).execute()
        ```

        Note: The fact that .remove() returns a list instead of a single model
        was intentional. The reason is ManyToMany does not enforce uniqueness
        in any way, so there could be multiple Players that link a single user
        to a single game. Thus, user.remove(game) could actually end up
        deleting multiple players.
        """
        values = {
            self.mm_h_field.name: self.model._raw_values[self.field.name],
            self.mm_o_field.name: other._raw_values[self.ot_field.name],
        }
        return (
            await self.mm_model.delete_query(con=con).where(**values).execute()
        )
|
python
|
import torch
from numpy import histogram, random
from scipy.stats import skewnorm
from torch import Tensor, from_numpy
from torch.nn.functional import softmax
class WGAN:
    """Wasserstein GAN critic/generator losses (no gradient penalty)."""

    def __init__(self) -> None:
        super().__init__()

    def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Critic loss: E[fake] - E[real] (drive real scores up, fake down)."""
        return fake_scores.mean() + (-real_scores).mean()

    def generator_loss(self, fake_scores: Tensor) -> Tensor:
        """Generator loss: -E[fake] (drive fake scores up)."""
        return (-fake_scores).mean()
class RaHinge:
    """Relativistic average hinge GAN losses."""

    def __init__(self) -> None:
        super().__init__()

    def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Hinge loss on scores made relative to the opposite batch mean."""
        rel_real = real_scores - fake_scores.mean()
        rel_fake = fake_scores - real_scores.mean()
        hinge_real = torch.relu(1.0 - rel_real).mean()
        hinge_fake = torch.relu(1.0 + rel_fake).mean()
        return (hinge_real + hinge_fake) / 2

    def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Mirror of the discriminator loss with real/fake roles swapped."""
        rel_real = real_scores - fake_scores.mean()
        rel_fake = fake_scores - real_scores.mean()
        hinge_fake = torch.relu(1.0 - rel_fake).mean()
        hinge_real = torch.relu(1.0 + rel_real).mean()
        return (hinge_real + hinge_fake) / 2
class RaLSGAN:
    """Relativistic average least-squares GAN losses."""

    def __init__(self) -> None:
        super().__init__()

    def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Push relativistic real scores toward +1 and fake scores toward -1.

        NOTE(review): unlike ``generator_loss`` this returns a 1-element
        tensor (``unsqueeze(0)``), not a scalar — presumably for downstream
        concatenation; confirm before changing.
        """
        rel_real = real_scores - fake_scores.mean()
        rel_fake = fake_scores - real_scores.mean()
        sq_real = (rel_real - 1.0) ** 2
        sq_fake = (rel_fake + 1.0) ** 2
        combined = (sq_fake.mean() + sq_real.mean()) / 2
        return combined.unsqueeze(0)

    def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Swapped targets so fake scores look relatively real."""
        rel_real = real_scores - fake_scores.mean()
        rel_fake = fake_scores - real_scores.mean()
        sq_real = (rel_real + 1.0) ** 2
        sq_fake = (rel_fake - 1.0) ** 2
        return (sq_fake.mean() + sq_real.mean()) / 2
def js_div(p, q, reduce=True):
    """Jensen-Shannon divergence: mean of the two KL terms vs the midpoint."""
    midpoint = 0.5 * (p + q)
    per_row = 0.5 * (
        kl_div(p, midpoint, reduce=False) + kl_div(q, midpoint, reduce=False)
    )
    return torch.mean(per_row) if reduce else per_row
def kl_div(p, q, epsilon=1e-12, reduce=True):
    """Row-wise KL(p || q); `epsilon` guards the ratio against division by zero."""
    log_ratio = (p / (q + epsilon)).log()
    per_row = (p * log_ratio).sum(dim=1)
    return per_row.mean() if reduce else per_row
class Realness:
    """RealnessGAN-style losses.

    Discriminator outputs are treated as logits over ``score_dim`` outcome
    bins; softmax turns them into distributions that are compared (via KL
    or JS divergence) against fixed anchor distributions sampled once at
    construction time.
    """

    def __init__(self, score_dim) -> None:
        super().__init__()
        self.score_dim = score_dim
        self.gauss_uniform = True
        self.measure = 'kl'
        if self.measure == 'js':
            self.distance = js_div
        elif self.measure == 'kl':
            self.distance = kl_div
        else:
            raise NotImplementedError()

        def make_anchor(samples):
            # Normalised histogram over `score_dim` bins as a float tensor.
            count, _ = histogram(samples, self.score_dim)
            return from_numpy(count / sum(count)).float()

        if self.gauss_uniform:
            # anchor0 ("fake" target): narrow Gaussian; anchor1 ("real"): uniform.
            self.anchor0 = make_anchor(random.normal(0.0, 0.1, size=1000))
            self.anchor1 = make_anchor(random.uniform(-1.0, 1.0, size=1000))
        else:
            # Skew-normal anchors: left-skewed for fake, right-skewed for real.
            self.anchor0 = make_anchor(skewnorm.rvs(-5.0, size=1000))
            self.anchor1 = make_anchor(skewnorm.rvs(5.0, size=1000))

    def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """Match real outputs to anchor1 and fake outputs to anchor0."""
        self.anchor0 = self.anchor0.to(real_scores)
        self.anchor1 = self.anchor1.to(real_scores)
        real_dist = softmax(real_scores, dim=1)
        fake_dist = softmax(fake_scores, dim=1)
        return self.distance(self.anchor1, real_dist) + self.distance(
            self.anchor0, fake_dist
        )

    def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
        """EQ19 of the RealnessGAN formulation: relativistic term minus the
        fake-anchor term (alternatives: anchor-only, or EQ20 anchor1-based)."""
        self.anchor0 = self.anchor0.to(real_scores)
        self.anchor1 = self.anchor1.to(real_scores)
        real_dist = softmax(real_scores, dim=1)
        fake_dist = softmax(fake_scores, dim=1)
        return self.distance(real_dist, fake_dist) - self.distance(
            self.anchor0, fake_dist
        )
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-01 13:53
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.7 (2017-08-01). Alters three fields on
    # the `sales` model; schema-only, no data migration required.
    # NOTE(review): the verbose_name byte sequences below are mojibake of
    # Cyrillic labels — kept exactly as generated.

    dependencies = [
        ('shop', '0003_auto_20170801_1819'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sales',
            name='description',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='sales',
            name='discount',
            field=models.IntegerField(blank=True, null=True, verbose_name='ะกะบะธะดะบะฐ'),
        ),
        migrations.AlterField(
            model_name='sales',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='shops/sales', verbose_name='ะะทะพะฑัะฐะถะตะฝะธะต'),
        ),
    ]
|
python
|
import pytest
from pathlib import Path
from coolcmp.cmp.source_code import *
from unit_tests.utils import run_test_codegen
tests = []
# Collect the absolute path of every program listed in compiled_files.txt;
# each path becomes one parametrized test case below.
with open('unit_tests/compiled_files.txt') as f:
    for line in f:
        tests.append(Path(line.rstrip()).resolve())


@pytest.mark.complete
@pytest.mark.parametrize('file', tests, ids=map(str, tests))
def test_complete(file):
    """Run the full code-generation pipeline on one listed source file."""
    run_test_codegen(file)
|
python
|
๏ปฟ#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'shouke'
from common.log import logger
from common.globalvar import db_related_to_project_dic
from unittesttestcase import MyUnittestTestCase
__all__ = ['DBUnittestTestCase']
class DBUnittestTestCase(MyUnittestTestCase):
    """Data-driven unittest cases for database operations.

    The base class is expected to populate, per test run:
      - ``self.op_object``: key into ``db_related_to_project_dic`` selecting
        the target database helper,
      - ``self.url_or_sql``: the SQL statement (or template) to execute,
      - ``self.input_params``: the SQL parameters as a Python-literal string.
    Failures are reported by forcing ``assertEqual(1, 0)`` with a
    ``fail#<reason>`` message, which the surrounding framework parses.
    """

    def test_select_one_record(self):
        """Execute a single-row SELECT, save the result, then assert on it."""
        if self.input_params != '':
            # Trailing comma so a single value still evals to a tuple.
            self.input_params = self.input_params + ','
        # Convert the string form of the parameter tuple into a real tuple.
        # NOTE(review): eval() on externally supplied test data is unsafe if
        # the data source is not fully trusted — consider ast.literal_eval.
        self.input_params = eval(self.input_params)
        try:
            flag, query_result = db_related_to_project_dic[self.op_object].select_one_record(self.url_or_sql, self.input_params)
            logger.info('ๆฐๆฎๅบๆๅกๅจ่ฟๅ็ๆฅ่ฏข็ปๆไธบไธบ query_result๏ผ%s, flag๏ผ%s' % (query_result, flag))
            if flag:
                if query_result:
                    logger.info('ๆญฃๅจไฟๅญ็ฎๆ ๅ
ๅฎนๅฐ่ชๅฎไนๅ้')
                    # If the user defined "output" params, also save the
                    # target values into user-defined variables.
                    self.save_result(query_result)  # Persist the query record.
                    logger.info('ๆญฃๅจ่ฟ่ก็ปๆๆญ่จ')
                    self.assert_result(query_result)
            else:
                msg = 'fail#%s' % query_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)

    def test_update_record(self):
        """Execute an UPDATE statement; only the success flag is checked."""
        if self.input_params != '':
            # NOTE(review): no trailing ',' is appended here, unlike
            # test_select_one_record — confirm single-element tuples are
            # expected to already carry the comma.
            self.input_params = self.input_params
        # Convert the string form of the parameter tuple into a real tuple.
        self.input_params = eval(self.input_params)
        try:
            flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
            if not flag:
                msg = 'fail#%s' % execute_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)

    def test_delete_record(self):
        """Execute a DELETE statement; only the success flag is checked."""
        if self.input_params != '':
            self.input_params = self.input_params
        # Convert the string form of the parameter tuple into a real tuple.
        self.input_params = eval(self.input_params)
        try:
            flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
            if not flag:
                msg = 'fail#%s' % execute_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)

    def test_call_proc(self):
        """Invoke a stored procedure; only the success flag is checked."""
        if self.input_params != '':
            self.input_params = self.input_params
        # Convert the string form of the parameter tuple into a real tuple.
        self.input_params = eval(self.input_params)
        try:
            flag, execute_result = db_related_to_project_dic[self.op_object].call_proc(self.url_or_sql, self.input_params)
            if not flag:
                msg = 'fail#%s' % execute_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)

    def test_truncate_table(self):
        """Execute a TRUNCATE via the generic update path."""
        if self.input_params != '':
            self.input_params = self.input_params
        # Convert the string form of the parameter tuple into a real tuple.
        self.input_params = eval(self.input_params)
        try:
            flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
            if not flag:
                msg = 'fail#%s' % execute_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)

    def test_insert_record(self):
        """Execute an INSERT; the SQL is a %-template filled with the params."""
        if self.input_params != '':
            self.input_params = self.input_params
        # Convert the string form of the parameter tuple into a real tuple.
        self.input_params = eval(self.input_params)
        try:
            # NOTE(review): %-interpolating params straight into the SQL text
            # bypasses driver-side parameter binding — injection risk if the
            # test data is not fully trusted.
            temp_sql = self.url_or_sql % self.input_params
            flag, execute_result = db_related_to_project_dic[self.op_object].execute_insert(temp_sql, '')
            if not flag:
                msg = 'fail#%s' % execute_result
                self.assertEqual(1, 0, msg=msg)
        except Exception as e:
            msg = 'fail#%s' % e
            logger.error(msg)
            self.assertEqual(1, 0, msg=msg)
|
python
|
"""
Simple HTTP Server with GET that waits for given seconds.
"""
import math
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
ENCODING = 'utf-8'
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that serves each request in its own thread."""
class MyRequestHandler(BaseHTTPRequestHandler):
    """Very simple request handler. Only supports GET."""

    def do_GET(self):
        """Respond with 200 after waiting the number of seconds in the path.

        The path is interpreted as "/<seconds>"; anything that does not
        parse as a finite, non-negative float is treated as 0 seconds.
        """
        try:
            seconds = float(self.path[1:])
        except ValueError:
            seconds = 0.0
        # Reject NaN, +/-inf and negatives: "/nan" or "/-1" would make
        # time.sleep() raise, and "/inf" would hang this handler thread
        # forever (a trivial denial of service).
        if not math.isfinite(seconds) or seconds < 0:
            seconds = 0.0
        text = "Waited for {:4.2f} seconds.\nThat's all.\n"
        msg = text.format(seconds).encode(ENCODING)
        time.sleep(seconds)
        self.send_response(200)
        self.send_header("Content-type", "text/plain; charset=utf-8")
        self.send_header("Content-length", str(len(msg)))
        self.end_headers()
        self.wfile.write(msg)
def run(server_class=ThreadingHTTPServer,
        handler_class=MyRequestHandler,
        port=8888):
    """Run the simple server on a given port."""
    httpd = server_class(('', port), handler_class)
    print("Serving from port {}...".format(port))
    httpd.serve_forever()
if __name__ == '__main__':
    # Start the demo server on the default port when run as a script.
    run()
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.