hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70f6c2e837dc663e4225ba197c388dee678dd48 | 624 | py | Python | Algorithms/0091_Decode_Ways/Python/Decode_Ways_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null | Algorithms/0091_Decode_Ways/Python/Decode_Ways_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null | Algorithms/0091_Decode_Ways/Python/Decode_Ways_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null |
# Space: O(1)
# Time: O(n)
class Solution:
    def numDecodings(self, s: str) -> int:
        """Return the number of ways to decode a digit string.

        Letters map to numbers 'A'->'1' ... 'Z'->'26'; a decoding is valid
        only if every chunk is in '1'-'26' with no leading zero.

        :param s: the encoded digit string
        :return: the number of decodings (0 if none exist or s is empty)
        """
        if len(s) == 0:
            return 0
        # Iterative DP scanning right-to-left.  Replaces the original
        # recursion, which leaked a memo dict onto the instance
        # (self.cache) and raised RecursionError on long inputs.
        ways_after_next = 1                      # ways to decode the empty suffix
        ways_next = 0 if s[-1] == '0' else 1     # ways to decode s[-1:]
        for i in range(len(s) - 2, -1, -1):
            ways_here = 0
            if s[i] != '0':
                # single-digit decode s[i]
                ways_here = ways_next
                # two-digit decode s[i:i+2]; s[i] != '0' so value >= 10
                if int(s[i:i + 2]) <= 26:
                    ways_here += ways_after_next
            ways_after_next, ways_next = ways_next, ways_here
        return ways_next
| 19.5 | 62 | 0.491987 |
class Solution:
    def numDecodings(self, s: str) -> int:
        """Count the decodings of a digit string ('1'->'A' ... '26'->'Z').

        :param s: the encoded digit string
        :return: the number of valid decodings (0 if none or s is empty)
        """
        if not s:
            return 0
        # memoise on the remaining suffix; the empty suffix decodes one way
        self.cache = {'': 1}

        def count(rest):
            if rest in self.cache:
                return self.cache[rest]
            if rest.startswith('0'):
                return 0
            if len(rest) == 1:
                return 1
            # take one digit, then optionally two (rest[0] != '0', so the
            # two-digit prefix is already >= 10)
            total = count(rest[1:])
            if int(rest[:2]) <= 26:
                total += count(rest[2:])
            self.cache[rest] = total
            return total

        return count(s)
| true | true |
f70f6d36aba38dbe1415ec7683f3960500689480 | 968 | py | Python | tempest/api/compute/security_groups/base.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | 3 | 2016-07-15T12:27:23.000Z | 2021-04-23T04:41:10.000Z | tempest/api/compute/security_groups/base.py | LIS/lis-tempest | 8e6403b2d6de81c5d18ed867b4977385c8278b75 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/security_groups/base.py | LIS/lis-tempest | 8e6403b2d6de81c5d18ed867b4977385c8278b75 | [
"Apache-2.0"
] | 12 | 2016-07-14T18:13:05.000Z | 2017-07-08T18:45:42.000Z | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
    """Base class for compute security-group API tests.

    Requests the network resources (a network and a subnet) that the
    security-group tests need during credential setup.
    """

    @classmethod
    def setup_credentials(cls):
        # A network and a subnet will be created for these tests
        # NOTE(review): the resource request is made before delegating to
        # the superclass -- presumably it must precede credential
        # allocation, so keep this ordering.
        cls.set_network_resources(network=True, subnet=True)
        super(BaseSecurityGroupsTest, cls).setup_credentials()
| 37.230769 | 78 | 0.737603 |
from tempest.api.compute import base
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
    """Base class for compute security-group API tests."""

    @classmethod
    def setup_credentials(cls):
        # Request a network and a subnet, then delegate to the superclass.
        # NOTE(review): ordering appears deliberate (resources requested
        # before credentials are set up) -- do not reorder.
        cls.set_network_resources(network=True, subnet=True)
        super(BaseSecurityGroupsTest, cls).setup_credentials()
| true | true |
f70f6ea82999764663604c0defd9f8fd956a3e54 | 43 | py | Python | embedvideos/__init__.py | vkaracic/EmbedVideosXBlock | e0cd04b41d655d8e8e69c6c9c4c0a41c22600965 | [
"MIT"
] | null | null | null | embedvideos/__init__.py | vkaracic/EmbedVideosXBlock | e0cd04b41d655d8e8e69c6c9c4c0a41c22600965 | [
"MIT"
] | null | null | null | embedvideos/__init__.py | vkaracic/EmbedVideosXBlock | e0cd04b41d655d8e8e69c6c9c4c0a41c22600965 | [
"MIT"
] | null | null | null | from .embedvideos import EmbedVideosXBlock
| 21.5 | 42 | 0.883721 | from .embedvideos import EmbedVideosXBlock
| true | true |
f70f70495df42bcff3545d60f74aba312be6b44a | 228 | py | Python | vininfo/__init__.py | ghilesmeddour/vininfo | 63cbf7dcdd9d106fb9c9a56d5c4f11c3dd794b1d | [
"BSD-3-Clause"
] | 60 | 2018-07-28T14:53:57.000Z | 2022-02-22T12:11:24.000Z | vininfo/__init__.py | ghilesmeddour/vininfo | 63cbf7dcdd9d106fb9c9a56d5c4f11c3dd794b1d | [
"BSD-3-Clause"
] | 16 | 2018-07-30T08:57:08.000Z | 2021-12-25T09:20:03.000Z | vininfo/__init__.py | ghilesmeddour/vininfo | 63cbf7dcdd9d106fb9c9a56d5c4f11c3dd794b1d | [
"BSD-3-Clause"
] | 29 | 2018-07-30T08:36:07.000Z | 2022-03-09T12:02:06.000Z | from .toolbox import Vin
from .exceptions import ValidationError, VininfoException
VERSION = (1, 6, 0)
"""Application version number tuple."""
VERSION_STR = '.'.join(map(str, VERSION))
"""Application version number string.""" | 25.333333 | 57 | 0.736842 | from .toolbox import Vin
from .exceptions import ValidationError, VininfoException
VERSION = (1, 6, 0)
VERSION_STR = '.'.join(map(str, VERSION)) | true | true |
f70f70dc9554cc5a86433d7d1c74bb4e02d3ad76 | 46,498 | py | Python | gbxml/gbxml.py | building-energy/gbxml | 039edf6e33cccbb76dcda5fbb871aeb950ad0a87 | [
"MIT"
] | 5 | 2020-04-24T15:59:45.000Z | 2022-02-23T14:40:14.000Z | gbxml/gbxml.py | building-energy/gbxml | 039edf6e33cccbb76dcda5fbb871aeb950ad0a87 | [
"MIT"
] | 2 | 2021-07-05T12:09:09.000Z | 2022-02-05T07:05:59.000Z | gbxml/gbxml.py | building-energy/gbxml | 039edf6e33cccbb76dcda5fbb871aeb950ad0a87 | [
"MIT"
] | 1 | 2020-04-24T15:59:48.000Z | 2020-04-24T15:59:48.000Z | # -*- coding: utf-8 -*-
from lxml import etree
import pkgutil
from io import BytesIO
from . import xml_functions, construction_functions, layer_functions
from . import surface_functions, space_functions, building_functions
from . import opening_functions, zone_functions
class Gbxml():
"A class that represents a gbXML file and the gbXML schema"
def __init__(self,
gbxml_fp=None,
gbxsd_fp=None):
"""Initialises a new Gbxml instance
Arguments:
gbxml_fp (str): filepath to a gbXML file. This is read in as an
lxml._ElementTree object. If not supplied then a
new lxml._ElementTree object with only a root element is created.
gbxsd_fp (str): filepath to a gbXML schema file. If not supplied
then a default gbXMl schema file is used.
"""
if gbxml_fp:
self._ElementTree=etree.parse(gbxml_fp)
else:
st = pkgutil.get_data(__package__, 'blank.xml')
self._ElementTree=etree.parse(BytesIO(st))
if gbxsd_fp:
self._ElementTree_gbxsd=etree.parse(gbxml_fp)
else:
st = pkgutil.get_data(__package__, 'GreenBuildingXML_Ver6.01.xsd')
self._ElementTree_gbxsd=etree.parse(BytesIO(st))
self.ns={'gbxml':'http://www.gbxml.org/schema'}
# general query methods
def get_ids(self, tag=None):
"""Returns the id attributes of elements
:param tag: an element tag to filter on
:type tag: str, optional
:return: a list of element ids
:rtype: list
"""
if tag is None: tag='*'
element=self._ElementTree.getroot()
return xml_functions.get_ids(element,tag)
def get_xmlstring(self,id=None):
"""Returns a string of an xml element
:param id: an element id to filter on
:type id: str, optional
:return: a string of xml contents
:rtype: str
"""
element=self._ElementTree.getroot()
if not id is None:
st='//gbxml:*[@id="%s"]' % id
element=element.xpath(st,namespaces=self.ns)[0]
return xml_functions.get_xmlstring(element)
def get_attributes(self,id):
"""Returns the attributes of an element
:param id: an element id
:type id: str
:return: the attributes of the element
:rtype: dict
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_attributes(element)
def get_child_tags(self,id):
"""Returns the child tags of an element
:param id: an element id
:type id: str
:return: a list of the tags of the child elements
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tags(element)
def get_child_tag_text(self,id,child_tag):
"""Returns the text of child elements
:param id: an element id
:type id: str
:param child_tag: a tag of a child element
:type child_tag: str
:return: a list of the text of child elements with the child_tag tag
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_text(element,child_tag)
def get_child_tag_attributes(self,id,child_tag):
"""Returns the attributes of child elements
:param id: an element id
:type id: str
:param child_tag: a tag of a child element
:type child_tag: str
:return: a list of the attributes of each child element with the child_tag tag
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_attributes(element,child_tag)
def get_children_list(self,id):
"""Returns a list of dicts representing each child element
:param id: an element id
:type id: str
:return: a list of dicts {'tag':(str),'text':(str),'attributes':(dict)}
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_children_list(element)
# campus query methods
def get_campus_location_tags(self,id):
"""Returns the child tags of the Location element of a campus
:param id: a Campus element id
:type id: str
:return: a list of the tags of the Location element
:rtype: list
"""
st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tags(element)
def get_campus_location_tag_text(self,id,child_tag):
"""Returns the text of Location child elements of a campus
:param id: a Campus element id
:type id: str
:param child_tag: a tag of a child element of the Location element
:type child_tag: str
:return: a list of the text of child elements of the Location element
with the child_tag tag
:rtype: list
"""
st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_text(element,child_tag)
# building query methods
def get_building_space_ids(self,id):
"""Returns the ids of all spaces in a building
:param id: a Building element id
:type id: str
:return: a list of Space ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get space ids
return building_functions.get_space_ids(element)
def get_building_surface_ids(self,id):
"""Returns the ids of all surfaces in a building
:param id: a Building element id
:type id: str
:return: a list of Surface ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface ids
return building_functions.get_surface_ids(element)
# space query methods
def get_space_surface_ids(self,id):
"""Returns the ids of all surfaces adjacent to a space
:param id: a Space element id
:type id: str
:return: a list of surface ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building/gbxml:Space[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface ids
return space_functions.get_surface_ids(element)
# construction query methods
def get_construction_layer_ids(self,id):
"""Returns the layer ids of a construction
:param id: a Construction element id
:type id: str
:return: a list of layer ids
:rtype: list
"""
# get element from id
st='./gbxml:Construction[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get layer ids
return construction_functions.get_layer_ids(element)
def get_construction_material_ids(self,id):
"""Returns the material ids of a construction
:param id: a Construction element id
:type id: str
:return: a list of material ids
:rtype: list
"""
# get element from id
st='./gbxml:Construction[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get material ids
return construction_functions.get_material_ids(element)
# layer query methods
def get_layer_material_ids(self,id):
"""Returns the material ids of a construction
:param id: a Layer element id
:type id: str
:return: a list of material ids
:rtype: list
"""
# get element from id
st='./gbxml:Layer[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get layer ids
return layer_functions.get_material_ids(element)
# surface query methods
def get_surface_inner_space_id(self,id):
"""Returns the inner space id of a surface
:param id: a Surface element id
:type id: str
:return: the inner Space id
:rtype: str or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get inner space id
return surface_functions.get_inner_space_id(element)
def get_surface_outer_space_id(self,id):
"""Returns the outer space id of a surface
:param id: a Surface element id
:type id: str
:return: the outer Space id
:rtype: str or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get outer space id
return surface_functions.get_outer_space_id(element)
def get_surface_azimuth(self,id):
"""Returns the azimuth of a surface
:param id: a Surface element id
:type id: str
:return: the azimuth value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get azimuth
return surface_functions.get_azimuth(element)
def get_surface_tilt(self,id):
"""Returns the tilt of a surface
:param id: a Surface element id
:type id: str
:return: the tilt value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get tilt
return surface_functions.get_tilt(element)
def get_surface_coordinates(self,id):
"""Returns the coordinates of a surface
:param id: a Surface element id
:type id: str
:return: a list of coordinate tuples (x,y,z)
:rtype: list (of tuples)
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get coordinates
return surface_functions.get_coordinates(element)
def get_surface_area(self,id):
"""Returns the area of a surface
This is calculated using the surface coordiantes and includes the
area of any openings.
:param id: a Surface element id
:type id: str
:return: the area value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get area
return surface_functions.get_area(element)
def get_surface_opening_ids(self,id):
"""Returns the opening ids of a surface
:param id: a Surface element id
:type id: str
:return: a list of Opening ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get opening ids
return surface_functions.get_opening_ids(element)
# opening query methods
def get_opening_surface_id(self,id):
"""Returns the parent surface id of an opening
:param id: a Opening element id
:type id: str
:return: a Surface id
:rtype: str
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface id
return opening_functions.get_surface_id(element)
def get_opening_coordinates(self,id):
"""Returns the coordinates of an opening
:param id: a Opening element id
:type id: str
:return: a list of coordinate tuples (x,y,z)
:rtype: list (of tuples)
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get coordinates
return opening_functions.get_coordinates(element)
def get_opening_area(self,id):
"""Returns the area of an opening
:param id: a Opening element id
:type id: str
:return: the area value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get area
return opening_functions.get_area(element)
# zone query methods
def get_zone_space_ids(self,id):
"""Returns the ids of all spaces in a zone
:param id: a Zone element id
:type id: str
:return: a list of Space ids
:rtype: list
"""
# get element from id
st='./gbxml:Zone[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get space ids
return zone_functions.get_space_ids(element)
## OUTPUT
#
#
# def __xmlstring(self,element=None):
# """Returns a string of an xml element
#
# Arguments:
# - element (lxml.etree._Element): default is root node
#
# """
# if element is None: element=self.root()
# return etree.tostring(element,pretty_print=True).decode()
#
#
# def xpath(self,element,st_xpath):
# """Returns the result of an xpath operation on the gbXML file
#
# Arguments
# - st_xpath (str): the xpath string
# - element (lxml.etree._Element): the element for the xpath operation. The
# default is the root element
#
# """
# return element.xpath(st_xpath,namespaces=self.ns)
#
#
# def write(self,fp):
# """Writes the gbXML file to disc
#
# Arguments:
# fp (str): the filepath
# """
# st=etree.tostring(self.root(),xml_declaration=True)
# with open(fp,'wb') as f:
# f.write(st)
#
## VALIDATION
#
# def validate(self):
# """Validates the gbXMl file using the schema
#
# Returns True if the gbXML file is valid, otherwise False
#
# """
# xmlschema = etree.XMLSchema(self.gbxsd._ElementTree)
# result=xmlschema.validate(self._ElementTree)
# return result
#
## EDITING
#
# def add_element(self,parent_element,label,text=None,**kwargs):
# """Adds an element to the gbXML
#
# Returns the newly created element
#
# Arguments:
# - parent_element (lxml._Element or str): the parent element that the
# new element is added to. This can be either a lxml._Element object
# or a string with the element id.
# - label (str): the label or tag of the new element
# - text (str): the text of the new element
# - **kwargs (keywords): the attributes of the new element
#
# """
# if isinstance(parent_element,str):
# parent_element=self.element(parent_element)
# e=etree.SubElement(parent_element,'{%s}%s' % (self.ns['gbxml'],label))
# if text: e.text=text
# if kwargs:
# for k,v in kwargs.items():
# e.set(k,v)
# return e
#
# def set_attribute(self,element,key,value):
# """Sets the attribute of an element
#
# Returns the modified element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - key (str): the name of the attribute
# - value (str): the value of the attribute
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.set(key,value)
# return element
#
#
# def set_element_id(self,element,new_id):
# """Sets a new id attribute for an element and updates all links
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - new_id (str):
#
# Return value:
# - new_id (str)
#
# """
# #check if new_id already exists
# l=self.elements()
# ids=[x.get('id') for x in l if x.get('id')]
# if new_id in ids:
# raise ValueError('new_id %s already exists' % new_id)
#
# #get element
# if isinstance(element,str):
# element=self.element(element)
#
# #get old id
# old_id=element.get('id')
#
# #set new id
# element.set('id',new_id)
#
# #find all elements with attribute labelRefId=old_id
# label=self.label(element)
# prefix=label[0].lower()+label[1:]
# st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,old_id)
# l=self.xpath(self.root(),st)
#
# #update with id
# for e in l:
# e.set('%sIdRef' % prefix,new_id)
# #return new id
# return new_id
#
#
# def set_text(self,element,text):
# """Sets the text of an element
#
# Returns the modified element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - text (str): the text
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.text=text
# return element
#
#
# def remove_element(self,element):
# """Removes an element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# """
# if isinstance(element,str):
# element=self.element(element)
#
# #remove links to element
# id=element.get('id')
# label=self.label(element)
# prefix=label[0].lower()+label[1:]
# st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,id)
# l=self.xpath(self.root(),st)
# for x in l:
# self.remove_attribute(x,'%sIdRef' % prefix)
#
# #remove element
# parent=element.getparent()
# parent.remove(element)
#
#
# def remove_attribute(self,element,key):
# """Removes an element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - key (str): The name of the attribute to delete
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.attrib.pop(key)
#
#
# def remove_text(self,element):
# pass
#
#
#
## QUERYING
#
# def elements(self,label='*'):
# """Returns the elements of the gbXML file
#
# Arguments:
# - label (str): the label of the elements
#
# """
# st='//gbxml:%s' % label
# return self.xpath(self.root(),st)
#
#
# def root(self):
# "Returns the root element"
# return self._ElementTree.getroot()
#
#
# def element(self,id,label='*'):
# """Returns an element from the gbXML file
#
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
# st='//gbxml:%s[@id="%s"]' % (label,id)
# try:
# return self.xpath(self.root(),st)[0]
# except IndexError:
# raise KeyError('there is no element with an id of %s' % id)
#
#
# def label(self,element):
# """Returns the label of an element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return element.tag.split('}')[1]
#
#
# def attributes(self,element):
# """Returns the attributes of an element
#
# Return value is a dictionary
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return dict(element.attrib)
#
#
# def text(self,element):
# """Returns the text of an element, or None
#
# Return value is a string
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return element.text
#
#
# def text_value(self,element):
# """Returns the text value of an element, i.e the text converted
# according to its schema data type
#
# Return value is an object with data type dependent on the schema
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
#
# #JUST RETURNS STRINGS AT PRESENT - TO DO
#
# if isinstance(element,str):
# element=self.element(element)
# text=element.text
# return text
#
#
# def child_elements(self,element,label='*'):
# """Returns the child elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# if isinstance(element,str):
# element=self.element(element)
# st='./gbxml:%s' % label
# return self.xpath(element,st)
#
#
# def descendent_elements(self,element,label='*'):
# """Returns the descendent elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# if isinstance(element,str):
# element=self.element(element)
# st='.//gbxml:%s' % label
# return self.xpath(element,st)
#
#
## CONSTRUCTION FUNCTIONS
#
# def construction_layers(self,construction_element):
# "Returns the layer elements of a construction"
# if isinstance(construction_element,str):
# construction_element=self.element(construction_element,label='Construction')
# layerId_elements=self.child_elements(construction_element,'LayerId')
# layer_elements=[self.element(layerId_element.get('layerIdRef'),'Layer')
# for layerId_element in layerId_elements]
# return layer_elements
#
# def construction_materials(self,construction_element):
# "Returns the layer elements of a construction"
# if isinstance(construction_element,str):
# construction_element=self.element(construction_element,label='Construction')
# layer_elements=self.construction_layers(construction_element)
# material_elements=[]
# for layer_element in layer_elements:
# material_elements+=self.layer_materials(layer_element)
# return material_elements
#
#
## LAYER FUNCTIONS
#
# def layer_materials(self,layer_element):
# "Returns the layer elements of a construction"
# if isinstance(layer_element,str):
# layer_element=self.element(layer_element,label='Layer')
# materialId_elements=self.child_elements(layer_element,'MaterialId')
# material_elements=[self.element(materialId_element.get('materialIdRef'),'Material')
# for materialId_element in materialId_elements]
# return material_elements
#
#
#
## OPENING FUNCTIONS
#
# def opening_coordinates(self,opening_element):
# """Returns a list of coordinate tuples
#
# Arguments:
# - opening_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# if isinstance(opening_element,str):
# opening_element=self.element(opening_element,label='Opening')
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=self.xpath(opening_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=self.xpath(cartesian_point,st)
# t=(float(self.text_value(coordinates[0])),
# float(self.text_value(coordinates[1])),
# float(self.text_value(coordinates[2])))
# l.append(t)
# return l
#
## SURFACE FUNCTIONS
#
# def surface_azimuth(self,surface_element):
# """Returns the azimuth of a surface
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - azimuth (float) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Azimuth')
# if len(l)>0:
# azimuth=l[0]
# return float(self.text_value(azimuth))
#
#
# def surface_coordinates(self,surface_element):
# """Returns a list of coordinate tuples
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=self.xpath(surface_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=self.xpath(cartesian_point,st)
# t=(float(self.text_value(coordinates[0])),
# float(self.text_value(coordinates[1])),
# float(self.text_value(coordinates[2])))
# l.append(t)
# return l
#
#
# def surface_inner_space(self,surface_element):
# """Returns the inner Space element of a Surface, or None
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>0:
# adjacentSpaceId=adjacentSpaceIds[0]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return self.element(spaceIdRef)
#
#
# def surface_outer_space(self,surface_element):
# """Returns the outer Space element of a Surface, or None
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>1:
# adjacentSpaceId=adjacentSpaceIds[1]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return self.element(spaceIdRef)
#
#
# def surface_tilt(self,surface_element):
# """Returns the tilt of a surface
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - tilt (float) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Tilt')
# if len(l)>0:
# tilt=l[0]
# return float(self.text_value(tilt))
#
# def surface_construction(self,surface_element):
# "Returns the construction element of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_id=surface_element.get('constructionIdRef')
# construction_element=self.element(construction_id,'Construction')
# return construction_element
#
# def surface_layers(self,surface_element):
# "Returns the layer elements of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_element=self.surface_construction(surface_element)
# layer_elements=self.construction_layers(construction_element)
# return layer_elements
#
# def surface_materials(self,surface_element):
# "Returns the layer elements of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_element=self.surface_construction(surface_element)
# material_elements=self.construction_materials(construction_element)
# return material_elements
#
#
#
#
#
#
#
#
#
#
### SPACE FUNCTIONS
##
## def set_space_id(self,space_element,id):
## """Sets a new id attribute for a Space element and updates all links
##
##
## """
## if isinstance(space_element,str):
## space_element=self.element(space_element)
## #get old id
## old_id=space_element.get('id')
## #set new id
## space_element.set('id',id)
## #find all elements with attribute spaceRefId=old_id
## st='.//gbxml:*[@spaceIdRef="%s"]' % old_id
## l=self.xpath(self.root(),st)
## #update with id
## for e in l:
## e.set('spaceIdRef',id)
## #return new id
## return id
#
#
## WINDOWTYPE FUNCTIONS
#
# def windowType_materials(self,windowType_element):
# """Returns the Glaze and Gap elements of a windowType in order
#
# Arguments:
# - windowType_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - glaze_and_gap_elements (list)
#
# """
# l=[]
# if isinstance(windowType_element,str):
# windowType_element=self.element(windowType_element,label='WindowType')
# l=self.child_elements(windowType_element)
# return [x for x in l if self.label(x) in ['Glaze','Gap']]
#
#
## ZONE FUNCTIONS
#
# def add_zone(self,zone_id,space_ids):
# """Adds a zone element and the IdRef links to it.
#
# Arguments:
# - zone_id (str): the id of the new zone
# - space_ids (str or list): the ids of the spaces that link to the zone
# """
# #adds element
# parent=self.root()
# e=self.add_element(parent,'Zone')
# self.set_attribute(e,'id',zone_id)
# #adds links
# if isinstance(space_ids,str):
# space_ids=[space_ids]
# for space_id in space_ids:
# space=self.element(space_id,'Space')
# self.set_attribute(space,'zoneIdRef',zone_id)
# #returns the new zone element
# return e
#
#
# def remove_zone(self,zone_element):
# """Removes a Zone element and all IdRef links to the zone.
#
# Arguments:
# - zone_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# #find id
# if isinstance(zone_element,str):
# id=zone_element
# else:
# id=zone_element.get('id')
# #find all elements with attribute zoneRefId=id
# st='.//gbxml:*[@zoneIdRef="%s"]' % id
# l=self.xpath(self.root(),st)
# #removes all attributes zoneRefId=id
# for x in l:
# self.remove_attribute(x,'zoneIdRef')
# #remove node
# self.remove_element(zone_element)
#
#
#
#
# # LAYERS
#
#
#
## OUTPUT
#
#def xpath(element,st_xpath):
# """Returns the result of an xpath operation on the gbXML file
#
# Arguments
# - st_xpath (str): the xpath string
# - element (lxml.etree._Element): the element for the xpath operation. The
# default is the root element
#
# """
# return element.xpath(st_xpath,namespaces=ns)
#
## QUERYING
#
#def get_child(element,id=None,label='*'):
# """Returns the child of an element
#
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
# if id is None:
# return get_children(element,label)[0]
# else:
# st='./gbxml:%s[@id="%s"]' % (label,id)
# return xpath(element,st)[0]
#
#
#def get_child_text(element,label='*',dtype=None):
# "Returns the first child text value, or None"
# children=get_children(element,label)
# if children:
# if dtype is None:
# return children[0].text
# else:
# return dtype(children[0].text)
# else:
# return None
#
#def get_children(element,label='*'):
# """Returns the child elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# st='./gbxml:%s' % label
# return xpath(element,st)
#
#def get_descendents(element,label='*'):
# """Returns the descendent elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element): This a lxml._Element object
# - label (str): the label of the element
# """
# st='.//gbxml:%s' % label
# return xpath(element,st)
#
#def get_element(element,id,label='*'):
# """Returns an element from the gbXML file
# """
# st='//gbxml:%s[@id="%s"]' % (label,id)
# return xpath(element.getroottree(),st)[0]
#
#
## CONSTRUCTION FUNCTIONS
#
#def construction_layers(construction_element):
# "Returns the layer elements of a construction"
# layerId_elements=get_children(construction_element,'LayerId')
# layer_elements=[get_layer(layerId_element,
# layerId_element.get('layerIdRef'))
# for layerId_element in layerId_elements]
# return layer_elements
#
#def construction_materials(construction_element):
# "Returns the layer elements of a construction"
# layer_elements=construction_layers(construction_element)
# material_elements=[]
# for layer_element in layer_elements:
# material_elements+=layer_materials(layer_element)
# return material_elements
#
#
## LAYER FUNCTIONS
#
#def get_layer(element,id):
# root=element.getroottree()
# result=xpath(root,'./gbxml:Layer[@id="%s"]' % id)
# return result[0]
#
#def layer_materials(layer_element):
# "Returns the layer elements of a construction"
# materialId_elements=get_children(layer_element,'MaterialId')
# material_elements=[get_element(materialId_element,
# materialId_element.get('materialIdRef'),
# 'Material')
# for materialId_element in materialId_elements]
# return material_elements
#
## MATERIAL FUNCTIONS
#
#def get_material(element,id):
# root=element.getroottree()
# result=xpath(root,'./gbxml:Material[@id="%s"]' % id)
# return result[0]
#
#
## SURFACE FUNCTION
#
#def get_surface_coordinates(surface_element):
# """Returns a list of coordinate tuples
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=xpath(surface_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=xpath(cartesian_point,st)
# t=(float(coordinates[0].text),
# float(coordinates[1].text),
# float(coordinates[2].text))
# l.append(t)
# return l
#
#def get_surface_inner_space(surface_element):
# """Returns the inner Space element of a Surface, or None
# """
# adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>0:
# adjacentSpaceId=adjacentSpaceIds[0]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return get_element(surface_element,spaceIdRef)
#
#def get_surface_outer_space(surface_element):
# """Returns the outer Space element of a Surface, or None
# """
# adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>1:
# adjacentSpaceId=adjacentSpaceIds[1]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return get_element(surface_element,spaceIdRef)
#
#
#
#
#
#
#
#
## def child_node_text(self,id,label='*'):
## """Returns a dictionary listing any child nodes which have text
##
## Return values is {tag:text}
##
## """
## e=self._element(id,label)
## d={}
## for e1 in e:
## if e1.text:
## label=e1.tag.split('}')[1]
## d[label]=e1.text
## return d
##
##
## def child_node_values(self,id,label='*'):
## """Returns a dictionary listing any child nodes which have text
##
## Node text values are converted from strings into their datatype
## i.e. the text from an 'Area' node is converted into a float
##
## Return values is {label:value}
##
## """
## d=self.xml.child_node_text(id=id,label=label)
## d1={}
## for k,v in d.items():
## xml_type=self.xsd.element_type(k)
## #print(xml_type)
## if xml_type=='xsd:string':
## value=v
## elif xml_type=='xsd:decimal':
## value=float(v)
## else:
## raise Exception(xml_type)
## d1[k]=value
## return d1
##
##
##
## def node_attributes(self,id,label='*'):
## "Returns the attribute dict of node with id 'id'"
## e=self._element(id,label)
## return dict(e.attrib)
##
##
## def node_ids(self,label='*'):
## """Returns the ids of all nodes
##
## Arguments:
## label (str): the node tag to filter on
##
## """
## #filter by label
## st='//a:%s' % (label)
## l=self._ElementTree.getroot().xpath(st,namespaces=self.ns)
## return [x.get('id') for x in l]
##
##
## def parent_object(self,id,label='*'):
## """Returns the parent of an element
##
## Return value is a dictionary {'id':value,'label':value}
##
## """
## e=self._element(id,label)
## parent=e.getparent()
## return {'id':self._id(parent),
## 'label':self._label(parent)}
##
##
##
##
##
## def surface_adjacent_objects(self,id):
## """Returns the objects adjacent to the surface
##
## Return value is a 2 item list of dictionaries [{'id':value,'label':value}]
##
## """
## label='Surface'
## e=self._element(id,label)
## st='./a:AdjacentSpaceId/@spaceIdRef'
## l=e.xpath(st,namespaces=self.ns)
## l=l+[None]*(2-len(l))
## surfaceType=e.get('surfaceType')
## d=\
## {'InteriorWall':None,
## 'ExteriorWall':{'id':'Climate1','label':'Climate'},
## 'Roof':{'id':'Climate1','label':'Climate'},
## 'InteriorFloor':None,
## 'ExposedFloor':{'id':'Climate1','label':'Climate'},
## 'Shade':{'id':'Climate1','label':'Climate'},
## 'UndergroundWall':{'id':'Ground1','label':'Ground'},
## 'UndergroundSlab':{'id':'Ground1','label':'Ground'},
## 'Ceiling':None,
## 'Air':None,
## 'UndergroundCeiling':{'id':'Ground1','label':'Ground'},
## 'RaisedFloor':{'id':'Climate1','label':'Climate'},
## 'SlabOnGrade':{'id':'Ground1','label':'Ground'},
## 'FreestandingColumn':None,
## 'EmbeddedColumn':None
## }
## l1=[]
## for x in l:
## if not x is None:
## l1.append({'id':x,'label':'Space'})
## else:
## l1.append(d[surfaceType])
## return l1
##
##
## def surface_building_ids(self,id):
## """Returns a list of building ids that the surface belongs to
## """
## l=self.surface_adjacent_objects(id)
## l=[self.parent_object(x['id'])['id'] for x in l if x['label']=='Space']
## return l
##
##
##
#
## def elements(xml, tag='*'):
## """Returns a list of lxml elements, filtered by tag
##
## Arguments:
## xml (lxml.etree._ElementTree): the gbXML instance
## tag (str): the tag name, not including the namespace
##
## """
## st='//a:%s' % (tag)
## #print(st)
## return xml.getroot().xpath(st,namespaces=ns)
#
#
| 31.652825 | 93 | 0.549486 |
from lxml import etree
import pkgutil
from io import BytesIO
from . import xml_functions, construction_functions, layer_functions
from . import surface_functions, space_functions, building_functions
from . import opening_functions, zone_functions
class Gbxml():
    """A gbXML document with id-based query methods.

    The gbXML document and its gbXSD schema are held as lxml ElementTrees.
    Each query locates an element by id via an xpath expression and
    delegates the extraction work to the per-element-type helper modules
    (xml_functions, building_functions, surface_functions, etc.).
    """

    def __init__(self,
                 gbxml_fp=None,
                 gbxsd_fp=None):
        """Initialises the instance from gbXML / gbXSD filepaths.

        Arguments:
          - gbxml_fp (str): filepath to a gbXML file; if None the packaged
            'blank.xml' document is used.
          - gbxsd_fp (str): filepath to a gbXML schema file; if None the
            packaged 'GreenBuildingXML_Ver6.01.xsd' schema is used.
        """
        if gbxml_fp:
            self._ElementTree = etree.parse(gbxml_fp)
        else:
            st = pkgutil.get_data(__package__, 'blank.xml')
            self._ElementTree = etree.parse(BytesIO(st))
        if gbxsd_fp:
            # BUG FIX: this previously parsed gbxml_fp, so a user-supplied
            # schema filepath was silently ignored.
            self._ElementTree_gbxsd = etree.parse(gbxsd_fp)
        else:
            st = pkgutil.get_data(__package__, 'GreenBuildingXML_Ver6.01.xsd')
            self._ElementTree_gbxsd = etree.parse(BytesIO(st))
        # Namespace map used by every xpath query below.
        self.ns = {'gbxml': 'http://www.gbxml.org/schema'}

    def _xpath1(self, st):
        """Returns the first element matching xpath string `st` from the root.

        Raises IndexError if nothing matches — the same behaviour as the
        previous inline `[0]` lookups.
        """
        return self._ElementTree.getroot().xpath(st, namespaces=self.ns)[0]

    # GENERAL QUERIES

    def get_ids(self, tag=None):
        """Returns the ids of all elements, optionally filtered by tag."""
        if tag is None:
            tag = '*'
        return xml_functions.get_ids(self._ElementTree.getroot(), tag)

    def get_xmlstring(self, id=None):
        """Returns the XML string of the element with id `id` (or the root)."""
        if id is None:
            element = self._ElementTree.getroot()
        else:
            element = self._xpath1('//gbxml:*[@id="%s"]' % id)
        return xml_functions.get_xmlstring(element)

    def get_attributes(self, id):
        """Returns the attribute dict of the element with id `id`."""
        return xml_functions.get_attributes(
            self._xpath1('//gbxml:*[@id="%s"]' % id))

    def get_child_tags(self, id):
        """Returns the child tags of the element with id `id`."""
        return xml_functions.get_child_tags(
            self._xpath1('//gbxml:*[@id="%s"]' % id))

    def get_child_tag_text(self, id, child_tag):
        """Returns the text of child `child_tag` of the element with id `id`."""
        return xml_functions.get_child_tag_text(
            self._xpath1('//gbxml:*[@id="%s"]' % id), child_tag)

    def get_child_tag_attributes(self, id, child_tag):
        """Returns the attributes of child `child_tag` of the element `id`."""
        return xml_functions.get_child_tag_attributes(
            self._xpath1('//gbxml:*[@id="%s"]' % id), child_tag)

    def get_children_list(self, id):
        """Returns a list describing the children of the element with id `id`."""
        return xml_functions.get_children_list(
            self._xpath1('//gbxml:*[@id="%s"]' % id))

    # CAMPUS QUERIES

    def get_campus_location_tags(self, id):
        """Returns the child tags of the Location element of Campus `id`."""
        return xml_functions.get_child_tags(
            self._xpath1('./gbxml:Campus[@id="%s"]/gbxml:Location' % id))

    def get_campus_location_tag_text(self, id, child_tag):
        """Returns the text of `child_tag` under the Location of Campus `id`."""
        return xml_functions.get_child_tag_text(
            self._xpath1('./gbxml:Campus[@id="%s"]/gbxml:Location' % id),
            child_tag)

    # BUILDING QUERIES

    def get_building_space_ids(self, id):
        """Returns the ids of the Spaces of Building `id`."""
        return building_functions.get_space_ids(
            self._xpath1('./gbxml:Campus/gbxml:Building[@id="%s"]' % id))

    def get_building_surface_ids(self, id):
        """Returns the ids of the Surfaces of Building `id`."""
        return building_functions.get_surface_ids(
            self._xpath1('./gbxml:Campus/gbxml:Building[@id="%s"]' % id))

    # SPACE QUERIES

    def get_space_surface_ids(self, id):
        """Returns the ids of the Surfaces adjacent to Space `id`."""
        return space_functions.get_surface_ids(
            self._xpath1(
                './gbxml:Campus/gbxml:Building/gbxml:Space[@id="%s"]' % id))

    # CONSTRUCTION AND LAYER QUERIES

    def get_construction_layer_ids(self, id):
        """Returns the ids of the Layers of Construction `id`."""
        return construction_functions.get_layer_ids(
            self._xpath1('./gbxml:Construction[@id="%s"]' % id))

    def get_construction_material_ids(self, id):
        """Returns the ids of the Materials of Construction `id`."""
        return construction_functions.get_material_ids(
            self._xpath1('./gbxml:Construction[@id="%s"]' % id))

    def get_layer_material_ids(self, id):
        """Returns the ids of the Materials of Layer `id`."""
        return layer_functions.get_material_ids(
            self._xpath1('./gbxml:Layer[@id="%s"]' % id))

    # SURFACE QUERIES

    def _surface(self, id):
        """Returns the Surface element with id `id`."""
        return self._xpath1('./gbxml:Campus/gbxml:Surface[@id="%s"]' % id)

    def get_surface_inner_space_id(self, id):
        """Returns the id of the inner Space of Surface `id`."""
        return surface_functions.get_inner_space_id(self._surface(id))

    def get_surface_outer_space_id(self, id):
        """Returns the id of the outer Space of Surface `id`."""
        return surface_functions.get_outer_space_id(self._surface(id))

    def get_surface_azimuth(self, id):
        """Returns the azimuth of Surface `id`."""
        return surface_functions.get_azimuth(self._surface(id))

    def get_surface_tilt(self, id):
        """Returns the tilt of Surface `id`."""
        return surface_functions.get_tilt(self._surface(id))

    def get_surface_coordinates(self, id):
        """Returns the coordinate tuples of Surface `id`."""
        return surface_functions.get_coordinates(self._surface(id))

    def get_surface_area(self, id):
        """Returns the area of Surface `id`."""
        return surface_functions.get_area(self._surface(id))

    def get_surface_opening_ids(self, id):
        """Returns the ids of the Openings of Surface `id`."""
        return surface_functions.get_opening_ids(self._surface(id))

    # OPENING QUERIES

    def _opening(self, id):
        """Returns the Opening element with id `id`."""
        return self._xpath1(
            './gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id)

    def get_opening_surface_id(self, id):
        """Returns the id of the parent Surface of Opening `id`."""
        return opening_functions.get_surface_id(self._opening(id))

    def get_opening_coordinates(self, id):
        """Returns the coordinate tuples of Opening `id`."""
        return opening_functions.get_coordinates(self._opening(id))

    def get_opening_area(self, id):
        """Returns the area of Opening `id`."""
        return opening_functions.get_area(self._opening(id))

    # ZONE QUERIES

    def get_zone_space_ids(self, id):
        """Returns the ids of the Spaces linked to Zone `id`."""
        return zone_functions.get_space_ids(
            self._xpath1('./gbxml:Zone[@id="%s"]' % id))
# Arguments:
# - element (lxml.etree._Element): default is root node
#
# """
#
# Arguments
# - st_xpath (str): the xpath string
# - element (lxml.etree._Element): the element for the xpath operation. The
# default is the root element
#
# """
#
# Arguments:
# fp (str): the filepath
# """
# Returns True if the gbXML file is valid, otherwise False
#
# """
# Returns the newly created element
#
# Arguments:
# - parent_element (lxml._Element or str): the parent element that the
# new element is added to. This can be either a lxml._Element object
# or a string with the element id.
# - label (str): the label or tag of the new element
# - text (str): the text of the new element
# - **kwargs (keywords): the attributes of the new element
#
# """
#
# Returns the modified element
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - key (str): the name of the attribute
# - value (str): the value of the attribute
#
# """
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - new_id (str):
#
# Return value:
# - new_id (str)
#
# """
or str): This a lxml._Element object
# or a string with the element id.
# - text (str): the text
#
# """
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# """
ments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - key (str): The name of the attribute to delete
#
# """
# Arguments:
# - label (str): the label of the elements
#
# """
#
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
#
# Return value is a dictionary
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
#
# Return value is a string
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# according to its schema data type
#
# Return value is an object with data type dependent on the schema
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
e is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
guments:
# - opening_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - azimuth (float) or None
#
# """
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
#
# Arguments:
# - surface_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - tilt (float) or None
#
# """
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element): This a lxml._Element object
# - label (str): the label of the element
# """
# """
face_element (lxml._Element or str): This a lxml._Element object
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# """
# """
| true | true |
f70f71544831b4d1ffff7c6948b00d3bdd751afe | 38,151 | py | Python | google/net/proto2/python/internal/python_message.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/net/proto2/python/internal/python_message.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/net/proto2/python/internal/python_message.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import sys
import six
from six.moves import range
if sys.version_info[0] < 3:
try:
from io import StringIO as BytesIO
except ImportError:
from io import StringIO as BytesIO
import six.moves.copyreg as copyreg
else:
from io import BytesIO
import copyreg
import struct
import weakref
from google.net.proto2.python.internal import containers
from google.net.proto2.python.internal import decoder
from google.net.proto2.python.internal import encoder
from google.net.proto2.python.internal import enum_type_wrapper
from google.net.proto2.python.internal import message_listener as message_listener_mod
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.internal import wire_format
from google.net.proto2.python.public import descriptor as descriptor_mod
from google.net.proto2.python.public import message as message_mod
from google.net.proto2.python.public import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(bases, descriptor, dictionary):
  """Prepares the class dictionary before type() builds the message class.

  Installs nested extension handles and the '__slots__' entry into
  dictionary; returns the (unchanged) bases tuple.

  Args:
    bases: tuple of base classes for the message class being created.
    descriptor: Descriptor object describing this message type.
    dictionary: the class dictionary to be passed to type().
  """
  _AddClassAttributesForNestedExtensions(descriptor, dictionary)
  _AddSlots(descriptor, dictionary)
  return bases
def InitMessage(descriptor, cls):
  """Finishes constructing a generated message class.

  Attaches per-field encoders/decoders/sizers, enum value attributes, an
  __init__ method, field and extension properties, and the standard
  Message API methods to cls.  Also registers pickling support.

  Args:
    descriptor: Descriptor object describing this message type.
    cls: the message class under construction.
  """
  cls._decoders_by_tag = {}
  cls._extensions_by_name = {}
  cls._extensions_by_number = {}
  if (descriptor.has_options and
      descriptor.GetOptions().message_set_wire_format):
    # MessageSet messages use a special item encoding on the wire.
    cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
        decoder.MessageSetItemDecoder(cls._extensions_by_number), None)

  # Attach stuff to each FieldDescriptor for quick lookup later on.
  for field in descriptor.fields:
    _AttachFieldHelpers(cls, field)

  _AddEnumValues(descriptor, cls)
  _AddInitMethod(descriptor, cls)
  _AddPropertiesForFields(descriptor, cls)
  _AddPropertiesForExtensions(descriptor, cls)
  _AddStaticMethods(cls)
  _AddMessageMethods(descriptor, cls)
  _AddPrivateHelperMethods(descriptor, cls)
  # Pickle via __getstate__ so instances serialize without the class dict.
  copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
  """Checks that extension_handle is a usable extension of message's type.

  Raises:
    KeyError: if the handle is not a FieldDescriptor, is not an extension,
      has no containing type, or extends a different message type.
  """
  if not isinstance(extension_handle, _FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)

  full_name = extension_handle.full_name
  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % full_name)

  containing_type = extension_handle.containing_type
  if not containing_type:
    raise KeyError('"%s" is missing a containing_type.' % full_name)

  if containing_type is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (full_name,
                    containing_type.full_name,
                    message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_unknown_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__',
'_oneofs']
def _IsMessageSetExtension(field):
  """Returns True iff field is an optional message extension whose
  containing type uses the MessageSet wire format and whose message type
  matches its extension scope."""
  # Checks are performed in the same short-circuit order as before, so no
  # attribute is touched unless the preceding checks passed.
  if not field.is_extension:
    return False
  containing = field.containing_type
  if not (containing.has_options and
          containing.GetOptions().message_set_wire_format):
    return False
  return (field.type == _FieldDescriptor.TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
  """Attaches encoder, sizer, decoder(s) and a default-value constructor
  for one field.

  The encoder/sizer/default constructor are stored on the field descriptor
  itself; decoders are registered in cls._decoders_by_tag, keyed by the
  field's serialized tag bytes.
  """
  is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
  is_packed = (field_descriptor.has_options and
               field_descriptor.GetOptions().packed)

  if _IsMessageSetExtension(field_descriptor):
    # MessageSet extensions use a dedicated item encoding on the wire.
    field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
    sizer = encoder.MessageSetItemSizer(field_descriptor.number)
  else:
    # Regular fields pick their encoder/sizer from per-type lookup tables.
    field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
    sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)

  field_descriptor._encoder = field_encoder
  field_descriptor._sizer = sizer
  field_descriptor._default_constructor = _DefaultValueConstructorForField(
      field_descriptor)

  def AddDecoder(wiretype, is_packed):
    # Registers a decoder for this field under its (number, wiretype) tag.
    # The second tuple entry is the field itself when it belongs to a oneof,
    # so the decoder can update oneof state.
    tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
    cls._decoders_by_tag[tag_bytes] = (
        type_checkers.TYPE_TO_DECODER[field_descriptor.type](
            field_descriptor.number, is_repeated, is_packed,
            field_descriptor, field_descriptor._default_constructor),
        field_descriptor if field_descriptor.containing_oneof is not None
        else None)

  AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
             False)

  if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
    # Repeated packable fields also accept the packed (length-delimited)
    # wire form, regardless of whether they are declared packed.
    AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
  """Copies the descriptor's nested extension handles into the class dict.

  Each extension declared inside this message type becomes a class-level
  attribute named after the extension.  The names must not collide with
  anything already present in the class dictionary.
  """
  for name, field in descriptor.extensions_by_name.items():
    assert name not in dictionary
    dictionary[name] = field
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for all enum fields defined in this message.

  Also exporting a class-level object that can name enum values.

  Args:
    descriptor: Descriptor object for this message type.
    cls: Class we're constructing for this message type.
  """
  for enum_type in descriptor.enum_types:
    # The wrapper object exposes name/value lookups for the whole enum type.
    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
    for enum_value in enum_type.values:
      # Each enum value is also exported as a plain int class attribute.
      setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
  """Returns a function which returns a default value for a field.

  Args:
    field: FieldDescriptor object for this field.

  The returned function has one argument:
    message: Message instance containing this field, or a weakref proxy
      of same.

  That function in turn returns a default value for this field.  The default
  value may refer back to |message| via a weak reference.
  """

  if field.label == _FieldDescriptor.LABEL_REPEATED:
    if field.has_default_value and field.default_value != []:
      # Repeated fields can never have a non-empty default in .proto.
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # NOTE(review): message_type is bound here but the closure below reads
      # field.message_type directly, so this local is unused — confirm.
      message_type = field.message_type
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      # Scalar repeated fields type-check each element via type_checker.
      type_checker = type_checkers.GetTypeChecker(field)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault

  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Singular submessage: construct a fresh instance wired to notify the
    # parent (via its child listener) when modified.
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      result = message_type._concrete_class()
      result._SetListener(message._listener_for_children)
      return result
    return MakeSubMessageDefault

  def MakeScalarDefault(message):
    # Scalar singular fields just use the descriptor's default value.
    return field.default_value
  return MakeScalarDefault
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
  """Re-raise the currently-handled TypeError with the field name added.

  Must be called from inside an except block, since it reads the active
  exception via sys.exc_info().
  """
  exc = sys.exc_info()[1]
  if len(exc.args) == 1 and type(exc) is TypeError:
    # Simple TypeError with a single message arg: rebuild the message with
    # the message/field context appended.  Subclasses are left untouched.
    exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))

  # Re-raises the augmented (or original) exception with the original
  # traceback preserved.
  six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  The generated __init__ accepts field values as keyword arguments and
  installs them with the same type checking the property setters perform.
  """
  # NOTE(review): `fields` is bound but not referenced below — confirm.
  fields = message_descriptor.fields
  def init(self, **kwargs):
    self._cached_byte_size = 0
    # Size cache is only dirty if keyword initializers were supplied.
    self._cached_byte_size_dirty = len(kwargs) > 0
    self._fields = {}
    # Maps each oneof descriptor to the member field currently set, if any.
    self._oneofs = {}
    self._unknown_fields = ()
    self._is_present_in_parent = False
    self._listener = message_listener_mod.NullMessageListener()
    self._listener_for_children = _Listener(self)
    for field_name, field_value in six.iteritems(kwargs):
      field = _GetFieldByName(message_descriptor, field_name)
      # NOTE(review): _GetFieldByName raises ValueError rather than
      # returning None, so this branch looks unreachable — confirm.
      if field is None:
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (message_descriptor.name, field_name))
      if field.label == _FieldDescriptor.LABEL_REPEATED:
        copy = field._default_constructor(self)
        if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
          # Composite repeated: merge each supplied value into a new element.
          for val in field_value:
            copy.add().MergeFrom(val)
        else:
          # Scalar repeated: extend() type-checks each element.
          copy.extend(field_value)
        self._fields[field] = copy
      elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        copy = field._default_constructor(self)
        try:
          copy.MergeFrom(field_value)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
        self._fields[field] = copy
      else:
        try:
          # Scalar singular fields go through the property setter, which
          # performs type checking and "has"/oneof bookkeeping.
          setattr(self, field_name, field_value)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)

  init.__module__ = None
  init.__doc__ = None
  cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for field in descriptor.fields:
    _AddPropertiesForField(field, cls)

  if descriptor.is_extendable:
    # A fresh _ExtensionDict view over this instance is created on every
    # access to the Extensions property.
    cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.
  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """

  # Guard: catch it if new cpp types are added that need special handling.
  assert _FieldDescriptor.MAX_CPPTYPE == 10

  # Expose the field number as e.g. FOO_FIELD_NUMBER on the class.
  constant_name = field.name.upper() + "_FIELD_NUMBER"
  setattr(cls, constant_name, field.number)

  # Dispatch on field kind: repeated, singular composite, singular scalar.
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field.  Clients
  can use this property to get the value of the field, which will be either a
  _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
  below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set any
  necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    # Lazily create the container on first access.
    field_value = self._fields.get(field)
    if field_value is None:
      field_value = field._default_constructor(self)

      # setdefault keeps whichever container was stored first, so if another
      # thread raced us here we adopt its container and discard ours.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Direct assignment is forbidden; clients must mutate the container.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value
  # NOTE: removed unused local `valid_values = set()` -- it was never read.

  def getter(self):
    # Unset fields fall back to the (shared, immutable) default value.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name
  def field_setter(self, new_value):
    # CheckValue validates (and may coerce) the value, raising TypeError on
    # ill-typed input; the checked result is what gets stored.
    self._fields[field] = type_checker.CheckValue(new_value)
    # If the byte-size cache was already dirty, ancestors were already
    # notified; only propagate on the clean->dirty transition.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof is not None:
    def setter(self, new_value):
      field_setter(self, new_value)
      # Setting a oneof member deactivates any sibling member.
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.
  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  message_type = field.message_type

  def getter(self):
    submessage = self._fields.get(field)
    if submessage is None:
      # Build a default submessage wired back to this parent so that
      # mutations mark the parent dirty (and update oneof state if needed).
      submessage = message_type._concrete_class()
      if field.containing_oneof is not None:
        listener = _OneofListener(self, field)
      else:
        listener = self._listener_for_children
      submessage._SetListener(listener)
      # Publish atomically; racing getters share one instance.
      submessage = self._fields.setdefault(field, submessage)
    return submessage

  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Submessages are mutated in place (msg.sub.x = ...), never reassigned.
    raise AttributeError('Assignment not allowed to composite field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  # Expose e.g. MY_EXT_FIELD_NUMBER for each extension declared here.
  for name, field in six.iteritems(descriptor.extensions_by_name):
    setattr(cls, name.upper() + "_FIELD_NUMBER", field.number)
def _AddStaticMethods(cls):
  """Attaches the RegisterExtension and FromString static methods to cls."""
  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    # Give the extension the same cached encoder/sizer/decoder helpers that
    # regular fields get.
    _AttachFieldHelpers(cls, extension_handle)

    # Reject two different extensions claiming the same field number.
    actual_handle = cls._extensions_by_number.setdefault(
        extension_handle.number, extension_handle)
    if actual_handle is not extension_handle:
      raise AssertionError(
          'Extensions "%s" and "%s" both try to extend message type "%s" with '
          'field number %d.' %
          (extension_handle.full_name, actual_handle.full_name,
           cls.DESCRIPTOR.full_name, extension_handle.number))

    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    handle = extension_handle
    if _IsMessageSetExtension(handle):
      # MessageSet extensions are additionally looked up by the full name of
      # the extension's message type.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle

  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(s):
    # Convenience constructor: parse a new instance from serialized bytes.
    message = cls()
    message.MergeFromString(s)
    return message

  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  field, value = item
  # Repeated fields count only when non-empty; submessages only when they
  # were actually set/modified; scalars count whenever stored.
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    return bool(value)
  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    return value._is_present_in_parent
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def ListFields(self):
    # Present fields only, ordered by field number.
    return sorted(
        (entry for entry in six.iteritems(self._fields) if _IsPresent(entry)),
        key=lambda entry: entry[0].number)

  cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  # Precompute the names HasField() accepts: all singular (non-repeated)
  # fields plus the names of the message's oneofs.
  singular_fields = {}
  for field in message_descriptor.fields:
    if field.label != _FieldDescriptor.LABEL_REPEATED:
      singular_fields[field.name] = field
  for field in message_descriptor.oneofs:
    singular_fields[field.name] = field

  def HasField(self, field_name):
    try:
      field = singular_fields[field_name]
    except KeyError:
      raise ValueError(
          'Protocol message has no singular "%s" field.' % field_name)

    if isinstance(field, descriptor_mod.OneofDescriptor):
      # For a oneof name, presence means "some member is active and present";
      # recurse on the active member's own name.
      try:
        return HasField(self, self._oneofs[field].name)
      except KeyError:
        return False
    else:
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        # Submessages are present only if they were actually set/modified.
        value = self._fields.get(field)
        return value is not None and value._is_present_in_parent
      else:
        # Scalars are present iff a value has been stored.
        return field in self._fields

  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        # field_name may name a oneof; clearing a oneof means clearing its
        # currently active member field (if any).
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          # Oneof exists but nothing is set: nothing to clear.
          return
      except KeyError:
        raise ValueError('Protocol message has no "%s" field.' % field_name)

    if field in self._fields:
      del self._fields[field]
      # If this field was the active member of its oneof, deactivate the
      # oneof as well.
      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Notify unconditionally -- ClearField is a mutating call even when the
    # field was already unset.
    self._Modified()

  cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""

  def ClearExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)

    # Drop the stored value if there is one; mutating methods always notify,
    # even when nothing was actually removed.
    try:
      del self._fields[extension_handle]
    except KeyError:
      pass
    self._Modified()

  cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def Clear(self):
    # Forget every stored field value, oneof state, and unknown field, then
    # signal modification so cached byte sizes are invalidated upstream.
    self._fields = {}
    self._oneofs = {}
    self._unknown_fields = ()
    self._Modified()

  cls.Clear = Clear
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""

  def HasExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      # Presence is undefined for repeated extensions.
      raise KeyError('"%s" is repeated.' % extension_handle.full_name)

    if extension_handle.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE:
      # Scalar extension: present iff a value has been stored.
      return extension_handle in self._fields
    # Composite extension: present only if the submessage was set/modified.
    stored = self._fields.get(extension_handle)
    return stored is not None and stored._is_present_in_parent

  cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def __eq__(self, other):
    # Different message types (or non-messages) are never equal.
    if (not isinstance(other, message_mod.Message) or
        other.DESCRIPTOR != self.DESCRIPTOR):
      return False

    if self is other:
      return True

    # Compare present fields (number-ordered), then unknown fields
    # order-insensitively.
    if self.ListFields() != other.ListFields():
      return False
    return sorted(self._unknown_fields) == sorted(other._unknown_fields)

  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def __str__(self):
    # Render via the canonical text-format serializer.
    return text_format.MessageToString(self)

  cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def __unicode__(self):
    # Text-format with UTF-8 output, decoded back to a unicode string
    # (Python 2's unicode() protocol).
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
  """Helper for _AddMessageMethods()."""

  def SetListener(self, listener):
    # A null listener silently swallows notifications; passing None detaches
    # this message from its previous parent.
    self._listener = (message_listener_mod.NullMessageListener()
                      if listener is None else listener)

  cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.
  The returned byte count includes space for tag information and any
  other additional space associated with serializing value.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value.  (Since the field number
      is stored as part of a varint-encoded tag, this has an impact
      on the total bytes required to serialize the value).
    field_type: The type of the field.  One of the TYPE_* constants
      within FieldDescriptor.

  Raises:
    message_mod.EncodeError: if field_type is not a recognized type constant.
  """
  # Keep the try narrow: only the table lookup should map KeyError to
  # EncodeError.  (Previously the sizer call was inside the try, so a
  # KeyError raised *within* fn() was misreported as an unknown field type.)
  try:
    fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
  except KeyError:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
  return fn(field_number, value)
def _AddByteSizeMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def ByteSize(self):
    # Fast path: return the cached size if nothing changed since the last
    # computation.
    if not self._cached_byte_size_dirty:
      return self._cached_byte_size

    size = 0
    for field_descriptor, field_value in self.ListFields():
      # Each field carries a cached sizer (attached by _AttachFieldHelpers).
      size += field_descriptor._sizer(field_value)

    # Unknown fields are kept pre-encoded, so their contribution is just
    # their raw byte length.
    for tag_bytes, value_bytes in self._unknown_fields:
      size += len(tag_bytes) + len(value_bytes)

    self._cached_byte_size = size
    self._cached_byte_size_dirty = False
    # Children have just been fully accounted for.
    self._listener_for_children.dirty = False

    return size

  cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def SerializeToString(self):
    # Fail fast if any required field is unset.  (Removed the dead local
    # `errors = []`, which was never used.)
    if not self.IsInitialized():
      raise message_mod.EncodeError(
          'Message %s is missing required fields: %s' % (
          self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
    return self.SerializePartialToString()
  cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def SerializePartialToString(self):
    # "Partial" = required-field presence is NOT checked here; see
    # SerializeToString for the checked variant.
    out = BytesIO()
    self._InternalSerialize(out.write)
    return out.getvalue()
  cls.SerializePartialToString = SerializePartialToString

  def InternalSerialize(self, write_bytes):
    # Known fields go through their cached per-field encoders; unknown
    # fields were stored in wire format and are copied through verbatim.
    for field_descriptor, field_value in self.ListFields():
      field_descriptor._encoder(write_bytes, field_value)
    for tag_bytes, value_bytes in self._unknown_fields:
      write_bytes(tag_bytes)
      write_bytes(value_bytes)
  cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def MergeFromString(self, serialized):
    length = len(serialized)
    try:
      # _InternalParse returns the offset at which it stopped; stopping
      # short of the end means an unexpected end-group tag was seen.
      if self._InternalParse(serialized, 0, length) != length:
        raise message_mod.DecodeError('Unexpected end-group tag.')
    except (IndexError, TypeError):
      # Raised by the decoder internals when the buffer runs out mid-value.
      raise message_mod.DecodeError('Truncated message.')
    except struct.error as e:
      raise message_mod.DecodeError(e)
    return length
  cls.MergeFromString = MergeFromString

  # Bind frequently-used helpers to locals of the closure for faster lookup
  # in the tight parse loop.
  local_ReadTag = decoder.ReadTag
  local_SkipField = decoder.SkipField
  decoders_by_tag = cls._decoders_by_tag

  def InternalParse(self, buffer, pos, end):
    self._Modified()
    field_dict = self._fields
    unknown_field_list = self._unknown_fields
    while pos != end:
      (tag_bytes, new_pos) = local_ReadTag(buffer, pos)
      field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None))
      if field_decoder is None:
        # Unrecognized tag: skip over the value and stash the raw bytes so
        # they round-trip on reserialization.
        value_start_pos = new_pos
        new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
        if new_pos == -1:
          # SkipField signals an end-group tag with -1; return where we are.
          return pos
        if not unknown_field_list:
          # Lazily switch _unknown_fields from the shared () to a real list.
          unknown_field_list = self._unknown_fields = []
        unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos]))
        pos = new_pos
      else:
        pos = field_decoder(buffer, new_pos, end, self, field_dict)
        if field_desc:
          # Only oneof members carry a descriptor here (see
          # _AttachFieldHelpers); keep the oneof's active-member state fresh.
          self._UpdateOneofState(field_desc)
    return pos
  cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors:  A list which, if provided, will be populated with the field
               paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """

    # Performance is critical so we avoid HasField() and ListFields().

    # First check our own required fields: a required submessage must also
    # have been explicitly set, not merely default-constructed.
    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    # Then recurse into all stored submessages (repeated and singular).
    for field, value in list(self._fields.items()):
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """

    errors = []  # simplify things

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        # Extensions are rendered parenthesized, matching text format.
        if field.is_extension:
          name = "(%s)" % field.full_name
        else:
          name = field.name

        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in range(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [ prefix + error for error in sub_errors ]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [ prefix + error for error in sub_errors ]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
  # Hoist constants to closure locals for faster lookup in the loop.
  LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
  CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))

    assert msg is not self
    self._Modified()

    fields = self._fields

    for field, value in six.iteritems(msg._fields):
      if field.label == LABEL_REPEATED:
        # Repeated fields: append the other message's elements to ours,
        # creating our container on demand.
        field_value = fields.get(field)
        if field_value is None:
          field_value = field._default_constructor(self)
          fields[field] = field_value
        field_value.MergeFrom(value)
      elif field.cpp_type == CPPTYPE_MESSAGE:
        # Submessages: merge recursively, but only if the source submessage
        # was actually set.
        if value._is_present_in_parent:
          field_value = fields.get(field)
          if field_value is None:
            field_value = field._default_constructor(self)
            fields[field] = field_value
          field_value.MergeFrom(value)
      else:
        # Scalars: the source value simply overwrites ours.
        self._fields[field] = value

    if msg._unknown_fields:
      if not self._unknown_fields:
        self._unknown_fields = []
      self._unknown_fields.extend(msg._unknown_fields)

  cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def WhichOneof(self, oneof_name):
    """Returns the name of the currently set field inside a oneof, or None."""
    try:
      oneof = message_descriptor.oneofs_by_name[oneof_name]
    except KeyError:
      raise ValueError(
          'Protocol message has no oneof "%s" field.' % oneof_name)

    # A member may be recorded as active yet not actually present (e.g.
    # after being cleared), so double-check with HasField.
    active_member = self._oneofs.get(oneof, None)
    if active_member is None or not self.HasField(active_member.name):
      return None
    return active_member.name

  cls.WhichOneof = WhichOneof
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  # Extension accessors only make sense on extendable message types.
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddClearMethod(message_descriptor, cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddSetListenerMethod(cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """

    # Note:  Some callers check _cached_byte_size_dirty before calling
    #   _Modified() as an extra optimization.  So, if this method is ever
    #   changed such that it does stuff even when _cached_byte_size_dirty is
    #   already true, the callers need to be updated.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      # Becoming dirty also means this message now counts as "present" in
      # its parent (the has-bit semantics for submessages).
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Sets field as the active field in its containing oneof.

    Will also delete currently active field in the oneof, if it is different
    from the argument. Does not mark the message as modified.
    """
    # setdefault returns the previously-active member if one was recorded.
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      # Switching members: drop the old member's value and record the new one.
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  # SetInParent is the public alias for the same operation.
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
self._parent_message_weakref._Modified()
except ReferenceError:
pass
class _OneofListener(_Listener):
  """Special listener implementation for setting composite oneof fields."""

  def __init__(self, parent_message, field):
    """Args:
      parent_message: The message whose _Modified() method we should call when
        we receive Modified() messages.
      field: The descriptor of the field being set in the parent message.
    """
    super(_OneofListener, self).__init__(parent_message)
    # Remembered so that a mutation of the child can activate this member in
    # the parent's oneof.
    self._field = field

  def Modified(self):
    """Also updates the state of the containing oneof in the parent message."""
    try:
      self._parent_message_weakref._UpdateOneofState(self._field)
      super(_OneofListener, self).Modified()
    except ReferenceError:
      # Parent was garbage-collected; nothing to update.
      pass
class _ExtensionDict(object):

  """Dict-like container for supporting an indexable "Extensions"
  field on proto instances.

  Note that in all cases we expect extension handles to be
  FieldDescriptors.
  """

  def __init__(self, extended_message):
    """extended_message: Message instance for which we are the Extensions dict.
    """
    self._extended_message = extended_message

  def __getitem__(self, extension_handle):
    """Returns the current value of the given extension handle."""

    _VerifyExtensionHandle(self._extended_message, extension_handle)

    result = self._extended_message._fields.get(extension_handle)
    if result is not None:
      return result

    # Not stored yet: construct a default value for container/composite
    # extensions; plain scalars just return the shared default.
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      result = extension_handle._default_constructor(self._extended_message)
    elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      result = extension_handle.message_type._concrete_class()
      try:
        # Wire the submessage back to the parent so mutations propagate.
        result._SetListener(self._extended_message._listener_for_children)
      except ReferenceError:
        # Parent already collected; the orphan default is still usable.
        pass
    else:
      # Scalar extensions are not cached in _fields until assigned.
      return extension_handle.default_value

    # Publish atomically so racing lookups share one instance.
    result = self._extended_message._fields.setdefault(
        extension_handle, result)

    return result

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False

    # Two extension dicts are equal iff their messages' present extension
    # entries are equal.
    my_fields = self._extended_message.ListFields()
    other_fields = other._extended_message.ListFields()

    # Get rid of non-extension fields.
    my_fields = [ field for field in my_fields if field.is_extension ]
    other_fields = [ field for field in other_fields if field.is_extension ]

    return my_fields == other_fields

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Mutable container; hashing would be unsafe.
    raise TypeError('unhashable object')

  def __setitem__(self, extension_handle, value):
    """If extension_handle specifies a non-repeated, scalar extension
    field, sets the value of that field.
    """

    _VerifyExtensionHandle(self._extended_message, extension_handle)

    if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
        extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
      raise TypeError(
          'Cannot assign to extension "%s" because it is a repeated or '
          'composite type.' % extension_handle.full_name)

    # It's slightly wasteful to lookup the type checker each time,
    # but we expect this to be a vanishingly uncommon case anyway.
    type_checker = type_checkers.GetTypeChecker(
        extension_handle)
    self._extended_message._fields[extension_handle] = (
      type_checker.CheckValue(value))
    self._extended_message._Modified()

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._extended_message._extensions_by_name.get(name, None)
| 30.182753 | 86 | 0.720925 |
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import sys
import six
from six.moves import range
if sys.version_info[0] < 3:
try:
from io import StringIO as BytesIO
except ImportError:
from io import StringIO as BytesIO
import six.moves.copyreg as copyreg
else:
from io import BytesIO
import copyreg
import struct
import weakref
from google.net.proto2.python.internal import containers
from google.net.proto2.python.internal import decoder
from google.net.proto2.python.internal import encoder
from google.net.proto2.python.internal import enum_type_wrapper
from google.net.proto2.python.internal import message_listener as message_listener_mod
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.internal import wire_format
from google.net.proto2.python.public import descriptor as descriptor_mod
from google.net.proto2.python.public import message as message_mod
from google.net.proto2.python.public import text_format
# Shorthand alias: FieldDescriptor's LABEL_*/CPPTYPE_*/TYPE_* constants are
# referenced throughout this module.
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(bases, descriptor, dictionary):
  """Prepares the class dictionary before the message class is created.

  Injects nested-extension attributes and __slots__ into `dictionary`, then
  returns the (unmodified) bases tuple.
  """
  _AddClassAttributesForNestedExtensions(descriptor, dictionary)
  _AddSlots(descriptor, dictionary)
  return bases
def InitMessage(descriptor, cls):
  """Finishes construction of a freshly-created message class.

  Attaches decoders, field properties, message methods, and private helpers
  to cls, and registers pickle support.
  """
  cls._decoders_by_tag = {}
  cls._extensions_by_name = {}
  cls._extensions_by_number = {}
  if (descriptor.has_options and
      descriptor.GetOptions().message_set_wire_format):
    # MessageSet wire format uses a single special item tag for all
    # extensions.
    cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
        decoder.MessageSetItemDecoder(cls._extensions_by_number), None)

  # Attach stuff to each FieldDescriptor for quick lookup later on.
  for field in descriptor.fields:
    _AttachFieldHelpers(cls, field)

  _AddEnumValues(descriptor, cls)
  _AddInitMethod(descriptor, cls)
  _AddPropertiesForFields(descriptor, cls)
  _AddPropertiesForExtensions(descriptor, cls)
  _AddStaticMethods(cls)
  _AddMessageMethods(descriptor, cls)
  _AddPrivateHelperMethods(descriptor, cls)
  # Pickle via (class, (), state) so instances round-trip without __init__
  # side effects.
  copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _PropertyName(proto_field_name):
  """Returns the name of the public property attribute which
  clients can use to get and (in some cases) set the value
  of a protocol message field.

  Args:
    proto_field_name: The protocol message field name, exactly
      as it appears (or would appear) in a .proto file.
  """
  # Currently the property name is identical to the proto field name; this
  # function exists as a single hook should mangling ever be needed.
  return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
  """Verify that the given extension handle is valid for `message`.

  Raises KeyError if the handle is not a FieldDescriptor, is not an
  extension, or does not extend this message's type.
  """
  if not isinstance(extension_handle, _FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)

  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % extension_handle.full_name)

  if not extension_handle.containing_type:
    raise KeyError('"%s" is missing a containing_type.'
                   % extension_handle.full_name)

  # Identity comparison: the handle must extend exactly this descriptor.
  if extension_handle.containing_type is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (extension_handle.full_name,
                    extension_handle.containing_type.full_name,
                    message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
  """Adds a __slots__ entry to dictionary, containing the names of all valid
  attributes for this message type.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__slots__'] = ['_cached_byte_size',
                             '_cached_byte_size_dirty',
                             '_fields',
                             '_unknown_fields',
                             '_is_present_in_parent',
                             '_listener',
                             '_listener_for_children',
                             '__weakref__',
                             '_oneofs']
def _IsMessageSetExtension(field):
  """True iff `field` is a MessageSet-style extension: an optional message
  extension whose message type is the extension's own scope, declared inside
  a container using message_set_wire_format."""
  return (field.is_extension and
          field.containing_type.has_options and
          field.containing_type.GetOptions().message_set_wire_format and
          field.type == _FieldDescriptor.TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
  """Caches encoder/sizer/default-constructor helpers on the descriptor and
  registers wire-tag decoders on cls for fast (de)serialization."""
  is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
  is_packed = (field_descriptor.has_options and
               field_descriptor.GetOptions().packed)

  if _IsMessageSetExtension(field_descriptor):
    # MessageSet items use a dedicated item encoding rather than the normal
    # per-type encoders.
    field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
    sizer = encoder.MessageSetItemSizer(field_descriptor.number)
  else:
    field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
    sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)

  field_descriptor._encoder = field_encoder
  field_descriptor._sizer = sizer
  field_descriptor._default_constructor = _DefaultValueConstructorForField(
      field_descriptor)

  def AddDecoder(wiretype, is_packed):
    tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
    # The second tuple element is non-None only for oneof members; the parse
    # loop uses it to update the parent's oneof state.
    cls._decoders_by_tag[tag_bytes] = (
        type_checkers.TYPE_TO_DECODER[field_descriptor.type](
            field_descriptor.number, is_repeated, is_packed,
            field_descriptor, field_descriptor._default_constructor),
        field_descriptor if field_descriptor.containing_oneof is not None
        else None)

  AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
             False)

  if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
    # Packable repeated fields also accept the packed (length-delimited)
    # encoding on the wire.
    AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
  """Exposes each nested extension as a class attribute on the message."""
  for name, field in six.iteritems(descriptor.extensions_by_name):
    # The class dict must not already define something by this name.
    assert name not in dictionary
    dictionary[name] = field
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for each enum type nested in this message:
  one wrapper object per enum type, plus one integer constant per value."""
  for enum_type in descriptor.enum_types:
    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
    for value_descriptor in enum_type.values:
      setattr(cls, value_descriptor.name, value_descriptor.number)
def _DefaultValueConstructorForField(field):
  """Returns a function which constructs a default value for `field` given
  the owning message instance.  The result is cached on the descriptor and
  used whenever a fresh field value is needed (getters, MergeFrom, init)."""
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    if field.has_default_value and field.default_value != []:
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # We can't look at _concrete_class yet since it might not have
      # been set.  (Depends on order in which we initialize the classes).
      message_type = field.message_type
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      type_checker = type_checkers.GetTypeChecker(field)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault

  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      # Fresh submessage wired back to the parent for dirty propagation.
      result = message_type._concrete_class()
      result._SetListener(message._listener_for_children)
      return result
    return MakeSubMessageDefault

  def MakeScalarDefault(message):
    # Scalars share the descriptor's (immutable) default value.
    return field.default_value
  return MakeScalarDefault
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
  """Re-raises the currently-handled TypeError with the field name added."""
  exc = sys.exc_info()[1]
  if len(exc.args) == 1 and type(exc) is TypeError:
    # Simple case: wrap the message.  Only exact TypeError is rewritten, so
    # subclasses with different constructor signatures pass through intact.
    exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))

  # Re-raise the (possibly rewritten) exception with the original traceback.
  six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls that accepts field values as kwargs."""
  fields = message_descriptor.fields
  def init(self, **kwargs):
    self._cached_byte_size = 0
    # Non-empty kwargs mean the message starts out modified.
    self._cached_byte_size_dirty = len(kwargs) > 0
    self._fields = {}
    self._oneofs = {}
    # Shared immutable () until an unknown field is actually parsed.
    self._unknown_fields = ()
    self._is_present_in_parent = False
    self._listener = message_listener_mod.NullMessageListener()
    self._listener_for_children = _Listener(self)
    for field_name, field_value in six.iteritems(kwargs):
      field = _GetFieldByName(message_descriptor, field_name)
      # NOTE(review): _GetFieldByName raises ValueError on unknown names, so
      # this None branch looks unreachable -- confirm before relying on the
      # TypeError below ever being raised.
      if field is None:
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (message_descriptor.name, field_name))
      if field.label == _FieldDescriptor.LABEL_REPEATED:
        # Copy the provided sequence into a fresh container so the caller's
        # list is never aliased.
        copy = field._default_constructor(self)
        if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
          for val in field_value:
            copy.add().MergeFrom(val)
        else:
          copy.extend(field_value)
        self._fields[field] = copy
      elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        copy = field._default_constructor(self)
        try:
          copy.MergeFrom(field_value)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
        self._fields[field] = copy
      else:
        # Scalars go through the generated property setter (type checking,
        # oneof bookkeeping).
        try:
          setattr(self, field_name, field_value)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)

  init.__module__ = None
  init.__doc__ = None
  cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
  """Returns a field descriptor by field name.

  Args:
    message_descriptor: A Descriptor describing all fields in message.
    field_name: The name of the field to retrieve.
  Returns:
    The field descriptor associated with the field name.
  Raises:
    ValueError: if no field with that name exists on the message.
  """
  try:
    return message_descriptor.fields_by_name[field_name]
  except KeyError:
    raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for field in descriptor.fields:
    _AddPropertiesForField(field, cls)

  if descriptor.is_extendable:
    # _ExtensionDict is a stateless adaptor over the message, so a fresh one
    # is constructed on every access.
    cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.

  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # Catch it if we add other types that we should handle specially here
  # (MAX_CPPTYPE is the highest known cpp_type constant).
  assert _FieldDescriptor.MAX_CPPTYPE == 10

  constant_name = field.name.upper() + "_FIELD_NUMBER"
  setattr(cls, constant_name, field.number)

  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field.  Clients
  can use this property to get the value of the field, which will be either a
  _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
  below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set any
  necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = field._default_constructor(self)

      # Atomically publish via setdefault so racing getters share one
      # container instance.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Repeated fields can only be mutated through the container object.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value
  # NOTE: removed unused local `valid_values = set()` -- it was never read.

  def getter(self):
    # Unset fields fall back to the shared default value.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name
  def field_setter(self, new_value):
    # CheckValue validates (and may coerce) the value, raising TypeError on
    # ill-typed input.
    self._fields[field] = type_checker.CheckValue(new_value)
    # Only propagate on the clean->dirty transition: if we were already
    # dirty, ancestors were already notified.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof is not None:
    def setter(self, new_value):
      field_setter(self, new_value)
      # Setting a oneof member deactivates any sibling member.
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.
  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  message_type = field.message_type

  def getter(self):
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a default submessage wired back to this parent so that
      # mutations mark the parent dirty (and update oneof state if needed).
      field_value = message_type._concrete_class()
      field_value._SetListener(
          _OneofListener(self, field)
          if field.containing_oneof is not None
          else self._listener_for_children)

      # Atomically publish via setdefault so racing getters share one
      # submessage instance.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Submessages are mutated in place (msg.sub.x = ...), never reassigned.
    raise AttributeError('Assignment not allowed to composite field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
  """Adds a FOO_FIELD_NUMBER class constant for every extension of this
  message type."""
  for name, field in six.iteritems(descriptor.extensions_by_name):
    setattr(cls, name.upper() + "_FIELD_NUMBER", field.number)
def _AddStaticMethods(cls):
  """Adds the RegisterExtension() and FromString() static methods to cls."""

  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    _AttachFieldHelpers(cls, extension_handle)

    # Try to insert our extension, failing if an extension with the same
    # field number already exists.
    actual_handle = cls._extensions_by_number.setdefault(
        extension_handle.number, extension_handle)
    if actual_handle is not extension_handle:
      raise AssertionError(
          'Extensions "%s" and "%s" both try to extend message type "%s" with '
          'field number %d.' %
          (extension_handle.full_name, actual_handle.full_name,
           cls.DESCRIPTOR.full_name, extension_handle.number))

    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    handle = extension_handle
    if _IsMessageSetExtension(handle):
      # MessageSet extensions are also registered under the type name so
      # they can be found by message type while parsing.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle

  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(s):
    """Parses a serialized message of this type and returns the new message."""
    message = cls()
    message.MergeFromString(s)
    return message

  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  field, value = item
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    return bool(value)
  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    return value._is_present_in_parent
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the ListFields() method."""

  def ListFields(self):
    all_fields = [item for item in six.iteritems(self._fields)
                  if _IsPresent(item)]
    # The protobuf contract requires the result ordered by field number.
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields

  cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the HasField() method."""

  # HasField() is only legal for singular fields and for oneofs, so
  # precompute the set of acceptable names once per class.
  singular_fields = {}
  for field in message_descriptor.fields:
    if field.label != _FieldDescriptor.LABEL_REPEATED:
      singular_fields[field.name] = field
  # Fields inside oneofs are always singular, so no label check is needed.
  for field in message_descriptor.oneofs:
    singular_fields[field.name] = field

  def HasField(self, field_name):
    try:
      field = singular_fields[field_name]
    except KeyError:
      raise ValueError(
          'Protocol message has no singular "%s" field.' % field_name)

    if isinstance(field, descriptor_mod.OneofDescriptor):
      # For a oneof name, delegate to whichever member is currently set.
      try:
        return HasField(self, self._oneofs[field].name)
      except KeyError:
        return False
    else:
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        value = self._fields.get(field)
        return value is not None and value._is_present_in_parent
      else:
        return field in self._fields

  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the ClearField() method."""

  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        # The name may be a oneof name; in that case clear whichever member
        # of the oneof is currently set (a no-op if none is set).
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          return
      except KeyError:
        raise ValueError('Protocol message has no "%s" field.' % field_name)

    if field in self._fields:
      # Note:  if the field is a sub-message, its listener still points at
      # us; the worst that can happen is an unnecessary byte-size recompute.
      del self._fields[field]

      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is a
    # mutating method and should mark this message present in its parent.
    self._Modified()

  cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods(): adds the ClearExtension() method."""

  def ClearExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)

    # Similar to ClearField(), above.
    if extension_handle in self._fields:
      del self._fields[extension_handle]
    self._Modified()

  cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the Clear() method."""

  def Clear(self):
    # Replace the containers wholesale rather than mutating them in place.
    self._fields = {}
    self._unknown_fields = ()
    self._oneofs = {}
    self._Modified()

  cls.Clear = Clear
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods(): adds the HasExtension() method."""

  def HasExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)
    # Presence is undefined for repeated extensions.
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      raise KeyError('"%s" is repeated.' % extension_handle.full_name)

    if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      value = self._fields.get(extension_handle)
      return value is not None and value._is_present_in_parent
    else:
      return extension_handle in self._fields

  cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the __eq__() method."""

  def __eq__(self, other):
    if (not isinstance(other, message_mod.Message) or
        other.DESCRIPTOR != self.DESCRIPTOR):
      return False

    if self is other:
      return True

    if not self.ListFields() == other.ListFields():
      return False

    # Sort unknown fields because their order shouldn't affect the
    # equality test.
    unknown_fields = list(self._unknown_fields)
    unknown_fields.sort()
    other_unknown_fields = list(other._unknown_fields)
    other_unknown_fields.sort()
    return unknown_fields == other_unknown_fields

  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the __str__() method."""

  def __str__(self):
    return text_format.MessageToString(self)

  cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the __unicode__() method (Py2)."""

  def __unicode__(self):
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
  """Helper for adding a private _SetListener() method to a message class."""

  def SetListener(self, listener):
    # A null listener keeps the call sites unconditional.
    if listener is None:
      self._listener = message_listener_mod.NullMessageListener()
    else:
      self._listener = listener

  cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.

  The returned byte count includes space for tag information.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value.  (The field number affects the
      total bytes required, since it is varint-encoded as part of the tag.)
    field_type: The type of the field.  One of the TYPE_* constants within
      FieldDescriptor.

  Raises:
    message_mod.EncodeError: If field_type is not a known type.
  """
  # Narrow the lookup so a KeyError raised *inside* the sizer function is not
  # silently converted into "Unrecognized field type".
  fn = type_checkers.TYPE_TO_BYTE_SIZE_FN.get(field_type)
  if fn is None:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
  return fn(field_number, value)
def _AddByteSizeMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the ByteSize() method."""

  def ByteSize(self):
    # Serve the memoized size unless a mutation has invalidated it.
    if not self._cached_byte_size_dirty:
      return self._cached_byte_size

    size = 0
    for field_descriptor, field_value in self.ListFields():
      size += field_descriptor._sizer(field_value)

    for tag_bytes, value_bytes in self._unknown_fields:
      size += len(tag_bytes) + len(value_bytes)

    self._cached_byte_size = size
    self._cached_byte_size_dirty = False
    self._listener_for_children.dirty = False
    return size

  cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the SerializeToString() method."""

  def SerializeToString(self):
    # Check if the message has all of its required fields set.  (The unused
    # local `errors` list from the previous revision has been removed.)
    if not self.IsInitialized():
      raise message_mod.EncodeError(
          'Message %s is missing required fields: %s' % (
          self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
    return self.SerializePartialToString()

  cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds SerializePartialToString() and
  the private _InternalSerialize() method."""

  def SerializePartialToString(self):
    out = BytesIO()
    self._InternalSerialize(out.write)
    return out.getvalue()

  cls.SerializePartialToString = SerializePartialToString

  def InternalSerialize(self, write_bytes):
    for field_descriptor, field_value in self.ListFields():
      field_descriptor._encoder(write_bytes, field_value)
    # Unknown fields are re-emitted verbatim so round-tripping preserves them.
    for tag_bytes, value_bytes in self._unknown_fields:
      write_bytes(tag_bytes)
      write_bytes(value_bytes)

  cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds MergeFromString() and the private
  _InternalParse() method."""

  def MergeFromString(self, serialized):
    length = len(serialized)
    try:
      if self._InternalParse(serialized, 0, length) != length:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise message_mod.DecodeError('Unexpected end-group tag.')
    except (IndexError, TypeError):
      # Short reads surface as IndexError/TypeError from the decoders.
      raise message_mod.DecodeError('Truncated message.')
    except struct.error as e:
      raise message_mod.DecodeError(e)
    return length
  cls.MergeFromString = MergeFromString

  # Bind frequently-used names as locals for speed inside the parse loop.
  local_ReadTag = decoder.ReadTag
  local_SkipField = decoder.SkipField
  decoders_by_tag = cls._decoders_by_tag

  def InternalParse(self, buffer, pos, end):
    self._Modified()
    field_dict = self._fields
    unknown_field_list = self._unknown_fields
    while pos != end:
      (tag_bytes, new_pos) = local_ReadTag(buffer, pos)
      field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None))
      if field_decoder is None:
        value_start_pos = new_pos
        new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
        if new_pos == -1:
          # End-group tag: let the caller decide whether this is an error.
          return pos
        if not unknown_field_list:
          unknown_field_list = self._unknown_fields = []
        # Preserve unrecognized data so it survives reserialization.
        unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos]))
        pos = new_pos
      else:
        pos = field_decoder(buffer, new_pos, end, self, field_dict)
        if field_desc:
          self._UpdateOneofState(field_desc)
    return pos
  cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationErrors methods to the
  protocol message class."""

  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors: A list which, if provided, will be populated with the field
          paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """
    # Performance is critical so we avoid HasField() and ListFields().

    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    # Snapshot the items since recursing can mutate _fields.
    for field, value in list(self._fields.items()):
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field
      from the top-level message, e.g. "foo.bar[5].baz".
    """
    errors = []

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          name = "(%s)" % field.full_name
        else:
          name = field.name

        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in range(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [prefix + error for error in sub_errors]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [prefix + error for error in sub_errors]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
  """Helper for _AddMessageMethods(): adds the MergeFrom() method."""
  LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
  CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))

    assert msg is not self
    self._Modified()

    fields = self._fields

    for field, value in six.iteritems(msg._fields):
      if field.label == LABEL_REPEATED:
        field_value = fields.get(field)
        if field_value is None:
          # Construct a new container to represent this field.
          field_value = field._default_constructor(self)
          fields[field] = field_value
        field_value.MergeFrom(value)
      elif field.cpp_type == CPPTYPE_MESSAGE:
        # Only merge sub-messages that are actually present in the source.
        if value._is_present_in_parent:
          field_value = fields.get(field)
          if field_value is None:
            field_value = field._default_constructor(self)
            fields[field] = field_value
          field_value.MergeFrom(value)
      else:
        # Scalars simply overwrite.
        self._fields[field] = value

    if msg._unknown_fields:
      if not self._unknown_fields:
        self._unknown_fields = []
      self._unknown_fields.extend(msg._unknown_fields)

  cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): adds the WhichOneof() method."""

  def WhichOneof(self, oneof_name):
    """Returns the name of the field that is set inside a oneof, or None."""
    try:
      field = message_descriptor.oneofs_by_name[oneof_name]
    except KeyError:
      raise ValueError(
          'Protocol message has no oneof "%s" field.' % oneof_name)

    nested_field = self._oneofs.get(field, None)
    if nested_field is not None and self.HasField(nested_field.name):
      return nested_field.name
    else:
      return None

  cls.WhichOneof = WhichOneof
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  # Extension accessors are only meaningful for extendable message types.
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddClearMethod(message_descriptor, cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddSetListenerMethod(cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """
    # Note:  some callers check _cached_byte_size_dirty before calling
    # _Modified() as an extra optimization, so this method must stay a
    # no-op when the size is already dirty.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Makes `field` the active member of its oneof, clearing any
    previously-set sibling member."""
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
def __init__(self, parent_message):
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
self._parent_message_weakref._Modified()
except ReferenceError:
pass
class _OneofListener(_Listener):
  """Special listener implementation for setting composite oneof fields."""

  def __init__(self, parent_message, field):
    """Args:
      parent_message: The message whose _Modified() method we should call
        when we receive Modified() messages.
      field: The descriptor of the field being set in the parent message.
    """
    super(_OneofListener, self).__init__(parent_message)
    self._field = field

  def Modified(self):
    """Also updates the state of the containing oneof in the parent."""
    try:
      self._parent_message_weakref._UpdateOneofState(self._field)
      super(_OneofListener, self).Modified()
    except ReferenceError:
      # The parent has been garbage-collected; nothing to propagate.
      pass
class _ExtensionDict(object):

  """Dict-like container supporting an indexable "Extensions" field on
  proto instances.

  Note that in all cases we expect extension handles to be FieldDescriptors.
  """

  def __init__(self, extended_message):
    """
    Args:
      extended_message: Message instance for which we are the Extensions dict.
    """
    self._extended_message = extended_message

  def __getitem__(self, extension_handle):
    """Returns the current value of the given extension handle."""
    _VerifyExtensionHandle(self._extended_message, extension_handle)

    result = self._extended_message._fields.get(extension_handle)
    if result is not None:
      return result

    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      result = extension_handle._default_constructor(self._extended_message)
    elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      result = extension_handle.message_type._concrete_class()
      try:
        result._SetListener(self._extended_message._listener_for_children)
      except ReferenceError:
        pass
    else:
      # Singular scalar: just return the default without inserting it into
      # the fields dict (presence semantics must not change on read).
      return extension_handle.default_value

    # Atomically check if another thread has preempted us and, if not, swap
    # in the new object we just created.  If someone has preempted us, take
    # that object and discard ours.
    # NOTE(review): relies on dict.setdefault() being atomic (true in
    # CPython).
    result = self._extended_message._fields.setdefault(
        extension_handle, result)

    return result

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False

    my_fields = self._extended_message.ListFields()
    other_fields = other._extended_message.ListFields()

    # Get rid of non-extension fields.
    my_fields = [field for field in my_fields if field.is_extension]
    other_fields = [field for field in other_fields if field.is_extension]

    return my_fields == other_fields

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def __setitem__(self, extension_handle, value):
    """If extension_handle specifies a non-repeated, scalar extension
    field, sets the value of that field.
    """
    _VerifyExtensionHandle(self._extended_message, extension_handle)

    if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
        extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
      raise TypeError(
          'Cannot assign to extension "%s" because it is a repeated or '
          'composite type.' % extension_handle.full_name)

    type_checker = type_checkers.GetTypeChecker(extension_handle)
    self._extended_message._fields[extension_handle] = (
        type_checker.CheckValue(value))
    self._extended_message._Modified()

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor, or None if not found.
    """
    return self._extended_message._extensions_by_name.get(name, None)
| true | true |
f70f7266246a72a1f47bb872e52660dc84524048 | 167 | py | Python | REL/ner/__init__.py | theblackcat102/REL | 9daaf924d3b7ee75ba0738fd218ddbaeab989bd8 | [
"MIT"
] | 210 | 2020-02-27T14:10:57.000Z | 2022-03-30T01:32:52.000Z | REL/ner/__init__.py | theblackcat102/REL | 9daaf924d3b7ee75ba0738fd218ddbaeab989bd8 | [
"MIT"
] | 69 | 2020-03-06T09:58:43.000Z | 2022-03-31T16:24:35.000Z | REL/ner/__init__.py | cnnlabs/REL | 7e680a13fb26cb23d9ba9ea45efd01cb4c6c7871 | [
"MIT"
] | 57 | 2020-02-28T15:52:33.000Z | 2022-03-16T11:28:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from REL.ner.base import NERBase, Span
from REL.ner.flair_wrapper import load_flair_ner
from REL.ner.ngram import Cmns
| 23.857143 | 48 | 0.748503 |
from REL.ner.base import NERBase, Span
from REL.ner.flair_wrapper import load_flair_ner
from REL.ner.ngram import Cmns
| true | true |
f70f729a2c7bd23c7413efdf4accb9acf8f8503f | 411 | py | Python | main.py | deadaf/tickets-bot | 1f72668a6843cb67daa77e911057775f9d9a37f1 | [
"MIT"
] | 5 | 2021-02-05T05:50:29.000Z | 2021-08-17T16:09:59.000Z | main.py | deadaf/tickets-bot | 1f72668a6843cb67daa77e911057775f9d9a37f1 | [
"MIT"
] | null | null | null | main.py | deadaf/tickets-bot | 1f72668a6843cb67daa77e911057775f9d9a37f1 | [
"MIT"
] | 3 | 2021-03-20T13:01:16.000Z | 2022-03-05T12:38:24.000Z | import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions
import json
import asyncio
bot = commands.Bot(command_prefix=".")
bot.remove_command("help")
@bot.event
async def on_ready():
    # Called by discord.py once the bot has connected to the gateway and
    # populated its internal cache; just log identifying information.
    print("Bot running with:")
    print("Username: ", bot.user.name)
    print("User ID: ", bot.user.id)
bot.load_extension('cogs.tickets')
bot.run("TOKEN")
| 19.571429 | 68 | 0.729927 | import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions
import json
import asyncio
bot = commands.Bot(command_prefix=".")
bot.remove_command("help")
@bot.event
async def on_ready():
    # Called by discord.py once the bot has connected to the gateway and
    # populated its internal cache; just log identifying information.
    print("Bot running with:")
    print("Username: ", bot.user.name)
    print("User ID: ", bot.user.id)
bot.load_extension('cogs.tickets')
bot.run("TOKEN")
| true | true |
f70f73cdbebb8824480c01d15f16886fcde78be7 | 914 | py | Python | corehq/motech/dhis2/migrations/0006_sqldhis2connection.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/motech/dhis2/migrations/0006_sqldhis2connection.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/motech/dhis2/migrations/0006_sqldhis2connection.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-01-14 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration that introduces this app's schema state.
    initial = True

    # Must run after the dhis2 app's 0005 migration.
    dependencies = [
        ('dhis2', '0005_delete_jsonapilog'),
    ]

    operations = [
        # One connection record per domain (domain is unique).
        migrations.CreateModel(
            name='SQLDhis2Connection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=255, unique=True)),
                ('server_url', models.CharField(max_length=255, null=True)),
                ('username', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255, null=True)),
                ('skip_cert_verify', models.BooleanField(default=False)),
            ],
        ),
    ]
| 31.517241 | 114 | 0.601751 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration that introduces this app's schema state.
    initial = True

    # Must run after the dhis2 app's 0005 migration.
    dependencies = [
        ('dhis2', '0005_delete_jsonapilog'),
    ]

    operations = [
        # One connection record per domain (domain is unique).
        migrations.CreateModel(
            name='SQLDhis2Connection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=255, unique=True)),
                ('server_url', models.CharField(max_length=255, null=True)),
                ('username', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255, null=True)),
                ('skip_cert_verify', models.BooleanField(default=False)),
            ],
        ),
    ]
| true | true |
f70f74b17c66f2afe1b4d538e6f2c4b8da58986d | 37,822 | py | Python | src/pip/_internal/index/package_finder.py | Shivansh-007/pip | 0c284520c6d068cb25ac89d9dbee0456c2eba23a | [
"MIT"
] | 1 | 2022-03-14T20:15:00.000Z | 2022-03-14T20:15:00.000Z | src/pip/_internal/index/package_finder.py | Shivansh-007/pip | 0c284520c6d068cb25ac89d9dbee0456c2eba23a | [
"MIT"
] | 1 | 2022-01-27T19:09:25.000Z | 2022-01-27T19:09:25.000Z | src/pip/_internal/index/package_finder.py | Shivansh-007/pip | 0c284520c6d068cb25ac89d9dbee0456c2eba23a | [
"MIT"
] | 1 | 2021-09-27T11:14:58.000Z | 2021-09-27T11:14:58.000Z | """Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import enum
import functools
import itertools
import logging
import re
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import LinkCollector, parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.req import InstallRequirement
from pip._internal.utils._log import getLogger
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
logger = getLogger(__name__)
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python,
            version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # A malformed specifier is treated as "no restriction" rather than
        # rejecting the link outright.
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python,
            link,
        )
        return True

    if is_compatible:
        return True

    # The interpreter is outside the link's advertised range.
    version = ".".join(map(str, version_info))
    if ignore_requires_python:
        logger.debug(
            "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
            version,
            link.requires_python,
            link,
        )
        return True

    logger.verbose(
        "Link requires a different Python (%s not in: %r): %s",
        version,
        link.requires_python,
        link,
    )
    return False
class LinkType(enum.Enum):
    """Outcome of evaluating a link for a project.

    ``candidate`` marks a usable link; every other member names the reason
    the link was rejected.
    """

    candidate = enum.auto()
    different_project = enum.auto()
    yanked = enum.auto()
    format_unsupported = enum.auto()
    format_invalid = enum.auto()
    platform_mismatch = enum.auto()
    requires_python_mismatch = enum.auto()
class LinkEvaluator:

    """
    Responsible for evaluating links for a particular project.
    """

    # Matches a trailing "-pyX[.Y]" marker in an egg-info version string.
    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name: str,
        canonical_name: str,
        formats: FrozenSet[str],
        target_python: TargetPython,
        allow_yanked: bool,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False

        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python

        self.project_name = project_name

    def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
        """
        Determine whether a link is a candidate for installation.

        :return: A tuple (result, detail), where *result* is an enum
            representing whether the evaluation found a candidate, or the reason
            why one is not found. If a candidate is found, *detail* will be the
            candidate's version string; if one is not found, it contains the
            reason the link fails to qualify.
        """
        version = None
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or "<none given>"
            return (LinkType.yanked, f"yanked for reason: {reason}")

        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (LinkType.format_unsupported, "not a file")
            if ext not in SUPPORTED_EXTENSIONS:
                return (
                    LinkType.format_unsupported,
                    f"unsupported archive format: {ext}",
                )
        if "binary" not in self._formats and ext == WHEEL_EXTENSION:
            reason = f"No binaries permitted for {self.project_name}"
            return (LinkType.format_unsupported, reason)

        if "macosx10" in link.path and ext == ".zip":
            return (LinkType.format_unsupported, "macosx10 one")

        if ext == WHEEL_EXTENSION:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                return (
                    LinkType.format_invalid,
                    "invalid wheel filename",
                )
            if canonicalize_name(wheel.name) != self._canonical_name:
                reason = f"wrong project name (not {self.project_name})"
                return (LinkType.different_project, reason)

            supported_tags = self._target_python.get_tags()
            if not wheel.supported(supported_tags):
                # Include the wheel's tags in the reason string to
                # simplify troubleshooting compatibility issues.
                file_tags = ", ".join(wheel.get_formatted_file_tags())
                reason = (
                    f"none of the wheel's tags ({file_tags}) are compatible "
                    f"(run pip debug --verbose to show compatible tags)"
                )
                return (LinkType.platform_mismatch, reason)

            version = wheel.version

        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f"No sources permitted for {self.project_name}"
            return (LinkType.format_unsupported, reason)

        if not version:
            # Non-wheel links carry the version in the egg fragment/filename.
            version = _extract_version_from_fragment(
                egg_info,
                self._canonical_name,
            )
        if not version:
            reason = f"Missing project version for {self.project_name}"
            return (LinkType.format_invalid, reason)

        match = self._py_version_re.search(version)
        if match:
            # Strip the "-pyX.Y" suffix and check it against the target.
            version = version[: match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (
                    LinkType.platform_mismatch,
                    "Python version is incorrect",
                )

        supports_python = _check_link_requires_python(
            link,
            version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            reason = f"{version} Requires-Python {link.requires_python}"
            return (LinkType.requires_python_mismatch, reason)

        logger.debug("Found link %s, version: %s", link, version)

        return (LinkType.candidate, version)
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Hashes,
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned. Otherwise,
    the given candidates are returned.

    Including the candidates with no hash specified when there is a match
    allows a warning to be logged if there is a more preferred candidate
    with no hash specified. Returning all candidates in the case of no
    matches lets pip report the hash of the candidate that would otherwise
    have been installed (e.g. permitting the user to more easily update
    their requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)

    matches_or_no_digest = []
    # Collect the non-matches for logging purposes.
    non_matches = []
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if not link.has_hash:
            # Hash-less links stay in the pool so that a warning can be
            # logged if one of them would be preferred.
            matches_or_no_digest.append(candidate)
        elif link.is_hash_allowed(hashes=hashes):
            match_count += 1
            matches_or_no_digest.append(candidate)
        else:
            non_matches.append(candidate)

    if match_count:
        filtered = matches_or_no_digest
    else:
        # Make sure we're not returning back the given value.
        filtered = list(candidates)

    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discard_message = "discarding {} non-matches:\n  {}".format(
            len(non_matches),
            "\n  ".join(str(candidate.link) for candidate in non_matches),
        )

    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )

    return filtered
class CandidatePreferences:

    """
    Encapsulates some of the preferences for filtering and sorting
    InstallationCandidate objects.
    """

    def __init__(
        self,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
    ) -> None:
        """
        :param prefer_binary: Whether to prefer binary (wheel) candidates.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """

    def __init__(
        self,
        candidates: List[InstallationCandidate],
        applicable_candidates: List[InstallationCandidate],
        best_candidate: Optional[InstallationCandidate],
    ) -> None:
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Sanity-check the invariants the caller must uphold.
        assert set(applicable_candidates) <= set(candidates)

        if best_candidate is not None:
            assert best_candidate in applicable_candidates
        else:
            assert not applicable_candidates

        self._candidates = candidates
        self._applicable_candidates = applicable_candidates
        self.best_candidate = best_candidate

    def iter_all(self) -> Iterable[InstallationCandidate]:
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self) -> Iterable[InstallationCandidate]:
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)
class CandidateEvaluator:
    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            specifier = specifiers.SpecifierSet()

        supported_tags = target_python.get_tags()

        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )

    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }

    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v)
            for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }

        # Again, converting version to str to deal with debundling.
        applicable_candidates = [c for c in candidates if str(c.version) in versions]

        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )

        return sorted(filtered_applicable_candidates, key=self._sort_key)

    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows:

        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.

        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:

        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.

        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            if wheel.build_tag is not None:
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                # Wheel filename validation guarantees a build tag starts
                # with a digit, so the regex must match; assert instead of
                # risking an opaque AttributeError on None.
                assert match is not None, f"guaranteed by filename validation: {wheel.build_tag}"
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )

    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate

    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)
        best_candidate = self.sort_best_candidate(applicable_candidates)
        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
class PackageFinder:
    """This finds packages.

    This is meant to match easy_install's technique for looking for
    packages, by reading pages and looking for appropriate links.
    """

    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        use_deprecated_html5lib: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()
        format_control = format_control or FormatControl(set(), set())
        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python
        self._use_deprecated_html5lib = use_deprecated_html5lib
        self.format_control = format_control
        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    @classmethod
    def create(
        cls,
        link_collector: LinkCollector,
        selection_prefs: SelectionPreferences,
        target_python: Optional[TargetPython] = None,
        *,
        use_deprecated_html5lib: bool,
    ) -> "PackageFinder":
        """Create a PackageFinder.

        :param selection_prefs: The candidate selection preferences, as a
            SelectionPreferences object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        """
        if target_python is None:
            target_python = TargetPython()
        candidate_prefs = CandidatePreferences(
            prefer_binary=selection_prefs.prefer_binary,
            allow_all_prereleases=selection_prefs.allow_all_prereleases,
        )
        return cls(
            candidate_prefs=candidate_prefs,
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=selection_prefs.allow_yanked,
            format_control=selection_prefs.format_control,
            ignore_requires_python=selection_prefs.ignore_requires_python,
            use_deprecated_html5lib=use_deprecated_html5lib,
        )
    @property
    def target_python(self) -> TargetPython:
        """The target Python interpreter compatibility is checked against."""
        return self._target_python
    @property
    def search_scope(self) -> SearchScope:
        """The SearchScope (index URLs + find-links) of the link collector."""
        return self._link_collector.search_scope
    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        self._link_collector.search_scope = search_scope
    @property
    def find_links(self) -> List[str]:
        """The configured find-links locations."""
        return self._link_collector.find_links
    @property
    def index_urls(self) -> List[str]:
        """The configured package index URLs."""
        return self.search_scope.index_urls
    @property
    def trusted_hosts(self) -> Iterable[str]:
        """Yield the netlocs of hosts pip trusts despite missing/invalid TLS."""
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)
    @property
    def allow_all_prereleases(self) -> bool:
        """Whether pre-release versions are considered for all projects."""
        return self._candidate_prefs.allow_all_prereleases
    def set_allow_all_prereleases(self) -> None:
        """Permanently enable consideration of pre-release versions."""
        self._candidate_prefs.allow_all_prereleases = True
    @property
    def prefer_binary(self) -> bool:
        """Whether wheels sort above sdists regardless of version recency."""
        return self._candidate_prefs.prefer_binary
    def set_prefer_binary(self) -> None:
        """Permanently enable preferring wheels over source archives."""
        self._candidate_prefs.prefer_binary = True
    def requires_python_skipped_reasons(self) -> List[str]:
        """Return the unique Requires-Python mismatch messages logged so far."""
        reasons = {
            detail
            for _, result, detail in self._logged_links
            if result == LinkType.requires_python_mismatch
        }
        return sorted(reasons)
    def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
        """Build a LinkEvaluator configured for *project_name* using this
        finder's format control, target Python, and yank/requires-python
        policies."""
        canonical_name = canonicalize_name(project_name)
        formats = self.format_control.get_allowed_formats(canonical_name)
        return LinkEvaluator(
            project_name=project_name,
            canonical_name=canonical_name,
            formats=formats,
            target_python=self._target_python,
            allow_yanked=self._allow_yanked,
            ignore_requires_python=self._ignore_requires_python,
        )
    def _sort_links(self, links: Iterable[Link]) -> List[Link]:
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen: Set[Link] = set()
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs
    def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
        # Log each (link, result, detail) combination only once.
        entry = (link, result, detail)
        if entry not in self._logged_links:
            # Put the link at the end so the reason is more visible and because
            # the link string is usually very long.
            logger.debug("Skipping link: %s: %s", detail, link)
            self._logged_links.add(entry)
    def get_install_candidate(
        self, link_evaluator: LinkEvaluator, link: Link
    ) -> Optional[InstallationCandidate]:
        """
        If the link is a candidate for install, convert it to an
        InstallationCandidate and return it. Otherwise, return None.
        """
        result, detail = link_evaluator.evaluate_link(link)
        if result != LinkType.candidate:
            self._log_skipped_link(link, result, detail)
            return None
        return InstallationCandidate(
            name=link_evaluator.project_name,
            link=link,
            # For a candidate, evaluate_link's detail is the version string.
            version=detail,
        )
    def evaluate_links(
        self, link_evaluator: LinkEvaluator, links: Iterable[Link]
    ) -> List[InstallationCandidate]:
        """
        Convert links that are candidates to InstallationCandidate objects.
        """
        candidates = []
        for link in self._sort_links(links):
            candidate = self.get_install_candidate(link_evaluator, link)
            if candidate is not None:
                candidates.append(candidate)
        return candidates
    def process_project_url(
        self, project_url: Link, link_evaluator: LinkEvaluator
    ) -> List[InstallationCandidate]:
        """Fetch one project page and return its installation candidates.

        Returns an empty list when the page cannot be fetched."""
        logger.debug(
            "Fetching project page and analyzing links: %s",
            project_url,
        )
        html_page = self._link_collector.fetch_page(project_url)
        if html_page is None:
            return []
        page_links = list(parse_links(html_page, self._use_deprecated_html5lib))
        with indent_log():
            package_links = self.evaluate_links(
                link_evaluator,
                links=page_links,
            )
        return package_links
    # NOTE(review): lru_cache on an instance method keys on (and retains)
    # ``self``, so results are cached per finder instance and instances are
    # kept alive for the cache's lifetime (ruff B019). Presumably acceptable
    # because PackageFinder objects are long-lived — confirm.
    @functools.lru_cache(maxsize=None)
    def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        link_evaluator = self.make_link_evaluator(project_name)
        collected_sources = self._link_collector.collect_sources(
            project_name=project_name,
            candidates_from_page=functools.partial(
                self.process_project_url,
                link_evaluator=link_evaluator,
            ),
        )
        page_candidates_it = itertools.chain.from_iterable(
            source.page_candidates()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        page_candidates = list(page_candidates_it)
        file_links_it = itertools.chain.from_iterable(
            source.file_links()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        file_candidates = self.evaluate_links(
            link_evaluator,
            sorted(file_links_it, reverse=True),
        )
        if logger.isEnabledFor(logging.DEBUG) and file_candidates:
            paths = []
            for candidate in file_candidates:
                assert candidate.link.url  # we need to have a URL
                try:
                    paths.append(candidate.link.file_path)
                except Exception:
                    paths.append(candidate.link.url)  # it's not a local file
            logger.debug("Local files found: %s", ", ".join(paths))
        # This is an intentional priority ordering
        return file_candidates + page_candidates
    def make_candidate_evaluator(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> CandidateEvaluator:
        """Create a CandidateEvaluator object to use."""
        candidate_prefs = self._candidate_prefs
        return CandidateEvaluator.create(
            project_name=project_name,
            target_python=self._target_python,
            prefer_binary=candidate_prefs.prefer_binary,
            allow_all_prereleases=candidate_prefs.allow_all_prereleases,
            specifier=specifier,
            hashes=hashes,
        )
    # NOTE(review): same per-instance lru_cache caveat as find_all_candidates.
    @functools.lru_cache(maxsize=None)
    def find_best_candidate(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> BestCandidateResult:
        """Find matches for the given project and specifier.

        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.

        :return: A `BestCandidateResult` instance.
        """
        candidates = self.find_all_candidates(project_name)
        candidate_evaluator = self.make_candidate_evaluator(
            project_name=project_name,
            specifier=specifier,
            hashes=hashes,
        )
        return candidate_evaluator.compute_best_candidate(candidates)
    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate
        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version
        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )
        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.iter_all()),
            )
            raise DistributionNotFound(
                "No matching distribution found for {}".format(req)
            )
        best_installed = False
        if installed_version and (
            best_candidate is None or best_candidate.version <= installed_version
        ):
            best_installed = True
        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            else:
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            return None
        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                "Installed version (%s) is most up-to-date (past versions: %s)",
                installed_version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            raise BestVersionAlreadyInstalled
        logger.debug(
            "Using version %s (newest of versions: %s)",
            best_candidate.version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        return best_candidate
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

    >>> fragment = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(fragment, canonical_name)
    8
    """
    # Name and version are separated by one single dash. Walk every dash
    # left to right; the first one whose prefix canonicalizes to the
    # expected name is the separator.
    dash = fragment.find("-")
    while dash != -1:
        if canonicalize_name(fragment[:dash]) == canonical_name:
            return dash
        dash = fragment.find("-", dash + 1)
    raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        sep_index = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        # The fragment does not embed this project's name at all.
        return None
    # Everything after the separating dash is the version; an empty
    # remainder means no version could be extracted.
    return fragment[sep_index + 1 :] or None
# NOTE: the remainder of this file is a duplicated, comment-stripped copy of
# the module above (dataset artifact); the stray statistics row that was here
# has been removed.
import enum
import functools
import itertools
import logging
import re
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import LinkCollector, parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.req import InstallRequirement
from pip._internal.utils._log import getLogger
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
logger = getLogger(__name__)
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python,
            version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # An unparsable Requires-Python is treated as compatible (logged,
        # then falls through to the final ``return True``).
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python,
            link,
        )
    else:
        if not is_compatible:
            version = ".".join(map(str, version_info))
            if not ignore_requires_python:
                logger.verbose(
                    "Link requires a different Python (%s not in: %r): %s",
                    version,
                    link.requires_python,
                    link,
                )
                return False
            # Incompatible, but the caller asked to ignore Requires-Python.
            logger.debug(
                "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
                version,
                link.requires_python,
                link,
            )
    return True
class LinkType(enum.Enum):
    """Outcome of LinkEvaluator.evaluate_link(): either the link is a
    viable installation candidate, or the reason it was rejected."""

    candidate = enum.auto()
    different_project = enum.auto()
    yanked = enum.auto()
    format_unsupported = enum.auto()
    format_invalid = enum.auto()
    platform_mismatch = enum.auto()
    requires_python_mismatch = enum.auto()
class LinkEvaluator:
    """
    Responsible for evaluating links for a particular project.
    """

    # Matches a trailing "-pyXY" / "-pyX.Y" marker in a version string.
    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name: str,
        canonical_name: str,
        formats: FrozenSet[str],
        target_python: TargetPython,
        allow_yanked: bool,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a
            set with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            "Requires-Python" values in links. Defaults to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False
        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python
        self.project_name = project_name
    def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
        """
        Determine whether a link is a candidate for installation.

        :return: A tuple (result, detail), where *result* is an enum
            representing whether the evaluation found a candidate, or the
            reason why one is not found. If a candidate is found, *detail*
            will be the candidate's version string; if one is not found, it
            contains the reason the link fails to qualify.
        """
        version = None
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or "<none given>"
            return (LinkType.yanked, f"yanked for reason: {reason}")
        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (LinkType.format_unsupported, "not a file")
            if ext not in SUPPORTED_EXTENSIONS:
                return (
                    LinkType.format_unsupported,
                    f"unsupported archive format: {ext}",
                )
            if "binary" not in self._formats and ext == WHEEL_EXTENSION:
                reason = f"No binaries permitted for {self.project_name}"
                return (LinkType.format_unsupported, reason)
            if "macosx10" in link.path and ext == ".zip":
                return (LinkType.format_unsupported, "macosx10 one")
            if ext == WHEEL_EXTENSION:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    return (
                        LinkType.format_invalid,
                        "invalid wheel filename",
                    )
                if canonicalize_name(wheel.name) != self._canonical_name:
                    reason = f"wrong project name (not {self.project_name})"
                    return (LinkType.different_project, reason)
                supported_tags = self._target_python.get_tags()
                if not wheel.supported(supported_tags):
                    # Include the wheel's tags in the reason string to
                    # simplify troubleshooting compatibility issues.
                    file_tags = ", ".join(wheel.get_formatted_file_tags())
                    reason = (
                        f"none of the wheel's tags ({file_tags}) are compatible "
                        f"(run pip debug --verbose to show compatible tags)"
                    )
                    return (LinkType.platform_mismatch, reason)
                version = wheel.version
        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f"No sources permitted for {self.project_name}"
            return (LinkType.format_unsupported, reason)
        if not version:
            version = _extract_version_from_fragment(
                egg_info,
                self._canonical_name,
            )
        if not version:
            reason = f"Missing project version for {self.project_name}"
            return (LinkType.format_invalid, reason)
        match = self._py_version_re.search(version)
        if match:
            version = version[: match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (
                    LinkType.platform_mismatch,
                    "Python version is incorrect",
                )
        supports_python = _check_link_requires_python(
            link,
            version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            reason = f"{version} Requires-Python {link.requires_python}"
            return (LinkType.requires_python_mismatch, reason)
        logger.debug("Found link %s, version: %s", link, version)
        return (LinkType.candidate, version)
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Hashes,
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned.  Otherwise,
    the given candidates are returned unfiltered.

    :param hashes: The allowed hashes; falsy means "no hash checking".
    :param project_name: Used only for log messages.
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)
    matches_or_no_digest = []
    non_matches = []
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if not link.has_hash:
            pass
        elif link.is_hash_allowed(hashes=hashes):
            match_count += 1
        else:
            non_matches.append(candidate)
            continue
        # Reached for links with no hash and for links with an allowed hash.
        matches_or_no_digest.append(candidate)
    if match_count:
        filtered = matches_or_no_digest
    else:
        # No candidate matched: keep everything (copy, not the input list).
        filtered = list(candidates)
    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discard_message = "discarding {} non-matches:\n  {}".format(
            len(non_matches),
            "\n  ".join(str(candidate.link) for candidate in non_matches),
        )
    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )
    return filtered
class CandidatePreferences:
    """Mutable knobs that influence how candidates are filtered and sorted.

    Kept as a small standalone object so the preferences can be shared
    between a PackageFinder and the CandidateEvaluator it builds.
    """

    def __init__(
        self,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
    ) -> None:
        """
        :param prefer_binary: Whether wheels should sort above sdists.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
class BestCandidateResult:
    """Outcome of a candidate search: everything found, the applicable
    subset, and the single preferred candidate (if any).
    """

    def __init__(
        self,
        candidates: List[InstallationCandidate],
        applicable_candidates: List[InstallationCandidate],
        best_candidate: Optional[InstallationCandidate],
    ) -> None:
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Internal-consistency checks: applicable ⊆ all, and the best
        # candidate (when present) must itself be applicable.
        assert set(applicable_candidates) <= set(candidates)
        if best_candidate is not None:
            assert best_candidate in applicable_candidates
        else:
            assert not applicable_candidates
        self._candidates = candidates
        self._applicable_candidates = applicable_candidates
        self.best_candidate = best_candidate

    def iter_all(self) -> Iterable[InstallationCandidate]:
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self) -> Iterable[InstallationCandidate]:
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)
class CandidateEvaluator:
    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            specifier = specifiers.SpecifierSet()
        supported_tags = target_python.get_tags()
        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )
    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }
    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v)
            for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }
        # Again, converting version to str to deal with debundling.
        applicable_candidates = [c for c in candidates if str(c.version) in versions]
        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )
        return sorted(filtered_applicable_candidates, key=self._sort_key)
    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows: candidates with allowed (matching)
        hashes are always preferred; then non-yanked over yanked (PEP 592);
        then version; then (for wheels) binary preference, tag priority and
        build tag.
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            if wheel.build_tag is not None:
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            # sdists rank below every supported wheel tag.
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )
    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate
    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)
        best_candidate = self.sort_best_candidate(applicable_candidates)
        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
class PackageFinder:
    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        use_deprecated_html5lib: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when
            consulting the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()
        format_control = format_control or FormatControl(set(), set())
        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python
        self._use_deprecated_html5lib = use_deprecated_html5lib
        self.format_control = format_control
        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    @classmethod
    def create(
        cls,
        link_collector: LinkCollector,
        selection_prefs: SelectionPreferences,
        target_python: Optional[TargetPython] = None,
        *,
        use_deprecated_html5lib: bool,
    ) -> "PackageFinder":
        """Create a PackageFinder.

        :param selection_prefs: The candidate selection preferences, as a
            SelectionPreferences object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        """
        if target_python is None:
            target_python = TargetPython()
        candidate_prefs = CandidatePreferences(
            prefer_binary=selection_prefs.prefer_binary,
            allow_all_prereleases=selection_prefs.allow_all_prereleases,
        )
        return cls(
            candidate_prefs=candidate_prefs,
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=selection_prefs.allow_yanked,
            format_control=selection_prefs.format_control,
            ignore_requires_python=selection_prefs.ignore_requires_python,
            use_deprecated_html5lib=use_deprecated_html5lib,
        )
    @property
    def target_python(self) -> TargetPython:
        """The target Python interpreter compatibility is checked against."""
        return self._target_python
    @property
    def search_scope(self) -> SearchScope:
        """The SearchScope (index URLs + find-links) of the link collector."""
        return self._link_collector.search_scope
    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        # Delegates straight to the link collector.
        self._link_collector.search_scope = search_scope
    @property
    def find_links(self) -> List[str]:
        """The configured find-links locations."""
        return self._link_collector.find_links
    @property
    def index_urls(self) -> List[str]:
        """The configured package index URLs."""
        return self.search_scope.index_urls
    @property
    def trusted_hosts(self) -> Iterable[str]:
        """Yield the netlocs of the session's trusted origins."""
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)
    @property
    def allow_all_prereleases(self) -> bool:
        """Whether pre-release versions are considered for all projects."""
        return self._candidate_prefs.allow_all_prereleases
    def set_allow_all_prereleases(self) -> None:
        """Permanently enable consideration of pre-release versions."""
        self._candidate_prefs.allow_all_prereleases = True
    @property
    def prefer_binary(self) -> bool:
        """Whether wheels sort above sdists regardless of version recency."""
        return self._candidate_prefs.prefer_binary
    def set_prefer_binary(self) -> None:
        """Permanently enable preferring wheels over source archives."""
        self._candidate_prefs.prefer_binary = True
    def requires_python_skipped_reasons(self) -> List[str]:
        """Return the unique Requires-Python mismatch messages logged so
        far, sorted for stable output."""
        reasons = {
            detail
            for _, result, detail in self._logged_links
            if result == LinkType.requires_python_mismatch
        }
        return sorted(reasons)
def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
canonical_name = canonicalize_name(project_name)
formats = self.format_control.get_allowed_formats(canonical_name)
return LinkEvaluator(
project_name=project_name,
canonical_name=canonical_name,
formats=formats,
target_python=self._target_python,
allow_yanked=self._allow_yanked,
ignore_requires_python=self._ignore_requires_python,
)
def _sort_links(self, links: Iterable[Link]) -> List[Link]:
eggs, no_eggs = [], []
seen: Set[Link] = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
entry = (link, result, detail)
if entry not in self._logged_links:
logger.debug("Skipping link: %s: %s", detail, link)
self._logged_links.add(entry)
def get_install_candidate(
self, link_evaluator: LinkEvaluator, link: Link
) -> Optional[InstallationCandidate]:
result, detail = link_evaluator.evaluate_link(link)
if result != LinkType.candidate:
self._log_skipped_link(link, result, detail)
return None
return InstallationCandidate(
name=link_evaluator.project_name,
link=link,
version=detail,
)
def evaluate_links(
self, link_evaluator: LinkEvaluator, links: Iterable[Link]
) -> List[InstallationCandidate]:
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, link)
if candidate is not None:
candidates.append(candidate)
return candidates
def process_project_url(
self, project_url: Link, link_evaluator: LinkEvaluator
) -> List[InstallationCandidate]:
logger.debug(
"Fetching project page and analyzing links: %s",
project_url,
)
html_page = self._link_collector.fetch_page(project_url)
if html_page is None:
return []
page_links = list(parse_links(html_page, self._use_deprecated_html5lib))
with indent_log():
package_links = self.evaluate_links(
link_evaluator,
links=page_links,
)
return package_links
@functools.lru_cache(maxsize=None)
def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
link_evaluator = self.make_link_evaluator(project_name)
collected_sources = self._link_collector.collect_sources(
project_name=project_name,
candidates_from_page=functools.partial(
self.process_project_url,
link_evaluator=link_evaluator,
),
)
page_candidates_it = itertools.chain.from_iterable(
source.page_candidates()
for sources in collected_sources
for source in sources
if source is not None
)
page_candidates = list(page_candidates_it)
file_links_it = itertools.chain.from_iterable(
source.file_links()
for sources in collected_sources
for source in sources
if source is not None
)
file_candidates = self.evaluate_links(
link_evaluator,
sorted(file_links_it, reverse=True),
)
if logger.isEnabledFor(logging.DEBUG) and file_candidates:
paths = []
for candidate in file_candidates:
assert candidate.link.url
try:
paths.append(candidate.link.file_path)
except Exception:
paths.append(candidate.link.url)
logger.debug("Local files found: %s", ", ".join(paths))
# This is an intentional priority ordering
return file_candidates + page_candidates
def make_candidate_evaluator(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> CandidateEvaluator:
candidate_prefs = self._candidate_prefs
return CandidateEvaluator.create(
project_name=project_name,
target_python=self._target_python,
prefer_binary=candidate_prefs.prefer_binary,
allow_all_prereleases=candidate_prefs.allow_all_prereleases,
specifier=specifier,
hashes=hashes,
)
@functools.lru_cache(maxsize=None)
def find_best_candidate(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> BestCandidateResult:
candidates = self.find_all_candidates(project_name)
candidate_evaluator = self.make_candidate_evaluator(
project_name=project_name,
specifier=specifier,
hashes=hashes,
)
return candidate_evaluator.compute_best_candidate(candidates)
    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find an installation candidate satisfying *req*.

        Returns the best candidate, or None when the installed version
        already satisfies the requirement and no upgrade was requested.
        Raises DistributionNotFound when nothing matches, or
        BestVersionAlreadyInstalled when the installed version is best.
        """
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate
        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version
        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )
        # Nothing installed and nothing found: hard failure.
        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.iter_all()),
            )
            raise DistributionNotFound(
                "No matching distribution found for {}".format(req)
            )
        # True when the installed version is at least as new as anything found.
        best_installed = False
        if installed_version and (
            best_candidate is None or best_candidate.version <= installed_version
        ):
            best_installed = True
        # No upgrade requested and something is installed: keep what we have.
        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            else:
                # best_installed is False here, so best_candidate is not None.
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            return None
        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                "Installed version (%s) is most up-to-date (past versions: %s)",
                installed_version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            raise BestVersionAlreadyInstalled
        # best_candidate cannot be None here: the all-None case raised above,
        # and an installed-but-no-candidate case set best_installed.
        logger.debug(
            "Using version %s (newest of versions: %s)",
            best_candidate.version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        return best_candidate
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Return the index of the dash separating name and version.

    Project name and version must be separated by one single dash. Scan
    every dash in *fragment*; the first one where the text in front of it
    canonicalizes to *canonical_name* is the separator.

    Raises ValueError when no such dash exists.
    """
    for index, char in enumerate(fragment):
        if char == "-" and canonicalize_name(fragment[:index]) == canonical_name:
            return index
    raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version out of a ``<name>-<version>`` filename fragment.

    Returns None when *fragment* does not begin with *canonical_name* or
    when the version part after the separator is empty.
    """
    try:
        sep = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        return None
    return fragment[sep + 1:] or None
| true | true |
f70f74c49693a3c8825166a36842545400ad789b | 2,992 | py | Python | algorithms/anchor_detector.py | songheony/AAA-multi | 80a988d8d312664d8ca19dee82c844183cf4f55d | [
"MIT"
] | null | null | null | algorithms/anchor_detector.py | songheony/AAA-multi | 80a988d8d312664d8ca19dee82c844183cf4f55d | [
"MIT"
] | null | null | null | algorithms/anchor_detector.py | songheony/AAA-multi | 80a988d8d312664d8ca19dee82c844183cf4f55d | [
"MIT"
] | 1 | 2021-03-01T06:58:15.000Z | 2021-03-01T06:58:15.000Z | from .aaa_util import eval_results, get_summary, convert_df
class AnchorDetector:
    """Decides when an offline tracker result should become an "anchor".

    An anchor is a chunk of offline tracking output trusted enough to be
    fed back (as "feedback") to the online trackers.
    """
    def __init__(self, offline):
        # offline: object exposing track(start_frame, end_frame).
        self.offline = offline
    def initialize(self, seq_info):
        """Reset per-sequence state before a new sequence starts."""
        self.seq_info = seq_info
        # Offline result of the previous window; None until the first call.
        self.previous_offline = None
    def fixed_detect(self, frame_idx, duration):
        """Anchor on a fixed schedule: every ``duration`` frames.

        Returns (is_anchor, feedback, feedback_length); ``feedback`` covers
        the last ``duration`` frames and is None on non-anchor frames.
        """
        feedback_length = duration
        if (frame_idx + 1) % duration == 0:
            is_anchor, feedback = (
                True,
                self._get_feedback(frame_idx - duration + 1, frame_idx),
            )
        else:
            is_anchor, feedback = False, None
        return is_anchor, feedback, feedback_length
    def stable_detect(self, seq_info, frame_idx, duration, threshold):
        """Anchor when two consecutive offline windows agree enough.

        The current offline result is compared with the previous one on
        their overlapping frames; each overlap is scored against the other
        and the two scores (summary index 3, the MOTA-like measure) are
        averaged. If the mean reaches ``threshold`` the current result is
        accepted as feedback. Returns (is_anchor, feedback,
        feedback_length).
        """
        if frame_idx + 1 > duration:
            # A full window is available: track the last ``duration`` frames.
            current_offline = self._get_feedback(frame_idx - duration + 1, frame_idx)
            if self.previous_offline is not None and current_offline is not None:
                # The previous window is shifted by one frame: drop its
                # first frame and re-align its frame indices (column 0).
                overlap_previous = self.previous_offline[
                    self.previous_offline[:, 0] > 1
                ]
                overlap_previous[:, 0] -= 1
                overlap_previous = convert_df(overlap_previous, is_offline=True)
                overlap_current = current_offline[current_offline[:, 0] < duration]
                overlap_current = convert_df(overlap_current, is_offline=True)
            feedback_length = duration
        else:
            # Sequence start: track everything seen so far.
            current_offline = self._get_feedback(0, frame_idx)
            if self.previous_offline is not None and current_offline is not None:
                overlap_previous = convert_df(self.previous_offline, is_offline=True)
                overlap_current = current_offline[current_offline[:, 0] <= frame_idx]
                overlap_current = convert_df(overlap_current, is_offline=True)
            feedback_length = frame_idx + 1
        if self.previous_offline is not None and current_offline is not None:
            # Score each overlap against the other, then average the two.
            prev_acc, prev_ana, _ = eval_results(
                seq_info, overlap_previous, overlap_current
            )
            prev_sum = get_summary(prev_acc, prev_ana)
            curr_acc, curr_ana, _ = eval_results(
                seq_info, overlap_current, overlap_previous
            )
            curr_sum = get_summary(curr_acc, curr_ana)
            mean_mota = (prev_sum[3] + curr_sum[3]) / 2
            if mean_mota >= threshold:
                is_anchor = True
                feedback = current_offline
            else:
                is_anchor = False
                feedback = None
            # print(f"Frame {frame_idx}, MOTA {mean_mota}")
        else:
            # Not enough data yet (first window, or a tracker failure).
            is_anchor = False
            feedback = None
        self.previous_offline = current_offline
        return is_anchor, feedback, feedback_length
    def _get_feedback(self, start_frame, end_frame):
        """Run the offline tracker over [start_frame, end_frame].

        Returns None when the tracker raises RuntimeError or ValueError.
        """
        try:
            feedback = self.offline.track(start_frame, end_frame)
        except (RuntimeError, ValueError):
            feedback = None
        return feedback
| 35.2 | 85 | 0.60127 | from .aaa_util import eval_results, get_summary, convert_df
class AnchorDetector:
    """Decides when an offline tracker result should become an "anchor".

    An anchor is a chunk of offline tracking output trusted enough to be
    fed back (as "feedback") to the online trackers.
    """
    def __init__(self, offline):
        # offline: object exposing track(start_frame, end_frame).
        self.offline = offline
    def initialize(self, seq_info):
        """Reset per-sequence state before a new sequence starts."""
        self.seq_info = seq_info
        # Offline result of the previous window; None until the first call.
        self.previous_offline = None
    def fixed_detect(self, frame_idx, duration):
        """Anchor on a fixed schedule: every ``duration`` frames.

        Returns (is_anchor, feedback, feedback_length); ``feedback`` covers
        the last ``duration`` frames and is None on non-anchor frames.
        """
        feedback_length = duration
        if (frame_idx + 1) % duration == 0:
            is_anchor, feedback = (
                True,
                self._get_feedback(frame_idx - duration + 1, frame_idx),
            )
        else:
            is_anchor, feedback = False, None
        return is_anchor, feedback, feedback_length
    def stable_detect(self, seq_info, frame_idx, duration, threshold):
        """Anchor when two consecutive offline windows agree enough.

        The current offline result is compared with the previous one on
        their overlapping frames; each overlap is scored against the other
        and the two scores (summary index 3, the MOTA-like measure) are
        averaged. If the mean reaches ``threshold`` the current result is
        accepted as feedback. Returns (is_anchor, feedback,
        feedback_length).
        """
        if frame_idx + 1 > duration:
            # A full window is available: track the last ``duration`` frames.
            current_offline = self._get_feedback(frame_idx - duration + 1, frame_idx)
            if self.previous_offline is not None and current_offline is not None:
                # The previous window is shifted by one frame: drop its
                # first frame and re-align its frame indices (column 0).
                overlap_previous = self.previous_offline[
                    self.previous_offline[:, 0] > 1
                ]
                overlap_previous[:, 0] -= 1
                overlap_previous = convert_df(overlap_previous, is_offline=True)
                overlap_current = current_offline[current_offline[:, 0] < duration]
                overlap_current = convert_df(overlap_current, is_offline=True)
            feedback_length = duration
        else:
            # Sequence start: track everything seen so far.
            current_offline = self._get_feedback(0, frame_idx)
            if self.previous_offline is not None and current_offline is not None:
                overlap_previous = convert_df(self.previous_offline, is_offline=True)
                overlap_current = current_offline[current_offline[:, 0] <= frame_idx]
                overlap_current = convert_df(overlap_current, is_offline=True)
            feedback_length = frame_idx + 1
        if self.previous_offline is not None and current_offline is not None:
            # Score each overlap against the other, then average the two.
            prev_acc, prev_ana, _ = eval_results(
                seq_info, overlap_previous, overlap_current
            )
            prev_sum = get_summary(prev_acc, prev_ana)
            curr_acc, curr_ana, _ = eval_results(
                seq_info, overlap_current, overlap_previous
            )
            curr_sum = get_summary(curr_acc, curr_ana)
            mean_mota = (prev_sum[3] + curr_sum[3]) / 2
            if mean_mota >= threshold:
                is_anchor = True
                feedback = current_offline
            else:
                is_anchor = False
                feedback = None
        else:
            # Not enough data yet (first window, or a tracker failure).
            is_anchor = False
            feedback = None
        self.previous_offline = current_offline
        return is_anchor, feedback, feedback_length
    def _get_feedback(self, start_frame, end_frame):
        """Run the offline tracker over [start_frame, end_frame].

        Returns None when the tracker raises RuntimeError or ValueError.
        """
        try:
            feedback = self.offline.track(start_frame, end_frame)
        except (RuntimeError, ValueError):
            feedback = None
        return feedback
f70f74dd4d6743c0e5a61696e8f4284ad3a589ae | 300 | py | Python | maxsmi/tests/test_maxsmi.py | t-kimber/maxsmi | d7d52a9ba95efb6b4219928425bb5de965c4b3b5 | [
"MIT"
] | 1 | 2021-01-22T17:56:54.000Z | 2021-01-22T17:56:54.000Z | maxsmi/tests/test_maxsmi.py | t-kimber/maxsmi | d7d52a9ba95efb6b4219928425bb5de965c4b3b5 | [
"MIT"
] | 12 | 2020-10-16T10:13:56.000Z | 2021-04-14T07:25:05.000Z | maxsmi/tests/test_maxsmi.py | t-kimber/maxsmi | d7d52a9ba95efb6b4219928425bb5de965c4b3b5 | [
"MIT"
] | null | null | null | """
Unit and regression test for the maxsmi package.
"""
# Import package, test suite, and other packages as needed
# import maxsmi
# import pytest
import sys
def test_maxsmi_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "maxsmi" in sys.modules
| 21.428571 | 74 | 0.73 |
import sys
def test_maxsmi_imported():
assert "maxsmi" in sys.modules
| true | true |
f70f75b56fefe5b1ddf643a702b9df5d8dde9dd9 | 1,130 | py | Python | catalyst/assets/__init__.py | guilhermeprokisch/catalyst | 21e096b261912d9e905584178d6ee626072c23cb | [
"Apache-2.0"
] | null | null | null | catalyst/assets/__init__.py | guilhermeprokisch/catalyst | 21e096b261912d9e905584178d6ee626072c23cb | [
"Apache-2.0"
] | null | null | null | catalyst/assets/__init__.py | guilhermeprokisch/catalyst | 21e096b261912d9e905584178d6ee626072c23cb | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Core asset types and helpers.
from ._assets import (
    Asset,
    Equity,
    Future,
    make_asset_array,
    CACHE_FILE_TEMPLATE
)
# Asset lookup machinery and marker interfaces.
from .assets import (
    AssetFinder,
    AssetConvertible,
    PricingDataAssociable,
)
from .asset_db_schema import ASSET_DB_VERSION
from .asset_writer import AssetDBWriter
# Public API re-exported by this package.
__all__ = [
    'ASSET_DB_VERSION',
    'Asset',
    'AssetDBWriter',
    'Equity',
    'Future',
    'AssetFinder',
    'AssetConvertible',
    'PricingDataAssociable',
    'make_asset_array',
    'CACHE_FILE_TEMPLATE'
]
| 26.27907 | 75 | 0.69646 |
# Core asset types and helpers.
from ._assets import (
    Asset,
    Equity,
    Future,
    make_asset_array,
    CACHE_FILE_TEMPLATE
)
# Asset lookup machinery and marker interfaces.
from .assets import (
    AssetFinder,
    AssetConvertible,
    PricingDataAssociable,
)
from .asset_db_schema import ASSET_DB_VERSION
from .asset_writer import AssetDBWriter
# Public API re-exported by this package.
__all__ = [
    'ASSET_DB_VERSION',
    'Asset',
    'AssetDBWriter',
    'Equity',
    'Future',
    'AssetFinder',
    'AssetConvertible',
    'PricingDataAssociable',
    'make_asset_array',
    'CACHE_FILE_TEMPLATE'
]
| true | true |
f70f76797352e63f38e9029343013e191ad89605 | 3,744 | py | Python | ansible/plugins/action/sros.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | ansible/plugins/action/sros.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ansible/plugins/action/sros.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.sros import sros_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.six import iteritems
# Use the CLI's shared Display instance when running under the ansible
# executable; otherwise fall back to a private Display.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class ActionModule(_ActionModule):
    """Action plugin for Nokia SR OS modules.

    Rewrites a ``connection: local`` task into a persistent
    ``network_cli`` connection built from the task's ``provider`` dict.
    """
    def run(self, tmp=None, task_vars=None):
        """Open a persistent network_cli connection, then run the module.

        Fails unless the play uses ``connection: local``; on success the
        socket path is passed to the module via ``ansible_socket``.
        """
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection
            )
        provider = self.load_provider()
        # Build a play context that targets the device over network_cli,
        # preferring provider values over the play's own connection vars.
        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'sros'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                    'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
        task_vars['ansible_socket'] = socket_path
        result = super(ActionModule, self).run(tmp, task_vars)
        return result
    def load_provider(self):
        """Merge task args into the ``provider`` dict.

        Missing keys are filled from the argument spec's fallbacks, or
        set to None.
        """
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(sros_argument_spec):
            if key != 'provider' and key not in provider:
                if key in self._task.args:
                    provider[key] = self._task.args[key]
                elif 'fallback' in value:
                    provider[key] = self._fallback(value['fallback'])
                elif key not in provider:
                    # NOTE(review): this condition is always true here (the
                    # outer check already ensured the key is missing).
                    provider[key] = None
        return provider
    def _fallback(self, fallback):
        """Resolve an argument-spec fallback.

        ``fallback[0]`` is the strategy callable; remaining items supply
        positional args (a list replaces ``args``) or kwargs (a dict).
        Returns None when the strategy raises AnsibleFallbackNotFound.
        """
        strategy = fallback[0]
        args = []
        kwargs = {}
        for item in fallback[1:]:
            if isinstance(item, dict):
                kwargs = item
            else:
                args = item
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            pass
| 36.705882 | 118 | 0.654647 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.sros import sros_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.six import iteritems
# Use the CLI's shared Display instance when running under the ansible
# executable; otherwise fall back to a private Display.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class ActionModule(_ActionModule):
    """Action plugin for Nokia SR OS modules.

    Rewrites a ``connection: local`` task into a persistent
    ``network_cli`` connection built from the task's ``provider`` dict.
    """
    def run(self, tmp=None, task_vars=None):
        """Open a persistent network_cli connection, then run the module.

        Fails unless the play uses ``connection: local``; on success the
        socket path is passed to the module via ``ansible_socket``.
        """
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection
            )
        provider = self.load_provider()
        # Build a play context that targets the device over network_cli,
        # preferring provider values over the play's own connection vars.
        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'sros'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                    'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
        task_vars['ansible_socket'] = socket_path
        result = super(ActionModule, self).run(tmp, task_vars)
        return result
    def load_provider(self):
        """Merge task args into the ``provider`` dict.

        Missing keys are filled from the argument spec's fallbacks, or
        set to None.
        """
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(sros_argument_spec):
            if key != 'provider' and key not in provider:
                if key in self._task.args:
                    provider[key] = self._task.args[key]
                elif 'fallback' in value:
                    provider[key] = self._fallback(value['fallback'])
                elif key not in provider:
                    # NOTE(review): this condition is always true here (the
                    # outer check already ensured the key is missing).
                    provider[key] = None
        return provider
    def _fallback(self, fallback):
        """Resolve an argument-spec fallback.

        ``fallback[0]`` is the strategy callable; remaining items supply
        positional args (a list replaces ``args``) or kwargs (a dict).
        Returns None when the strategy raises AnsibleFallbackNotFound.
        """
        strategy = fallback[0]
        args = []
        kwargs = {}
        for item in fallback[1:]:
            if isinstance(item, dict):
                kwargs = item
            else:
                args = item
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            pass
| true | true |
f70f78983d59b550f32fb5bc0b61b997923e1baf | 11,459 | py | Python | graphsense/model/entity_tags.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | graphsense/model/entity_tags.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | 1 | 2022-02-24T11:21:49.000Z | 2022-02-24T11:21:49.000Z | graphsense/model/entity_tags.py | INTERPOL-Innovation-Centre/GraphSense-Maltego-transform | 2a9b352289ab64903a7012c5d84cb4c6d8172ade | [
"MIT"
] | null | null | null | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from graphsense.exceptions import ApiAttributeError
def lazy_import():
    """Import EntityTag on demand and publish it into this module's globals."""
    from graphsense.model.entity_tag import EntityTag
    globals().update(EntityTag=EntityTag)
class EntityTags(ModelNormal):
    """A page of entity tags returned by the GraphSense API.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
        allowed_values (dict): allowed enum values per attribute path;
            empty for this model.
        attribute_map (dict): maps python attribute name to the JSON key
            used in the API definition.
        validations (dict): per-attribute validation rules (max_length,
            regex, ...); empty for this model.
        additional_properties_type (tuple): classes accepted as values
            for undeclared (additional) properties.
    """
    # No enum-restricted attributes on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in the schema.

        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """Map attribute name to the tuple of accepted types.

        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded.
        """
        lazy_import()
        return {
            'entity_tags': ([EntityTag],),  # noqa: E501
            'next_page': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None
    # Python attribute name -> serialized (JSON) key.
    attribute_map = {
        'entity_tags': 'entity_tags',  # noqa: E501
        'next_page': 'next_page',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, entity_tags, *args, **kwargs):  # noqa: E501
        """Deserialization constructor: EntityTags - a model defined in OpenAPI.

        Args:
            entity_tags ([EntityTag]):

        Keyword Args:
            next_page (str): [optional]
            _check_type (bool): type-check values against openapi_types
                (default True).
            _path_to_item (tuple/list): location of this model within the
                received data, used for error reporting.
            _spec_property_naming (bool): True if input keys are the
                serialized names from the OpenAPI document.
            _configuration (Configuration): used when deserializing
                file_type parameters.
            _visited_composed_classes (tuple): composed-schema classes
                already traversed via a discriminator.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Extra positional arguments are never valid for this model.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.entity_tags = entity_tags
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attributes that are always set, regardless of the schema.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, entity_tags, *args, **kwargs):  # noqa: E501
        """EntityTags - a model defined in OpenAPI.

        Unlike ``_from_openapi_data``, this constructor rejects
        assignments to read-only attributes.

        Args:
            entity_tags ([EntityTag]):

        Keyword Args:
            next_page (str): [optional]
            _check_type (bool): type-check values against openapi_types
                (default True).
            _path_to_item (tuple/list): location of this model within the
                received data, used for error reporting.
            _spec_property_naming (bool): True if input keys are the
                serialized names from the OpenAPI document.
            _configuration (Configuration): used when deserializing
                file_type parameters.
            _visited_composed_classes (tuple): composed-schema classes
                already traversed via a discriminator.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.entity_tags = entity_tags
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 42.128676 | 121 | 0.569421 |
import re
import sys
from graphsense.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from graphsense.exceptions import ApiAttributeError
def lazy_import():
    """Import EntityTag on first use and expose it in this module's globals."""
    import graphsense.model.entity_tag as _entity_tag_mod
    globals()['EntityTag'] = _entity_tag_mod.EntityTag
class EntityTags(ModelNormal):
    """A page of entity tags returned by the GraphSense API.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
        allowed_values (dict): allowed enum values per attribute path;
            empty for this model.
        attribute_map (dict): maps python attribute name to the JSON key
            used in the API definition.
        validations (dict): per-attribute validation rules (max_length,
            regex, ...); empty for this model.
        additional_properties_type (tuple): classes accepted as values
            for undeclared (additional) properties.
    """
    # No enum-restricted attributes on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in the schema.

        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    _nullable = False
    @cached_property
    def openapi_types():
        """Map attribute name to the tuple of accepted types.

        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded.
        """
        lazy_import()
        return {
            'entity_tags': ([EntityTag],),
            'next_page': (str,),
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None
    # Python attribute name -> serialized (JSON) key.
    attribute_map = {
        'entity_tags': 'entity_tags',
        'next_page': 'next_page',
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, entity_tags, *args, **kwargs):
        """Deserialization constructor: EntityTags - a model defined in OpenAPI.

        Args:
            entity_tags ([EntityTag]):

        Keyword Args:
            next_page (str): [optional]
            _check_type (bool): type-check values against openapi_types
                (default True).
            _path_to_item (tuple/list): location of this model within the
                received data, used for error reporting.
            _spec_property_naming (bool): True if input keys are the
                serialized names from the OpenAPI document.
            _configuration (Configuration): used when deserializing
                file_type parameters.
            _visited_composed_classes (tuple): composed-schema classes
                already traversed via a discriminator.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Extra positional arguments are never valid for this model.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.entity_tags = entity_tags
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attributes that are always set, regardless of the schema.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, entity_tags, *args, **kwargs):
        """EntityTags - a model defined in OpenAPI.

        Unlike ``_from_openapi_data``, this constructor rejects
        assignments to read-only attributes.

        Args:
            entity_tags ([EntityTag]):

        Keyword Args:
            next_page (str): [optional]
            _check_type (bool): type-check values against openapi_types
                (default True).
            _path_to_item (tuple/list): location of this model within the
                received data, used for error reporting.
            _spec_property_naming (bool): True if input keys are the
                serialized names from the OpenAPI document.
            _configuration (Configuration): used when deserializing
                file_type parameters.
            _visited_composed_classes (tuple): composed-schema classes
                already traversed via a discriminator.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.entity_tags = entity_tags
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| true | true |
f70f79489ce5d6799a41c07cf1757179521b1ce4 | 2,632 | py | Python | the_mechanic_backend/apps/stock/migrations/0003_sparecustomer_spareorder_sparesold.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | null | null | null | the_mechanic_backend/apps/stock/migrations/0003_sparecustomer_spareorder_sparesold.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | 5 | 2020-06-05T22:30:20.000Z | 2021-09-08T01:12:27.000Z | the_mechanic_backend/apps/stock/migrations/0003_sparecustomer_spareorder_sparesold.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-03-31 18:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1.5, 2019-03-31).

    Creates the spare-part sales models SpareCustomer, SpareOrder and
    SpareSold.  Do not edit applied migrations by hand; add a new one.
    """
    dependencies = [
        ('accounts', '0001_initial'),
        # SpareOrder.sold_by references the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('stock', '0002_spare_store'),
    ]
    operations = [
        # Customer record for spare-part sales.
        migrations.CreateModel(
            name='SpareCustomer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('phone_number', models.CharField(max_length=10)),
                ('address', models.TextField()),
            ],
        ),
        # A sales order: linked to customer, selling user and store.
        migrations.CreateModel(
            name='SpareOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_id', models.CharField(max_length=20)),
                ('order_type', models.CharField(choices=[('IN_SOURCE', 'IN_SOURCE'), ('OUT_SOURCE', 'OUT_SOURCE')], max_length=20)),
                ('total', models.DecimalField(decimal_places=2, max_digits=10)),
                ('order_date', models.DateTimeField(auto_now=True)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareCustomer')),
                # Nullable so orders survive deletion of the selling user.
                ('sold_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Store')),
            ],
        ),
        # Order line item; name/price are snapshotted at sale time.
        migrations.CreateModel(
            name='SpareSold',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('spare_count', models.IntegerField()),
                ('spare_name', models.CharField(max_length=100)),
                ('spare_price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('spare_price_type', models.CharField(choices=[('MRP', 'MRP'), ('MECHANIC', 'MECHANIC'), ('WHOLESALER', 'WHOLESALER'), ('CUSTOMER', 'CUSTOMER')], max_length=20)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareOrder')),
                # models.SET('deleted') keeps the sale row if the Spare is removed.
                ('spare', models.ForeignKey(on_delete=models.SET('deleted'), to='stock.Spare')),
            ],
        ),
    ]
| 49.660377 | 178 | 0.602204 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('stock', '0002_spare_store'),
]
operations = [
migrations.CreateModel(
name='SpareCustomer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('phone_number', models.CharField(max_length=10)),
('address', models.TextField()),
],
),
migrations.CreateModel(
name='SpareOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.CharField(max_length=20)),
('order_type', models.CharField(choices=[('IN_SOURCE', 'IN_SOURCE'), ('OUT_SOURCE', 'OUT_SOURCE')], max_length=20)),
('total', models.DecimalField(decimal_places=2, max_digits=10)),
('order_date', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareCustomer')),
('sold_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Store')),
],
),
migrations.CreateModel(
name='SpareSold',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spare_count', models.IntegerField()),
('spare_name', models.CharField(max_length=100)),
('spare_price', models.DecimalField(decimal_places=2, max_digits=10)),
('spare_price_type', models.CharField(choices=[('MRP', 'MRP'), ('MECHANIC', 'MECHANIC'), ('WHOLESALER', 'WHOLESALER'), ('CUSTOMER', 'CUSTOMER')], max_length=20)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareOrder')),
('spare', models.ForeignKey(on_delete=models.SET('deleted'), to='stock.Spare')),
],
),
]
| true | true |
f70f797573bfd52476fcff70d64ce151275711dc | 3,122 | py | Python | meteors.py | Stafferson/YandexLycP2 | f5c50cc89ca6716612f0b91f2e22315c414d5541 | [
"MIT"
] | null | null | null | meteors.py | Stafferson/YandexLycP2 | f5c50cc89ca6716612f0b91f2e22315c414d5541 | [
"MIT"
] | null | null | null | meteors.py | Stafferson/YandexLycP2 | f5c50cc89ca6716612f0b91f2e22315c414d5541 | [
"MIT"
] | null | null | null | import os
import sys
import random
import pygame
def load_image(name, colorkey=None):
    """Return the pygame Surface for an image inside the ``data`` directory.

    Exits the whole program with a message when the file is missing.
    ``colorkey`` is accepted for interface compatibility but currently unused.
    """
    fullname = os.path.join('data', name)
    if os.path.isfile(fullname):
        # Shipped assets are known to be valid, so no further error handling.
        return pygame.image.load(fullname)
    # Missing file: report (message kept in the game's locale) and bail out.
    print(f"Файл с изображением '{fullname}' не найден")
    sys.exit()
enemies = pygame.sprite.Group()  # all live Meteor sprites
bullets = pygame.sprite.Group()  # projectiles that damage meteors (filled by the main game)
class Meteor(pygame.sprite.Sprite):
    """Falling meteor enemy animated from a 5-frame sprite sheet.

    A new meteor is placed just above the top edge at a random x position
    that neither overlaps another meteor nor sticks out of the screen.
    """
    def __init__(self):
        super().__init__()
        self.frames = []
        # The sheet is a single row of 5 animation frames.
        self.cut_sheet(load_image("meteors1.png"), 5, 1)
        self.cur_frame = 0
        self.image = self.frames[self.cur_frame]
        self.count = 0  # tick counter that paces the animation
        self.mask = pygame.mask.from_surface(self.image)
        self.rect.x = random.randrange(width)
        self.rect.y = -1 * self.image.get_height()  # start fully above the screen
        # Re-roll x until the meteor is collision-free and fully on screen
        # horizontally (uses the module-level `enemies` group and `width`).
        while pygame.sprite.spritecollideany(self, enemies, pygame.sprite.collide_mask) or\
                self.rect.x < 0 or self.rect.right > width:
            self.rect.x = random.randrange(width)
        self.life = 1  # hit points; one bullet overlap removes one point
    def cut_sheet(self, sheet, columns, rows):
        """Slice `sheet` into `columns` x `rows` equal frames into self.frames."""
        self.rect = pygame.Rect(0, 0, sheet.get_width() // columns,
                                sheet.get_height() // rows)
        for j in range(rows):
            for i in range(columns):
                frame_location = (self.rect.w * i, self.rect.h * j)
                self.frames.append(sheet.subsurface(pygame.Rect(
                    frame_location, self.rect.size)))
    def update(self):
        """One tick: take bullet damage, fall and animate, or disappear."""
        # At most one life point is lost per tick, however many bullets overlap.
        if pygame.sprite.spritecollideany(self, bullets, pygame.sprite.collide_mask):
            self.life -= 1
        if self.life > 0 and self.rect.y <= height:
            self.rect = self.rect.move(0, 1)  # fall one pixel per tick
            self.count += 1
            if self.count % 7 == 0:  # advance the animation every 7 ticks
                self.cur_frame = (self.cur_frame + 1) % len(self.frames)
                self.image = self.frames[self.cur_frame]
        else:
            # Destroyed or fell past the bottom: remove from all groups.
            self.kill()
def except_hook(cls, exception, traceback):
    """Forward uncaught exceptions to Python's default excepthook.

    Meant to be installed as ``sys.excepthook`` so that tracebacks raised
    inside the game loop still reach stderr unchanged.
    """
    return sys.__excepthook__(cls, exception, traceback)
if __name__ == '__main__':
    # Standalone demo: meteors fall on a plain blue 500x700 window at 60 FPS.
    pygame.init()
    size = width, height = 500, 700  # other parameters may be set in the main game
    screen = pygame.display.set_mode(size)
    clock = pygame.time.Clock()
    fps = 60
    # Custom timer event fired every 3000 ms (milliseconds, not frames).
    MYEVENTTYPE = pygame.USEREVENT + 1
    pygame.time.set_timer(MYEVENTTYPE, 3000)
    # Initial wave of 1-3 meteors.
    for _ in range(random.randrange(1, 4)):
        enemies.add(Meteor())
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == MYEVENTTYPE:  # spawn 1-3 new meteors every 3 seconds
                for _ in range(random.randrange(1, 4)):
                    enemies.add(Meteor())
        screen.fill(pygame.Color('blue'))  # in the main game, there will be a background(animated?)
        enemies.draw(screen)
        enemies.update()
        clock.tick(fps)  # cap the loop at `fps` iterations per second
        pygame.display.flip()
    pygame.quit()
sys.excepthook = except_hook | 35.078652 | 100 | 0.601217 | import os
import sys
import random
import pygame
def load_image(name, colorkey=None):
fullname = os.path.join('data', name)
if not os.path.isfile(fullname):
print(f"Файл с изображением '{fullname}' не найден")
sys.exit()
image = pygame.image.load(fullname)
return image
enemies = pygame.sprite.Group()
bullets = pygame.sprite.Group()
class Meteor(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.frames = []
self.cut_sheet(load_image("meteors1.png"), 5, 1)
self.cur_frame = 0
self.image = self.frames[self.cur_frame]
self.count = 0
self.mask = pygame.mask.from_surface(self.image)
self.rect.x = random.randrange(width)
self.rect.y = -1 * self.image.get_height()
while pygame.sprite.spritecollideany(self, enemies, pygame.sprite.collide_mask) or\
self.rect.x < 0 or self.rect.right > width:
self.rect.x = random.randrange(width)
self.life = 1
def cut_sheet(self, sheet, columns, rows):
self.rect = pygame.Rect(0, 0, sheet.get_width() // columns,
sheet.get_height() // rows)
for j in range(rows):
for i in range(columns):
frame_location = (self.rect.w * i, self.rect.h * j)
self.frames.append(sheet.subsurface(pygame.Rect(
frame_location, self.rect.size)))
def update(self):
if pygame.sprite.spritecollideany(self, bullets, pygame.sprite.collide_mask):
self.life -= 1
if self.life > 0 and self.rect.y <= height:
self.rect = self.rect.move(0, 1)
self.count += 1
if self.count % 7 == 0:
self.cur_frame = (self.cur_frame + 1) % len(self.frames)
self.image = self.frames[self.cur_frame]
else:
self.kill()
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
if __name__ == '__main__':
pygame.init()
size = width, height = 500, 700
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
fps = 60
MYEVENTTYPE = pygame.USEREVENT + 1
pygame.time.set_timer(MYEVENTTYPE, 3000)
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MYEVENTTYPE:
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
screen.fill(pygame.Color('blue'))
enemies.draw(screen)
enemies.update()
clock.tick(fps)
pygame.display.flip()
pygame.quit()
sys.excepthook = except_hook | true | true |
f70f79ef125a14fdd24574d066c3d6d527e43b4f | 330 | py | Python | flask_app/__init__.py | maawoo/S1GRASS-Webapp | b34335f2aaa64dff075b955b98ad01f062ba9891 | [
"Unlicense"
] | null | null | null | flask_app/__init__.py | maawoo/S1GRASS-Webapp | b34335f2aaa64dff075b955b98ad01f062ba9891 | [
"Unlicense"
] | 1 | 2020-09-10T12:18:37.000Z | 2020-09-10T12:18:37.000Z | flask_app/__init__.py | maawoo/S1GRASS-Webapp | b34335f2aaa64dff075b955b98ad01f062ba9891 | [
"Unlicense"
] | 2 | 2020-09-09T13:37:45.000Z | 2021-04-23T18:57:24.000Z | from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
# Module-level application setup (no app factory): a single shared instance.
app = Flask(__name__)
app.config.from_object(Config)  # load settings from the Config class
db = SQLAlchemy(app)  # ORM handle
migrate = Migrate(app, db)  # enables `flask db ...` migration commands
bootstrap = Bootstrap(app)  # Flask-Bootstrap template support
# Imported at the bottom on purpose: routes/models import `app` and `db`
# from this module, so importing them at the top would be circular.
from flask_app import routes, models
| 22 | 39 | 0.818182 | from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
bootstrap = Bootstrap(app)
from flask_app import routes, models
| true | true |
f70f7a2b99b2903b8866d778aaaf850f6f9f1fa1 | 6,688 | py | Python | gammapy/utils/testing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2017-11-22T17:07:56.000Z | 2017-11-22T17:07:56.000Z | gammapy/utils/testing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | null | null | null | gammapy/utils/testing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2019-09-04T14:03:33.000Z | 2019-09-04T14:03:33.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for testing"""
import os
import sys
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
# Public API of this testing-utilities module (has_data/run_cli are internal).
__all__ = [
    "requires_dependency",
    "requires_data",
    "mpl_plot_check",
    "assert_quantity_allclose",
    "assert_skycoord_allclose",
    "assert_time_allclose",
    "Checker",
]
# Cache for `requires_dependency`
_requires_dependency_cache = {}
def requires_dependency(name):
"""Decorator to declare required dependencies for tests.
Examples
--------
::
from gammapy.utils.testing import requires_dependency
@requires_dependency('scipy')
def test_using_scipy():
import scipy
...
"""
import pytest
if name in _requires_dependency_cache:
skip_it = _requires_dependency_cache[name]
else:
try:
__import__(name)
skip_it = False
except ImportError:
skip_it = True
_requires_dependency_cache[name] = skip_it
reason = f"Missing dependency: {name}"
return pytest.mark.skipif(skip_it, reason=reason)
def has_data(name):
    """Is a certain set of data available?"""
    env_var_by_name = {
        "gammapy-extra": "GAMMAPY_EXTRA",
        "gammapy-data": "GAMMAPY_DATA",
        "gamma-cat": "GAMMA_CAT",
        "fermi-lat": "GAMMAPY_FERMI_LAT_DATA",
    }
    if name not in env_var_by_name:
        raise ValueError(f"Invalid name: {name}")
    # Presence of the environment variable signals that the data set exists.
    return env_var_by_name[name] in os.environ
def requires_data(name="gammapy-data"):
    """Decorator to declare required data for tests.

    Examples
    --------
    ::

        from gammapy.utils.testing import requires_data

        @requires_data()
        def test_using_data_files():
            filename = "$GAMMAPY_DATA/..."
            ...
    """
    import pytest

    # Guard against the common mistake of writing @requires_data (no call),
    # which would pass the test function itself as `name`.
    if not isinstance(name, str):
        raise TypeError(
            "You must call @requires_data with a name (str). "
            "Usually this: @requires_data()"
        )
    return pytest.mark.skipif(not has_data(name), reason=f"Missing data: {name}")
def run_cli(cli, args, exit_code=0):
    """Run Click command line tool.

    Thin wrapper around `click.testing.CliRunner` that prints info to
    stderr if the command fails.

    Parameters
    ----------
    cli : click.Command
        Click command
    args : list of str
        Argument list
    exit_code : int
        Expected exit code of the command

    Returns
    -------
    result : `click.testing.Result`
        Result
    """
    from click.testing import CliRunner

    result = CliRunner().invoke(cli, args, catch_exceptions=False)
    if result.exit_code != exit_code:
        # Surface the captured output on stderr to help debugging.
        for chunk in ("Exit code mismatch!\n", "Output:\n", result.output):
            sys.stderr.write(chunk)
    return result
def assert_skycoord_allclose(actual, desired):
    """Assert all-close for `astropy.coordinates.SkyCoord` objects.

    Only the raw lon/lat data are compared; frames are not checked.
    """
    for coord in (actual, desired):
        assert isinstance(coord, SkyCoord)
    assert_allclose(actual.data.lon.deg, desired.data.lon.deg)
    assert_allclose(actual.data.lat.deg, desired.data.lat.deg)
def assert_time_allclose(actual, desired, atol=1e-3):
    """Assert all-close for `astropy.time.Time` objects.

    ``atol`` is the absolute tolerance in seconds; scale and format must
    match exactly.
    """
    assert isinstance(actual, Time)
    assert isinstance(desired, Time)
    assert actual.scale == desired.scale
    assert actual.format == desired.format
    # Compare via the time difference expressed in seconds.
    assert_allclose((actual - desired).sec, 0, rtol=0, atol=atol)
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
    """Assert all-close for `astropy.units.Quantity` objects.

    Requires that ``unit`` is identical, not just that quantities
    are allclose taking different units into account.

    We prefer this kind of assert for testing, since units
    should only change on purpose, so this tests more behaviour.
    """
    # TODO: change this later to explicitly check units are the same!
    # assert actual.unit == desired.unit
    unitless_args = _unquantify_allclose_arguments(actual, desired, rtol, atol)
    assert_allclose(*unitless_args, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
    """Convert the inputs to plain values in ``actual``'s unit and return
    ``(actual, desired, rtol, atol)`` ready for `numpy.testing.assert_allclose`.

    Raises `astropy.units.UnitsError` when ``desired``/``atol`` cannot be
    converted to ``actual``'s unit or when ``rtol`` is not dimensionless.
    """
    actual = u.Quantity(actual, subok=True, copy=False)
    desired = u.Quantity(desired, subok=True, copy=False)
    try:
        desired = desired.to(actual.unit)
    except u.UnitsError:
        raise u.UnitsError(
            "Units for 'desired' ({}) and 'actual' ({}) "
            "are not convertible".format(desired.unit, actual.unit)
        )
    if atol is None:
        # by default, we assume an absolute tolerance of 0
        atol = u.Quantity(0)
    else:
        atol = u.Quantity(atol, subok=True, copy=False)
    # NOTE(review): the default atol is a dimensionless zero; converting it
    # to a unit-ful `actual.unit` below relies on astropy accepting that
    # conversion -- confirm against the astropy version in use.
    try:
        atol = atol.to(actual.unit)
    except u.UnitsError:
        raise u.UnitsError(
            "Units for 'atol' ({}) and 'actual' ({}) "
            "are not convertible".format(atol.unit, actual.unit)
        )
    rtol = u.Quantity(rtol, subok=True, copy=False)
    try:
        rtol = rtol.to(u.dimensionless_unscaled)
    except Exception:
        raise u.UnitsError("`rtol` should be dimensionless")
    return actual.value, desired.value, rtol.value, atol.value
def mpl_plot_check():
    """Matplotlib plotting test context manager.

    Opens a fresh figure on ``__enter__`` and, on ``__exit__``, renders the
    current figure into an in-memory PNG buffer so that rendering problems
    raise errors; writing to memory is faster than writing to disk.
    """
    from io import BytesIO
    import matplotlib.pyplot as plt

    class _PlotCheck:
        def __enter__(self):
            plt.figure()

        def __exit__(self, exc_type, exc_value, tb):
            # Force a full render; any drawing problem surfaces here.
            plt.savefig(BytesIO(), format="png")
            plt.close()

    return _PlotCheck()
class Checker:
    """Base class for checker classes in Gammapy."""

    def run(self, checks="all"):
        """Yield results from the selected check methods.

        ``checks`` is the string ``"all"`` or an iterable of keys into the
        class-level ``CHECKS`` mapping (check name -> method name).
        """
        selected = self.CHECKS.keys() if checks == "all" else checks
        unknown = sorted(set(selected).difference(self.CHECKS.keys()))
        if unknown:
            raise ValueError(f"Unknown checks: {unknown!r}")
        for key in selected:
            yield from getattr(self, self.CHECKS[key])()
| 27.866667 | 80 | 0.643541 |
import os
import sys
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
__all__ = [
"requires_dependency",
"requires_data",
"mpl_plot_check",
"assert_quantity_allclose",
"assert_skycoord_allclose",
"assert_time_allclose",
"Checker",
]
_requires_dependency_cache = {}
def requires_dependency(name):
import pytest
if name in _requires_dependency_cache:
skip_it = _requires_dependency_cache[name]
else:
try:
__import__(name)
skip_it = False
except ImportError:
skip_it = True
_requires_dependency_cache[name] = skip_it
reason = f"Missing dependency: {name}"
return pytest.mark.skipif(skip_it, reason=reason)
def has_data(name):
if name == "gammapy-extra":
return "GAMMAPY_EXTRA" in os.environ
elif name == "gammapy-data":
return "GAMMAPY_DATA" in os.environ
elif name == "gamma-cat":
return "GAMMA_CAT" in os.environ
elif name == "fermi-lat":
return "GAMMAPY_FERMI_LAT_DATA" in os.environ
else:
raise ValueError(f"Invalid name: {name}")
def requires_data(name="gammapy-data"):
import pytest
if not isinstance(name, str):
raise TypeError(
"You must call @requires_data with a name (str). "
"Usually this: @requires_data()"
)
skip_it = not has_data(name)
reason = f"Missing data: {name}"
return pytest.mark.skipif(skip_it, reason=reason)
def run_cli(cli, args, exit_code=0):
from click.testing import CliRunner
result = CliRunner().invoke(cli, args, catch_exceptions=False)
if result.exit_code != exit_code:
sys.stderr.write("Exit code mismatch!\n")
sys.stderr.write("Output:\n")
sys.stderr.write(result.output)
return result
def assert_skycoord_allclose(actual, desired):
assert isinstance(actual, SkyCoord)
assert isinstance(desired, SkyCoord)
assert_allclose(actual.data.lon.deg, desired.data.lon.deg)
assert_allclose(actual.data.lat.deg, desired.data.lat.deg)
def assert_time_allclose(actual, desired, atol=1e-3):
assert isinstance(actual, Time)
assert isinstance(desired, Time)
assert actual.scale == desired.scale
assert actual.format == desired.format
dt = actual - desired
assert_allclose(dt.sec, 0, rtol=0, atol=atol)
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
args = _unquantify_allclose_arguments(actual, desired, rtol, atol)
assert_allclose(*args, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = u.Quantity(actual, subok=True, copy=False)
desired = u.Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except u.UnitsError:
raise u.UnitsError(
"Units for 'desired' ({}) and 'actual' ({}) "
"are not convertible".format(desired.unit, actual.unit)
)
if atol is None:
atol = u.Quantity(0)
else:
atol = u.Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except u.UnitsError:
raise u.UnitsError(
"Units for 'atol' ({}) and 'actual' ({}) "
"are not convertible".format(atol.unit, actual.unit)
)
rtol = u.Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(u.dimensionless_unscaled)
except Exception:
raise u.UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
def mpl_plot_check():
from io import BytesIO
import matplotlib.pyplot as plt
class MPLPlotCheck:
def __enter__(self):
plt.figure()
def __exit__(self, type, value, traceback):
plt.savefig(BytesIO(), format="png")
plt.close()
return MPLPlotCheck()
class Checker:
def run(self, checks="all"):
if checks == "all":
checks = self.CHECKS.keys()
unknown_checks = sorted(set(checks).difference(self.CHECKS.keys()))
if unknown_checks:
raise ValueError(f"Unknown checks: {unknown_checks!r}")
for check in checks:
method = getattr(self, self.CHECKS[check])
yield from method()
| true | true |
f70f7a739c8358bfeaf9b03fd0832dae1581d974 | 1,088 | py | Python | src/snlayers/snconv1d.py | Zihang97/PAGAN | 9233fc54ecf49d6a82bb0794333d61f707439a68 | [
"MIT"
] | 29 | 2019-11-04T12:46:17.000Z | 2022-02-19T10:06:16.000Z | src/snlayers/snconv1d.py | Zihang97/PAGAN | 9233fc54ecf49d6a82bb0794333d61f707439a68 | [
"MIT"
] | 2 | 2020-07-05T04:15:57.000Z | 2021-04-10T03:45:09.000Z | src/snlayers/snconv1d.py | Zihang97/PAGAN | 9233fc54ecf49d6a82bb0794333d61f707439a68 | [
"MIT"
] | 9 | 2020-05-04T01:23:37.000Z | 2021-07-13T06:47:02.000Z | # coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import conv
from torch.nn.modules.utils import _single
from ..functions.max_sv import max_singular_value
class SNConv1d(conv._ConvNd):
    """1-D convolution with spectral normalization of the weight.

    The weight used in `forward` is ``weight / sigma``, where ``sigma`` is
    the largest singular value of the flattened weight matrix, estimated by
    `max_singular_value` (presumably one power-iteration step) using the
    persistent vector ``u``.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        # transposed=False, output_padding=0 -> plain (non-transposed) conv.
        # NOTE(review): written against an older torch; newer `_ConvNd`
        # signatures also expect `padding_mode` -- confirm the torch version.
        super(SNConv1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias)
        # Power-iteration state; a buffer so it moves/saves with the module.
        self.register_buffer('u', torch.Tensor(1, out_channels).normal_())
    @property
    def W_(self):
        # Flatten to (out_channels, -1) and estimate the top singular value.
        w_mat = self.weight.view(self.weight.size(0), -1)
        sigma, _u = max_singular_value(w_mat, self.u)
        # Side effect: advance the power-iteration vector in place on access.
        self.u.copy_(_u)
        return self.weight / sigma
    def forward(self, input):
        # Convolve with the spectrally normalized weight.
        return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)
| 36.266667 | 117 | 0.682904 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import conv
from torch.nn.modules.utils import _single
from ..functions.max_sv import max_singular_value
class SNConv1d(conv._ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
super(SNConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _single(0), groups, bias)
self.register_buffer('u', torch.Tensor(1, out_channels).normal_())
@property
def W_(self):
w_mat = self.weight.view(self.weight.size(0), -1)
sigma, _u = max_singular_value(w_mat, self.u)
self.u.copy_(_u)
return self.weight / sigma
def forward(self, input):
return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)
| true | true |
f70f7a9a71e9c09452054da1066ca9dc4f363773 | 678 | py | Python | tf_agents/agents/sac/__init__.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 16 | 2020-09-23T06:21:49.000Z | 2022-03-28T05:45:04.000Z | tf_agents/agents/sac/__init__.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 13 | 2019-06-18T03:36:39.000Z | 2019-08-28T18:30:29.000Z | tf_agents/agents/sac/__init__.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 6 | 2020-10-09T06:33:23.000Z | 2022-02-03T16:16:36.000Z | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Soft Actor Critic agent."""
from tf_agents.agents.sac import sac_agent
| 37.666667 | 74 | 0.759587 |
from tf_agents.agents.sac import sac_agent
| true | true |
f70f7ac9a07e731a63b7367bb5601c7b352c07cb | 6,766 | py | Python | src/classifiers.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | 1 | 2020-11-17T16:09:13.000Z | 2020-11-17T16:09:13.000Z | src/classifiers.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | null | null | null | src/classifiers.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | 4 | 2019-07-05T02:03:02.000Z | 2022-01-21T22:12:16.000Z | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ classifiers.py ]
# Synopsis [ 'Naive Bayes' and 'Decision Tree' training, testing, and tunning functions ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn import tree
############
# CONSTANT #
############
N_FOLD = 10  # folds for cross-validation (used for the INCOME data set)
DEPTHS = np.arange(1, 64)  # search space for decision-tree max_depth
ALPHAS = np.arange(0.001, 1.0, 0.001)  # Naive Bayes smoothing search grid (default)
ALPHAS_MUSHROOM = np.arange(0.0001, 1.0, 0.0001)  # finer grid for the MUSHROOM data set
BEST_DISTRIBUTION = 'Multinominal'  # [sic] key of the distribution used by run_best()
###############
# NAIVE BAYES #
###############
class naive_bayes_runner(object):
	"""Train and evaluate the four sklearn Naive Bayes variants on one data set.

	``MODEL`` selects 'NEWS', 'MUSHROOM' or 'INCOME'; it determines the
	pre-tuned alpha per distribution and whether accuracy is measured on the
	held-out test split or by N_FOLD cross-validation on the training split
	(INCOME only).
	"""
	def __init__(self, MODEL, train_x, train_y, test_x, test_y):
		#---data---#
		self.train_x = train_x
		self.train_y = train_y
		self.test_x = test_x
		self.test_y = test_y
		#---model---#
		self.cross_validate = False
		self.MODEL = MODEL
		# Alpha grid used by search_alpha(). BUGFIX: the previous code rebound
		# the module-level ALPHAS name locally for MUSHROOM, which had no
		# effect; the chosen grid now lives on the instance.
		self.alphas = ALPHAS_MUSHROOM if MODEL == 'MUSHROOM' else ALPHAS
		if self.MODEL == 'NEWS':
			self.models = { 'Guassian' : GaussianNB(),
							'Multinominal' : MultinomialNB(alpha=0.065),
							'Complement' : ComplementNB(alpha=0.136),
							'Bernoulli' : BernoulliNB(alpha=0.002) }
		if self.MODEL == 'MUSHROOM':
			self.models = { 'Guassian' : GaussianNB(),
							'Multinominal' : MultinomialNB(alpha=0.0001),
							'Complement' : ComplementNB(alpha=0.0001),
							'Bernoulli' : BernoulliNB(alpha=0.0001) }
		if self.MODEL == 'INCOME':
			self.cross_validate = True
			self.models = { 'Guassian' : GaussianNB(),
							'Multinominal' : MultinomialNB(alpha=0.959),
							'Complement' : ComplementNB(alpha=0.16),
							'Bernoulli' : BernoulliNB(alpha=0.001) }
	def _fit_and_evaluate(self, model):
		"""Fit `model` on the training split; return (accuracy, predictions) on the test split."""
		model_fit = model.fit(self.train_x, self.train_y)
		pred_y = model_fit.predict(self.test_x)
		acc = metrics.accuracy_score(self.test_y, pred_y)
		return acc, pred_y
	def search_alpha(self):
		"""Grid-search the smoothing parameter for each non-Gaussian model.

		Prints the best alpha per distribution. Each model is left configured
		with its best alpha (previously the last value tried was left in place).
		"""
		try:
			from tqdm import tqdm
		except ImportError:  # narrowed from a bare `except`
			raise ImportError('Failed to import tqdm, use the following command to install: pip3 install tqdm')
		for distribution, model in self.models.items():
			if distribution == 'Guassian':
				continue  # GaussianNB has no alpha parameter to tune
			best_acc = 0.0
			best_alpha = self.alphas[0]
			print('>> [Naive Bayes Runner] Searching for best alpha value, distribution:', distribution)
			for alpha in tqdm(self.alphas):
				model.set_params(alpha=alpha)
				if self.cross_validate:
					scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
					acc = scores.mean()
				else:
					acc, _ = self._fit_and_evaluate(model)
				if acc > best_acc:
					best_acc = acc
					best_alpha = alpha
			model.set_params(alpha=best_alpha)  # keep the winner, not the last value tried
			print('>> [Naive Bayes Runner] '+ distribution + ' - Best Alpha Value:', best_alpha)
	def run_best_all(self):
		"""Print accuracy for every distribution using its pre-tuned alpha."""
		for distribution, model in self.models.items():
			if self.cross_validate:
				scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
				acc = scores.mean()
			else:
				acc, _ = self._fit_and_evaluate(model)
			print('>> [Naive Bayes Runner] '+ distribution + ' - Accuracy:', acc)
	def run_best(self):
		"""Evaluate the overall best distribution (BEST_DISTRIBUTION); return test predictions."""
		if self.cross_validate:
			scores = cross_val_score(self.models[BEST_DISTRIBUTION], self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
			acc = scores.mean()
			model_fit = self.models[BEST_DISTRIBUTION].fit(self.train_x, self.train_y)
			pred_y = model_fit.predict(self.test_x)
		else:
			acc, pred_y = self._fit_and_evaluate(self.models[BEST_DISTRIBUTION])
		print('>> [Naive Bayes Runner] '+ BEST_DISTRIBUTION + ' - Accuracy:', acc)
		return pred_y
#################
# DECISION TREE #
#################
class decision_tree_runner(object):
	"""Train and evaluate a sklearn decision tree tuned per data set.

	``MODEL`` selects 'NEWS', 'MUSHROOM' or 'INCOME'; INCOME is scored by
	N_FOLD cross-validation on the training split, the others on the
	held-out test split.
	"""
	def __init__(self, MODEL, train_x, train_y, test_x, test_y):
		#---data---#
		self.train_x = train_x
		self.train_y = train_y
		self.test_x = test_x
		self.test_y = test_y
		#---model---#
		self.cross_validate = False
		self.MODEL = MODEL
		# Hyper-parameters below were tuned per data set (see search_max_depth).
		if self.MODEL == 'NEWS':
			self.model = tree.DecisionTreeClassifier(criterion='gini',
													 splitter='random',
													 max_depth=47,
													 random_state=1337)
		elif self.MODEL == 'MUSHROOM':
			self.model = tree.DecisionTreeClassifier(criterion='gini',
													 splitter='random',
													 max_depth=7,
													 random_state=1337)
		elif self.MODEL == 'INCOME':
			self.cross_validate = True
			self.model = tree.DecisionTreeClassifier(criterion='entropy',
													 min_impurity_decrease=2e-4,
													 max_depth=15,
													 random_state=1337)
	def _fit_and_evaluate(self):
		"""Fit on the training split; return (accuracy, predictions) on the test split."""
		model_fit = self.model.fit(self.train_x, self.train_y)
		pred_y = model_fit.predict(self.test_x)
		acc = metrics.accuracy_score(self.test_y, pred_y)
		return acc, pred_y
	def search_max_depth(self):
		"""Grid-search max_depth over DEPTHS; print and keep the best value.

		BUGFIX: the progress message previously said '[Naive Bayes Runner]'
		(copy-paste), the final message misspelled 'Depth', and the model was
		left with the last depth tried instead of the best one.
		"""
		try:
			from tqdm import tqdm
		except ImportError:  # narrowed from a bare `except`
			raise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')
		best_acc = 0.0
		best_depth = 1
		print('>> [Decision Tree Runner] Searching for best max depth value...')
		for depth in tqdm(DEPTHS):
			self.model.set_params(max_depth=depth)
			if self.cross_validate:
				scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
				acc = scores.mean()
			else:
				acc, _ = self._fit_and_evaluate()
			if acc > best_acc:
				best_acc = acc
				best_depth = depth
		self.model.set_params(max_depth=best_depth)  # keep the winner
		print('>> [Decision Tree Runner] - Best Depth Value:', best_depth)
	def visualize(self):
		"""Fit the tree and render it to ../image/TREE_<MODEL>.png via graphviz."""
		try:
			import graphviz
		except ImportError:  # narrowed from a bare `except`
			raise ImportError('Failed to import graphviz, use the following command to install: $ pip3 install graphviz, and $ sudo apt-get install graphviz')
		model_fit = self.model.fit(self.train_x, self.train_y)
		dot_data = tree.export_graphviz(model_fit, out_file=None,
										filled=True, rounded=True,
										special_characters=True)
		graph = graphviz.Source(dot_data)
		graph.format = 'png'
		graph.render('../image/TREE_' + self.MODEL)
		print('>> [Decision Tree Runner] - Tree visualization complete.')
	def run_best(self):
		"""Evaluate the tuned tree; print accuracy and return test predictions."""
		if self.cross_validate:
			scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
			acc = scores.mean()
			model_fit = self.model.fit(self.train_x, self.train_y)
			pred_y = model_fit.predict(self.test_x)
		else:
			acc, pred_y = self._fit_and_evaluate()
		print('>> [Decision Tree Runner] - Accuracy:', acc)
		return pred_y
| 31.915094 | 149 | 0.649571 |
n.model_selection import cross_val_score
from sklearn import metrics
from sklearn import tree
DISTRIBUTION = 'Multinominal'
cross_validate = False
self.MODEL = MODEL
if self.MODEL == 'NEWS':
self.models = { 'Guassian' : GaussianNB(),
'Multinominal' : MultinomialNB(alpha=0.065),
'Complement' : ComplementNB(alpha=0.136),
'Bernoulli' : BernoulliNB(alpha=0.002) }
if self.MODEL == 'MUSHROOM':
ALPHAS = ALPHAS_MUSHROOM
self.models = { 'Guassian' : GaussianNB(),
'Multinominal' : MultinomialNB(alpha=0.0001),
'Complement' : ComplementNB(alpha=0.0001),
'Bernoulli' : BernoulliNB(alpha=0.0001) }
if self.MODEL == 'INCOME':
self.cross_validate = True
self.models = { 'Guassian' : GaussianNB(),
'Multinominal' : MultinomialNB(alpha=0.959),
'Complement' : ComplementNB(alpha=0.16),
'Bernoulli' : BernoulliNB(alpha=0.001) }
def _fit_and_evaluate(self, model):
model_fit = model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
acc = metrics.accuracy_score(self.test_y, pred_y)
return acc, pred_y
def search_alpha(self):
try:
from tqdm import tqdm
except:
raise ImportError('Failed to import tqdm, use the following command to install: pip3 install tqdm')
for distribution, model in self.models.items():
best_acc = 0.0
best_alpha = 0.001
if distribution != 'Guassian':
print('>> [Naive Bayes Runner] Searching for best alpha value, distribution:', distribution)
for alpha in tqdm(ALPHAS):
model.set_params(alpha=alpha)
if self.cross_validate:
scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate(model)
if acc > best_acc:
best_acc = acc
best_alpha = alpha
print('>> [Naive Bayes Runner] '+ distribution + ' - Best Alpha Value:', best_alpha)
def run_best_all(self):
for distribution, model in self.models.items():
if self.cross_validate:
scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate(model)
print('>> [Naive Bayes Runner] '+ distribution + ' - Accuracy:', acc)
def run_best(self):
if self.cross_validate:
scores = cross_val_score(self.models[BEST_DISTRIBUTION], self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
model_fit = self.models[BEST_DISTRIBUTION].fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
else:
acc, pred_y = self._fit_and_evaluate(self.models[BEST_DISTRIBUTION])
print('>> [Naive Bayes Runner] '+ BEST_DISTRIBUTION + ' - Accuracy:', acc)
return pred_y
L == 'NEWS':
self.model = tree.DecisionTreeClassifier(criterion='gini',
splitter='random',
max_depth=47,
random_state=1337)
elif self.MODEL == 'MUSHROOM':
self.model = tree.DecisionTreeClassifier(criterion='gini',
splitter='random',
max_depth=7,
random_state=1337)
elif self.MODEL == 'INCOME':
self.cross_validate = True
self.model = tree.DecisionTreeClassifier(criterion='entropy',
min_impurity_decrease=2e-4,
max_depth=15,
random_state=1337)
def _fit_and_evaluate(self):
model_fit = self.model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
acc = metrics.accuracy_score(self.test_y, pred_y)
return acc, pred_y
def search_max_depth(self):
try:
from tqdm import tqdm
except:
raise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')
best_acc = 0.0
best_depth = 1
print('>> [Naive Bayes Runner] Searching for best max depth value...')
for depth in tqdm(DEPTHS):
self.model.set_params(max_depth=depth)
if self.cross_validate:
scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate()
if acc > best_acc:
best_acc = acc
best_depth = depth
print('>> [Decision Tree Runner] - Best Dpeth Value:', best_depth)
def visualize(self):
try:
import graphviz
except:
raise ImportError('Failed to import graphviz, use the following command to install: $ pip3 install graphviz, and $ sudo apt-get install graphviz')
model_fit = self.model.fit(self.train_x, self.train_y)
dot_data = tree.export_graphviz(model_fit, out_file=None,
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.format = 'png'
graph.render('../image/TREE_' + self.MODEL)
print('>> [Decision Tree Runner] - Tree visualization complete.')
def run_best(self):
if self.cross_validate:
scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
model_fit = self.model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
else:
acc, pred_y = self._fit_and_evaluate()
print('>> [Decision Tree Runner] - Accuracy:', acc)
return pred_y
| true | true |
f70f7cdaa34aef41cecf2fa32f6c3bc75d3c6636 | 19 | py | Python | xontrib/avox_poetry/__init__.py | jnoortheen/xontrib-avox-poetry | aef6fd087108ec66c53e473d9492ae99c357a00e | [
"MIT"
] | 3 | 2021-02-21T05:46:52.000Z | 2021-12-01T16:07:31.000Z | xontrib/avox_poetry/__init__.py | jnoortheen/xontrib-avox-poetry | aef6fd087108ec66c53e473d9492ae99c357a00e | [
"MIT"
] | 3 | 2021-03-03T22:49:35.000Z | 2022-03-17T15:40:19.000Z | xontrib/avox_poetry/__init__.py | jnoortheen/xontrib-avox-poetry | aef6fd087108ec66c53e473d9492ae99c357a00e | [
"MIT"
] | 1 | 2022-03-20T18:20:54.000Z | 2022-03-20T18:20:54.000Z | from . import venv
| 9.5 | 18 | 0.736842 | from . import venv
| true | true |
f70f7e6d8c477abc39ccd009b82ebc37062afbec | 561 | py | Python | Curve/Parms.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null | Curve/Parms.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null | Curve/Parms.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null |
class Curve_Parms():
    """Mixin that builds file-system paths from curve parameters.

    Expects the host class to provide ``a``, ``b``, ``c``, ``NFrames``,
    ``BasePath`` and ``Name`` attributes.
    """
    def Curve_Parms_Paths(self):
        """Return [a, b, c, NFrames] as a list of strings."""
        return [str(self.a), str(self.b), str(self.c), str(self.NFrames)]
    def Curve_Parms_Path(self):
        """Return the parameters joined as the relative path 'a/b/c/NFrames'."""
        return "/".join(self.Curve_Parms_Paths())
    def Curve_Parms_FileName(self, cname, fname, ext="svg"):
        """Return '<BasePath>/<Name>/<fname>-<a>-<b>-<c>-<NFrames>.<ext>'.

        NOTE(review): ``cname`` is accepted but unused -- kept for
        interface compatibility with existing callers.
        """
        fnames = self.Curve_Parms_Paths()
        n = fnames.pop()
        paths = [self.BasePath, self.Name]
        fnames = [fname] + fnames + [n + "." + ext]
        # (dead store removed: 'fname = "-".join(fnames)' had no effect)
        paths.append("-".join(fnames))
        return "/".join(paths)
| 22.44 | 71 | 0.561497 |
class Curve_Parms():
def Curve_Parms_Paths(self):
return [str(self.a),str(self.b),str(self.c),str(self.NFrames)]
def Curve_Parms_Path(self):
return "/".join( self.Curve_Parms_Paths() )
def Curve_Parms_FileName(self,cname,fname,ext="svg"):
fnames=self.Curve_Parms_Paths()
n=fnames.pop()
paths=[self.BasePath,self.Name]
fnames=[ fname,]+fnames+[ n+"."+ext ]
fname="-".join(fnames)
paths.append( "-".join(fnames) )
return "/".join(paths)
| true | true |
f70f7e730fa4c4fa5d6d670b19ebb19549c82ecc | 79,847 | py | Python | src/opserver/test/test_analytics_uve.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | [
"Apache-2.0"
] | null | null | null | src/opserver/test/test_analytics_uve.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | [
"Apache-2.0"
] | null | null | null | src/opserver/test/test_analytics_uve.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | [
"Apache-2.0"
] | 1 | 2020-07-04T12:08:02.000Z | 2020-07-04T12:08:02.000Z | #!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_uvetest.py
#
# UVE and Alarm tests
#
import os
import sys
import threading
# Stub out _DummyThread.__stop to silence spurious errors raised from dummy
# threads at interpreter shutdown under gevent/py2 --
# NOTE(review): presumably only needed on python2; confirm before removing.
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
# Monkey-patch the stdlib (sockets, threading, time, ...) so the fixtures
# and generators below cooperate with gevent greenlets.
monkey.patch_all()
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict, find_buildroot
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
import platform
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Root of the build tree; the test daemons (vizd, qed, ...) live under it.
builddir = find_buildroot(os.getcwd())
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.redis_port = AnalyticsUveTest.get_free_port()
mockredis.start_redis(cls.redis_port)
@classmethod
def tearDownClass(cls):
mockredis.stop_redis(cls.redis_port)
#@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
'''
This test starts redis,vizd,opserver and qed
Then it checks that the collector UVE (via redis)
can be accessed from opserver.
'''
logging.info("%%% test_00_nocassandra %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
return True
# end test_00_nocassandra
#@unittest.skip('Skipping VM UVE test')
def test_01_vm_uve(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
Then it checks that the VM UVE (via redis) can be accessed from
opserver.
'''
logging.info("%%% test_01_vm_uve %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Delete the VM UVE and verify that the deleted flag is set
# in the UVE cache
generator_obj.delete_vm_uve('abcd')
assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
# Add the VM UVE with the same vm_id and verify that the deleted flag
# is cleared in the UVE cache
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Generate VM with vm_id containing XML control character
generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
msg_count=2)
return True
# end test_01_vm_uve
#@unittest.skip('Skipping VM UVE test')
    def test_02_vm_uve_with_password(self):
        '''
        Same scenario as test_01, but with redis configured with a
        password ('contrail'): start redis, vizd, opserver, qed, and a
        python generator that simulates vrouter and sends
        UveVirtualMachineAgentTrace messages. Then check that the VM UVE
        (via the password-protected redis) can be accessed from opserver.
        '''
        logging.info("%%% test_02_vm_uve_with_password %%%")
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             redis_password='contrail'))
        assert vizd_obj.verify_on_setup()
        collectors = [vizd_obj.get_collector()]
        generator_obj = self.useFixture(
            GeneratorFixture("contrail-vrouter-agent", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()
        generator_obj.send_vm_uve(vm_id='abcd',
                                  num_vm_ifs=5,
                                  msg_count=5)
        assert generator_obj.verify_vm_uve(vm_id='abcd',
                                           num_vm_ifs=5,
                                           msg_count=5)
        return True
    # end test_02_vm_uve_with_password
#@unittest.skip('verify redis-uve restart')
def test_03_redis_uve_restart(self):
logging.info('%%% test_03_redis_uve_restart %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
alarm_gen1 = self.useFixture(
GeneratorFixture('vrouter-agent', collectors, logging,
None, hostname=socket.gethostname()))
alarm_gen1.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
self.verify_uve_resync(vizd_obj)
# Alarm should return after redis restart
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# should there be a return True here?
# end test_03_redis_uve_restart
#@unittest.skip('verify redis-uve restart')
def test_04_redis_uve_restart_with_password(self):
logging.info('%%% test_03_redis_uve_restart_with_password %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, -1, 0,
redis_password='contrail'))
self.verify_uve_resync(vizd_obj)
return True
# end test_04_redis_uve_restart
    def verify_uve_resync(self, vizd_obj):
        """Stop and restart redis-uve; verify that the collector and opserver
        reconnect and that the generator UVE list is resynced into redis.

        Shared helper for the redis-restart tests (test_03 / test_04, i.e.
        with and without a redis password).
        """
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_redis_uve_connection(
            vizd_obj.collectors[0])
        assert vizd_obj.verify_opserver_redis_uve_connection(
            vizd_obj.opserver)
        # verify redis-uve list
        host = socket.gethostname()
        gen_list = [host+':Analytics:contrail-collector:0',
                    host+':Analytics:contrail-query-engine:0',
                    host+':Analytics:contrail-analytics-api:0']
        assert vizd_obj.verify_generator_uve_list(gen_list)
        # stop redis-uve; both the collector and opserver connections
        # must be reported down
        vizd_obj.redis_uves[0].stop()
        assert vizd_obj.verify_collector_redis_uve_connection(
            vizd_obj.collectors[0], False)
        assert vizd_obj.verify_opserver_redis_uve_connection(
            vizd_obj.opserver, False)
        # start redis-uve and verify that contrail-collector and Opserver are
        # connected to the redis-uve
        vizd_obj.redis_uves[0].start()
        assert vizd_obj.verify_collector_redis_uve_connection(
            vizd_obj.collectors[0])
        assert vizd_obj.verify_opserver_redis_uve_connection(
            vizd_obj.opserver)
        # verify that UVEs are resynced with redis-uve
        assert vizd_obj.verify_generator_uve_list(gen_list)
#@unittest.skip('Skipping contrail-collector HA test')
    def test_05_collector_ha(self):
        """Exercise collector high-availability: generators and analytics
        processes must fail over between a primary and secondary collector
        as collectors are stopped and started, and UVEs sent while both
        collectors are down must be synced on reconnect."""
        logging.info('%%% test_05_collector_ha %%%')
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             collector_ha_test=True))
        assert vizd_obj.verify_on_setup()
        # OpServer, AlarmGen and QE are started with collectors[0] as
        # primary and collectors[1] as secondary
        exp_genlist = ['contrail-collector', 'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # start the contrail-vrouter-agent with collectors[1] as primary and
        # collectors[0] as secondary
        collectors = [vizd_obj.collectors[1].get_addr(),
                      vizd_obj.collectors[0].get_addr()]
        vr_agent = self.useFixture(
            GeneratorFixture("contrail-vrouter-agent", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert vr_agent.verify_on_setup()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # stop collectors[0] and verify that OpServer, AlarmGen and QE switch
        # from primary to secondary collector
        vizd_obj.collectors[0].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # start collectors[0]
        vizd_obj.collectors[0].start()
        exp_genlist = ['contrail-collector']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify that the old UVEs are flushed from redis when collector restarts
        exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # stop collectors[1] and verify that OpServer, AlarmGen and QE switch
        # from secondary to primary and contrail-vrouter-agent from primary to
        # secondary
        vizd_obj.collectors[1].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify the generator list in redis
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vr_agent.get_generator_id(),
                       vizd_obj.opserver.get_generator_id(),
                       vizd_obj.query_engine.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # stop QE
        vizd_obj.query_engine.stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify the generator list in redis
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vizd_obj.opserver.get_generator_id(),
                       vr_agent.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # start a python generator and QE with collectors[1] as the primary and
        # collectors[0] as the secondary. On generator startup, verify
        # that they connect to the secondary collector, if the
        # connection to the primary fails
        # NOTE(review): vr2_collectors is defined but never used below --
        # 'collectors' happens to already hold the same [1], [0] ordering,
        # so behavior matches the comment anyway.
        vr2_collectors = [vizd_obj.collectors[1].get_addr(),
                          vizd_obj.collectors[0].get_addr()]
        vr2_agent = self.useFixture(
            GeneratorFixture("contrail-snmp-collector", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert vr2_agent.verify_on_setup()
        vizd_obj.query_engine.set_primary_collector(
            vizd_obj.collectors[1].get_addr())
        vizd_obj.query_engine.set_secondary_collector(
            vizd_obj.collectors[0].get_addr())
        vizd_obj.query_engine.start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api', 'contrail-snmp-collector',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # stop the collectors[0] - both collectors[0] and collectors[1] are down
        # send the VM UVE and verify that the VM UVE is synced after connection
        # to the collector
        vizd_obj.collectors[0].stop()
        # Make sure the connection to the collector is teared down before
        # sending the VM UVE
        while True:
            if vr_agent.verify_on_setup() is False:
                break
        vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
                             num_vm_ifs=5, msg_count=5)
        vizd_obj.collectors[1].start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api', 'contrail-snmp-collector',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
                                      num_vm_ifs=5, msg_count=5)
    # end test_05_collector_ha
#@unittest.skip('Skipping AlarmGen basic test')
    def test_06_alarmgen_basic(self):
        '''
        Start the analytics processes with kafka and verify alarmgen
        basics: the partial-sysinfo-compute alarm is raised and cleared
        via UVE add/update/delete and generator disconnect, and alarmgen
        partitions 0-3 can be released (clearing all alarms) and
        re-acquired (re-raising them).
        '''
        logging.info("%%% test_06_alarmgen_basic %%%")
        if AnalyticsUveTest._check_skip_kafka() is True:
            return True
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
                             start_kafka = True))
        assert vizd_obj.verify_on_setup()
        assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
            "ObjectCollectorInfo:" + socket.gethostname(), "process-status"))
        # setup generator for sending Vrouter build_info
        collector = vizd_obj.collectors[0].get_addr()
        alarm_gen1 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname()))
        alarm_gen1.verify_on_setup()
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute",
            rules=[{"and_list": [{
                "condition": {
                    "operation": "==",
                    "operand1": "ObjectVRouter.build_info",
                    "operand2": {
                        "json_value": "null"
                    }
                },
                "match": [{"json_operand1_value": "null"}]
            }]}]
        ))
        # Now try to clear the alarm by sending build_info
        alarm_gen1.send_vrouterinfo("myvrouter1", b_info = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        # Now try to clear the alarm by deleting the UVE
        alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
        # second generator instance (inst="1") on the same host
        alarm_gen2 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname(), inst = "1"))
        alarm_gen2.verify_on_setup()
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen2.send_vrouterinfo("myvrouter2")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "partial-sysinfo-compute"))
        # Now try to clear the alarm by disconnecting the generator
        alarm_gen2._sandesh_instance._client._connection.set_admin_state(\
            down=True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "partial-sysinfo-compute", is_set = False))
        # send vrouter UVE of myvrouter without build_info again !!!
        # check for PartialSysinfo alarm
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        # Verify that we can give up partition ownership
        assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'false'))
        # Give up the other partitions
        assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
        # Confirm that alarms are all gone
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            None, None))
        # Get the partitions again
        assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'true'))
        # The PartialSysinfo alarm on myvrouter should return
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        return True
    # end test_06_alarmgen_basic
#@unittest.skip('Skipping Alarm test')
    def test_07_alarm(self):
        '''
        This test starts redis, collectors, analytics-api and
        python generators that simulate alarm generators. It sends
        alarms from the alarm generators and verifies retrieval of
        alarms from analytics-api, including alarm delete, generator
        disconnect (alarms removed by the Collector) and reconnect
        (alarms resynced with the Collector).
        '''
        logging.info('%%% test_07_alarm %%%')
        if AnalyticsUveTest._check_skip_kafka() is True:
            return True
        # collector_ha_test flag is set to True, because we wanna test
        # retrieval of alarms across multiple redis servers.
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             collector_ha_test=True,
                             start_kafka = True))
        assert vizd_obj.verify_on_setup()
        # create alarm-generator and attach it to the first collector.
        collectors = [vizd_obj.collectors[0].get_addr(),
                      vizd_obj.collectors[1].get_addr()]
        alarm_gen1 = self.useFixture(
            GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
                             None, hostname=socket.gethostname()+'_1'))
        alarm_gen1.verify_on_setup()
        # send process state alarm for analytics-node
        alarms = alarm_gen1.create_process_state_alarm(
            'contrail-query-engine')
        alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                              COLLECTOR_INFO_TABLE)
        analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
        # send process state alarm for control-node
        alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
        alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
                              BGP_ROUTER_TABLE)
        control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
        # create another alarm-generator and attach it to the second collector.
        alarm_gen2 = self.useFixture(
            GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
                             None, hostname=socket.gethostname()+'_2'))
        alarm_gen2.verify_on_setup()
        # send process state alarm for analytics-node
        alarms = alarm_gen2.create_process_state_alarm(
            'contrail-topology')
        alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
                              COLLECTOR_INFO_TABLE)
        keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
            alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
        keys = ['<&'+socket.gethostname()+'_1>']
        assert(vizd_obj.verify_alarm_list_include(control_tbl, expected_alarms=keys))
        assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
        # delete analytics-node alarm generated by alarm_gen2
        alarm_gen2.delete_alarm(socket.gethostname()+'_2',
                                COLLECTOR_INFO_TABLE)
        # verify analytics-node alarms
        keys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        ukeys = [socket.gethostname()+'_2']
        assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
        # Disconnect alarm_gen1 from Collector and verify that all
        # alarms generated by alarm_gen1 are removed by the Collector.
        alarm_gen1.disconnect_from_collector()
        ukeys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
        # NOTE(review): this key lacks the trailing '>' used when the control
        # alarm was sent ('<&<host>_1>') -- possibly intentional for a prefix
        # match inside verify_alarm_list_exclude; confirm against the fixture.
        ukeys = ['<&'+socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_exclude(control_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(control_tbl, ukeys[0], {}))
        # update analytics-node alarm in disconnect state
        alarms = alarm_gen1.create_process_state_alarm(
            'contrail-snmp-collector')
        alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                              COLLECTOR_INFO_TABLE)
        # Connect alarm_gen1 to Collector and verify that all
        # alarms generated by alarm_gen1 are synced with Collector.
        alarm_gen1.connect_to_collector()
        keys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        keys = ['<&'+socket.gethostname()+'_1>']
        assert(vizd_obj.verify_alarm_list_include(control_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
    # end test_07_alarm
#@unittest.skip('Skipping UVE/Alarm Filter test')
def test_08_uve_alarm_filter(self):
'''
This test verifies the filter options kfilt, sfilt, mfilt and cfilt
in the UVE/Alarm GET and POST methods.
'''
logging.info('%%% test_08_uve_alarm_filter %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True, start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
api_server_name = socket.gethostname()+'_1'
api_server = self.useFixture(
GeneratorFixture('contrail-api', [collectors[0]], logging,
None, node_type='Config',
hostname=api_server_name))
vr_agent_name = socket.gethostname()+'_2'
vr_agent = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
logging, None, node_type='Compute',
hostname=vr_agent_name))
alarm_gen1_name = socket.gethostname()+'_1'
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, node_type='Analytics',
hostname=alarm_gen1_name))
alarm_gen2_name = socket.gethostname()+'_3'
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, node_type='Analytics',
hostname=alarm_gen2_name))
api_server.verify_on_setup()
vr_agent.verify_on_setup()
alarm_gen1.verify_on_setup()
alarm_gen2.verify_on_setup()
vn_list = ['default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&']
# generate UVEs for the filter test
api_server.send_vn_config_uve(name=vn_list[0],
partial_conn_nw=[vn_list[1]],
num_acl_rules=2)
api_server.send_vn_config_uve(name=vn_list[1],
num_acl_rules=3)
vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
ipkts=2, ibytes=1024)
vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
vr_agent.send_vn_agent_uve(name=vn_list[3], ipkts=8, ibytes=256)
# generate Alarms for the filter test
alarms = alarm_gen1.create_alarm('InPktsThreshold')
alarms += alarm_gen1.create_alarm('InBytesThreshold', ack=True)
alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[3], alarms, VN_TABLE)
filt_test = [
# no filter
{
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt
{
'kfilt': ['*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:*',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:vn1',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project2:*',
'invalid-vn:*'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&',
'invalid-vn'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['invalid-vn'],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# sfilt
{
'sfilt': socket.gethostname()+'_1',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
{
'sfilt': socket.gethostname()+'_3',
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'sfilt': 'invalid_source',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# mfilt
{
'mfilt': 'Config:contrail-api:0',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-alarm-gen:0',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-invalid:0',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# cfilt
{
'cfilt': ['UveVirtualNetworkAgent'],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:total_acl_rules',
'UveVirtualNetworkConfig:partially_connected_networks'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
]
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkConfig:invalid',
'UveVirtualNetworkAgent:in_tpkts',
'UVEAlarms:alarms'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:invalid',
'UVEAlarms:invalid_alarms',
'invalid'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# ackfilt
{
'ackfilt': True,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
}
}
]
},
},
{
'ackfilt': False,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
},
# kfilt + sfilt + ackfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project2:*',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
# kfilt + sfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'sfilt': socket.gethostname()+'_1',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms',
'UveVirtualNetworkConfig:Invalid'
],
'uve_list_get': [
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
# kfilt + mfilt + cfilt
{
'kfilt': ['*'],
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms:alarms'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# kfilt + sfilt + mfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:*'
],
'sfilt': socket.gethostname()+'_1',
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkConfig:partially_connected_networks',
'UveVirtualNetworkConfig:total_acl_rules',
'UVEAlarms'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt + mfilt + cfilt + ackfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1&',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1&'
],
'uve_get_post': {'value': []},
}
]
vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
for i in range(len(filt_test)):
filters = dict(kfilt=filt_test[i].get('kfilt'),
sfilt=filt_test[i].get('sfilt'),
mfilt=filt_test[i].get('mfilt'),
cfilt=filt_test[i].get('cfilt'),
ackfilt=filt_test[i].get('ackfilt'))
assert(vizd_obj.verify_uve_list(vn_table,
filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
assert(vizd_obj.verify_multi_uve_get(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_uve_post(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
if 'get_alarms' in filt_test[i]:
filters['tablefilt'] = 'virtual-network'
assert(vizd_obj.verify_get_alarms(vn_table,
filts=filters, exp_uves=filt_test[i]['get_alarms']))
# end test_08_uve_alarm_filter
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_kafka():
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
if PLATFORM.lower() == 'ubuntu':
if VERSION.find('12.') == 0:
return True
if PLATFORM.lower() == 'centos':
if VERSION.find('6.') == 0:
return True
return False
def _term_handler(*_):
    # SIGINT handler installed in the __main__ block below: converts the
    # signal into an IntSignal exception so the test run can unwind.
    # NOTE(review): IntSignal is not defined or imported by name in this
    # module -- presumably brought in via a star import; verify it is
    # actually in scope before relying on this handler.
    raise IntSignal()
if __name__ == '__main__':
    # Route SIGINT through gevent so Ctrl-C raises inside the event loop
    # instead of being swallowed, then run the unittest entry point.
    # NOTE(review): calling gevent.signal() as a function is deprecated in
    # newer gevent releases (gevent.signal_handler replaces it) -- confirm
    # the pinned gevent version before upgrading.
    gevent.signal(signal.SIGINT,_term_handler)
    unittest.main(catchbreak=True)
| 42.471809 | 85 | 0.36218 |
import os
import sys
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict, find_buildroot
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
import platform
# Configure root logging once for the whole test module.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Locate the build root from the current working directory; the analytics
# fixtures use it to find the daemon binaries under build/.
builddir = find_buildroot(os.getcwd())
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.redis_port = AnalyticsUveTest.get_free_port()
mockredis.start_redis(cls.redis_port)
    @classmethod
    def tearDownClass(cls):
        # Stop the mock redis instance started in setUpClass().
        mockredis.stop_redis(cls.redis_port)
def test_00_nocassandra(self):
logging.info("%%% test_00_nocassandra %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
return True
    def test_01_vm_uve(self):
        """Send VirtualMachine UVEs from a mock vrouter agent and verify
        add, delete, and re-add of the UVE, including a VM id containing
        XML-special characters.
        """
        logging.info("%%% test_01_vm_uve %%%")
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
        assert vizd_obj.verify_on_setup()
        collectors = [vizd_obj.get_collector()]
        generator_obj = self.useFixture(
            GeneratorFixture("contrail-vrouter-agent", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert generator_obj.verify_on_setup()
        # Publish a VM UVE and verify it is served back.
        generator_obj.send_vm_uve(vm_id='abcd',
                                  num_vm_ifs=5,
                                  msg_count=5)
        assert generator_obj.verify_vm_uve(vm_id='abcd',
                                           num_vm_ifs=5,
                                           msg_count=5)
        # Delete the UVE and confirm the generator cache records the delete.
        generator_obj.delete_vm_uve('abcd')
        assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
        # Re-add the same UVE after deletion and verify it is visible again.
        generator_obj.send_vm_uve(vm_id='abcd',
                                  num_vm_ifs=5,
                                  msg_count=5)
        assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
        assert generator_obj.verify_vm_uve(vm_id='abcd',
                                           num_vm_ifs=5,
                                           msg_count=5)
        # A VM id containing '<', '&', '>' must survive the UVE path intact.
        generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
        assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
                                           msg_count=2)
        return True
def test_02_vm_uve_with_password(self):
logging.info("%%% test_02_vm_uve_with_password %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
redis_password='contrail'))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
return True
    def test_03_redis_uve_restart(self):
        """Verify that an alarm raised before a redis-uve restart is
        re-synced and still visible after the restart."""
        logging.info('%%% test_03_redis_uve_restart %%%')
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             start_kafka = True))
        assert vizd_obj.verify_on_setup()
        collectors = [vizd_obj.get_collector()]
        alarm_gen1 = self.useFixture(
            GeneratorFixture('vrouter-agent', collectors, logging,
                             None, hostname=socket.gethostname()))
        alarm_gen1.verify_on_setup()
        # Raise a vrouter alarm, restart redis via verify_uve_resync(),
        # then confirm the same alarm is present again.
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        self.verify_uve_resync(vizd_obj)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
def test_04_redis_uve_restart_with_password(self):
logging.info('%%% test_03_redis_uve_restart_with_password %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, -1, 0,
redis_password='contrail'))
self.verify_uve_resync(vizd_obj)
return True
    def verify_uve_resync(self, vizd_obj):
        """Stop and restart redis-uve and verify that the collector and
        opserver disconnect, reconnect, and restore the generator list.
        """
        assert vizd_obj.verify_on_setup()
        # Collector and opserver must be connected to redis-uve initially.
        assert vizd_obj.verify_collector_redis_uve_connection(
                            vizd_obj.collectors[0])
        assert vizd_obj.verify_opserver_redis_uve_connection(
                            vizd_obj.opserver)
        host = socket.gethostname()
        gen_list = [host+':Analytics:contrail-collector:0',
                    host+':Analytics:contrail-query-engine:0',
                    host+':Analytics:contrail-analytics-api:0']
        assert vizd_obj.verify_generator_uve_list(gen_list)
        # Stop redis-uve: both clients should report disconnected.
        vizd_obj.redis_uves[0].stop()
        assert vizd_obj.verify_collector_redis_uve_connection(
                            vizd_obj.collectors[0], False)
        assert vizd_obj.verify_opserver_redis_uve_connection(
                            vizd_obj.opserver, False)
        # Restart redis-uve: clients reconnect and the generator list is
        # re-synced.
        vizd_obj.redis_uves[0].start()
        assert vizd_obj.verify_collector_redis_uve_connection(
                            vizd_obj.collectors[0])
        assert vizd_obj.verify_opserver_redis_uve_connection(
                            vizd_obj.opserver)
        assert vizd_obj.verify_generator_uve_list(gen_list)
    def test_05_collector_ha(self):
        """Exercise collector high availability: generators must fail over
        between two collectors as each is stopped and restarted, and UVEs
        sent during failover must survive.
        """
        logging.info('%%% test_05_collector_ha %%%')
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             collector_ha_test=True))
        assert vizd_obj.verify_on_setup()
        # All daemons start out connected to collector 0.
        exp_genlist = ['contrail-collector', 'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # vrouter-agent prefers collector 1 (listed first).
        collectors = [vizd_obj.collectors[1].get_addr(),
                      vizd_obj.collectors[0].get_addr()]
        vr_agent = self.useFixture(
            GeneratorFixture("contrail-vrouter-agent", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert vr_agent.verify_on_setup()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # Stop collector 0: everything should fail over to collector 1.
        vizd_obj.collectors[0].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # Restart collector 0: only its own generator reattaches to it.
        vizd_obj.collectors[0].start()
        exp_genlist = ['contrail-collector']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
                            vizd_obj.collectors[0].get_redis_uve(),
                            exp_genlist)
        # Stop collector 1: everything fails back to collector 0.
        vizd_obj.collectors[1].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vr_agent.get_generator_id(),
                       vizd_obj.opserver.get_generator_id(),
                       vizd_obj.query_engine.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
                            vizd_obj.collectors[0].get_redis_uve(),
                            exp_genlist)
        # Stop the query engine and verify its generator disappears.
        vizd_obj.query_engine.stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vizd_obj.opserver.get_generator_id(),
                       vr_agent.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
                            vizd_obj.collectors[0].get_redis_uve(),
                            exp_genlist)
        # NOTE(review): vr2_collectors is built but never used -- the snmp
        # collector fixture below is passed `collectors` instead.  Possibly
        # intentional (same addresses, different order), possibly a latent
        # bug; confirm the intended collector preference for vr2_agent.
        vr2_collectors = [vizd_obj.collectors[1].get_addr(),
                          vizd_obj.collectors[0].get_addr()]
        vr2_agent = self.useFixture(
            GeneratorFixture("contrail-snmp-collector", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert vr2_agent.verify_on_setup()
        # Restart the query engine pointed at collector 1 first.
        vizd_obj.query_engine.set_primary_collector(
                            vizd_obj.collectors[1].get_addr())
        vizd_obj.query_engine.set_secondary_collector(
                            vizd_obj.collectors[0].get_addr())
        vizd_obj.query_engine.start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api', 'contrail-snmp-collector',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # Stop collector 0 and send a UVE while the agent is disconnected;
        # after collector 1 restarts, the UVE must still be delivered.
        vizd_obj.collectors[0].stop()
        while True:
            if vr_agent.verify_on_setup() is False:
                break
        vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
                             num_vm_ifs=5, msg_count=5)
        vizd_obj.collectors[1].start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api', 'contrail-snmp-collector',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
                                      num_vm_ifs=5, msg_count=5)
    def test_06_alarmgen_basic(self):
        """Basic alarm-generator checks: raise/clear of the
        partial-sysinfo-compute alarm across UVE add, update, delete, and
        generator disconnect, plus alarmgen partition disable/enable.
        """
        logging.info("%%% test_06_alarmgen_basic %%%")
        if AnalyticsUveTest._check_skip_kafka() is True:
            return True
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
                             start_kafka = True))
        assert vizd_obj.verify_on_setup()
        assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
            "ObjectCollectorInfo:" + socket.gethostname(), "process-status"))
        # A vrouter UVE without build_info should raise the
        # partial-sysinfo-compute alarm with the rule below.
        collector = vizd_obj.collectors[0].get_addr()
        alarm_gen1 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname()))
        alarm_gen1.verify_on_setup()
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute",
            rules=[{"and_list": [{
                "condition": {
                    "operation": "==",
                    "operand1": "ObjectVRouter.build_info",
                    "operand2": {
                        "json_value": "null"
                    }
                },
                "match": [{"json_operand1_value": "null"}]
            }]}]
        ))
        # Supplying build_info clears the alarm.
        alarm_gen1.send_vrouterinfo("myvrouter1", b_info = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
        # Delete then re-add the UVE: the alarm reappears.
        alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        # Deleting the UVE clears the alarm again.
        alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
        # A second generator instance raises the alarm for its own UVE;
        # taking its connection admin-down clears it.
        alarm_gen2 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname(), inst = "1"))
        alarm_gen2.verify_on_setup()
        alarm_gen2.send_vrouterinfo("myvrouter2")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "partial-sysinfo-compute"))
        alarm_gen2._sandesh_instance._client._connection.set_admin_state(\
            down=True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "partial-sysinfo-compute", is_set = False))
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        # Disable all four alarmgen partitions: the alarms disappear.
        assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'false'))
        assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            None, None))
        # Re-enable the partitions: the alarm is recomputed and restored.
        assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'true'))
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        return True
    def test_07_alarm(self):
        """Verify alarm add/delete and collector-reconnect handling.

        Two alarm generators, each attached to a different collector of an
        HA pair (collector_ha_test=True), raise process-state alarms.  The
        test checks that the alarms appear in the analytics-node and
        BGP-router alarm tables, disappear when explicitly deleted or when
        a generator disconnects, and reappear after the generator
        reconnects.
        """
        logging.info('%%% test_07_alarm %%%')
        # Kafka cannot run on old distros (Ubuntu 12.x / CentOS 6.x).
        if AnalyticsUveTest._check_skip_kafka() is True:
            return True
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             collector_ha_test=True,
                             start_kafka = True))
        assert vizd_obj.verify_on_setup()
        collectors = [vizd_obj.collectors[0].get_addr(),
                      vizd_obj.collectors[1].get_addr()]
        # Generator 1 attaches to the first collector and raises alarms in
        # both the collector-info and the BGP-router tables.
        alarm_gen1 = self.useFixture(
            GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
                             None, hostname=socket.gethostname()+'_1'))
        alarm_gen1.verify_on_setup()
        alarms = alarm_gen1.create_process_state_alarm(
            'contrail-query-engine')
        alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                              COLLECTOR_INFO_TABLE)
        analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
        # UVE key deliberately contains XML-special characters '<', '&', '>'.
        alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
        alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
                              BGP_ROUTER_TABLE)
        control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
        # Generator 2 attaches to the second collector.
        alarm_gen2 = self.useFixture(
            GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
                             None, hostname=socket.gethostname()+'_2'))
        alarm_gen2.verify_on_setup()
        alarms = alarm_gen2.create_process_state_alarm(
            'contrail-topology')
        alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
                              COLLECTOR_INFO_TABLE)
        # Alarms from both generators should be visible in the analytics
        # table, and gen1's XML-special-key alarm in the control table.
        keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
            alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
        keys = ['<&'+socket.gethostname()+'_1>']
        assert(vizd_obj.verify_alarm_list_include(control_tbl, expected_alarms=keys))
        assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
        # Deleting gen2's alarm must remove only that key; gen1's remains.
        alarm_gen2.delete_alarm(socket.gethostname()+'_2',
                                COLLECTOR_INFO_TABLE)
        keys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        ukeys = [socket.gethostname()+'_2']
        assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
        # Disconnecting gen1 must withdraw all of its alarms from both
        # tables.
        alarm_gen1.disconnect_from_collector()
        ukeys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
        ukeys = ['<&'+socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_exclude(control_tbl,
            unexpected_alms=ukeys))
        assert(vizd_obj.verify_alarm(control_tbl, ukeys[0], {}))
        # Alarms raised while disconnected must be published once the
        # generator reconnects.
        alarms = alarm_gen1.create_process_state_alarm(
            'contrail-snmp-collector')
        alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                              COLLECTOR_INFO_TABLE)
        alarm_gen1.connect_to_collector()
        keys = [socket.gethostname()+'_1']
        assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
        keys = ['<&'+socket.gethostname()+'_1>']
        assert(vizd_obj.verify_alarm_list_include(control_tbl,
            expected_alarms=keys))
        assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
            alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
def test_08_uve_alarm_filter(self):
logging.info('%%% test_08_uve_alarm_filter %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True, start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
api_server_name = socket.gethostname()+'_1'
api_server = self.useFixture(
GeneratorFixture('contrail-api', [collectors[0]], logging,
None, node_type='Config',
hostname=api_server_name))
vr_agent_name = socket.gethostname()+'_2'
vr_agent = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
logging, None, node_type='Compute',
hostname=vr_agent_name))
alarm_gen1_name = socket.gethostname()+'_1'
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, node_type='Analytics',
hostname=alarm_gen1_name))
alarm_gen2_name = socket.gethostname()+'_3'
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, node_type='Analytics',
hostname=alarm_gen2_name))
api_server.verify_on_setup()
vr_agent.verify_on_setup()
alarm_gen1.verify_on_setup()
alarm_gen2.verify_on_setup()
vn_list = ['default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&']
api_server.send_vn_config_uve(name=vn_list[0],
partial_conn_nw=[vn_list[1]],
num_acl_rules=2)
api_server.send_vn_config_uve(name=vn_list[1],
num_acl_rules=3)
vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
ipkts=2, ibytes=1024)
vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
vr_agent.send_vn_agent_uve(name=vn_list[3], ipkts=8, ibytes=256)
alarms = alarm_gen1.create_alarm('InPktsThreshold')
alarms += alarm_gen1.create_alarm('InBytesThreshold', ack=True)
alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[3], alarms, VN_TABLE)
filt_test = [
{
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:*',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:vn1',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project2:*',
'invalid-vn:*'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&',
'invalid-vn'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['invalid-vn'],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
{
'sfilt': socket.gethostname()+'_1',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
{
'sfilt': socket.gethostname()+'_3',
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'sfilt': 'invalid_source',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
{
'mfilt': 'Config:contrail-api:0',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-alarm-gen:0',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-invalid:0',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
{
'cfilt': ['UveVirtualNetworkAgent'],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:total_acl_rules',
'UveVirtualNetworkConfig:partially_connected_networks'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
]
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkConfig:invalid',
'UveVirtualNetworkAgent:in_tpkts',
'UVEAlarms:alarms'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:invalid',
'UVEAlarms:invalid_alarms',
'invalid'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
{
'ackfilt': True,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
}
}
]
},
},
{
'ackfilt': False,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project2:*',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'sfilt': socket.gethostname()+'_1',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms',
'UveVirtualNetworkConfig:Invalid'
],
'uve_list_get': [
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
{
'kfilt': ['*'],
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms:alarms'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:*'
],
'sfilt': socket.gethostname()+'_1',
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkConfig:partially_connected_networks',
'UveVirtualNetworkConfig:total_acl_rules',
'UVEAlarms'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1&',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1&'
],
'uve_get_post': {'value': []},
}
]
vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
for i in range(len(filt_test)):
filters = dict(kfilt=filt_test[i].get('kfilt'),
sfilt=filt_test[i].get('sfilt'),
mfilt=filt_test[i].get('mfilt'),
cfilt=filt_test[i].get('cfilt'),
ackfilt=filt_test[i].get('ackfilt'))
assert(vizd_obj.verify_uve_list(vn_table,
filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
assert(vizd_obj.verify_multi_uve_get(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_uve_post(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
if 'get_alarms' in filt_test[i]:
filters['tablefilt'] = 'virtual-network'
assert(vizd_obj.verify_get_alarms(vn_table,
filts=filters, exp_uves=filt_test[i]['get_alarms']))
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_kafka():
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
if PLATFORM.lower() == 'ubuntu':
if VERSION.find('12.') == 0:
return True
if PLATFORM.lower() == 'centos':
if VERSION.find('6.') == 0:
return True
return False
def _term_handler(*_):
    # Convert SIGINT into an IntSignal exception (project-defined) so the
    # test run can unwind instead of dying inside gevent; the signal
    # arguments (signum, frame) are ignored.
    raise IntSignal()
if __name__ == '__main__':
    # Route SIGINT through _term_handler so greenlets unwind via IntSignal
    # before unittest tears down.
    # NOTE(review): calling gevent.signal(...) as a function was deprecated
    # in gevent 1.5 in favour of gevent.signal_handler(...) -- confirm the
    # pinned gevent version before modernizing.
    gevent.signal(signal.SIGINT,_term_handler)
    unittest.main(catchbreak=True)
| true | true |
f70f7f844b3f8ee5ade345d734bba14d2d862c60 | 8,736 | py | Python | PyPowerDNS/api.py | TheDJVG/PyPowerDNS | 2e0e47c3bb7a7b20c08ddfa6f0cd93e663d02dc7 | [
"MIT"
] | 1 | 2021-04-05T21:40:34.000Z | 2021-04-05T21:40:34.000Z | PyPowerDNS/api.py | TheDJVG/PyPowerDNS | 2e0e47c3bb7a7b20c08ddfa6f0cd93e663d02dc7 | [
"MIT"
] | 1 | 2020-09-21T15:00:44.000Z | 2020-09-22T00:38:15.000Z | PyPowerDNS/api.py | TheDJVG/PyPowerDNS | 2e0e47c3bb7a7b20c08ddfa6f0cd93e663d02dc7 | [
"MIT"
] | null | null | null | from .objects import Server, Zone, RRSet, Record, Comment, Cryptokey, Metadata, SearchResult, StatisticItem, \
MapStatisticItem, RingStatisticItem, SimpleStatisticItem, CacheFlushResult
from .exceptions import PDNSApiException, PDNSApiNotFound
import json
from functools import partial
import requests
import logging
logger = logging.getLogger(__name__)
# TODO:
# - Logging
# - TSIGKeys
class APIClient:
    """Thin client for the PowerDNS Authoritative Server HTTP API (v1).

    On construction the client discovers the servers and zones exposed by
    the API; all subsequent calls operate on ``self.current_server`` (the
    first server reported by the endpoint).
    """

    def __init__(self, api_host, api_key, tls_verify=True, request_timeout=None):
        """Create a client and eagerly load servers and zones.

        :param api_host: base URL of the PowerDNS API; ``/api/v1`` is
            appended when not already present.
        :param api_key: value sent in the ``X-API-Key`` header.
        :param tls_verify: verify TLS certificates (default True).
        :param request_timeout: per-request timeout in seconds, or None.
        """
        self._api_url = api_host if 'api/v1' in api_host else f"{api_host}/api/v1"
        self._api_key = api_key
        self._tls_verify = tls_verify
        self._request_timeout = request_timeout

        if not self._tls_verify:
            logger.warning("Disabling TLS certificate validation.")
            # Silence urllib3's per-request InsecureRequestWarning spam.
            import urllib3
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        self.request_headers = {'X-API-Key': self._api_key}

        # Convenience verb wrappers: self.get(path), self.post(path), ...
        self.get = partial(self.request, method='GET')
        self.post = partial(self.request, method='POST')
        self.put = partial(self.request, method='PUT')
        self.patch = partial(self.request, method='PATCH')
        self.delete = partial(self.request, method='DELETE')

        self.servers = self._set_servers()
        self.current_server = self.servers[0]
        self.zones = self._set_zones()

    def request(self, path: str, method: str, data=None, **kwargs):
        """Issue an API request and return the decoded response.

        :param path: API path relative to the base URL.
        :param method: HTTP verb ('GET', 'POST', ...).
        :param data: JSON-serializable request body (defaults to ``{}``).
        :returns: decoded JSON body, or raw text when the body is not JSON.
        :raises PDNSApiNotFound: on HTTP 404.
        :raises PDNSApiException: on any other HTTP error status.
        """
        url = f"{self._api_url}/{path.lstrip('/')}"
        if data is None:
            data = {}
        response = requests.request(method,
                                    url,
                                    json=data,
                                    headers=self.request_headers,
                                    timeout=self._request_timeout,
                                    verify=self._tls_verify,
                                    **kwargs
                                    )
        try:
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            if response.status_code == 404:
                raise (PDNSApiNotFound(e)) from None
            try:
                status_message = response.json()
                status_message = status_message.get('error', status_message.get('errors', 'Unknown error'))
            except (ValueError, AttributeError):
                # Narrowed from a bare ``except:`` (which also swallowed
                # SystemExit/KeyboardInterrupt): ValueError covers a
                # non-JSON error body (JSONDecodeError subclasses it) and
                # AttributeError covers a JSON body that is not a dict.
                status_message = response.text
            raise PDNSApiException(response.status_code, status_message) from None
        except json.decoder.JSONDecodeError:
            # Success response without a JSON body (e.g. empty 204).
            return response.text

    def _set_servers(self):
        """Fetch all servers known to the API as Server objects."""
        new_servers = list()
        for server in self.get('servers'):
            new_servers.append(Server(**server))
        return new_servers

    def _set_zones(self):
        """Fetch all zones of the current server as Zone objects."""
        new_zones = list()
        for zone in self.get(f'servers/{self.current_server.id}/zones'):
            new_zones.append(Zone(**zone))
        return new_zones

    def create_zone(self, zone: Zone):
        """Create *zone* on the current server and return the new Zone."""
        path = f'servers/{self.current_server.id}/zones'
        return Zone(**self.post(path, data=zone))

    # Zones
    def get_zone(self, zone_name):
        """Return *zone_name* as a Zone with fully typed rrsets.

        The API returns rrsets/records/comments as plain dicts; rebuild
        them into RRSet/Record/Comment objects before returning.
        """
        path = f'servers/{self.current_server.id}/zones/{zone_name}'
        zone = Zone(**self.get(path))
        new_rrsets = []
        for rrset in zone.rrsets:
            new_comments = []
            new_records = []
            rrset = RRSet(**rrset)
            for comment in rrset.comments:
                new_comments.append(Comment(**comment))
            for record in rrset.records:
                new_records.append(Record(**record))
            rrset.comments = new_comments
            rrset.records = new_records
            new_rrsets.append(rrset)
        zone.rrsets = new_rrsets
        return zone

    def delete_zone(self, zone_name):
        """Delete *zone_name* from the current server."""
        path = f'servers/{self.current_server.id}/zones/{zone_name}'
        self.delete(path)

    def update_zone_metadata(self, zone: Zone):
        """PUT zone-level settings of *zone* and return the refreshed Zone."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}'
        self.put(path, data=zone)
        return self.get_zone(zone.name)

    def patch_rrsets(self, zone: Zone):
        """PATCH the rrsets of *zone* and return the refreshed Zone."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}'
        self.patch(path, data={'rrsets': zone.rrsets})
        return self.get_zone(zone.name)

    def create_records(self, zone: Zone, rrsets: list):
        """Create/replace *rrsets* in *zone*; returns the refreshed Zone.

        NOTE: mutates the passed RRSet objects (sets changetype='REPLACE').
        """
        for rrset in rrsets:
            rrset.changetype = 'REPLACE'
        zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
        return self.patch_rrsets(zone)

    def delete_records(self, zone: Zone, rrsets: list):
        """Delete *rrsets* from *zone*; returns the refreshed Zone.

        NOTE: mutates the passed RRSet objects (sets changetype='DELETE').
        """
        for rrset in rrsets:
            rrset.changetype = 'DELETE'
        zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
        return self.patch_rrsets(zone)

    # Cryptokeys
    def get_zone_cryptokeys(self, zone: Zone):
        """Return all DNSSEC cryptokeys of *zone* as Cryptokey objects."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
        cryptkeys_new = []
        for cryptokey in self.get(path):
            cryptkeys_new.append(Cryptokey(**cryptokey))
        return cryptkeys_new

    def create_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
        """Create *cryptokey* on *zone*; returns the raw API response dict."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
        return self.post(path, data=cryptokey)

    def get_cryptokey(self, zone: Zone, key_id):
        """Return the cryptokey *key_id* of *zone* as a Cryptokey."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{key_id}'
        return Cryptokey(**self.get(path))

    def put_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
        """Update (activate/deactivate) *cryptokey* on *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{cryptokey.id}'
        self.put(path, data=cryptokey)

    # Metadata
    def get_zone_metadata(self, zone: Zone):
        """Return all metadata entries of *zone* as Metadata objects."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
        metadata_new = []
        for metadata in self.get(path):
            metadata_new.append(Metadata(**metadata))
        return metadata_new

    def create_metadata(self, zone: Zone, metadata: Metadata):
        """Create *metadata* on *zone* and return the full metadata list."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
        self.post(path, data=metadata)
        return self.get_zone_metadata(zone)

    def get_metadata(self, zone: Zone, metadata_kind):
        """Return the metadata entry of kind *metadata_kind* for *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata_kind}'
        return Metadata(**self.get(path))

    def put_metadata(self, zone: Zone, metadata: Metadata):
        """Replace the metadata entry of *metadata.kind* on *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
        return Metadata(**self.put(path, data=metadata))

    def delete_metadata(self, zone: Zone, metadata: Metadata):
        """Delete the metadata entry of *metadata.kind* from *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
        self.delete(path)

    # TSIGKeys
    # FIXME TBW

    # Searching
    def search(self, query: str, max_results: int, object_type: str):
        """Search zones/records/comments; returns SearchResult objects.

        :param query: search string (may contain ``*`` wildcards).
        :param max_results: maximum number of results to return.
        :param object_type: one of 'all', 'zone', 'record', 'comment'.
        :raises TypeError: on an invalid *object_type* or non-int
            *max_results*.
        """
        path = f'servers/{self.current_server.id}/search-data'
        object_types = ['all', 'zone', 'record', 'comment']
        if object_type not in object_types:
            raise TypeError(f"object_type must be one of {', '.join(object_types)}")
        if not isinstance(max_results, int):
            raise TypeError("max_results needs to be an integer.")
        payload = {'q': query, 'max': max_results, 'object_type': object_type}
        new_results = []
        for result in self.get(path, params=payload):
            new_results.append(SearchResult(**result))
        return new_results

    # Statistics
    def statistics(self, statistic=None, includerings=True):
        """Return server statistics as typed statistic items.

        :param statistic: when given, return only that single statistic
            item instead of the full list.
        :param includerings: include ring statistics (default True).
        """
        path = f'servers/{self.current_server.id}/statistics'
        payload = {'statistic': statistic, 'includerings': includerings}
        # Map the API's 'type' discriminator onto the wrapper classes.
        type_map = {
            'StatisticItem': StatisticItem,
            'MapStatisticItem': MapStatisticItem,
            'RingStatisticItem': RingStatisticItem
        }
        new_statistics = []
        for item in self.get(path, params=payload):
            if item.get('type') in type_map.keys():
                new_statistic = type_map[item.get('type')](**item)
                if isinstance(new_statistic.value, list):
                    # Map/ring statistics carry a list of simple items.
                    new_values = []
                    for value in new_statistic.value:
                        new_values.append(SimpleStatisticItem(**value))
                    new_statistic.value = new_values
                if statistic is not None:
                    return new_statistic
                new_statistics.append(new_statistic)
        return new_statistics

    # Cache
    def flush_cache(self, domain: str):
        """Flush the cache for *domain* and return the flush result."""
        path = f'servers/{self.current_server.id}/cache/flush'
        payload = {'domain': domain}
        return CacheFlushResult(**self.put(path, params=payload))
| 38.484581 | 110 | 0.615156 | from .objects import Server, Zone, RRSet, Record, Comment, Cryptokey, Metadata, SearchResult, StatisticItem, \
MapStatisticItem, RingStatisticItem, SimpleStatisticItem, CacheFlushResult
from .exceptions import PDNSApiException, PDNSApiNotFound
import json
from functools import partial
import requests
import logging
logger = logging.getLogger(__name__)
class APIClient:
    """Client for the PowerDNS Authoritative Server HTTP API (v1).

    Wraps :mod:`requests` with API-key authentication and exposes typed
    helpers for zones, records, cryptokeys, metadata, search, statistics and
    cache flushing.  On construction the server list and the zones of the
    first server are fetched and cached on the instance.
    """

    def __init__(self, api_host, api_key, tls_verify=True, request_timeout=None):
        """Create a client and eagerly load servers and zones.

        :param api_host: base URL of the API host; ``/api/v1`` is appended
            unless the URL already contains it
        :param api_key: value sent in the ``X-API-Key`` header
        :param tls_verify: set to ``False`` to skip TLS certificate validation
        :param request_timeout: per-request timeout in seconds (``None`` waits
            forever)
        """
        self._api_url = api_host if 'api/v1' in api_host else f"{api_host}/api/v1"
        self._api_key = api_key
        self._tls_verify = tls_verify
        self._request_timeout = request_timeout
        if not self._tls_verify:
            logger.warning("Disabling TLS certificate validation.")
            # Silence urllib3's per-request InsecureRequestWarning noise.
            import urllib3
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self.request_headers = {'X-API-Key': self._api_key}
        # Verb-specific shorthands around request().
        self.get = partial(self.request, method='GET')
        self.post = partial(self.request, method='POST')
        self.put = partial(self.request, method='PUT')
        self.patch = partial(self.request, method='PATCH')
        self.delete = partial(self.request, method='DELETE')
        self.servers = self._set_servers()
        self.current_server = self.servers[0]
        self.zones = self._set_zones()

    def request(self, path: str, method: str, data=None, **kwargs):
        """Perform an API request and return the decoded response body.

        :param path: path relative to the API root (leading ``/`` tolerated)
        :param method: HTTP verb
        :param data: JSON-serializable request body (defaults to ``{}``)
        :return: decoded JSON on success, or the raw text when the body is
            not JSON
        :raises PDNSApiNotFound: on HTTP 404
        :raises PDNSApiException: on any other HTTP error status
        """
        url = f"{self._api_url}/{path.lstrip('/')}"
        if data is None:
            data = {}
        response = requests.request(method,
                                    url,
                                    json=data,
                                    headers=self.request_headers,
                                    timeout=self._request_timeout,
                                    verify=self._tls_verify,
                                    **kwargs
                                    )
        try:
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            if response.status_code == 404:
                raise PDNSApiNotFound(e) from None
            try:
                status_message = response.json()
                status_message = status_message.get(
                    'error', status_message.get('errors', 'Unknown error'))
            except ValueError:
                # Error body was not JSON; fall back to the raw text.
                # (response.json() raises a ValueError subclass.)
                status_message = response.text
            raise PDNSApiException(response.status_code, status_message) from None
        except json.decoder.JSONDecodeError:
            # Success status but a non-JSON body (e.g. empty 204 response).
            return response.text

    def _set_servers(self):
        """Fetch every server known to the API as Server objects."""
        return [Server(**server) for server in self.get('servers')]

    def _set_zones(self):
        """Fetch every zone of the current server as Zone objects."""
        return [Zone(**zone)
                for zone in self.get(f'servers/{self.current_server.id}/zones')]

    def create_zone(self, zone: Zone):
        """Create *zone* on the current server and return the created Zone."""
        path = f'servers/{self.current_server.id}/zones'
        return Zone(**self.post(path, data=zone))

    def get_zone(self, zone_name):
        """Return zone *zone_name* with rrsets, records and comments typed.

        The raw API response nests plain dicts; this converts them into
        RRSet/Record/Comment objects in place.
        """
        path = f'servers/{self.current_server.id}/zones/{zone_name}'
        zone = Zone(**self.get(path))
        new_rrsets = []
        for rrset in zone.rrsets:
            rrset = RRSet(**rrset)
            rrset.comments = [Comment(**comment) for comment in rrset.comments]
            rrset.records = [Record(**record) for record in rrset.records]
            new_rrsets.append(rrset)
        zone.rrsets = new_rrsets
        return zone

    def delete_zone(self, zone_name):
        """Delete zone *zone_name* from the current server."""
        path = f'servers/{self.current_server.id}/zones/{zone_name}'
        self.delete(path)

    def update_zone_metadata(self, zone: Zone):
        """PUT zone-level settings of *zone* and return the refreshed zone."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}'
        self.put(path, data=zone)
        return self.get_zone(zone.name)

    def patch_rrsets(self, zone: Zone):
        """PATCH the rrsets of *zone* and return the refreshed zone."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}'
        self.patch(path, data={'rrsets': zone.rrsets})
        return self.get_zone(zone.name)

    def create_records(self, zone: Zone, rrsets: list):
        """Create/replace *rrsets* in *zone* (changetype REPLACE)."""
        for rrset in rrsets:
            rrset.changetype = 'REPLACE'
        zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
        return self.patch_rrsets(zone)

    def delete_records(self, zone: Zone, rrsets: list):
        """Delete *rrsets* from *zone* (changetype DELETE)."""
        for rrset in rrsets:
            rrset.changetype = 'DELETE'
        zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
        return self.patch_rrsets(zone)

    def get_zone_cryptokeys(self, zone: Zone):
        """Return all DNSSEC cryptokeys of *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
        return [Cryptokey(**cryptokey) for cryptokey in self.get(path)]

    def create_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
        """Create *cryptokey* for *zone* and return the raw API response."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
        return self.post(path, data=cryptokey)

    def get_cryptokey(self, zone: Zone, key_id):
        """Return the cryptokey *key_id* of *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{key_id}'
        return Cryptokey(**self.get(path))

    def put_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
        """Update *cryptokey* (matched by its id) on *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{cryptokey.id}'
        self.put(path, data=cryptokey)

    def get_zone_metadata(self, zone: Zone):
        """Return all metadata entries attached to *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
        return [Metadata(**metadata) for metadata in self.get(path)]

    def create_metadata(self, zone: Zone, metadata: Metadata):
        """Create *metadata* on *zone* and return the full metadata list."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
        self.post(path, data=metadata)
        return self.get_zone_metadata(zone)

    def get_metadata(self, zone: Zone, metadata_kind):
        """Return the metadata entry of kind *metadata_kind* on *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata_kind}'
        return Metadata(**self.get(path))

    def put_metadata(self, zone: Zone, metadata: Metadata):
        """Replace the metadata entry of the same kind on *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
        return Metadata(**self.put(path, data=metadata))

    def delete_metadata(self, zone: Zone, metadata: Metadata):
        """Delete the metadata entry of the given kind from *zone*."""
        path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
        self.delete(path)

    def search(self, query: str, max_results: int, object_type: str):
        """Full-text search across zones, records and comments.

        :param query: search string (``*`` wildcards supported by the API)
        :param max_results: maximum number of hits to return
        :param object_type: one of ``all``, ``zone``, ``record``, ``comment``
        :raises TypeError: on an unknown object_type or non-int max_results
        """
        path = f'servers/{self.current_server.id}/search-data'
        object_types = ['all', 'zone', 'record', 'comment']
        if object_type not in object_types:
            raise TypeError(f"object_type must be one of {', '.join(object_types)}")
        if not isinstance(max_results, int):
            raise TypeError("max_results needs to be an integer.")
        payload = {'q': query, 'max': max_results, 'object_type': object_type}
        return [SearchResult(**result) for result in self.get(path, params=payload)]

    def statistics(self, statistic=None, includerings=True):
        """Return server statistics, typed per item.

        :param statistic: if given, return only that single statistic object
            instead of a list
        :param includerings: whether ring statistics are included
        """
        path = f'servers/{self.current_server.id}/statistics'
        payload = {'statistic': statistic, 'includerings': includerings}
        # The API tags each item with its concrete type name.
        type_map = {
            'StatisticItem': StatisticItem,
            'MapStatisticItem': MapStatisticItem,
            'RingStatisticItem': RingStatisticItem
        }
        new_statistics = []
        for item in self.get(path, params=payload):
            if item.get('type') in type_map:
                new_statistic = type_map[item.get('type')](**item)
                # Map/ring items carry a list of key/value sub-items.
                if isinstance(new_statistic.value, list):
                    new_statistic.value = [SimpleStatisticItem(**value)
                                           for value in new_statistic.value]
                if statistic is not None:
                    # A single statistic was requested: return the first match.
                    return new_statistic
                new_statistics.append(new_statistic)
        return new_statistics

    def flush_cache(self, domain: str):
        """Flush the cache for *domain* and return the flush result.

        The domain is passed as a query parameter, as required by the API.
        """
        path = f'servers/{self.current_server.id}/cache/flush'
        payload = {'domain': domain}
        return CacheFlushResult(**self.put(path, params=payload))
| true | true |
f70f80addbf2038f17208bc47e55b1fabb3e74e7 | 5,534 | py | Python | ezcoach/ezcoach/agent.py | Pawel-M/EZ-Coach | ee078b8ab7409730e99cb38653d03aa574ab914b | [
"MIT"
] | 1 | 2021-09-14T13:17:33.000Z | 2021-09-14T13:17:33.000Z | ezcoach/ezcoach/agent.py | Pawel-M/EZ-Coach | ee078b8ab7409730e99cb38653d03aa574ab914b | [
"MIT"
] | null | null | null | ezcoach/ezcoach/agent.py | Pawel-M/EZ-Coach | ee078b8ab7409730e99cb38653d03aa574ab914b | [
"MIT"
] | null | null | null | """
The agent module contains three abstract classes that are subclassed in order to create algorithms.
The classes are:
* Player - for an algorithm that cannot learn and can only play
* Learner - for a learning algorithm controlling a single agent
* MultiLearner - for a learning algorithm of controlling a number of agents
"""
import abc
from typing import List, Iterable
from ezcoach.enviroment import Manifest
class Player(abc.ABC):
    """Abstract base for an agent that can only play (no learning).

    Subclasses receive the game's Manifest via :meth:`initialize` and then
    choose actions for incoming states in :meth:`act`.  Any class providing
    both methods is treated as a virtual subclass, so it can be used with the
    Runner's test procedure without inheriting from this class.
    """

    @abc.abstractmethod
    def initialize(self, manifest: Manifest):
        """Set up the agent from the game description.

        :param manifest: a Manifest obtained from the environment
        """

    @abc.abstractmethod
    def act(self, state):
        """Choose an action for the given state.

        :param state: a state received from the environment
        :return: an action compliant with the manifest given to initialize
        """

    @classmethod
    def __subclasshook__(cls, obj):
        # Structural check: any class whose MRO supplies both required
        # methods counts as a Player.
        if cls is not Player:
            return NotImplemented
        required = ('initialize', 'act')
        for name in required:
            if not any(name in base.__dict__ for base in obj.__mro__):
                return NotImplemented
        return True
class Learner(Player):
    """Abstract base for an agent capable of learning while it plays.

    Extends :class:`Player` with episode lifecycle hooks.  Only
    :meth:`do_start_episode` must be implemented; the remaining hooks have
    empty default bodies.  Rewards arrive per step via
    :meth:`receive_reward` and per episode via :meth:`episode_ended`.
    A Learner can be used in both the training and the testing procedure.
    """

    @abc.abstractmethod
    def do_start_episode(self, episode: int) -> bool:
        """Decide whether the given episode should be started.

        :param episode: number of the episode about to start (1-based)
        :return: True to run the episode, False to stop
        """

    def episode_started(self, episode: int):
        """Hook: the episode with the given number (1-based) has started."""

    def receive_reward(self, previous_state, action, reward: float, accumulated_reward: float, next_state):
        """Hook: a per-step reward arrived from the environment.

        :param previous_state: state preceding the reward
        :param action: action preceding the reward
        :param reward: numerical reward signal for this step
        :param accumulated_reward: reward accumulated so far this episode
        :param next_state: state following the reward
        """

    def episode_ended(self, terminal_state, accumulated_reward):
        """Hook: the episode finished.

        The accumulated reward assumes no discounting; a discounting agent
        should track its own return inside receive_reward instead.

        :param terminal_state: last state of the episode
        :param accumulated_reward: undiscounted reward accumulated over
            the episode
        """

    @classmethod
    def __subclasshook__(cls, obj):
        # Structural check mirroring Player, extended with the episode hooks.
        if cls is not Learner:
            return NotImplemented
        required = ('initialize', 'act',
                    'do_start_episode', 'episode_started',
                    'receive_reward', 'episode_ended')
        mro_dicts = [base.__dict__ for base in obj.__mro__]
        for name in required:
            if not any(name in d for d in mro_dicts):
                return NotImplemented
        return True
class MultiLearner(Learner):
    """Abstract base for a learning algorithm controlling several agents.

    Before each episode the controlled player numbers are announced via
    :meth:`set_players`; during an episode :meth:`set_acting_player`
    designates which player the subsequent act / receive_reward /
    episode_ended calls refer to.
    """

    @abc.abstractmethod
    def set_players(self, players: Iterable[int]):
        """Announce which players this learner will control.

        :param players: iterable of numbers identifying the players
        """

    @abc.abstractmethod
    def set_acting_player(self, player):
        """Select the player for the next act/reward/episode-end calls.

        :param player: number identifying the acting player
        """

    @classmethod
    def __subclasshook__(cls, obj):
        # Structural check: Learner's hooks plus the multi-player setters.
        if cls is not MultiLearner:
            return NotImplemented
        required = ('initialize', 'act',
                    'do_start_episode', 'episode_started',
                    'receive_reward', 'episode_ended',
                    'set_players', 'set_acting_player')
        mro_dicts = [base.__dict__ for base in obj.__mro__]
        for name in required:
            if not any(name in d for d in mro_dicts):
                return NotImplemented
        return True
| 36.893333 | 117 | 0.669859 | import abc
from typing import List, Iterable
from ezcoach.enviroment import Manifest
class Player(abc.ABC):
@abc.abstractmethod
def initialize(self, manifest: Manifest):
@abc.abstractmethod
def act(self, state):
@classmethod
def __subclasshook__(cls, obj):
if cls is Player:
methods = ('initialize', 'act')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
class Learner(Player):
@abc.abstractmethod
def do_start_episode(self, episode: int) -> bool:
def episode_started(self, episode: int):
def receive_reward(self, previous_state, action, reward: float, accumulated_reward: float, next_state):
def episode_ended(self, terminal_state, accumulated_reward):
@classmethod
def __subclasshook__(cls, obj):
if cls is Learner:
methods = ('initialize', 'act',
'do_start_episode', 'episode_started', 'receive_reward', 'episode_ended')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
class MultiLearner(Learner):
@abc.abstractmethod
def set_players(self, players: Iterable[int]):
@abc.abstractmethod
def set_acting_player(self, player):
@classmethod
def __subclasshook__(cls, obj):
if cls is MultiLearner:
methods = ('initialize', 'act',
'do_start_episode', 'episode_started', 'receive_reward', 'episode_ended',
'set_players', 'set_acting_player')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
| true | true |
f70f81401c08d654abb1e9dcd49531c69b6cbd11 | 8,969 | py | Python | ceilometer/tests/alarm/test_notifier.py | NeCTAR-RC/ceilometer | 25cb8740b83bfbf5c526be816fa3ae10f936bff5 | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:09.000Z | 2015-02-26T03:23:09.000Z | ceilometer/tests/alarm/test_notifier.py | NeCTAR-RC/ceilometer | 25cb8740b83bfbf5c526be816fa3ae10f936bff5 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/alarm/test_notifier.py | NeCTAR-RC/ceilometer | 25cb8740b83bfbf5c526be816fa3ae10f936bff5 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
import mock
import requests
from ceilometer.alarm import service
from ceilometer.openstack.common import context
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
# Exact JSON payload the REST notifier is expected to POST for the
# NOTIFICATION dict below (key order matches what the notifier serializes).
DATA_JSON = ('{"current": "ALARM", "alarm_id": "foobar",'
             ' "reason": "what ?", "reason_data": {"test": "test"},'
             ' "previous": "OK"}')

# Base alarm state-transition payload reused across tests; individual tests
# attach an 'actions' list via TestAlarmNotifier._notification().
NOTIFICATION = dict(alarm_id='foobar',
                    condition=dict(threshold=42),
                    reason='what ?',
                    reason_data={'test': 'test'},
                    previous='OK',
                    current='ALARM')
class TestAlarmNotifier(test.BaseTestCase):
    """Unit tests for ceilometer.alarm.service.AlarmNotifierService.

    Covers dispatch of alarm notifications to the test://, log:// and
    http(s):// notifiers, including the TLS client-certificate and
    ssl-verify configuration combinations of the REST notifier.
    """

    def setUp(self):
        super(TestAlarmNotifier, self).setUp()
        # Fresh config fixture and a notifier service per test.
        self.CONF = self.useFixture(config.Config()).conf
        self.service = service.AlarmNotifierService('somehost', 'sometopic')

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_init_host(self):
        # If we try to create a real RPC connection, init_host() never
        # returns. Mock it out so we can establish the service
        # configuration.
        with mock.patch('ceilometer.openstack.common.rpc.create_connection'):
            self.service.start()

    def test_notify_alarm(self):
        # The test:// notifier records every notification it receives;
        # verify the payload is forwarded unchanged (URL pre-split).
        data = {
            'actions': ['test://'],
            'alarm_id': 'foobar',
            'previous': 'OK',
            'current': 'ALARM',
            'reason': 'Everything is on fire',
            'reason_data': {'fire': 'everywhere'}
        }
        self.service.notify_alarm(context.get_admin_context(), data)
        notifications = self.service.notifiers['test'].obj.notifications
        self.assertEqual(1, len(notifications))
        self.assertEqual((urlparse.urlsplit(data['actions'][0]),
                          data['alarm_id'],
                          data['previous'],
                          data['current'],
                          data['reason'],
                          data['reason_data']),
                         notifications[0])

    def test_notify_alarm_no_action(self):
        # A notification without 'actions' must be silently ignored.
        self.service.notify_alarm(context.get_admin_context(), {})

    def test_notify_alarm_log_action(self):
        # The log:// notifier must accept the notification without error.
        self.service.notify_alarm(context.get_admin_context(),
                                  {
                                      'actions': ['log://'],
                                      'alarm_id': 'foobar',
                                      'condition': {'threshold': 42},
                                  })

    @staticmethod
    def _fake_spawn_n(func, *args, **kwargs):
        # Run the would-be greenthread synchronously so the test can
        # assert on the HTTP call immediately.
        func(*args, **kwargs)

    @staticmethod
    def _notification(action):
        # Build a notification payload from the shared NOTIFICATION base
        # plus the single action under test.
        notification = {}
        notification.update(NOTIFICATION)
        notification['actions'] = [action]
        return notification

    # Headers the REST notifier is expected to send with every POST.
    HTTP_HEADERS = {'content-type': 'application/json'}

    def test_notify_alarm_rest_action_ok(self):
        # Plain HTTP: POST body and headers only, no TLS kwargs.
        action = 'http://host/action'

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS)

    def test_notify_alarm_rest_action_with_ssl_client_cert(self):
        # HTTPS with a client certificate (no separate key file).
        action = 'https://host/action'
        certificate = "/etc/ssl/cert/whatever.pem"

        self.CONF.set_override("rest_notifier_certificate_file", certificate,
                               group='alarm')

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS,
                                          cert=certificate, verify=True)

    def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self):
        # HTTPS with certificate + key: requests expects a (cert, key) tuple.
        action = 'https://host/action'
        certificate = "/etc/ssl/cert/whatever.pem"
        key = "/etc/ssl/cert/whatever.key"

        self.CONF.set_override("rest_notifier_certificate_file", certificate,
                               group='alarm')
        self.CONF.set_override("rest_notifier_certificate_key", key,
                               group='alarm')

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS,
                                          cert=(certificate, key), verify=True)

    def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self):
        # Verification disabled globally through configuration.
        action = 'https://host/action'

        self.CONF.set_override("rest_notifier_ssl_verify", False,
                               group='alarm')

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS,
                                          verify=False)

    def test_notify_alarm_rest_action_with_ssl_verify_disable(self):
        # Verification disabled per-action via the URL query parameter.
        action = 'https://host/action?ceilometer-alarm-ssl-verify=0'

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS,
                                          verify=False)

    def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self):
        # The per-action URL parameter overrides the config default.
        action = 'https://host/action?ceilometer-alarm-ssl-verify=1'

        self.CONF.set_override("rest_notifier_ssl_verify", False,
                               group='alarm')

        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          headers=self.HTTP_HEADERS,
                                          verify=True)

    @staticmethod
    def _fake_urlsplit(*args, **kwargs):
        # Simulate a URL that cannot even be split.
        raise Exception("Evil urlsplit!")

    def test_notify_alarm_invalid_url(self):
        # An unparseable action URL must be logged, not raised.
        with mock.patch('ceilometer.openstack.common.network_utils.urlsplit',
                        self._fake_urlsplit):
            LOG = mock.MagicMock()
            with mock.patch('ceilometer.alarm.service.LOG', LOG):
                self.service.notify_alarm(
                    context.get_admin_context(),
                    {
                        'actions': ['no-such-action-i-am-sure'],
                        'alarm_id': 'foobar',
                        'condition': {'threshold': 42},
                    })
                self.assertTrue(LOG.error.called)

    def test_notify_alarm_invalid_action(self):
        # An unknown notifier scheme must be logged, not raised.
        LOG = mock.MagicMock()
        with mock.patch('ceilometer.alarm.service.LOG', LOG):
            self.service.notify_alarm(
                context.get_admin_context(),
                {
                    'actions': ['no-such-action-i-am-sure://'],
                    'alarm_id': 'foobar',
                    'condition': {'threshold': 42},
                })
            self.assertTrue(LOG.error.called)
| 42.709524 | 79 | 0.563051 |
import six.moves.urllib.parse as urlparse
import mock
import requests
from ceilometer.alarm import service
from ceilometer.openstack.common import context
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
DATA_JSON = ('{"current": "ALARM", "alarm_id": "foobar",'
' "reason": "what ?", "reason_data": {"test": "test"},'
' "previous": "OK"}')
NOTIFICATION = dict(alarm_id='foobar',
condition=dict(threshold=42),
reason='what ?',
reason_data={'test': 'test'},
previous='OK',
current='ALARM')
class TestAlarmNotifier(test.BaseTestCase):
def setUp(self):
super(TestAlarmNotifier, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
self.service = service.AlarmNotifierService('somehost', 'sometopic')
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_init_host(self):
with mock.patch('ceilometer.openstack.common.rpc.create_connection'):
self.service.start()
def test_notify_alarm(self):
data = {
'actions': ['test://'],
'alarm_id': 'foobar',
'previous': 'OK',
'current': 'ALARM',
'reason': 'Everything is on fire',
'reason_data': {'fire': 'everywhere'}
}
self.service.notify_alarm(context.get_admin_context(), data)
notifications = self.service.notifiers['test'].obj.notifications
self.assertEqual(1, len(notifications))
self.assertEqual((urlparse.urlsplit(data['actions'][0]),
data['alarm_id'],
data['previous'],
data['current'],
data['reason'],
data['reason_data']),
notifications[0])
def test_notify_alarm_no_action(self):
self.service.notify_alarm(context.get_admin_context(), {})
def test_notify_alarm_log_action(self):
self.service.notify_alarm(context.get_admin_context(),
{
'actions': ['log://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
@staticmethod
def _fake_spawn_n(func, *args, **kwargs):
func(*args, **kwargs)
@staticmethod
def _notification(action):
notification = {}
notification.update(NOTIFICATION)
notification['actions'] = [action]
return notification
HTTP_HEADERS = {'content-type': 'application/json'}
def test_notify_alarm_rest_action_ok(self):
action = 'http://host/action'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS)
def test_notify_alarm_rest_action_with_ssl_client_cert(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
cert=certificate, verify=True)
def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
key = "/etc/ssl/cert/whatever.key"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
self.CONF.set_override("rest_notifier_certificate_key", key,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
cert=(certificate, key), verify=True)
def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self):
action = 'https://host/action'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_disable(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=0'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=1'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=True)
@staticmethod
def _fake_urlsplit(*args, **kwargs):
raise Exception("Evil urlsplit!")
def test_notify_alarm_invalid_url(self):
with mock.patch('ceilometer.openstack.common.network_utils.urlsplit',
self._fake_urlsplit):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
def test_notify_alarm_invalid_action(self):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
| true | true |
f70f82382935e2fef97fc4bfb9b6127666b2db6e | 1,793 | py | Python | app.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | app.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | app.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from ckuser import client,server
import os
def print_client_menu():
    """Print the user-facing menu of account operations to stdout."""
    entries = [
        "用户菜单:",
        "-" * 25,
        "0" + "-" * 10 + "显示用户菜单" + "-" * 10,
        "1" + "-" * 10 + "显示服务菜单" + "-" * 10,
        "2" + "-" * 10 + "用户登录系统" + "-" * 10,
        "3" + "-" * 10 + "用户修改信息" + "-" * 10,
        "4" + "-" * 10 + "用户注册信息" + "-" * 10,
        "6" + "-" * 10 + "退出系统",
    ]
    print("\n".join(entries))
def print_server_menu():
    """Print the admin-facing menu of account-management operations."""
    entries = [
        "服务菜单:",
        "-" * 25,
        "0" + "-" * 10 + "显示用户菜单" + "-" * 10,
        "1" + "-" * 10 + "显示服务菜单" + "-" * 10,
        "2" + "-" * 10 + "添加用户帐号" + "-" * 10,
        "3" + "-" * 10 + "删除用户帐号" + "-" * 10,
        "4" + "-" * 10 + "修改用户帐号" + "-" * 10,
        "5" + "-" * 10 + "查找用户帐号" + "-" * 10,
        "6" + "-" * 10 + "退出系统",
    ]
    print("\n".join(entries))
def server_oper():
    """Interactive loop for the service (admin) menu.

    Reads menu choices from stdin and dispatches to the server backend.
    Option 0 leaves this menu and hands control back to the user menu;
    option 6 exits the whole program (SystemExit is not caught by the
    ``except Exception`` handler below, so it propagates).

    NOTE: this function tail-calls client_oper() (mutual recursion); each
    menu switch adds a stack frame, which is acceptable for interactive use.
    """
    print_server_menu()
    while True:
        try:
            i = int(input("请输入操作符:"))
            if i == 0:
                # Back to the user menu (handled after the loop).
                os.system("clear")
                break
            elif i == 1:
                os.system("clear")
                print_server_menu()
            elif i == 2:
                server.user_add()
            elif i == 3:
                server.user_del()
            elif i == 4:
                server.user_update()
            elif i == 5:
                server.user_find()
            elif i == 6:
                os.system("clear")
                # Fixed: the original `os.system(exit())` only "worked"
                # because exit() raised SystemExit while being evaluated
                # as an argument; raise it explicitly instead.
                raise SystemExit
        except Exception:
            # Non-numeric input or a backend failure: reset the screen
            # and show the menu again.
            os.system("clear")
            print_server_menu()
            print("输入错误!")
    client_oper()
def client_oper():
    """Interactive loop for the user (client) menu.

    Reads menu choices from stdin and dispatches to the client backend.
    Option 1 leaves this menu and hands control to the service menu;
    option 6 exits the whole program (SystemExit is not caught by the
    ``except Exception`` handler below, so it propagates).

    NOTE: this function tail-calls server_oper() (mutual recursion); each
    menu switch adds a stack frame, which is acceptable for interactive use.
    """
    print_client_menu()
    while True:
        try:
            i = int(input("请输入操作符:"))
            if i == 0:
                os.system("clear")
                print_client_menu()
            elif i == 1:
                # Switch to the service menu (handled after the loop).
                os.system("clear")
                break
            elif i == 2:
                client.login()
            elif i == 3:
                client.update()
            elif i == 4:
                client.register()
            elif i == 6:
                os.system("clear")
                # Fixed: the original `os.system(exit())` only "worked"
                # because exit() raised SystemExit while being evaluated
                # as an argument; raise it explicitly instead.
                raise SystemExit
            else:
                os.system("clear")
                print_client_menu()
                print("输入错误!")
        except Exception:
            # Non-numeric input or a backend failure: reset the screen
            # and show the menu again.
            os.system("clear")
            print_client_menu()
            print("输入错误!")
    server_oper()
def main():
    # Entry point: the session starts at the user (client) menu; the
    # service menu is reached from there.
    client_oper()

if __name__ == '__main__':
    main()
| 19.703297 | 34 | 0.557167 |
from ckuser import client,server
import os
def print_client_menu():
print("用户菜单:")
print("-"*25)
print("0"+"-"*10+"显示用户菜单"+"-"*10)
print("1"+"-"*10+"显示服务菜单"+"-"*10)
print("2"+"-"*10+"用户登录系统"+"-"*10)
print("3"+"-"*10+"用户修改信息"+"-"*10)
print("4"+"-"*10+"用户注册信息"+"-"*10)
print("6"+"-"*10+"退出系统")
def print_server_menu():
print("服务菜单:")
print("-"*25)
print("0"+"-"*10+"显示用户菜单"+"-"*10)
print("1"+"-"*10+"显示服务菜单"+"-"*10)
print("2"+"-"*10+"添加用户帐号"+"-"*10)
print("3"+"-"*10+"删除用户帐号"+"-"*10)
print("4"+"-"*10+"修改用户帐号"+"-"*10)
print("5"+"-"*10+"查找用户帐号"+"-"*10)
print("6"+"-"*10+"退出系统")
def server_oper():
print_server_menu()
while True:
try:
i = int(input("请输入操作符:"))
if i == 0:
os.system("clear")
break
elif i == 1:
os.system("clear")
print_server_menu()
elif i == 2:
server.user_add()
elif i == 3:
server.user_del()
elif i == 4:
server.user_update()
elif i == 5:
server.user_find()
elif i == 6:
os.system("clear")
os.system(exit())
except Exception as msg:
os.system("clear")
print_server_menu()
print("输入错误!")
client_oper()
def client_oper():
print_client_menu()
while True:
try:
i = int(input("请输入操作符:"))
if i == 0:
os.system("clear")
print_client_menu()
elif i == 1:
os.system("clear")
break
elif i == 2:
client.login()
elif i == 3:
client.update()
elif i == 4:
client.register()
elif i == 6:
os.system("clear")
os.system(exit())
else:
os.system("clear")
print_client_menu()
print("输入错误!")
except Exception:
os.system("clear")
print_client_menu()
print("输入错误!")
server_oper()
def main():
client_oper()
if __name__ == '__main__':
main()
| true | true |
f70f84ea78513abd82f40253fd127865d7f56a02 | 2,181 | py | Python | sound_factory/sound_factory.py | jphacks/C_2008 | 65d7a1d3a90045b149397cdd1e038ab648bb842e | [
"MIT"
] | 2 | 2020-11-28T05:10:48.000Z | 2020-11-29T01:23:53.000Z | sound_factory/sound_factory.py | jphacks/C_2008 | 65d7a1d3a90045b149397cdd1e038ab648bb842e | [
"MIT"
] | 5 | 2020-11-01T06:34:02.000Z | 2020-11-01T06:37:46.000Z | sound_factory/sound_factory.py | jphacks/C_2008 | 65d7a1d3a90045b149397cdd1e038ab648bb842e | [
"MIT"
] | null | null | null | import os
import re
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, img_to_array
# Input resolutions for EfficientNet-B0 .. B7, indexed by the digit parsed
# from the model name (e.g. 'b0' -> index 0 -> 224x224).
IMAGE_SHAPE = [(224, 224), (240, 240), (260, 260), (300, 300), (380, 380), (456, 456), (528, 528), (600, 600)]
def main(paths : list, model_name : str):
    """Predict a background-sound label for each manga panel image.

    Loads the Keras model named *model_name* from the package's ``model``
    directory, classifies every image in *paths* (files or directories,
    non-recursively), and appends ``<path>,<label>`` lines to
    ``manga_sound_labels.csv`` in the current directory.

    :param paths: image file paths and/or directories of images
    :param model_name: model directory name containing the EfficientNet
        variant digit (e.g. 'best/b0')
    """
    model_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             'model', model_name)
    try:
        model = tf.keras.models.load_model(model_dir)
    except Exception:
        print('そのようなモデルはありません')
        exit()
    # The EfficientNet variant (b0..b7) is encoded as the first digit of the
    # model name and selects the matching input resolution.
    # Fixed: raw string for the regex ('\d' is an invalid escape sequence).
    model_index = int(re.search(r'\d', model_name).group(0))
    target_size = IMAGE_SHAPE[model_index]
    with open(os.path.join(model_dir, 'labels.txt'),
              mode='r', encoding='utf-8') as f1:
        labels = [s.strip() for s in f1.readlines()]

    def _predict(image_path):
        # Best effort: unreadable or non-image files are silently skipped,
        # matching the original behaviour of both branches.
        try:
            img = np.expand_dims(
                img_to_array(load_img(image_path, target_size=target_size)) / 255,
                axis=0)
        except Exception:
            return None
        return labels[np.argmax(model.predict(img)[0])]

    with open('manga_sound_labels.csv', mode='w', encoding='utf-8') as f2:
        for path in paths:
            if os.path.isfile(path):
                candidates = [path]
            else:
                candidates = [os.path.join(path, filename)
                              for filename in os.listdir(path)]
            for image_path in candidates:
                pridict = _predict(image_path)
                if pridict is not None:
                    f2.write(image_path + ',' + pridict + '\n')
if __name__ == '__main__':
    # CLI entry point: parse arguments and refuse to clobber an existing
    # result file.  (Restores the final main() call, which was fused with
    # dataset residue in the source.)
    parser = argparse.ArgumentParser(description='コマの画像から背景音を予測します')
    parser.add_argument('path', nargs='*', help='解析するファイル名かディレクトリ名')
    parser.add_argument('--model', default=os.path.join('best', 'b0'),
                        help='クラス分けに使用するモデル名')
    args = parser.parse_args()
    if 'manga_sound_labels.csv' in os.listdir(os.getcwd()):
        print('manga_sound_labels.csvがすでにあるので終了します')
        exit()
    main(args.path, args.model)
import re
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, img_to_array
IMAGE_SHAPE = [(224, 224), (240, 240), (260, 260), (300, 300), (380, 380), (456, 456), (528, 528), (600, 600)]
def main(paths : list, model_name : str):
try:
model = tf.keras.models.load_model(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name))
except Exception:
print('そのようなモデルはありません')
exit()
model_index = int(re.search('\d', model_name).group(0))
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name, 'labels.txt'), mode='r', encoding='utf-8') as f1:
labels = [s.strip() for s in f1.readlines()]
with open('manga_sound_labels.csv', mode='w', encoding='utf-8') as f2:
for path in paths:
if os.path.isfile(path):
try:
img = np.expand_dims(img_to_array(load_img(path,target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)
except Exception:
continue
pridict = labels[np.argmax(model.predict(img)[0])]
f2.write(path + ',' + pridict + '\n')
else:
for filename in os.listdir(path):
try:
img = np.expand_dims(img_to_array(load_img(os.path.join(path, filename),target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)
except Exception:
continue
pridict = labels[np.argmax(model.predict(img)[0])]
f2.write(os.path.join(path, filename) + ',' + pridict + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='コマの画像から背景音を予測します')
parser.add_argument('path',nargs='*', help='解析するファイル名かディレクトリ名')
parser.add_argument('--model', default=os.path.join('best','b0'), help='クラス分けに使用するモデル名')
args = parser.parse_args()
if 'manga_sound_labels.csv' in os.listdir(os.getcwd()):
print('manga_sound_labels.csvがすでにあるので終了します')
exit()
main(args.path, args.model) | true | true |
f70f862db871c216db3b2e5ea714abdc5fdf04bd | 3,238 | py | Python | scripts/load_file_into_mod.py | strategineer/crusader_kings_3_mods | e290c3e8e542875c0ced2d1b7a013eb85b2037fb | [
"MIT"
] | null | null | null | scripts/load_file_into_mod.py | strategineer/crusader_kings_3_mods | e290c3e8e542875c0ced2d1b7a013eb85b2037fb | [
"MIT"
] | null | null | null | scripts/load_file_into_mod.py | strategineer/crusader_kings_3_mods | e290c3e8e542875c0ced2d1b7a013eb85b2037fb | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import sys
from shutil import copyfile
import argparse
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
NUMBERED_FILENAME_SPLIT_CHARACTER = "_"
parser = argparse.ArgumentParser(description='')
parser.add_argument('filepath', help='')
parser.add_argument('--force', '-f', action="store_true", help='Override any existing files')
parser.add_argument('--increment', '-i', action="store_true", help='Increment the version number on the file so 00_X.txt will be copied as 01_X.txt')
args = parser.parse_args()
CRUSADER_KINGS_3_CURRENT_MOD_NAME = "CRUSADER_KINGS_3_CURRENT_MOD_NAME"
CRUSADER_KINGS_3_MAIN_DIR = "CRUSADER_KINGS_3_MAIN_DIR"
CRUSADER_KINGS_3_MOD_DIR = "CRUSADER_KINGS_3_MOD_DIR"
mod_name = os.environ.get(CRUSADER_KINGS_3_CURRENT_MOD_NAME, '')
main_directory_str = os.environ.get(CRUSADER_KINGS_3_MAIN_DIR, '').replace(" ", "\\ ")
base_mod_directory_str = os.environ.get(CRUSADER_KINGS_3_MOD_DIR, '').replace(" ", "\\ ")
if not mod_name:
logging.error(f"The {CRUSADER_KINGS_3_CURRENT_MOD_NAME} environment variable must be set")
sys.exit(1)
if not main_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MAIN_DIR} environment variable must be set")
sys.exit(1)
if not base_mod_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MOD_DIR} environment variable must be set")
sys.exit(1)
main_path = Path(main_directory_str)
if not main_path.exists() or not main_path.is_dir():
logging.error(f"Please ensure that {main_directory_str} points to a valid directory")
sys.exit(1)
base_mod_path = Path(base_mod_directory_str)
if not base_mod_path.exists() or not base_mod_path.is_dir():
logging.error(f"Please ensure that {base_mod_directory_str} points to a valid directory")
sys.exit(1)
mod_directory_str = f"{base_mod_directory_str}/{mod_name}"
mod_path = Path(mod_directory_str)
if not mod_path.exists() or not mod_path.is_dir():
logging.error(f"Please ensure that {mod_directory_str} points to a valid directory")
sys.exit(1)
filepath_str = f"{main_directory_str}/{args.filepath}"
filepath_path = Path(filepath_str)
if not filepath_path.exists() or not filepath_path.is_file():
logging.error(f"Please ensure that {filepath_str} points to an existing file")
sys.exit(1)
destination_filepath = args.filepath
if args.increment:
filepath = Path(args.filepath)
if NUMBERED_FILENAME_SPLIT_CHARACTER in filepath.name:
(n, tail) = filepath.name.split(NUMBERED_FILENAME_SPLIT_CHARACTER, 1)
n = str(int(n) + 1).zfill(len(n))
destination_filepath = str(filepath.parents[0]) + f"/{n}_{tail}"
destination_filepath_str = f"{mod_directory_str}/{destination_filepath}"
destination_filepath_path = Path(destination_filepath_str)
if destination_filepath_path.exists() and not args.force:
logging.error(f"File exists at {destination_filepath_str} already, please use the --force/-f parameter if you want to write over it")
sys.exit(1)
destination_filepath_path.parents[0].mkdir(parents=True, exist_ok=True)
destination_filepath_path.touch(exist_ok=True)
destination_filepath_path.write_text(filepath_path.read_text())
logging.info(f"Created at {destination_filepath_path}")
| 39.012048 | 149 | 0.774552 |
import os
import sys
from shutil import copyfile
import argparse
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
NUMBERED_FILENAME_SPLIT_CHARACTER = "_"
parser = argparse.ArgumentParser(description='')
parser.add_argument('filepath', help='')
parser.add_argument('--force', '-f', action="store_true", help='Override any existing files')
parser.add_argument('--increment', '-i', action="store_true", help='Increment the version number on the file so 00_X.txt will be copied as 01_X.txt')
args = parser.parse_args()
CRUSADER_KINGS_3_CURRENT_MOD_NAME = "CRUSADER_KINGS_3_CURRENT_MOD_NAME"
CRUSADER_KINGS_3_MAIN_DIR = "CRUSADER_KINGS_3_MAIN_DIR"
CRUSADER_KINGS_3_MOD_DIR = "CRUSADER_KINGS_3_MOD_DIR"
mod_name = os.environ.get(CRUSADER_KINGS_3_CURRENT_MOD_NAME, '')
main_directory_str = os.environ.get(CRUSADER_KINGS_3_MAIN_DIR, '').replace(" ", "\\ ")
base_mod_directory_str = os.environ.get(CRUSADER_KINGS_3_MOD_DIR, '').replace(" ", "\\ ")
if not mod_name:
logging.error(f"The {CRUSADER_KINGS_3_CURRENT_MOD_NAME} environment variable must be set")
sys.exit(1)
if not main_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MAIN_DIR} environment variable must be set")
sys.exit(1)
if not base_mod_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MOD_DIR} environment variable must be set")
sys.exit(1)
main_path = Path(main_directory_str)
if not main_path.exists() or not main_path.is_dir():
logging.error(f"Please ensure that {main_directory_str} points to a valid directory")
sys.exit(1)
base_mod_path = Path(base_mod_directory_str)
if not base_mod_path.exists() or not base_mod_path.is_dir():
logging.error(f"Please ensure that {base_mod_directory_str} points to a valid directory")
sys.exit(1)
mod_directory_str = f"{base_mod_directory_str}/{mod_name}"
mod_path = Path(mod_directory_str)
if not mod_path.exists() or not mod_path.is_dir():
logging.error(f"Please ensure that {mod_directory_str} points to a valid directory")
sys.exit(1)
filepath_str = f"{main_directory_str}/{args.filepath}"
filepath_path = Path(filepath_str)
if not filepath_path.exists() or not filepath_path.is_file():
logging.error(f"Please ensure that {filepath_str} points to an existing file")
sys.exit(1)
destination_filepath = args.filepath
if args.increment:
filepath = Path(args.filepath)
if NUMBERED_FILENAME_SPLIT_CHARACTER in filepath.name:
(n, tail) = filepath.name.split(NUMBERED_FILENAME_SPLIT_CHARACTER, 1)
n = str(int(n) + 1).zfill(len(n))
destination_filepath = str(filepath.parents[0]) + f"/{n}_{tail}"
destination_filepath_str = f"{mod_directory_str}/{destination_filepath}"
destination_filepath_path = Path(destination_filepath_str)
if destination_filepath_path.exists() and not args.force:
logging.error(f"File exists at {destination_filepath_str} already, please use the --force/-f parameter if you want to write over it")
sys.exit(1)
destination_filepath_path.parents[0].mkdir(parents=True, exist_ok=True)
destination_filepath_path.touch(exist_ok=True)
destination_filepath_path.write_text(filepath_path.read_text())
logging.info(f"Created at {destination_filepath_path}")
| true | true |
f70f86fe1ee1ad646b17cffe4377c00e9ea1c90b | 13,474 | py | Python | chapter05/blackjack.py | bhomaidan1990/reinforcement-learning-an-introduction | fbf020d9da2daec3194a17f968ef29d12ebde6f6 | [
"MIT"
] | 12,197 | 2016-10-04T03:34:49.000Z | 2022-03-31T12:55:36.000Z | chapter05/blackjack.py | bhomaidan1990/reinforcement-learning-an-introduction | fbf020d9da2daec3194a17f968ef29d12ebde6f6 | [
"MIT"
] | 134 | 2016-11-01T06:06:51.000Z | 2022-02-07T00:12:01.000Z | chapter05/blackjack.py | bhomaidan1990/reinforcement-learning-an-introduction | fbf020d9da2daec3194a17f968ef29d12ebde6f6 | [
"MIT"
] | 4,738 | 2016-09-27T07:38:23.000Z | 2022-03-31T10:09:14.000Z | #######################################################################
# Copyright (C) #
# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# 2017 Nicky van Foreest(vanforeest@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
# actions: hit or stand
ACTION_HIT = 0
ACTION_STAND = 1  # "strike" in the book
ACTIONS = [ACTION_HIT, ACTION_STAND]

# Fixed target policy for the player, indexed by the player's current sum:
# hit on 12-19, stand on 20 and 21.
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented replacement and yields the same integer dtype.
POLICY_PLAYER = np.zeros(22, dtype=int)
for i in range(12, 20):
    POLICY_PLAYER[i] = ACTION_HIT
POLICY_PLAYER[20] = ACTION_STAND
POLICY_PLAYER[21] = ACTION_STAND
# function form of target policy of player
def target_policy_player(usable_ace_player, player_sum, dealer_card):
    """Deterministic target policy: look up the action for the player's sum.

    Only ``player_sum`` is used; the other state components are accepted so
    the signature matches the policy-function interface used by play().
    """
    return POLICY_PLAYER[player_sum]
# function form of behavior policy of player
def behavior_policy_player(usable_ace_player, player_sum, dealer_card):
    """Random behavior policy: stand or hit with equal probability.

    The state arguments are ignored; they exist only so the signature
    matches the policy-function interface used by play().
    """
    coin = np.random.binomial(1, 0.5)
    return ACTION_STAND if coin == 1 else ACTION_HIT
# policy for dealer
# Fixed dealer policy, indexed by the dealer's current sum:
# hit on 12-16, stand on 17-21.
POLICY_DEALER = np.zeros(22)
for i in range(12, 17):
    POLICY_DEALER[i] = ACTION_HIT
for i in range(17, 22):
    POLICY_DEALER[i] = ACTION_STAND
# get a new card
def get_card():
    """Draw a card id uniformly from 1..13, capping face cards at 10."""
    draw = np.random.randint(1, 14)
    return min(draw, 10)
# get the value of a card (11 for ace).
def card_value(card_id):
    """Return the blackjack value of a card id: an ace (1) counts as 11."""
    if card_id == 1:
        return 11
    return card_id
# play a game
# @policy_player: specify policy for player
# @initial_state: [whether player has a usable Ace, sum of player's cards, one card of dealer]
# @initial_action: the initial action
def play(policy_player, initial_state=None, initial_action=None):
    """Play one hand of blackjack.

    Args:
        policy_player: callable (usable_ace, player_sum, dealer_card) -> action.
        initial_state: optional [usable_ace_player, player_sum, dealer_card1];
            when None, a random starting state is generated.
        initial_action: optional action forced on the player's first decision.

    Returns:
        (state, reward, player_trajectory) where state is the initial
        [usable_ace, player_sum, dealer_card], reward is +1/0/-1, and
        player_trajectory is a list of [(usable_ace, sum, dealer_card), action].
    """
    # player status
    # sum of player
    player_sum = 0
    # trajectory of player
    player_trajectory = []
    # whether player uses Ace as 11
    usable_ace_player = False
    # dealer status
    dealer_card1 = 0
    dealer_card2 = 0
    usable_ace_dealer = False
    if initial_state is None:
        # generate a random initial state
        while player_sum < 12:
            # if sum of player is less than 12, always hit
            card = get_card()
            player_sum += card_value(card)
            # If the player's sum is larger than 21, he may hold one or two aces.
            if player_sum > 21:
                # Only an ace (valued 11) can overshoot from below 12, so 22 is
                # the only reachable over-21 sum here.
                assert player_sum == 22
                # last card must be ace
                player_sum -= 10
            else:
                usable_ace_player |= (1 == card)
        # initialize cards of dealer, suppose dealer will show the first card he gets
        dealer_card1 = get_card()
        dealer_card2 = get_card()
    else:
        # use specified initial state
        usable_ace_player, player_sum, dealer_card1 = initial_state
        dealer_card2 = get_card()
    # initial state of the game
    state = [usable_ace_player, player_sum, dealer_card1]
    # initialize dealer's sum
    dealer_sum = card_value(dealer_card1) + card_value(dealer_card2)
    usable_ace_dealer = 1 in (dealer_card1, dealer_card2)
    # if the dealer's sum is larger than 21, he must hold two aces.
    if dealer_sum > 21:
        assert dealer_sum == 22
        # use one Ace as 1 rather than 11
        dealer_sum -= 10
    assert dealer_sum <= 21
    assert player_sum <= 21
    # game starts!
    # player's turn
    while True:
        if initial_action is not None:
            # Consume the forced first action exactly once.
            action = initial_action
            initial_action = None
        else:
            # get action based on current sum
            action = policy_player(usable_ace_player, player_sum, dealer_card1)
        # track player's trajectory for importance sampling
        player_trajectory.append([(usable_ace_player, player_sum, dealer_card1), action])
        if action == ACTION_STAND:
            break
        # if hit, get new card
        card = get_card()
        # Keep track of the ace count. the usable_ace_player flag is insufficient alone as it cannot
        # distinguish between having one ace or two.
        ace_count = int(usable_ace_player)
        if card == 1:
            ace_count += 1
        player_sum += card_value(card)
        # If the player has a usable ace, use it as 1 to avoid busting and continue.
        while player_sum > 21 and ace_count:
            player_sum -= 10
            ace_count -= 1
        # player busts
        if player_sum > 21:
            return state, -1, player_trajectory
        assert player_sum <= 21
        usable_ace_player = (ace_count == 1)
    # dealer's turn
    while True:
        # get action based on current sum
        action = POLICY_DEALER[dealer_sum]
        if action == ACTION_STAND:
            break
        # if hit, get a new card
        new_card = get_card()
        ace_count = int(usable_ace_dealer)
        if new_card == 1:
            ace_count += 1
        dealer_sum += card_value(new_card)
        # If the dealer has a usable ace, use it as 1 to avoid busting and continue.
        while dealer_sum > 21 and ace_count:
            dealer_sum -= 10
            ace_count -= 1
        # dealer busts
        if dealer_sum > 21:
            return state, 1, player_trajectory
        usable_ace_dealer = (ace_count == 1)
    # compare the sum between player and dealer
    assert player_sum <= 21 and dealer_sum <= 21
    if player_sum > dealer_sum:
        return state, 1, player_trajectory
    elif player_sum == dealer_sum:
        return state, 0, player_trajectory
    else:
        return state, -1, player_trajectory
# Monte Carlo Sample with On-Policy
def monte_carlo_on_policy(episodes):
    """On-policy Monte Carlo evaluation of the fixed target policy.

    Returns a pair of 10x10 arrays of average returns, indexed by
    (player_sum - 12, dealer_card - 1): first with a usable ace,
    then without.
    """
    value_sums = {True: np.zeros((10, 10)), False: np.zeros((10, 10))}
    # Counts start at one so the final division can never hit zero.
    visit_counts = {True: np.ones((10, 10)), False: np.ones((10, 10))}
    for _ in tqdm(range(episodes)):
        _, reward, trajectory = play(target_policy_player)
        # Credit the episode's final reward to every state the player visited.
        for (usable_ace, player_sum, dealer_card), _action in trajectory:
            row, col = player_sum - 12, dealer_card - 1
            visit_counts[bool(usable_ace)][row, col] += 1
            value_sums[bool(usable_ace)][row, col] += reward
    return (value_sums[True] / visit_counts[True],
            value_sums[False] / visit_counts[False])
# Monte Carlo with Exploring Starts
def monte_carlo_es(episodes):
    """Monte Carlo with Exploring Starts: estimate action values q(s, a).

    Returns an array of average returns indexed by
    (player_sum - 12, dealer_card - 1, usable_ace, action).
    """
    # (playerSum, dealerCard, usableAce, action)
    state_action_values = np.zeros((10, 10, 2, 2))
    # initialize counts to 1 to avoid division by 0
    state_action_pair_count = np.ones((10, 10, 2, 2))

    # behavior policy is greedy with respect to the current value estimates
    def behavior_policy(usable_ace, player_sum, dealer_card):
        usable_ace = int(usable_ace)
        player_sum -= 12
        dealer_card -= 1
        # get argmax of the average returns(s, a)
        values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \
                  state_action_pair_count[player_sum, dealer_card, usable_ace, :]
        # Break ties uniformly at random among the maximizing actions.
        return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])

    # play for several episodes
    for episode in tqdm(range(episodes)):
        # for each episode, use a randomly initialized state and action
        # (the "exploring starts" assumption)
        initial_state = [bool(np.random.choice([0, 1])),
                         np.random.choice(range(12, 22)),
                         np.random.choice(range(1, 11))]
        initial_action = np.random.choice(ACTIONS)
        # On the very first episode (episode == 0) all estimates are still
        # zero, so the fixed target policy is used instead of the greedy one.
        current_policy = behavior_policy if episode else target_policy_player
        _, reward, trajectory = play(current_policy, initial_state, initial_action)
        first_visit_check = set()
        for (usable_ace, player_sum, dealer_card), action in trajectory:
            usable_ace = int(usable_ace)
            player_sum -= 12
            dealer_card -= 1
            state_action = (usable_ace, player_sum, dealer_card, action)
            # First-visit MC: only the first occurrence of a pair counts.
            if state_action in first_visit_check:
                continue
            first_visit_check.add(state_action)
            # update values of state-action pairs
            state_action_values[player_sum, dealer_card, usable_ace, action] += reward
            state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
    return state_action_values / state_action_pair_count
# Monte Carlo Sample with Off-Policy
def monte_carlo_off_policy(episodes):
    """Off-policy MC evaluation of the state (usable ace, sum 13, dealer 2).

    Episodes are generated by the random behavior policy; returns a pair of
    per-episode running estimates: (ordinary importance sampling, weighted
    importance sampling).
    """
    start_state = [True, 13, 2]
    ratios = []
    rewards = []
    for _ in range(episodes):
        _, reward, trajectory = play(behavior_policy_player, initial_state=start_state)
        # Importance ratio rho = prod(pi(a|s) / b(a|s)).  pi is deterministic
        # and b picks each action with probability 0.5, so rho is
        # 2 ** len(trajectory) when every action matches pi, else 0.
        matches_target = all(
            action == target_policy_player(usable_ace, player_sum, dealer_card)
            for (usable_ace, player_sum, dealer_card), action in trajectory
        )
        ratios.append(2.0 ** len(trajectory) if matches_target else 0.0)
        rewards.append(reward)
    cumulative_weighted = np.add.accumulate(np.asarray(ratios) * np.asarray(rewards))
    cumulative_ratios = np.add.accumulate(np.asarray(ratios))
    ordinary_sampling = cumulative_weighted / np.arange(1, episodes + 1)
    # Weighted estimator divides by the ratio sum; define 0/0 as 0.
    with np.errstate(divide='ignore', invalid='ignore'):
        weighted_sampling = np.where(cumulative_ratios != 0,
                                     cumulative_weighted / cumulative_ratios, 0)
    return ordinary_sampling, weighted_sampling
def figure_5_1():
    """Reproduce figure 5.1: state-value heatmaps of the target policy
    after 10,000 and 500,000 on-policy episodes, with and without a
    usable ace.  Saves the plot to ../images/figure_5_1.png.
    """
    states_usable_ace_1, states_no_usable_ace_1 = monte_carlo_on_policy(10000)
    states_usable_ace_2, states_no_usable_ace_2 = monte_carlo_on_policy(500000)

    states = [states_usable_ace_1,
              states_usable_ace_2,
              states_no_usable_ace_1,
              states_no_usable_ace_2]

    titles = ['Usable Ace, 10000 Episodes',
              'Usable Ace, 500000 Episodes',
              'No Usable Ace, 10000 Episodes',
              'No Usable Ace, 500000 Episodes']

    _, axes = plt.subplots(2, 2, figsize=(40, 30))
    plt.subplots_adjust(wspace=0.1, hspace=0.2)
    axes = axes.flatten()

    # One heatmap per (ace-status, episode-count) combination; flip rows so
    # the player sum increases upward on the y axis.
    for state, title, axis in zip(states, titles, axes):
        fig = sns.heatmap(np.flipud(state), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
                          yticklabels=list(reversed(range(12, 22))))
        fig.set_ylabel('player sum', fontsize=30)
        fig.set_xlabel('dealer showing', fontsize=30)
        fig.set_title(title, fontsize=30)

    plt.savefig('../images/figure_5_1.png')
    plt.close()
def figure_5_2():
    """Reproduce figure 5.2: optimal policy and optimal state values found
    by Monte Carlo ES, with and without a usable ace.  Saves the plot to
    ../images/figure_5_2.png.
    """
    state_action_values = monte_carlo_es(500000)

    # State value = max over actions of the estimated action value.
    state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)
    state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)

    # get the optimal policy
    action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)
    action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)

    images = [action_usable_ace,
              state_value_usable_ace,
              action_no_usable_ace,
              state_value_no_usable_ace]

    titles = ['Optimal policy with usable Ace',
              'Optimal value with usable Ace',
              'Optimal policy without usable Ace',
              'Optimal value without usable Ace']

    _, axes = plt.subplots(2, 2, figsize=(40, 30))
    plt.subplots_adjust(wspace=0.1, hspace=0.2)
    axes = axes.flatten()

    for image, title, axis in zip(images, titles, axes):
        fig = sns.heatmap(np.flipud(image), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
                          yticklabels=list(reversed(range(12, 22))))
        fig.set_ylabel('player sum', fontsize=30)
        fig.set_xlabel('dealer showing', fontsize=30)
        fig.set_title(title, fontsize=30)

    plt.savefig('../images/figure_5_2.png')
    plt.close()
def figure_5_3():
    """Reproduce figure 5.3: mean squared error of ordinary vs. weighted
    importance sampling, averaged over independent runs.  Saves the plot
    to ../images/figure_5_3.png.
    """
    # Reference value of the evaluated state, as given in the book.
    true_value = -0.27726
    episodes = 10000
    runs = 100
    error_ordinary = np.zeros(episodes)
    error_weighted = np.zeros(episodes)
    for i in tqdm(range(0, runs)):
        ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)
        # get the squared error
        error_ordinary += np.power(ordinary_sampling_ - true_value, 2)
        error_weighted += np.power(weighted_sampling_ - true_value, 2)
    error_ordinary /= runs
    error_weighted /= runs

    plt.plot(np.arange(1, episodes + 1), error_ordinary, color='green', label='Ordinary Importance Sampling')
    plt.plot(np.arange(1, episodes + 1), error_weighted, color='red', label='Weighted Importance Sampling')
    plt.ylim(-0.1, 5)
    plt.xlabel('Episodes (log scale)')
    plt.ylabel(f'Mean square error\n(average over {runs} runs)')
    plt.xscale('log')
    plt.legend()

    plt.savefig('../images/figure_5_3.png')
    plt.close()
if __name__ == '__main__':
    # Generate all three chapter-5 figures.
    figure_5_1()
    figure_5_2()
    figure_5_3()
| 36.318059 | 113 | 0.635149 | _sum -= 12
dealer_card -= 1
if usable_ace:
states_usable_ace_count[player_sum, dealer_card] += 1
states_usable_ace[player_sum, dealer_card] += reward
else:
states_no_usable_ace_count[player_sum, dealer_card] += 1
states_no_usable_ace[player_sum, dealer_card] += reward
return states_usable_ace / states_usable_ace_count, states_no_usable_ace / states_no_usable_ace_count
# Monte Carlo with Exploring Starts
def monte_carlo_es(episodes):
# (playerSum, dealerCard, usableAce, action)
state_action_values = np.zeros((10, 10, 2, 2))
# initialze counts to 1 to avoid division by 0
state_action_pair_count = np.ones((10, 10, 2, 2))
# behavior policy is greedy
def behavior_policy(usable_ace, player_sum, dealer_card):
usable_ace = int(usable_ace)
player_sum -= 12
dealer_card -= 1
# get argmax of the average returns(s, a)
values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \
state_action_pair_count[player_sum, dealer_card, usable_ace, :]
return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])
# play for several episodes
for episode in tqdm(range(episodes)):
# for each episode, use a randomly initialized state and action
initial_state = [bool(np.random.choice([0, 1])),
np.random.choice(range(12, 22)),
np.random.choice(range(1, 11))]
initial_action = np.random.choice(ACTIONS)
current_policy = behavior_policy if episode else target_policy_player
_, reward, trajectory = play(current_policy, initial_state, initial_action)
first_visit_check = set()
for (usable_ace, player_sum, dealer_card), action in trajectory:
usable_ace = int(usable_ace)
player_sum -= 12
dealer_card -= 1
state_action = (usable_ace, player_sum, dealer_card, action)
if state_action in first_visit_check:
continue
first_visit_check.add(state_action)
# update values of state-action pairs
state_action_values[player_sum, dealer_card, usable_ace, action] += reward
state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
return state_action_values / state_action_pair_count
# Monte Carlo Sample with Off-Policy
def monte_carlo_off_policy(episodes):
initial_state = [True, 13, 2]
rhos = []
returns = []
for i in range(0, episodes):
_, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)
# get the importance ratio
numerator = 1.0
denominator = 1.0
for (usable_ace, player_sum, dealer_card), action in player_trajectory:
if action == target_policy_player(usable_ace, player_sum, dealer_card):
denominator *= 0.5
else:
numerator = 0.0
break
rho = numerator / denominator
rhos.append(rho)
returns.append(reward)
rhos = np.asarray(rhos)
returns = np.asarray(returns)
weighted_returns = rhos * returns
weighted_returns = np.add.accumulate(weighted_returns)
rhos = np.add.accumulate(rhos)
ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)
with np.errstate(divide='ignore',invalid='ignore'):
weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)
return ordinary_sampling, weighted_sampling
def figure_5_1():
states_usable_ace_1, states_no_usable_ace_1 = monte_carlo_on_policy(10000)
states_usable_ace_2, states_no_usable_ace_2 = monte_carlo_on_policy(500000)
states = [states_usable_ace_1,
states_usable_ace_2,
states_no_usable_ace_1,
states_no_usable_ace_2]
titles = ['Usable Ace, 10000 Episodes',
'Usable Ace, 500000 Episodes',
'No Usable Ace, 10000 Episodes',
'No Usable Ace, 500000 Episodes']
_, axes = plt.subplots(2, 2, figsize=(40, 30))
plt.subplots_adjust(wspace=0.1, hspace=0.2)
axes = axes.flatten()
for state, title, axis in zip(states, titles, axes):
fig = sns.heatmap(np.flipud(state), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
yticklabels=list(reversed(range(12, 22))))
fig.set_ylabel('player sum', fontsize=30)
fig.set_xlabel('dealer showing', fontsize=30)
fig.set_title(title, fontsize=30)
plt.savefig('../images/figure_5_1.png')
plt.close()
def figure_5_2():
state_action_values = monte_carlo_es(500000)
state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)
state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)
# get the optimal policy
action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)
action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)
images = [action_usable_ace,
state_value_usable_ace,
action_no_usable_ace,
state_value_no_usable_ace]
titles = ['Optimal policy with usable Ace',
'Optimal value with usable Ace',
'Optimal policy without usable Ace',
'Optimal value without usable Ace']
_, axes = plt.subplots(2, 2, figsize=(40, 30))
plt.subplots_adjust(wspace=0.1, hspace=0.2)
axes = axes.flatten()
for image, title, axis in zip(images, titles, axes):
fig = sns.heatmap(np.flipud(image), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
yticklabels=list(reversed(range(12, 22))))
fig.set_ylabel('player sum', fontsize=30)
fig.set_xlabel('dealer showing', fontsize=30)
fig.set_title(title, fontsize=30)
plt.savefig('../images/figure_5_2.png')
plt.close()
def figure_5_3():
true_value = -0.27726
episodes = 10000
runs = 100
error_ordinary = np.zeros(episodes)
error_weighted = np.zeros(episodes)
for i in tqdm(range(0, runs)):
ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)
# get the squared error
error_ordinary += np.power(ordinary_sampling_ - true_value, 2)
error_weighted += np.power(weighted_sampling_ - true_value, 2)
error_ordinary /= runs
error_weighted /= runs
plt.plot(np.arange(1, episodes + 1), error_ordinary, color='green', label='Ordinary Importance Sampling')
plt.plot(np.arange(1, episodes + 1), error_weighted, color='red', label='Weighted Importance Sampling')
plt.ylim(-0.1, 5)
plt.xlabel('Episodes (log scale)')
plt.ylabel(f'Mean square error\n(average over {runs} runs)')
plt.xscale('log')
plt.legend()
plt.savefig('../images/figure_5_3.png')
plt.close()
if __name__ == '__main__':
figure_5_1()
figure_5_2()
figure_5_3()
| true | true |
f70f874944a8646dbb751573e99b3ab43f3e5dcf | 1,826 | py | Python | image_preprocessing.py | kpullak/PlantPhenotyping | a0b5bd68787b0850ad1d4d56cab7767cc4f2dc61 | [
"Apache-2.0"
] | null | null | null | image_preprocessing.py | kpullak/PlantPhenotyping | a0b5bd68787b0850ad1d4d56cab7767cc4f2dc61 | [
"Apache-2.0"
] | null | null | null | image_preprocessing.py | kpullak/PlantPhenotyping | a0b5bd68787b0850ad1d4d56cab7767cc4f2dc61 | [
"Apache-2.0"
] | null | null | null | import os
import cv2
source_path = './test_images/'
def processImage(filename, mImage):
    """Denoise and binarize one image, saving it as '<name>_processed.jpg'.

    Only filenames containing '2019' are processed; every other file is
    left untouched.
    """
    if '2019' in filename:
        # ----------------------------------
        # Remove noise - by applying guassian blur on src image
        mImage = cv2.GaussianBlur(mImage, (5, 5), cv2.BORDER_DEFAULT)
        # pink rgb values - 255, 153, 255
        # white rgb values - 255, 255, 255
        # ghost white values - 248, 248, 255
        # mImage = mImage[np.where((mImage == [255, 255, 255]).all(axis=2))] = [255, 153, 255]
        # working (best performing, descending) - gtruth 55 - 200 (58), 220 (86), 180 (33), 150 (0)
        # Threshold each channel at 128: bright pixels -> 200, dark -> 0.
        mImage[mImage >= 128] = 200
        mImage[mImage < 128] = 0
        # Disabled experiment: brighten via the HSV value channel.
        '''
        hsvImg = cv2.cvtColor(mImage,cv2.COLOR_BGR2HSV)
        value = 5 # changeable
        vValue = hsvImg[..., 2]
        hsvImg[..., 2] = np.where((255-vValue) < value, 255, vValue + value)
        '''
        # save the processed image with a new file name
        new_name = source_path + os.path.splitext(filename)[0] + '_processed.jpg'
        cv2.imwrite(new_name, mImage)
    else:
        pass
# Pass 1: run processImage on every readable image in the source directory.
for filename in os.listdir(source_path):
    if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
        # read the image
        img = cv2.imread(os.path.join(source_path, filename))
        if img is not None:
            processImage(filename, img)

# Pass 2: for each produced '<name>_processed' file, delete the original
# '<name>' image it was derived from.
for filename in os.listdir(source_path):
    if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
        if '_processed' in filename:
            to_remove = filename.replace('_processed', '')
            to_remove_file = os.path.join(source_path, to_remove)
            os.remove(to_remove_file)
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
if '_processed' in filename:
new_name = filename.replace('_processed', '')
os.rename(os.path.join(source_path, filename), os.path.join(source_path, new_name)) | 33.814815 | 93 | 0.646769 | import os
import cv2
source_path = './test_images/'
def processImage(filename, mImage):
if '2019' in filename:
mImage = cv2.GaussianBlur(mImage, (5, 5), cv2.BORDER_DEFAULT)
mImage[mImage >= 128] = 200
mImage[mImage < 128] = 0
new_name = source_path + os.path.splitext(filename)[0] + '_processed.jpg'
cv2.imwrite(new_name, mImage)
else:
pass
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
img = cv2.imread(os.path.join(source_path, filename))
if img is not None:
processImage(filename, img)
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
if '_processed' in filename:
to_remove = filename.replace('_processed', '')
to_remove_file = os.path.join(source_path, to_remove)
os.remove(to_remove_file)
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
if '_processed' in filename:
new_name = filename.replace('_processed', '')
os.rename(os.path.join(source_path, filename), os.path.join(source_path, new_name)) | true | true |
f70f87745a9dddf3fca3ea25b23a3a50d07f934b | 765 | py | Python | kur/engine/__init__.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 867 | 2016-12-05T20:24:23.000Z | 2022-02-18T09:07:14.000Z | kur/engine/__init__.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 90 | 2017-01-14T22:46:23.000Z | 2021-02-09T13:32:27.000Z | kur/engine/__init__.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 135 | 2017-01-18T19:21:20.000Z | 2022-01-24T16:57:59.000Z | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .engine import Engine, ScopeStack
from .passthrough_engine import PassthroughEngine
from .jinja_engine import JinjaEngine
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| 34.772727 | 79 | 0.789542 |
from .engine import Engine, ScopeStack
from .passthrough_engine import PassthroughEngine
from .jinja_engine import JinjaEngine
| true | true |
f70f8807470cc0430f7ec49eed3f61f2abc42cb2 | 14,684 | py | Python | sphinx_ext/design_choice.py | friendly-traceback/docs | 413a1d6980b605e2305d5b0ab5757f098a1700c1 | [
"CC0-1.0"
] | null | null | null | sphinx_ext/design_choice.py | friendly-traceback/docs | 413a1d6980b605e2305d5b0ab5757f098a1700c1 | [
"CC0-1.0"
] | 3 | 2021-07-17T17:19:47.000Z | 2022-02-01T13:39:12.000Z | sphinx_ext/design_choice.py | friendly-traceback/docs | 413a1d6980b605e2305d5b0ab5757f098a1700c1 | [
"CC0-1.0"
] | 1 | 2021-07-11T12:59:46.000Z | 2021-07-11T12:59:46.000Z | """
design_choice
~~~~~~~~~~~~~~
IMPORTANT: This is a straightforward adaptation of sphinx's todo extension
done by search/replace.
Allow design_choices to be inserted into your documentation.
Inclusion of design_choices can be switched off by a configuration variable.
The design_choice_list directive collects all design_choices of your
project and lists them along with a backlink to the original location.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from typing import Any, Dict, Iterable, List, Tuple, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx import addnodes
from sphinx.application import Sphinx
# from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.domains import Domain
from sphinx.environment import BuildEnvironment
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging, texescape
from sphinx.util.docutils import SphinxDirective, new_document
from sphinx.util.nodes import make_refnode
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.latex import LaTeXTranslator
logger = logging.getLogger(__name__)
class design_choice_node(nodes.Admonition, nodes.Element):
    """Docutils node representing a single design_choice admonition."""
    pass
class design_choice_list(nodes.General, nodes.Element):
    """Placeholder node later replaced by the collected design_choice entries."""
    pass
class DesignChoice(BaseAdmonition, SphinxDirective):
    """
    A design_choice entry, displayed (if configured) in the form of an admonition.

    Options:
        class:  CSS class(es) for the admonition; defaults to
                ``admonition-design_choice``.
        name:   standard docutils target name.
        title:  text appended after the "Design Choice: " heading (optional).
        prefix: text prepended before the heading (optional).
    """

    node_class = design_choice_node
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        "class": directives.class_option,
        "name": directives.unchanged,
        "title": directives.unchanged,
        "prefix": directives.unchanged,
    }

    def run(self) -> List[Node]:
        """Build the admonition node and register it as an explicit target."""
        if not self.options.get("class"):
            # Give the admonition a dedicated CSS class unless one was supplied.
            self.options["class"] = ["admonition-design_choice"]
        (design_choice,) = super().run()  # type: Tuple[Node]
        if isinstance(design_choice, nodes.system_message):
            # The base directive reported a parse problem; pass it through.
            return [design_choice]
        elif isinstance(design_choice, design_choice_node):
            prefix = ''
            if "prefix" in self.options:
                prefix = self.options["prefix"] + " "
            # Fix: ":title:" is optional in option_spec, so accessing
            # self.options["title"] directly raised KeyError when omitted.
            title_text = self.options.get("title", "")
            design_choice.insert(
                0, nodes.title(text=prefix + _("Design Choice: ") + title_text)
            )
            design_choice["docname"] = self.env.docname
            self.add_name(design_choice)
            self.set_source_info(design_choice)
            self.state.document.note_explicit_target(design_choice)
            return [design_choice]
        else:
            raise RuntimeError  # never reached here
class DesignChoiceDomain(Domain):
name = "design_choice"
label = "design_choice"
@property
def design_choices(self) -> Dict[str, List[design_choice_node]]:
return self.data.setdefault("design_choices", {})
def clear_doc(self, docname: str) -> None:
self.design_choices.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
for docname in docnames:
self.design_choices[docname] = otherdata["design_choices"][docname]
def process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
design_choices = self.design_choices.setdefault(docname, [])
for design_choice in document.traverse(design_choice_node):
env.app.emit("design_choice-defined", design_choice)
design_choices.append(design_choice)
if env.config.design_choice_emit_warnings:
logger.warning(
__("TODO entry found: %s"),
design_choice[1].astext(),
location=design_choice,
)
def process_design_choices(app: Sphinx, doctree: nodes.document) -> None:
# warnings.warn(
# "process_design_choices() is deprecated.",
# RemovedInSphinx40Warning,
# stacklevel=2,
# )
# collect all design_choices in the environment
# this is not done in the directive itself because it some transformations
# must have already been run, e.g. substitutions
env = app.builder.env
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = [] # type: ignore
for node in doctree.traverse(design_choice_node):
app.events.emit("design_choice-defined", node)
newnode = node.deepcopy()
newnode["ids"] = []
env.design_choice_all_design_choices.append(
{ # type: ignore
"docname": env.docname,
"source": node.source or env.doc2path(env.docname),
"lineno": node.line,
"design_choice": newnode,
"target": node["ids"][0],
}
)
if env.config.design_choice_emit_warnings:
label = cast(nodes.Element, node[1])
logger.warning(__("TODO entry found: %s"), label.astext(), location=node)
class DesignChoiceList(SphinxDirective):
"""
A list of all design_choice entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
def run(self) -> List[Node]:
# Simply insert an empty design_choice_list node which will be replaced later
# when process_design_choice_nodes is called
return [design_choice_list("")]
class DesignChoiceListProcessor:
def __init__(self, app: Sphinx, doctree: nodes.document, docname: str) -> None:
self.builder = app.builder
self.config = app.config
self.env = app.env
self.domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
self.document = new_document("")
self.process(doctree, docname)
def process(self, doctree: nodes.document, docname: str) -> None:
design_choices = sum(
self.domain.design_choices.values(), []
) # type: List[design_choice_node]
for node in doctree.traverse(design_choice_list):
if not self.config.design_choice_include_design_choices:
node.parent.remove(node)
continue
if node.get("ids"):
content = [nodes.target()] # type: List[Element]
else:
content = []
for design_choice in design_choices:
# Create a copy of the design_choice node
new_design_choice = design_choice.deepcopy()
new_design_choice["ids"].clear()
self.resolve_reference(new_design_choice, docname)
content.append(new_design_choice)
design_choice_ref = self.create_design_choice_reference(
design_choice, docname
)
content.append(design_choice_ref)
node.replace_self(content)
def create_design_choice_reference(
self, design_choice: design_choice_node, docname: str
) -> nodes.paragraph:
if self.config.design_choice_link_only:
description = _("<<original entry>>")
else:
description = _("(The <<original entry>> is located in %s, line %d.)") % (
design_choice.source,
design_choice.line,
)
prefix = description[: description.find("<<")]
suffix = description[description.find(">>") + 2 :]
para = nodes.paragraph(classes=["design_choice-source"])
para += nodes.Text(prefix, prefix)
# Create a reference
linktext = nodes.emphasis(_("original entry"), _("original entry"))
reference = nodes.reference("", "", linktext, internal=True)
try:
reference["refuri"] = self.builder.get_relative_uri(
docname, design_choice["docname"]
)
reference["refuri"] += "#" + design_choice["ids"][0]
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
para += reference
para += nodes.Text(suffix, suffix)
return para
def resolve_reference(
self, design_choice: design_choice_node, docname: str
) -> None:
"""Resolve references in the design_choice content."""
for node in design_choice.traverse(addnodes.pending_xref):
if "refdoc" in node:
node["refdoc"] = docname
# Note: To resolve references, it is needed to wrap it with document node
self.document += design_choice
self.env.resolve_references(self.document, docname, self.builder)
self.document.remove(design_choice)
def process_design_choice_nodes(
app: Sphinx, doctree: nodes.document, fromdocname: str
) -> None:
"""Replace all design_choice_list nodes with a list of the collected design_choices.
Augment each design_choice with a backlink to the original location.
"""
# warnings.warn(
# "process_design_choice_nodes() is deprecated.",
# RemovedInSphinx40Warning,
# stacklevel=2,
# )
domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
design_choices = sum(
domain.design_choices.values(), []
) # type: List[design_choice_node]
for node in doctree.traverse(design_choice_list):
if node.get("ids"):
content = [nodes.target()] # type: List[Element]
else:
content = []
if not app.config["design_choice_include_design_choices"]:
node.replace_self(content)
continue
for design_choice_info in design_choices:
para = nodes.paragraph(classes=["design_choice-source"])
if app.config["design_choice_link_only"]:
description = _("<<original entry>>")
else:
description = _(
"(The <<original entry>> is located in %s, line %d.)"
) % (design_choice_info.source, design_choice_info.line)
desc1 = description[: description.find("<<")]
desc2 = description[description.find(">>") + 2 :]
para += nodes.Text(desc1, desc1)
# Create a reference
innernode = nodes.emphasis(_("original entry"), _("original entry"))
try:
para += make_refnode(
app.builder,
fromdocname,
design_choice_info["docname"],
design_choice_info["ids"][0],
innernode,
)
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
para += nodes.Text(desc2, desc2)
design_choice_entry = design_choice_info.deepcopy()
design_choice_entry["ids"].clear()
# (Recursively) resolve references in the design_choice content
app.env.resolve_references(design_choice_entry, design_choice_info["docname"], app.builder) # type: ignore # NOQA
# Insert into the design_choice_list
content.append(design_choice_entry)
content.append(para)
node.replace_self(content)
def purge_design_choices(app: Sphinx, env: BuildEnvironment, docname: str) -> None:
# warnings.warn(
# "purge_design_choices() is deprecated.", RemovedInSphinx40Warning, stacklevel=2
# )
if not hasattr(env, "design_choice_all_design_choices"):
return
env.design_choice_all_design_choices = [
design_choice
for design_choice in env.design_choice_all_design_choices # type: ignore
if design_choice["docname"] != docname
]
def merge_info(
app: Sphinx, env: BuildEnvironment, docnames: Iterable[str], other: BuildEnvironment
) -> None:
# warnings.warn("merge_info() is deprecated.", RemovedInSphinx40Warning, stacklevel=2)
if not hasattr(other, "design_choice_all_design_choices"):
return
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = [] # type: ignore
env.design_choice_all_design_choices.extend(other.design_choice_all_design_choices) # type: ignore
def visit_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
if self.config.design_choice_include_design_choices:
self.visit_admonition(node)
else:
raise nodes.SkipNode
def depart_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
self.depart_admonition(node)
def latex_visit_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
if self.config.design_choice_include_design_choices:
self.body.append("\n\\begin{sphinxadmonition}{note}{")
self.body.append(self.hypertarget_to(node))
title_node = cast(nodes.title, node[0])
title = texescape.escape(title_node.astext(), self.config.latex_engine)
self.body.append("%s:}" % title)
node.pop(0)
else:
raise nodes.SkipNode
def latex_depart_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
self.body.append("\\end{sphinxadmonition}\n")
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_event("design_choice-defined")
app.add_config_value("design_choice_include_design_choices", False, "html")
app.add_config_value("design_choice_link_only", False, "html")
app.add_config_value("design_choice_emit_warnings", False, "html")
app.add_node(design_choice_list)
app.add_node(
design_choice_node,
html=(visit_design_choice_node, depart_design_choice_node),
latex=(latex_visit_design_choice_node, latex_depart_design_choice_node),
text=(visit_design_choice_node, depart_design_choice_node),
man=(visit_design_choice_node, depart_design_choice_node),
texinfo=(visit_design_choice_node, depart_design_choice_node),
)
app.add_directive("design_choice", DesignChoice)
app.add_directive("design_choice_list", DesignChoiceList)
app.add_domain(DesignChoiceDomain)
app.connect("doctree-resolved", DesignChoiceListProcessor)
return {
"version": sphinx.__display_version__,
"env_version": 2,
"parallel_read_safe": True,
}
| 36.078624 | 127 | 0.649346 |
import warnings
from typing import Any, Dict, Iterable, List, Tuple, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.environment import BuildEnvironment
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging, texescape
from sphinx.util.docutils import SphinxDirective, new_document
from sphinx.util.nodes import make_refnode
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.latex import LaTeXTranslator
logger = logging.getLogger(__name__)
class design_choice_node(nodes.Admonition, nodes.Element):
pass
class design_choice_list(nodes.General, nodes.Element):
pass
class DesignChoice(BaseAdmonition, SphinxDirective):
node_class = design_choice_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
"class": directives.class_option,
"name": directives.unchanged,
"title": directives.unchanged,
"prefix": directives.unchanged,
}
def run(self) -> List[Node]:
if not self.options.get("class"):
self.options["class"] = ["admonition-design_choice"]
(design_choice,) = super().run()
if isinstance(design_choice, nodes.system_message):
return [design_choice]
elif isinstance(design_choice, design_choice_node):
prefix = ''
if "prefix" in self.options:
prefix = self.options["prefix"] + " "
design_choice.insert(
0, nodes.title(text=prefix + _("Design Choice: ") + self.options["title"])
)
design_choice["docname"] = self.env.docname
self.add_name(design_choice)
self.set_source_info(design_choice)
self.state.document.note_explicit_target(design_choice)
return [design_choice]
else:
raise RuntimeError
class DesignChoiceDomain(Domain):
name = "design_choice"
label = "design_choice"
@property
def design_choices(self) -> Dict[str, List[design_choice_node]]:
return self.data.setdefault("design_choices", {})
def clear_doc(self, docname: str) -> None:
self.design_choices.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
for docname in docnames:
self.design_choices[docname] = otherdata["design_choices"][docname]
def process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
design_choices = self.design_choices.setdefault(docname, [])
for design_choice in document.traverse(design_choice_node):
env.app.emit("design_choice-defined", design_choice)
design_choices.append(design_choice)
if env.config.design_choice_emit_warnings:
logger.warning(
__("TODO entry found: %s"),
design_choice[1].astext(),
location=design_choice,
)
def process_design_choices(app: Sphinx, doctree: nodes.document) -> None:
env = app.builder.env
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = []
for node in doctree.traverse(design_choice_node):
app.events.emit("design_choice-defined", node)
newnode = node.deepcopy()
newnode["ids"] = []
env.design_choice_all_design_choices.append(
{
"docname": env.docname,
"source": node.source or env.doc2path(env.docname),
"lineno": node.line,
"design_choice": newnode,
"target": node["ids"][0],
}
)
if env.config.design_choice_emit_warnings:
label = cast(nodes.Element, node[1])
logger.warning(__("TODO entry found: %s"), label.astext(), location=node)
class DesignChoiceList(SphinxDirective):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self) -> List[Node]:
return [design_choice_list("")]
class DesignChoiceListProcessor:
def __init__(self, app: Sphinx, doctree: nodes.document, docname: str) -> None:
self.builder = app.builder
self.config = app.config
self.env = app.env
self.domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
self.document = new_document("")
self.process(doctree, docname)
def process(self, doctree: nodes.document, docname: str) -> None:
design_choices = sum(
self.domain.design_choices.values(), []
)
for node in doctree.traverse(design_choice_list):
if not self.config.design_choice_include_design_choices:
node.parent.remove(node)
continue
if node.get("ids"):
content = [nodes.target()]
else:
content = []
for design_choice in design_choices:
new_design_choice = design_choice.deepcopy()
new_design_choice["ids"].clear()
self.resolve_reference(new_design_choice, docname)
content.append(new_design_choice)
design_choice_ref = self.create_design_choice_reference(
design_choice, docname
)
content.append(design_choice_ref)
node.replace_self(content)
def create_design_choice_reference(
self, design_choice: design_choice_node, docname: str
) -> nodes.paragraph:
if self.config.design_choice_link_only:
description = _("<<original entry>>")
else:
description = _("(The <<original entry>> is located in %s, line %d.)") % (
design_choice.source,
design_choice.line,
)
prefix = description[: description.find("<<")]
suffix = description[description.find(">>") + 2 :]
para = nodes.paragraph(classes=["design_choice-source"])
para += nodes.Text(prefix, prefix)
linktext = nodes.emphasis(_("original entry"), _("original entry"))
reference = nodes.reference("", "", linktext, internal=True)
try:
reference["refuri"] = self.builder.get_relative_uri(
docname, design_choice["docname"]
)
reference["refuri"] += "#" + design_choice["ids"][0]
except NoUri:
pass
para += reference
para += nodes.Text(suffix, suffix)
return para
def resolve_reference(
self, design_choice: design_choice_node, docname: str
) -> None:
for node in design_choice.traverse(addnodes.pending_xref):
if "refdoc" in node:
node["refdoc"] = docname
self.document += design_choice
self.env.resolve_references(self.document, docname, self.builder)
self.document.remove(design_choice)
def process_design_choice_nodes(
app: Sphinx, doctree: nodes.document, fromdocname: str
) -> None:
domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
design_choices = sum(
domain.design_choices.values(), []
)
for node in doctree.traverse(design_choice_list):
if node.get("ids"):
content = [nodes.target()]
else:
content = []
if not app.config["design_choice_include_design_choices"]:
node.replace_self(content)
continue
for design_choice_info in design_choices:
para = nodes.paragraph(classes=["design_choice-source"])
if app.config["design_choice_link_only"]:
description = _("<<original entry>>")
else:
description = _(
"(The <<original entry>> is located in %s, line %d.)"
) % (design_choice_info.source, design_choice_info.line)
desc1 = description[: description.find("<<")]
desc2 = description[description.find(">>") + 2 :]
para += nodes.Text(desc1, desc1)
innernode = nodes.emphasis(_("original entry"), _("original entry"))
try:
para += make_refnode(
app.builder,
fromdocname,
design_choice_info["docname"],
design_choice_info["ids"][0],
innernode,
)
except NoUri:
pass
para += nodes.Text(desc2, desc2)
design_choice_entry = design_choice_info.deepcopy()
design_choice_entry["ids"].clear()
app.env.resolve_references(design_choice_entry, design_choice_info["docname"], app.builder)
content.append(design_choice_entry)
content.append(para)
node.replace_self(content)
def purge_design_choices(app: Sphinx, env: BuildEnvironment, docname: str) -> None:
if not hasattr(env, "design_choice_all_design_choices"):
return
env.design_choice_all_design_choices = [
design_choice
for design_choice in env.design_choice_all_design_choices
if design_choice["docname"] != docname
]
def merge_info(
app: Sphinx, env: BuildEnvironment, docnames: Iterable[str], other: BuildEnvironment
) -> None:
if not hasattr(other, "design_choice_all_design_choices"):
return
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = []
env.design_choice_all_design_choices.extend(other.design_choice_all_design_choices)
def visit_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
if self.config.design_choice_include_design_choices:
self.visit_admonition(node)
else:
raise nodes.SkipNode
def depart_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
self.depart_admonition(node)
def latex_visit_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
if self.config.design_choice_include_design_choices:
self.body.append("\n\\begin{sphinxadmonition}{note}{")
self.body.append(self.hypertarget_to(node))
title_node = cast(nodes.title, node[0])
title = texescape.escape(title_node.astext(), self.config.latex_engine)
self.body.append("%s:}" % title)
node.pop(0)
else:
raise nodes.SkipNode
def latex_depart_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
self.body.append("\\end{sphinxadmonition}\n")
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_event("design_choice-defined")
app.add_config_value("design_choice_include_design_choices", False, "html")
app.add_config_value("design_choice_link_only", False, "html")
app.add_config_value("design_choice_emit_warnings", False, "html")
app.add_node(design_choice_list)
app.add_node(
design_choice_node,
html=(visit_design_choice_node, depart_design_choice_node),
latex=(latex_visit_design_choice_node, latex_depart_design_choice_node),
text=(visit_design_choice_node, depart_design_choice_node),
man=(visit_design_choice_node, depart_design_choice_node),
texinfo=(visit_design_choice_node, depart_design_choice_node),
)
app.add_directive("design_choice", DesignChoice)
app.add_directive("design_choice_list", DesignChoiceList)
app.add_domain(DesignChoiceDomain)
app.connect("doctree-resolved", DesignChoiceListProcessor)
return {
"version": sphinx.__display_version__,
"env_version": 2,
"parallel_read_safe": True,
}
| true | true |
f70f8831567b456197e476ae0d16b0a4367f7af6 | 4,769 | py | Python | src/alias/azext_alias/_validators.py | PoisonousJohn/azure-cli-extensions | cf0d7b6c031ba844dd5e43cc4e07533b85ef1269 | [
"MIT"
] | 1 | 2018-09-22T14:53:04.000Z | 2018-09-22T14:53:04.000Z | src/alias/azext_alias/_validators.py | PoisonousJohn/azure-cli-extensions | cf0d7b6c031ba844dd5e43cc4e07533b85ef1269 | [
"MIT"
] | null | null | null | src/alias/azext_alias/_validators.py | PoisonousJohn/azure-cli-extensions | cf0d7b6c031ba844dd5e43cc4e07533b85ef1269 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import shlex
from knack.util import CLIError
import azext_alias
from azext_alias.argument import get_placeholders
from azext_alias._const import (
COLLISION_CHECK_LEVEL_DEPTH,
INVALID_ALIAS_COMMAND_ERROR,
EMPTY_ALIAS_ERROR,
INVALID_STARTING_CHAR_ERROR,
INCONSISTENT_ARG_ERROR,
COMMAND_LVL_ERROR
)
from azext_alias.alias import AliasManager
def process_alias_create_namespace(namespace):
"""
Validate input arguments when the user invokes 'az alias create'.
Args:
namespace: argparse namespace object.
"""
_validate_alias_name(namespace.alias_name)
_validate_alias_command(namespace.alias_command)
_validate_alias_command_level(namespace.alias_name, namespace.alias_command)
_validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
def _validate_alias_name(alias_name):
"""
Check if the alias name is valid.
Args:
alias_name: The name of the alias to validate.
"""
if not alias_name:
raise CLIError(EMPTY_ALIAS_ERROR)
if not re.match('^[a-zA-Z]', alias_name):
raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))
def _validate_alias_command(alias_command):
"""
Check if the alias command is valid.
Args:
alias_command: The command to validate.
"""
if not alias_command:
raise CLIError(EMPTY_ALIAS_ERROR)
# Boundary index is the index at which named argument or positional argument starts
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for i, subcommand in enumerate(split_command):
if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
boundary_index = i
break
# Extract possible CLI commands and validate
command_to_validate = ' '.join(split_command[:boundary_index]).lower()
for command in azext_alias.cached_reserved_commands:
if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(command_to_validate if command_to_validate else alias_command))
def _validate_pos_args_syntax(alias_name, alias_command):
"""
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
"""
pos_args_from_alias = get_placeholders(alias_name)
# Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})
# Split by '.' to extract positional argument name from function call (e.g. {{ arg_name.split()[0] }})
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are'))
def _validate_alias_command_level(alias, command):
"""
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
"""
alias_collision_table = AliasManager.build_collision_table([alias], azext_alias.cached_reserved_commands)
# Alias is not a reserved command, so it can point to any command
if not alias_collision_table:
return
command_collision_table = AliasManager.build_collision_table([command], azext_alias.cached_reserved_commands)
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
# Check if there is a command level conflict
if set(alias_collision_levels) & set(command_collision_levels):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
| 38.772358 | 117 | 0.681275 |
import re
import shlex
from knack.util import CLIError
import azext_alias
from azext_alias.argument import get_placeholders
from azext_alias._const import (
COLLISION_CHECK_LEVEL_DEPTH,
INVALID_ALIAS_COMMAND_ERROR,
EMPTY_ALIAS_ERROR,
INVALID_STARTING_CHAR_ERROR,
INCONSISTENT_ARG_ERROR,
COMMAND_LVL_ERROR
)
from azext_alias.alias import AliasManager
def process_alias_create_namespace(namespace):
_validate_alias_name(namespace.alias_name)
_validate_alias_command(namespace.alias_command)
_validate_alias_command_level(namespace.alias_name, namespace.alias_command)
_validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
def _validate_alias_name(alias_name):
if not alias_name:
raise CLIError(EMPTY_ALIAS_ERROR)
if not re.match('^[a-zA-Z]', alias_name):
raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))
def _validate_alias_command(alias_command):
if not alias_command:
raise CLIError(EMPTY_ALIAS_ERROR)
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for i, subcommand in enumerate(split_command):
if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
boundary_index = i
break
command_to_validate = ' '.join(split_command[:boundary_index]).lower()
for command in azext_alias.cached_reserved_commands:
if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(command_to_validate if command_to_validate else alias_command))
def _validate_pos_args_syntax(alias_name, alias_command):
pos_args_from_alias = get_placeholders(alias_name)
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are'))
def _validate_alias_command_level(alias, command):
alias_collision_table = AliasManager.build_collision_table([alias], azext_alias.cached_reserved_commands)
if not alias_collision_table:
return
command_collision_table = AliasManager.build_collision_table([command], azext_alias.cached_reserved_commands)
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
if set(alias_collision_levels) & set(command_collision_levels):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
| true | true |
f70f8a2f41f8fdb18d9b0a96c1d23e7abc0f59f7 | 1,752 | py | Python | python/lvmieb/exceptions.py | sdss/OsuActor | bf3d92448e07cefc4c1346db04b1eb9b7e00dd41 | [
"BSD-3-Clause"
] | 2 | 2021-07-30T04:38:30.000Z | 2021-08-13T13:34:04.000Z | python/lvmieb/exceptions.py | sdss/OsuActor | bf3d92448e07cefc4c1346db04b1eb9b7e00dd41 | [
"BSD-3-Clause"
] | 4 | 2021-06-03T12:01:00.000Z | 2021-08-14T09:34:12.000Z | python/lvmieb/exceptions.py | sdss/osuactor | bf3d92448e07cefc4c1346db04b1eb9b7e00dd41 | [
"BSD-3-Clause"
] | 2 | 2021-05-04T06:19:39.000Z | 2021-05-11T08:35:02.000Z | # -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Changgon Kim, Mingeyong Yang, Taeeun Kim
# @Date: 2021-04-26 17:14
# @Last modified by: Changgon Kim
from __future__ import absolute_import, division, print_function
class LvmIebError(Exception):
"""A custom core LvmIeb exception"""
def __init__(self, message=None):
message = "There has been an error" if not message else message
super(LvmIebError, self).__init__(message)
class LvmIebNotImplemented(LvmIebError):
"""A custom exception for not yet implemented features."""
def __init__(self, message=None):
message = "This feature is not implemented yet." if not message else message
super(LvmIebNotImplemented, self).__init__(message)
class LvmIebAPIError(LvmIebError):
"""A custom exception for API errors"""
def __init__(self, message=None):
if not message:
message = "Error with Http Response from LvmIeb API"
else:
message = "Http response error from LvmIeb API. {0}".format(message)
super(LvmIebAPIError, self).__init__(message)
class LvmIebApiAuthError(LvmIebAPIError):
"""A custom exception for API authentication errors"""
pass
class LvmIebMissingDependency(LvmIebError):
"""A custom exception for missing dependencies."""
pass
class LvmIebWarning(Warning):
"""Base warning for LvmIeb."""
class LvmIebUserWarning(UserWarning, LvmIebWarning):
"""The primary warning class."""
pass
class LvmIebSkippedTestWarning(LvmIebUserWarning):
"""A warning for when a test is skipped."""
pass
class LvmIebDeprecationWarning(LvmIebUserWarning):
"""A warning for deprecated features."""
pass
| 23.052632 | 84 | 0.695205 |
from __future__ import absolute_import, division, print_function
class LvmIebError(Exception):
def __init__(self, message=None):
message = "There has been an error" if not message else message
super(LvmIebError, self).__init__(message)
class LvmIebNotImplemented(LvmIebError):
def __init__(self, message=None):
message = "This feature is not implemented yet." if not message else message
super(LvmIebNotImplemented, self).__init__(message)
class LvmIebAPIError(LvmIebError):
def __init__(self, message=None):
if not message:
message = "Error with Http Response from LvmIeb API"
else:
message = "Http response error from LvmIeb API. {0}".format(message)
super(LvmIebAPIError, self).__init__(message)
class LvmIebApiAuthError(LvmIebAPIError):
pass
class LvmIebMissingDependency(LvmIebError):
pass
class LvmIebWarning(Warning):
class LvmIebUserWarning(UserWarning, LvmIebWarning):
pass
class LvmIebSkippedTestWarning(LvmIebUserWarning):
pass
class LvmIebDeprecationWarning(LvmIebUserWarning):
pass
| true | true |
f70f8a6fdb098ef333f9170579450717e8ff2c68 | 12,261 | py | Python | common/manager.py | hxwork/OMNet | be88a734e7327def365e1875bbc7cd2fea1539b0 | [
"MIT"
] | null | null | null | common/manager.py | hxwork/OMNet | be88a734e7327def365e1875bbc7cd2fea1539b0 | [
"MIT"
] | null | null | null | common/manager.py | hxwork/OMNet | be88a734e7327def365e1875bbc7cd2fea1539b0 | [
"MIT"
] | 1 | 2021-11-14T12:56:40.000Z | 2021-11-14T12:56:40.000Z | import os
from collections import defaultdict
import numpy as np
import torch
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from common import utils
class Manager():
    """Bookkeeper for a training run.

    Tracks epoch/step counters, keeps running loss/metric averages, writes
    TensorBoard summaries, and saves/restores checkpoints. The configured
    major metric is treated as lower-is-better throughout.
    """

    def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):
        # Collaborators and experiment configuration.
        self.params = params
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.dataloaders = dataloaders
        self.logger = logger
        # Progress counters.
        self.epoch = 0
        self.step = 0
        # Scores are lower-is-better, so "best" starts at +infinity.
        self.best_val_score = np.inf
        self.cur_val_score = np.inf
        self.best_test_score = np.inf
        self.cur_test_score = np.inf
        # Running averages keyed by loss/metric name.
        self.train_status = defaultdict(utils.AverageMeter)
        self.val_status = defaultdict(utils.AverageMeter)
        self.test_status = defaultdict(utils.AverageMeter)
        self.loss_status = defaultdict(utils.AverageMeter)
        # Local tensorboard writers and html output directory.
        self.init_tb_and_html()

    def init_tb_and_html(self):
        """Create the TensorBoard loss/metric writers and the html summary dir."""
        local_tb_dir = os.path.join(self.params.model_dir, "summary/loss")
        os.makedirs(local_tb_dir, exist_ok=True)
        self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)
        local_tb_dir = os.path.join(self.params.model_dir, "summary/metric")
        os.makedirs(local_tb_dir, exist_ok=True)
        self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)
        local_html_dir = os.path.join(self.params.model_dir, "summary/html")
        os.makedirs(local_html_dir, exist_ok=True)
        self.local_html_dir = local_html_dir

    def update_step(self):
        """Advance the global step counter by one."""
        self.step += 1

    def update_epoch(self):
        """Advance the epoch counter by one."""
        self.epoch += 1

    def update_loss_status(self, loss, batch_size):
        """Fold a batch's loss dict (name -> scalar tensor) into the averages."""
        for k, v in loss.items():
            self.loss_status[k].update(val=v.item(), num=batch_size)

    def update_metric_status(self, metrics, split, batch_size):
        """Fold a batch's metric dict into the averages for *split*.

        Also refreshes the current score for the configured major metric.
        Raises ValueError for a split other than 'val' or 'test'.
        """
        if split == "val":
            for k, v in metrics.items():
                self.val_status[k].update(val=v.item(), num=batch_size)
            self.cur_val_score = self.val_status[self.params.major_metric].avg
        elif split == "test":
            for k, v in metrics.items():
                self.test_status[k].update(val=v.item(), num=batch_size)
            self.cur_test_score = self.test_status[self.params.major_metric].avg
        else:
            raise ValueError("Wrong eval type: {}".format(split))

    def summarize_metric_status(self, metrics, split):
        """Derive a '<prefix>RMSE' entry for every metric named '<prefix>MSE'.

        NOTE(review): a metric key that already ends in 'RMSE' would also
        match the 'MSE' suffix test — assumed not to occur in practice.
        """
        if split == "val":
            for k in metrics:
                if k.endswith('MSE'):
                    self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))
        elif split == "test":
            for k in metrics:
                if k.endswith('MSE'):
                    self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))
        else:
            raise ValueError("Wrong eval type: {}".format(split))

    def reset_loss_status(self):
        """Zero out all running loss averages."""
        for meter in self.loss_status.values():
            meter.reset()

    def reset_metric_status(self, split):
        """Zero out the running metric averages for *split* ('val' or 'test')."""
        if split == "val":
            meters = self.val_status
        elif split == "test":
            meters = self.test_status
        else:
            raise ValueError("Wrong split string: {}".format(split))
        for meter in meters.values():
            meter.reset()

    def print_train_info(self):
        """Return a one-line progress string: experiment, epoch, lr, total loss."""
        exp_name = self.params.model_dir.split('/')[-1]
        print_str = "{} Epoch: {:4d}, lr={:.4f} ".format(exp_name, self.epoch, self.scheduler.get_last_lr()[0])
        print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
        return print_str

    def print_metrics(self, split, title="Eval", color="red", only_best=False):
        """Log current metric averages for *split*.

        With only_best=True the line is emitted only when the current major
        metric improves on the best seen so far.
        """
        if split == "val":
            metric_status = self.val_status
            is_best = self.cur_val_score < self.best_val_score
        elif split == "test":
            metric_status = self.test_status
            is_best = self.cur_test_score < self.best_test_score
        else:
            raise ValueError("Wrong split string: {}".format(split))
        print_str = " | ".join("{}: {:4g}".format(k, v.avg) for k, v in metric_status.items())
        if only_best:
            if is_best:
                self.logger.info(colored("Best Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
        else:
            self.logger.info(colored("Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))

    def write_loss_to_tb(self, split):
        """Write the latest per-batch loss values to TensorBoard.

        Split validation is hoisted before the loop so a bad split name is
        rejected even when there are no losses recorded yet.
        """
        if split not in ("train", "val", "test"):
            raise ValueError("Wrong split string: {}".format(split))
        for k, v in self.loss_status.items():
            # Tag format matches the original per-split tags, e.g. "val_Loss/total".
            self.local_loss_writter.add_scalar("{}_Loss/{}".format(split, k), v.val, self.step)

    def write_metric_to_tb(self, split):
        """Write the epoch-average metric values for *split* to TensorBoard."""
        if split == "val":
            for k, v in self.val_status.items():
                self.local_metric_writter.add_scalar("val_Metric/{}".format(k), v.avg, self.epoch)
        elif split == "test":
            for k, v in self.test_status.items():
                self.local_metric_writter.add_scalar("test_Metric/{}".format(k), v.avg, self.epoch)
        else:
            raise ValueError("Wrong split string: {}".format(split))

    def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):
        """Persist training state.

        Saves a rolling "latest" checkpoint every *save_latest_freq* epochs,
        always dumps the latest val/test metrics to json, and when the major
        metric improves records the best metrics (and, after epoch
        *save_best_after*, the best checkpoint too).
        """
        state = {
            "state_dict": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "step": self.step,
            "epoch": self.epoch,
        }
        if self.dataloaders["val"] is not None:
            state["best_val_score"] = self.best_val_score
        if self.dataloaders["test"] is not None:
            state["best_test_score"] = self.best_test_score
        # Rolling "latest" checkpoint.
        if self.epoch % save_latest_freq == 0:
            latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
            torch.save(state, latest_ckpt_name)
            self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
        # Per-split latest/best bookkeeping (val and test share the logic).
        if self.dataloaders["val"] is not None:
            self._save_split_metrics(state, "val", save_best_after)
        if self.dataloaders["test"] is not None:
            self._save_split_metrics(state, "test", save_best_after)

    def _save_split_metrics(self, state, split, save_best_after):
        """Dump latest metrics for *split*; on improvement record best metrics
        and (after *save_best_after* epochs) the best checkpoint."""
        status = self.val_status if split == "val" else self.test_status
        cur_score = self.cur_val_score if split == "val" else self.cur_test_score
        best_attr = "best_{}_score".format(split)
        latest_name = os.path.join(self.params.model_dir, "{}_metrics_latest.json".format(split))
        utils.save_dict_to_json(status, latest_name)
        if cur_score < getattr(self, best_attr):
            setattr(self, best_attr, cur_score)
            best_metrics_name = os.path.join(self.params.model_dir, "{}_metrics_best.json".format(split))
            utils.save_dict_to_json(status, best_metrics_name)
            self.logger.info("Current is {} best, score={:.7f}".format(split, getattr(self, best_attr)))
            if self.epoch > save_best_after:
                best_ckpt_name = os.path.join(self.params.model_dir, "{}_model_best.pth".format(split))
                torch.save(state, best_ckpt_name)
                self.logger.info("Saved {} best checkpoint to: {}".format(split, best_ckpt_name))

    def load_checkpoints(self):
        """Restore model/optimizer/scheduler/counters from params.restore_file.

        Falls back to a partial, name-matched load when the stored dicts do
        not line up exactly (e.g. DataParallel 'module.' prefixes). With
        params.only_weights set, only the network weights are restored.
        """
        state = torch.load(self.params.restore_file)
        ckpt_component = []
        if "state_dict" in state and self.model is not None:
            try:
                self.model.load_state_dict(state["state_dict"])
            except RuntimeError:
                print("Using custom loading net")
                net_dict = self.model.state_dict()
                # Reconcile DataParallel naming: add the 'module.' prefix when
                # the checkpoint lacks it, otherwise match names as-is.
                if "module" not in list(state["state_dict"].keys())[0]:
                    state_dict = {"module." + k: v for k, v in state["state_dict"].items() if "module." + k in net_dict.keys()}
                else:
                    state_dict = {k: v for k, v in state["state_dict"].items() if k in net_dict.keys()}
                net_dict.update(state_dict)
                self.model.load_state_dict(net_dict, strict=False)
            ckpt_component.append("net")
        if not self.params.only_weights:
            if "optimizer" in state and self.optimizer is not None:
                try:
                    self.optimizer.load_state_dict(state["optimizer"])
                except RuntimeError:
                    print("Using custom loading optimizer")
                    optimizer_dict = self.optimizer.state_dict()
                    state_dict = {k: v for k, v in state["optimizer"].items() if k in optimizer_dict.keys()}
                    optimizer_dict.update(state_dict)
                    self.optimizer.load_state_dict(optimizer_dict)
                ckpt_component.append("opt")
            # BUGFIX: this previously tested `self.train_status["scheduler"]`,
            # which is a defaultdict lookup — it always succeeds (never None)
            # and silently inserts a bogus "scheduler" meter into train_status.
            # The intent is clearly to check the scheduler object itself.
            if "scheduler" in state and self.scheduler is not None:
                try:
                    self.scheduler.load_state_dict(state["scheduler"])
                except RuntimeError:
                    print("Using custom loading scheduler")
                    scheduler_dict = self.scheduler.state_dict()
                    state_dict = {k: v for k, v in state["scheduler"].items() if k in scheduler_dict.keys()}
                    scheduler_dict.update(state_dict)
                    self.scheduler.load_state_dict(scheduler_dict)
                ckpt_component.append("sch")
            if "step" in state:
                self.step = state["step"] + 1
                ckpt_component.append("step")
            if "epoch" in state:
                self.epoch = state["epoch"] + 1
                ckpt_component.append("epoch")
            if "best_val_score" in state:
                self.best_val_score = state["best_val_score"]
                ckpt_component.append("best val score: {:.3g}".format(self.best_val_score))
            if "best_test_score" in state:
                self.best_test_score = state["best_test_score"]
                ckpt_component.append("best test score: {:.3g}".format(self.best_test_score))
        ckpt_component = ", ".join(i for i in ckpt_component)
        self.logger.info("Loaded models from: {}".format(self.params.restore_file))
        self.logger.info("Ckpt load: {}".format(ckpt_component))
| 45.077206 | 136 | 0.581437 | import os
from collections import defaultdict
import numpy as np
import torch
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from common import utils
class Manager():
    """Bookkeeper for a training run.

    Tracks epoch/step counters, keeps running loss/metric averages, writes
    TensorBoard summaries, and saves/restores checkpoints. The configured
    major metric is treated as lower-is-better throughout.
    """

    def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):
        # Collaborators and experiment configuration.
        self.params = params
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.dataloaders = dataloaders
        self.logger = logger
        # Progress counters.
        self.epoch = 0
        self.step = 0
        # Scores are lower-is-better, so "best" starts at +infinity.
        self.best_val_score = np.inf
        self.cur_val_score = np.inf
        self.best_test_score = np.inf
        self.cur_test_score = np.inf
        # Running averages keyed by loss/metric name.
        self.train_status = defaultdict(utils.AverageMeter)
        self.val_status = defaultdict(utils.AverageMeter)
        self.test_status = defaultdict(utils.AverageMeter)
        self.loss_status = defaultdict(utils.AverageMeter)
        # Local tensorboard writers and html output directory.
        self.init_tb_and_html()

    def init_tb_and_html(self):
        """Create the TensorBoard loss/metric writers and the html summary dir."""
        local_tb_dir = os.path.join(self.params.model_dir, "summary/loss")
        os.makedirs(local_tb_dir, exist_ok=True)
        self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)
        local_tb_dir = os.path.join(self.params.model_dir, "summary/metric")
        os.makedirs(local_tb_dir, exist_ok=True)
        self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)
        local_html_dir = os.path.join(self.params.model_dir, "summary/html")
        os.makedirs(local_html_dir, exist_ok=True)
        self.local_html_dir = local_html_dir

    def update_step(self):
        """Advance the global step counter by one."""
        self.step += 1

    def update_epoch(self):
        """Advance the epoch counter by one."""
        self.epoch += 1

    def update_loss_status(self, loss, batch_size):
        """Fold a batch's loss dict (name -> scalar tensor) into the averages."""
        for k, v in loss.items():
            self.loss_status[k].update(val=v.item(), num=batch_size)

    def update_metric_status(self, metrics, split, batch_size):
        """Fold a batch's metric dict into the averages for *split*.

        Also refreshes the current score for the configured major metric.
        Raises ValueError for a split other than 'val' or 'test'.
        """
        if split == "val":
            for k, v in metrics.items():
                self.val_status[k].update(val=v.item(), num=batch_size)
            self.cur_val_score = self.val_status[self.params.major_metric].avg
        elif split == "test":
            for k, v in metrics.items():
                self.test_status[k].update(val=v.item(), num=batch_size)
            self.cur_test_score = self.test_status[self.params.major_metric].avg
        else:
            raise ValueError("Wrong eval type: {}".format(split))

    def summarize_metric_status(self, metrics, split):
        """Derive a '<prefix>RMSE' entry for every metric named '<prefix>MSE'.

        NOTE(review): a metric key that already ends in 'RMSE' would also
        match the 'MSE' suffix test — assumed not to occur in practice.
        """
        if split == "val":
            for k in metrics:
                if k.endswith('MSE'):
                    self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))
        elif split == "test":
            for k in metrics:
                if k.endswith('MSE'):
                    self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))
        else:
            raise ValueError("Wrong eval type: {}".format(split))

    def reset_loss_status(self):
        """Zero out all running loss averages."""
        for meter in self.loss_status.values():
            meter.reset()

    def reset_metric_status(self, split):
        """Zero out the running metric averages for *split* ('val' or 'test')."""
        if split == "val":
            meters = self.val_status
        elif split == "test":
            meters = self.test_status
        else:
            raise ValueError("Wrong split string: {}".format(split))
        for meter in meters.values():
            meter.reset()

    def print_train_info(self):
        """Return a one-line progress string: experiment, epoch, lr, total loss."""
        exp_name = self.params.model_dir.split('/')[-1]
        print_str = "{} Epoch: {:4d}, lr={:.4f} ".format(exp_name, self.epoch, self.scheduler.get_last_lr()[0])
        print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
        return print_str

    def print_metrics(self, split, title="Eval", color="red", only_best=False):
        """Log current metric averages for *split*.

        With only_best=True the line is emitted only when the current major
        metric improves on the best seen so far.
        """
        if split == "val":
            metric_status = self.val_status
            is_best = self.cur_val_score < self.best_val_score
        elif split == "test":
            metric_status = self.test_status
            is_best = self.cur_test_score < self.best_test_score
        else:
            raise ValueError("Wrong split string: {}".format(split))
        print_str = " | ".join("{}: {:4g}".format(k, v.avg) for k, v in metric_status.items())
        if only_best:
            if is_best:
                self.logger.info(colored("Best Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
        else:
            self.logger.info(colored("Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))

    def write_loss_to_tb(self, split):
        """Write the latest per-batch loss values to TensorBoard.

        Split validation is hoisted before the loop so a bad split name is
        rejected even when there are no losses recorded yet.
        """
        if split not in ("train", "val", "test"):
            raise ValueError("Wrong split string: {}".format(split))
        for k, v in self.loss_status.items():
            # Tag format matches the original per-split tags, e.g. "val_Loss/total".
            self.local_loss_writter.add_scalar("{}_Loss/{}".format(split, k), v.val, self.step)

    def write_metric_to_tb(self, split):
        """Write the epoch-average metric values for *split* to TensorBoard."""
        if split == "val":
            for k, v in self.val_status.items():
                self.local_metric_writter.add_scalar("val_Metric/{}".format(k), v.avg, self.epoch)
        elif split == "test":
            for k, v in self.test_status.items():
                self.local_metric_writter.add_scalar("test_Metric/{}".format(k), v.avg, self.epoch)
        else:
            raise ValueError("Wrong split string: {}".format(split))

    def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):
        """Persist training state.

        Saves a rolling "latest" checkpoint every *save_latest_freq* epochs,
        always dumps the latest val/test metrics to json, and when the major
        metric improves records the best metrics (and, after epoch
        *save_best_after*, the best checkpoint too).
        """
        state = {
            "state_dict": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "step": self.step,
            "epoch": self.epoch,
        }
        if self.dataloaders["val"] is not None:
            state["best_val_score"] = self.best_val_score
        if self.dataloaders["test"] is not None:
            state["best_test_score"] = self.best_test_score
        # Rolling "latest" checkpoint.
        if self.epoch % save_latest_freq == 0:
            latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
            torch.save(state, latest_ckpt_name)
            self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
        # Per-split latest/best bookkeeping (val and test share the logic).
        if self.dataloaders["val"] is not None:
            self._save_split_metrics(state, "val", save_best_after)
        if self.dataloaders["test"] is not None:
            self._save_split_metrics(state, "test", save_best_after)

    def _save_split_metrics(self, state, split, save_best_after):
        """Dump latest metrics for *split*; on improvement record best metrics
        and (after *save_best_after* epochs) the best checkpoint."""
        status = self.val_status if split == "val" else self.test_status
        cur_score = self.cur_val_score if split == "val" else self.cur_test_score
        best_attr = "best_{}_score".format(split)
        latest_name = os.path.join(self.params.model_dir, "{}_metrics_latest.json".format(split))
        utils.save_dict_to_json(status, latest_name)
        if cur_score < getattr(self, best_attr):
            setattr(self, best_attr, cur_score)
            best_metrics_name = os.path.join(self.params.model_dir, "{}_metrics_best.json".format(split))
            utils.save_dict_to_json(status, best_metrics_name)
            self.logger.info("Current is {} best, score={:.7f}".format(split, getattr(self, best_attr)))
            if self.epoch > save_best_after:
                best_ckpt_name = os.path.join(self.params.model_dir, "{}_model_best.pth".format(split))
                torch.save(state, best_ckpt_name)
                self.logger.info("Saved {} best checkpoint to: {}".format(split, best_ckpt_name))

    def load_checkpoints(self):
        """Restore model/optimizer/scheduler/counters from params.restore_file.

        Falls back to a partial, name-matched load when the stored dicts do
        not line up exactly (e.g. DataParallel 'module.' prefixes). With
        params.only_weights set, only the network weights are restored.
        """
        state = torch.load(self.params.restore_file)
        ckpt_component = []
        if "state_dict" in state and self.model is not None:
            try:
                self.model.load_state_dict(state["state_dict"])
            except RuntimeError:
                print("Using custom loading net")
                net_dict = self.model.state_dict()
                # Reconcile DataParallel naming: add the 'module.' prefix when
                # the checkpoint lacks it, otherwise match names as-is.
                if "module" not in list(state["state_dict"].keys())[0]:
                    state_dict = {"module." + k: v for k, v in state["state_dict"].items() if "module." + k in net_dict.keys()}
                else:
                    state_dict = {k: v for k, v in state["state_dict"].items() if k in net_dict.keys()}
                net_dict.update(state_dict)
                self.model.load_state_dict(net_dict, strict=False)
            ckpt_component.append("net")
        if not self.params.only_weights:
            if "optimizer" in state and self.optimizer is not None:
                try:
                    self.optimizer.load_state_dict(state["optimizer"])
                except RuntimeError:
                    print("Using custom loading optimizer")
                    optimizer_dict = self.optimizer.state_dict()
                    state_dict = {k: v for k, v in state["optimizer"].items() if k in optimizer_dict.keys()}
                    optimizer_dict.update(state_dict)
                    self.optimizer.load_state_dict(optimizer_dict)
                ckpt_component.append("opt")
            # BUGFIX: this previously tested `self.train_status["scheduler"]`,
            # which is a defaultdict lookup — it always succeeds (never None)
            # and silently inserts a bogus "scheduler" meter into train_status.
            # The intent is clearly to check the scheduler object itself.
            if "scheduler" in state and self.scheduler is not None:
                try:
                    self.scheduler.load_state_dict(state["scheduler"])
                except RuntimeError:
                    print("Using custom loading scheduler")
                    scheduler_dict = self.scheduler.state_dict()
                    state_dict = {k: v for k, v in state["scheduler"].items() if k in scheduler_dict.keys()}
                    scheduler_dict.update(state_dict)
                    self.scheduler.load_state_dict(scheduler_dict)
                ckpt_component.append("sch")
            if "step" in state:
                self.step = state["step"] + 1
                ckpt_component.append("step")
            if "epoch" in state:
                self.epoch = state["epoch"] + 1
                ckpt_component.append("epoch")
            if "best_val_score" in state:
                self.best_val_score = state["best_val_score"]
                ckpt_component.append("best val score: {:.3g}".format(self.best_val_score))
            if "best_test_score" in state:
                self.best_test_score = state["best_test_score"]
                ckpt_component.append("best test score: {:.3g}".format(self.best_test_score))
        ckpt_component = ", ".join(i for i in ckpt_component)
        self.logger.info("Loaded models from: {}".format(self.params.restore_file))
        self.logger.info("Ckpt load: {}".format(ckpt_component))
| true | true |
f70f8c67ca5c8f4c4bc0844a6821081bdf4ce0d1 | 33,589 | py | Python | Cogs/Strike.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | Cogs/Strike.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | Cogs/Strike.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | import asyncio
import discord
import time
import parsedatetime
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import ReadableTime
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
    """Register the Strike cog, wiring in the Settings and Mute cogs it needs."""
    bot.add_cog(Strike(bot, bot.get_cog("Settings"), bot.get_cog("Mute")))
# This is the Strike module. It keeps track of warnings and kicks/bans accordingly
# Strikes = [ time until drops off ]
# StrikeOut = 3 (3 strikes and you're out)
# StrikeLevel (a list similar to xproles)
# Standard strike roles:
# 0 = Not been punished already
# 1 = Muted for x amount of time
# 2 = Already been kicked (id in kick list)
# 3 = Already been banned (auto-mute)
class Strike(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings, mute):
self.bot = bot
self.settings = settings
self.mute = mute
self.loop_list = []
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def onjoin(self, member, server):
# Check id against the kick and ban list and react accordingly
kickList = self.settings.getServerStat(server, "KickList")
if str(member.id) in kickList:
# The user has been kicked before - set their strikeLevel to 2
self.settings.setUserStat(member, server, "StrikeLevel", 2)
banList = self.settings.getServerStat(server, "BanList")
if str(member.id) in banList:
# The user has been kicked before - set their strikeLevel to 3
# Also mute them
self.settings.setUserStat(member, server, "StrikeLevel", 3)
self.settings.setUserStat(member, server, "Muted", True)
self.settings.setUserStat(member, server, "Cooldown", None)
await self.mute._mute(member, server)
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
for task in self.loop_list:
task.cancel()
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.bot.loop.create_task(self.start_loading())
    async def start_loading(self):
        # Wait until the bot is fully connected, then run the (potentially
        # slow) strike sweep in a worker thread so it doesn't block the
        # event loop.
        await self.bot.wait_until_ready()
        await self.bot.loop.run_in_executor(None, self.check_strikes)
def check_strikes(self):
# Check all strikes - and start timers
print("Checking strikes...")
t = time.time()
for server in self.bot.guilds:
for member in server.members:
strikes = self.settings.getUserStat(member, server, "Strikes")
if strikes == None:
continue
if len(strikes):
# We have a list
for strike in strikes:
# Make sure it's a strike that *can* roll off
if not strike['Time'] == -1:
self.loop_list.append(self.bot.loop.create_task(
self.checkStrike(member, strike)))
print("Strikes checked - took {} seconds.".format(time.time() - t))
async def checkStrike(self, member, strike):
# Start our countdown
countDown = int(strike['Time'])-int(time.time())
if countDown > 0:
# We have a positive countdown - let's wait
await asyncio.sleep(countDown)
strikes = self.settings.getUserStat(member, member.guild, "Strikes")
# Verify strike is still valid
if not strike in strikes:
return
strikes.remove(strike)
self.settings.setUserStat(member, member.guild, "Strikes", strikes)
    @commands.command(pass_context=True)
    async def strike(self, ctx, member: discord.Member = None, days=None, *, message: str = None):
        """Give a user a strike (bot-admin only)."""
        # Admin gate: a full administrator passes outright; otherwise look
        # for one of the server's configured bot-admin roles.
        isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
            for role in ctx.message.author.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        # Only allow admins to hand out strikes.
        if not isAdmin:
            await ctx.channel.send('You do not have sufficient privileges to access this command.')
            return
        if member == None:
            msg = 'Usage: `{}strike [member] [strike timeout (in days) - 0 = forever] [message (optional)]`'.format(ctx.prefix)
            await ctx.channel.send(msg)
            return
        # Refuse self-strikes.
        if member.id == ctx.message.author.id:
            await ctx.channel.send('You can\'t give yourself a strike, silly.')
            return
        # Refuse strikes against the bot itself.
        if member.id == self.bot.user.id:
            await ctx.channel.send('I can\'t do that, *{}*.'.format(DisplayName.name(ctx.message.author)))
            return
        # Refuse strikes against other admins/bot-admins.
        isAdmin = member.permissions_in(ctx.message.channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
            for role in member.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        if isAdmin:
            await ctx.channel.send('You can\'t give other admins/bot-admins strikes, bub.')
            return
        # `days` is a free-form arg: if it isn't an int, treat it as the
        # start of the message instead.
        try:
            days = int(days)
        except Exception:
            if not days == None:
                if message == None:
                    message = days
                else:
                    message = days + ' ' + message
            days = 0
        # Anything under a day means the strike never rolls off.
        if days < 1:
            days = -1
        currentTime = int(time.time())
        # Build the strike record; only timed strikes get an expiration task.
        strike = {}
        if days == -1:
            strike['Time'] = -1
        else:
            strike['Time'] = currentTime+(86400*days)
            self.loop_list.append(self.bot.loop.create_task(self.checkStrike(member, strike)))
        strike['Message'] = message
        strike['GivenBy'] = ctx.message.author.id
        strikes = self.settings.getUserStat(member, ctx.message.guild, "Strikes")
        strikeout = int(self.settings.getServerStat(ctx.message.guild, "StrikeOut"))
        strikeLevel = int(self.settings.getUserStat(member, ctx.message.guild, "StrikeLevel"))
        strikes.append(strike)
        self.settings.setUserStat(member, ctx.message.guild, "Strikes", strikes)
        strikeNum = len(strikes)
        # Escalating consequence depends on how many times the user has
        # already struck out (their strike level).
        if strikeLevel == 0:
            consequence = '**muted for a day**.'
        elif strikeLevel == 1:
            consequence = '**kicked**.'
        else:
            consequence = '**banned**.'
        if strikeNum < strikeout:
            # Not struck out yet - just warn.
            msg = '*{}* has just received *strike {}*. *{}* more and they will be {}'.format(DisplayName.name(member), strikeNum, strikeout-strikeNum, consequence)
        else:
            # Struck out - apply the consequence for the current level.
            if strikeLevel == 0:
                # Level 0: 24-hour mute.
                cooldownFinal = currentTime+86400
                # NOTE(review): checkRead is computed but never used.
                checkRead = ReadableTime.getReadableTimeBetween(currentTime, cooldownFinal)
                if message:
                    mutemessage = 'You have been muted in *{}*.\nThe Reason:\n{}'.format(Nullify.escape_all(ctx.guild.name), message)
                else:
                    mutemessage = 'You have been muted in *{}*.'.format(Nullify.escape_all(ctx.guild.name))
                # If already muted, extend the mute rather than re-muting.
                alreadyMuted = self.settings.getUserStat(member, ctx.message.guild, "Muted")
                if alreadyMuted:
                    muteTime = self.settings.getUserStat(member, ctx.message.guild, "Cooldown")
                    if not muteTime == None:
                        if muteTime < cooldownFinal:
                            self.settings.setUserStat(member, ctx.message.guild, "Cooldown", cooldownFinal)
                            timeRemains = ReadableTime.getReadableTimeBetween(currentTime, cooldownFinal)
                            if message:
                                mutemessage = 'Your muted time in *{}* has been extended to *{}*.\nThe Reason:\n{}'.format(Nullify.escape_all(ctx.guild.name), timeRemains, message)
                            else:
                                mutemessage = 'You muted time in *{}* has been extended to *{}*.'.format(Nullify.escape_all(ctx.guild.name), timeRemains)
                else:
                    self.settings.setUserStat(member, ctx.message.guild, "Muted", True)
                    self.settings.setUserStat(member, ctx.message.guild, "Cooldown", cooldownFinal)
                    await self.mute._mute(member, ctx.message.guild, cooldownFinal)
                await member.send(mutemessage)
            elif strikeLevel == 1:
                # Level 1: kick, and remember the kick for future rejoins.
                kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
                if not str(member.id) in kickList:
                    kickList.append(str(member.id))
                    self.settings.setServerStat(ctx.message.guild, "KickList", kickList)
                if message:
                    kickmessage = 'You have been kicked from *{}*.\nThe Reason:\n{}'.format(Nullify.escape_all(ctx.guild.name), message)
                else:
                    kickmessage = 'You have been kicked from *{}*.'.format(Nullify.escape_all(ctx.guild.name))
                await member.send(kickmessage)
                await ctx.guild.kick(member)
            else:
                # Level 2+: ban, and remember the ban for future rejoins.
                banList = self.settings.getServerStat(ctx.message.guild, "BanList")
                if not str(member.id) in banList:
                    banList.append(str(member.id))
                    self.settings.setServerStat(ctx.message.guild, "BanList", banList)
                if message:
                    banmessage = 'You have been banned from *{}*.\nThe Reason:\n{}'.format(Nullify.escape_all(ctx.guild.name), message)
                else:
                    banmessage = 'You have been banned from *{}*.'.format(Nullify.escape_all(ctx.guild.name))
                await member.send(banmessage)
                await ctx.guild.ban(member)
            # Escalate the level and clear the strike count for next time.
            self.settings.incrementStat(member, ctx.message.guild, "StrikeLevel", 1)
            self.settings.setUserStat(member, ctx.message.guild, "Strikes", [])
            msg = '*{}* has just received *strike {}*. They have been {}'.format(DisplayName.name(member), strikeNum, consequence)
        await ctx.channel.send(msg)
@strike.error
async def strike_error(self, ctx, error):
# do stuff
msg = 'strike Error: {}'.format(error)
await ctx.channel.send(msg)
    @commands.command(pass_context=True)
    async def strikes(self, ctx, *, member=None):
        """Check a your own, or another user's total strikes (bot-admin needed to check other users)."""
        # Determine whether the caller is an admin/bot-admin.
        isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
            for role in ctx.message.author.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        # No target given -> inspect the caller.
        if member == None:
            member = ctx.message.author
        # Resolve a name string to a Member.
        if type(member) is str:
            memberName = member
            member = DisplayName.memberForName(memberName, ctx.message.guild)
            if not member:
                msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
                await ctx.channel.send(msg)
                return
        # Non-admins may only view their own strikes.
        if not isAdmin:
            if member:
                if not member.id == ctx.message.author.id:
                    await ctx.channel.send('You are not a bot-admin. You can only see your own strikes.')
                    member = ctx.message.author
        # Build the report embed.
        stat_embed = discord.Embed(color=member.color)
        strikes = self.settings.getUserStat(member, ctx.message.guild, "Strikes")
        strikeout = int(self.settings.getServerStat(ctx.message.guild, "StrikeOut"))
        strikeLevel = int(self.settings.getUserStat(member, ctx.message.guild, "StrikeLevel"))
        stat_embed.add_field(name="Strikes", value=len(strikes), inline=True)
        stat_embed.add_field(name="Strike Level", value=strikeLevel, inline=True)
        # Fall back to the default avatar when the member has none set.
        avURL = member.avatar_url
        if not len(avURL):
            avURL = member.default_avatar_url
        if member.nick:
            # Member has a nickname - show both names.
            msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(member.name, member.nick)
            stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
        else:
            msg = "__***{}:***__\n\n".format(member.name)
            stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
        # Enumerate each strike's message, remaining time and issuer.
        currentTime = int(time.time())
        if not len(strikes):
            messages = "None."
            cooldowns = "None."
            givenBy = "None."
        else:
            messages = ''
            cooldowns = ''
            givenBy = ''
            for i in range(0, len(strikes)):
                if strikes[i]['Message']:
                    messages += '{}. {}\n'.format(i+1, strikes[i]['Message'])
                else:
                    messages += '{}. No message\n'.format(i+1)
                timeLeft = strikes[i]['Time']
                if timeLeft == -1:
                    # Permanent strike.
                    cooldowns += '{}. Never rolls off\n'.format(i+1)
                else:
                    timeRemains = ReadableTime.getReadableTimeBetween(currentTime, timeLeft)
                    cooldowns += '{}. {}\n'.format(i+1, timeRemains)
                given = strikes[i]['GivenBy']
                givenBy += '{}. {}\n'.format(i+1, DisplayName.name(DisplayName.memberForID(given, ctx.message.guild)))
        stat_embed.add_field(name="Messages", value=messages, inline=True)
        stat_embed.add_field(name="Time Left", value=cooldowns, inline=True)
        stat_embed.add_field(name="Given By", value=givenBy, inline=True)
        # How many more strikes before the user strikes out.
        stat_embed.add_field(name="Strikes Remaining", value=strikeout-len(strikes), inline=True)
        await ctx.channel.send(embed=stat_embed)
    @commands.command(pass_context=True)
    async def removestrike(self, ctx, *, member=None):
        """Removes a strike given to a member (bot-admin only)."""
        # Admin gate: administrator permission or a configured bot-admin role.
        isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
            for role in ctx.message.author.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        if not isAdmin:
            await ctx.channel.send('You do not have sufficient privileges to access this command.')
            return
        if member == None:
            msg = 'Usage: `{}removestrike [member]`'.format(ctx.prefix)
            await ctx.channel.send(msg)
            return
        # Resolve a name string to a Member.
        if type(member) is str:
            memberName = member
            member = DisplayName.memberForName(memberName, ctx.message.guild)
            if not member:
                msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
                await ctx.channel.send(msg)
                return
        strikes = self.settings.getUserStat(member, ctx.message.guild, "Strikes")
        if not len(strikes):
            await ctx.channel.send('*{}* has no strikes to remove.'.format(DisplayName.name(member)))
            return
        # Sort by expiry; permanent strikes (Time == -1) sort first. Prefer
        # removing the earliest-expiring *timed* strike.
        strikes = sorted(strikes, key=lambda x: int(x['Time']))
        for strike in strikes:
            if not strike['Time'] == -1:
                # First non-permanent strike found - remove it.
                # (Mutating while iterating is safe here only because we
                # return immediately afterwards.)
                strikes.remove(strike)
                self.settings.setUserStat(member, ctx.message.guild, "Strikes", strikes)
                await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
                return
        # Only permanent strikes remain - drop the first one.
        del strikes[0]
        self.settings.setUserStat(member, ctx.message.guild, "Strikes", strikes)
        await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
        return
    @commands.command(pass_context=True)
    async def setstrikelevel(self, ctx, *, member=None, strikelevel: int = None):
        """Sets the strike level of the passed user (bot-admin only).

        NOTE(review): with the consume-rest `*` marker, discord.py feeds the
        whole argument string into `member`; `strikelevel` is never filled by
        the parser and is instead split back out of the string via
        DisplayName.checkNameForInt below — presumably a deliberate CorpBot
        pattern, confirm before changing the signature.
        """
        # Admin gate: administrator permission or a configured bot-admin role.
        isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
            for role in ctx.message.author.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        if not isAdmin:
            await ctx.channel.send('You do not have sufficient privileges to access this command.')
            return
        author = ctx.message.author
        server = ctx.message.guild
        channel = ctx.message.channel
        usage = 'Usage: `{}setstrikelevel [member] [strikelevel]`'.format(ctx.prefix)
        if member == None:
            await ctx.channel.send(usage)
            return
        if strikelevel == None:
            # Strike level wasn't parsed separately - try to split a trailing
            # int off the member string.
            if type(member) is str:
                nameCheck = DisplayName.checkNameForInt(member, server)
                if not nameCheck:
                    await ctx.channel.send(usage)
                    return
                if not nameCheck["Member"]:
                    msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
                    await ctx.channel.send(msg)
                    return
                member = nameCheck["Member"]
                strikelevel = nameCheck["Int"]
        if strikelevel == None:
            # Still no strike level after parsing - bail with usage.
            await ctx.channel.send(usage)
            return
        self.settings.setUserStat(member, ctx.message.guild, "StrikeLevel", strikelevel)
        msg = '*{}\'s* strike level has been set to *{}!*'.format(DisplayName.name(member), strikelevel)
        await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addkick(self, ctx, *, member=None):
"""Adds the passed user to the kick list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addkick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if not str(member.id) in kickList:
kickList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was added to the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removekick(self, ctx, *, member=None):
"""Removes the passed user from the kick list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removekick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
kickList.remove(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was removed from the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addban(self, ctx, *, member=None):
"""Adds the passed user to the ban list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if not str(member.id) in banList:
banList.append(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was added to the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removeban(self, ctx, *, member=None):
"""Removes the passed user from the ban list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removeban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
banList.remove(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was removed from the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def iskicked(self, ctx, *, member=None):
"""Lists whether the user is in the kick list."""
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
msg = '*{}* is in the kick list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def isbanned(self, ctx, *, member=None):
"""Lists whether the user is in the ban list."""
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
msg = '*{}* is in the ban list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def strikelimit(self, ctx):
"""Lists the number of strikes before advancing to the next consequence."""
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
msg = '*{}* strikes are required to strike out.'.format(strikeout)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setstrikelimit(self, ctx, limit=None):
"""Sets the number of strikes before advancing to the next consequence (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if not limit:
await ctx.channel.send('Strike limit must be *at least* one.')
return
try:
limit = int(limit)
except Exception:
await ctx.channel.send('Strike limit must be an integer.')
return
self.settings.setServerStat(ctx.message.guild, "StrikeOut", limit)
msg = '*{}* strikes are now required to strike out.'.format(limit)
await ctx.channel.send(msg)
@setstrikelimit.error
async def setstrikelimit_error(self, ctx, error):
# do stuff
msg = 'setstrikelimit Error: {}'.format(ctx)
await error.channel.send(msg)
| 42.46397 | 139 | 0.541397 | import asyncio
import discord
import time
import parsedatetime
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import ReadableTime
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
settings = bot.get_cog("Settings")
mute = bot.get_cog("Mute")
bot.add_cog(Strike(bot, settings, mute))
# StrikeLevel (a list similar to xproles)
# Standard strike roles:
# 0 = Not been punished already
# 1 = Muted for x amount of time
# 2 = Already been kicked (id in kick list)
# 3 = Already been banned (auto-mute)
class Strike(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings, mute):
self.bot = bot
self.settings = settings
self.mute = mute
self.loop_list = []
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def onjoin(self, member, server):
# Check id against the kick and ban list and react accordingly
kickList = self.settings.getServerStat(server, "KickList")
if str(member.id) in kickList:
# The user has been kicked before - set their strikeLevel to 2
self.settings.setUserStat(member, server, "StrikeLevel", 2)
banList = self.settings.getServerStat(server, "BanList")
if str(member.id) in banList:
# The user has been kicked before - set their strikeLevel to 3
# Also mute them
self.settings.setUserStat(member, server, "StrikeLevel", 3)
self.settings.setUserStat(member, server, "Muted", True)
self.settings.setUserStat(member, server, "Cooldown", None)
await self.mute._mute(member, server)
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
for task in self.loop_list:
task.cancel()
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.bot.loop.create_task(self.start_loading())
async def start_loading(self):
await self.bot.wait_until_ready()
await self.bot.loop.run_in_executor(None, self.check_strikes)
def check_strikes(self):
# Check all strikes - and start timers
print("Checking strikes...")
t = time.time()
for server in self.bot.guilds:
for member in server.members:
strikes = self.settings.getUserStat(member, server, "Strikes")
if strikes == None:
continue
if len(strikes):
# We have a list
for strike in strikes:
# Make sure it's a strike that *can* roll off
if not strike['Time'] == -1:
self.loop_list.append(self.bot.loop.create_task(
self.checkStrike(member, strike)))
print("Strikes checked - took {} seconds.".format(time.time() - t))
async def checkStrike(self, member, strike):
countDown = int(strike['Time'])-int(time.time())
if countDown > 0:
await asyncio.sleep(countDown)
strikes = self.settings.getUserStat(member, member.guild, "Strikes")
# Verify strike is still valid
if not strike in strikes:
return
strikes.remove(strike)
self.settings.setUserStat(member, member.guild, "Strikes", strikes)
@commands.command(pass_context=True)
async def strike(self, ctx, member: discord.Member = None, days=None, *, message: str = None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}strike [member] [strike timeout (in days) - 0 = forever] [message (optional)]`'.format(
ctx.prefix)
await ctx.channel.send(msg)
return
# Check if we're striking ourselves
if member.id == ctx.message.author.id:
await ctx.channel.send('You can\'t give yourself a strike, silly.')
return
if member.id == self.bot.user.id:
await ctx.channel.send('I can\'t do that, *{}*.'.format(DisplayName.name(ctx.message.author)))
return
# Check if we're striking another admin/bot-admin
isAdmin = member.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in member.roles:
for aRole in checkAdmin:
if str(aRole['ID']) == str(role.id):
isAdmin = True
if isAdmin:
await ctx.channel.send('You can\'t give other admins/bot-admins strikes, bub.')
return
# Check if days is an int - otherwise assume it's part of the message
try:
days = int(days)
except Exception:
if not days == None:
if message == None:
message = days
else:
message = days + ' ' + message
days = 0
if days < 1:
days = -1
currentTime = int(time.time())
strike = {}
if days == -1:
strike['Time'] = -1
else:
strike['Time'] = currentTime+(86400*days)
self.loop_list.append(self.bot.loop.create_task(
self.checkStrike(member, strike)))
strike['Message'] = message
strike['GivenBy'] = ctx.message.author.id
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
strikeLevel = int(self.settings.getUserStat(
member, ctx.message.guild, "StrikeLevel"))
strikes.append(strike)
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
strikeNum = len(strikes)
if strikeLevel == 0:
consequence = '**muted for a day**.'
elif strikeLevel == 1:
consequence = '**kicked**.'
else:
consequence = '**banned**.'
if strikeNum < strikeout:
# We haven't struck out yet
msg = '*{}* has just received *strike {}*. *{}* more and they will be {}'.format(
DisplayName.name(member), strikeNum, strikeout-strikeNum, consequence)
else:
if strikeLevel == 0:
cooldownFinal = currentTime+86400
checkRead = ReadableTime.getReadableTimeBetween(
currentTime, cooldownFinal)
if message:
mutemessage = 'You have been muted in *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
mutemessage = 'You have been muted in *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
# Check if already muted
alreadyMuted = self.settings.getUserStat(
member, ctx.message.guild, "Muted")
if alreadyMuted:
# Find out for how long
muteTime = self.settings.getUserStat(
member, ctx.message.guild, "Cooldown")
if not muteTime == None:
if muteTime < cooldownFinal:
self.settings.setUserStat(
member, ctx.message.guild, "Cooldown", cooldownFinal)
timeRemains = ReadableTime.getReadableTimeBetween(
currentTime, cooldownFinal)
if message:
mutemessage = 'Your muted time in *{}* has been extended to *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), timeRemains, message)
else:
mutemessage = 'You muted time in *{}* has been extended to *{}*.'.format(
Nullify.escape_all(ctx.guild.name), timeRemains)
else:
self.settings.setUserStat(
member, ctx.message.guild, "Muted", True)
self.settings.setUserStat(
member, ctx.message.guild, "Cooldown", cooldownFinal)
await self.mute._mute(member, ctx.message.guild, cooldownFinal)
await member.send(mutemessage)
elif strikeLevel == 1:
kickList = self.settings.getServerStat(
ctx.message.guild, "KickList")
if not str(member.id) in kickList:
kickList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
if message:
kickmessage = 'You have been kicked from *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
kickmessage = 'You have been kicked from *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
await member.send(kickmessage)
await ctx.guild.kick(member)
else:
banList = self.settings.getServerStat(
ctx.message.guild, "BanList")
if not str(member.id) in banList:
banList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "BanList", banList)
if message:
banmessage = 'You have been banned from *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
banmessage = 'You have been banned from *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
await member.send(banmessage)
await ctx.guild.ban(member)
self.settings.incrementStat(
member, ctx.message.guild, "StrikeLevel", 1)
self.settings.setUserStat(member, ctx.message.guild, "Strikes", [])
msg = '*{}* has just received *strike {}*. They have been {}'.format(
DisplayName.name(member), strikeNum, consequence)
await ctx.channel.send(msg)
@strike.error
async def strike_error(self, ctx, error):
# do stuff
msg = 'strike Error: {}'.format(error)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def strikes(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
if not isAdmin:
if member:
if not member.id == ctx.message.author.id:
await ctx.channel.send('You are not a bot-admin. You can only see your own strikes.')
member = ctx.message.author
# Create blank embed
stat_embed = discord.Embed(color=member.color)
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
strikeLevel = int(self.settings.getUserStat(
member, ctx.message.guild, "StrikeLevel"))
# Add strikes, and strike level
stat_embed.add_field(name="Strikes", value=len(strikes), inline=True)
stat_embed.add_field(name="Strike Level",
value=strikeLevel, inline=True)
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(
member.name, member.nick)
stat_embed.set_author(name='{}, who currently goes by {}'.format(
member.name, member.nick), icon_url=avURL)
else:
msg = "__***{}:***__\n\n".format(member.name)
stat_embed.set_author(name='{}'.format(
member.name), icon_url=avURL)
currentTime = int(time.time())
if not len(strikes):
messages = "None."
cooldowns = "None."
givenBy = "None."
else:
messages = ''
cooldowns = ''
givenBy = ''
for i in range(0, len(strikes)):
if strikes[i]['Message']:
messages += '{}. {}\n'.format(i+1, strikes[i]['Message'])
else:
messages += '{}. No message\n'.format(i+1)
timeLeft = strikes[i]['Time']
if timeLeft == -1:
cooldowns += '{}. Never rolls off\n'.format(i+1)
else:
timeRemains = ReadableTime.getReadableTimeBetween(
currentTime, timeLeft)
cooldowns += '{}. {}\n'.format(i+1, timeRemains)
given = strikes[i]['GivenBy']
givenBy += '{}. {}\n'.format(i+1, DisplayName.name(
DisplayName.memberForID(given, ctx.message.guild)))
stat_embed.add_field(name="Messages", value=messages, inline=True)
stat_embed.add_field(name="Time Left", value=cooldowns, inline=True)
stat_embed.add_field(name="Given By", value=givenBy, inline=True)
stat_embed.add_field(name="Strikes Remaining",
value=strikeout-len(strikes), inline=True)
await ctx.channel.send(embed=stat_embed)
@commands.command(pass_context=True)
async def removestrike(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
if str(aRole['ID']) == str(role.id):
isAdmin = True
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removestrike [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
# We have what we need - get the list
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
# Return if no strikes to take
if not len(strikes):
await ctx.channel.send('*{}* has no strikes to remove.'.format(DisplayName.name(member)))
return
# We have some - naughty naughty!
strikes = sorted(strikes, key=lambda x: int(x['Time']))
for strike in strikes:
# Check if we've got one that's not -1
if not strike['Time'] == -1:
# First item that isn't forever - kill it
strikes.remove(strike)
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
return
del strikes[0]
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
return
@commands.command(pass_context=True)
async def setstrikelevel(self, ctx, *, member=None, strikelevel: int = None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
usage = 'Usage: `{}setstrikelevel [member] [strikelevel]`'.format(
ctx.prefix)
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if strikelevel == None:
# Either strike level wasn't set - or it's the last section
if type(member) is str:
# It' a string - the hope continues
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(
Nullify.escape_all(member))
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
strikelevel = nameCheck["Int"]
if strikelevel == None:
# Still no strike level
await ctx.channel.send(usage)
return
self.settings.setUserStat(
member, ctx.message.guild, "StrikeLevel", strikelevel)
msg = '*{}\'s* strike level has been set to *{}!*'.format(
DisplayName.name(member), strikelevel)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addkick(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
if str(aRole['ID']) == str(role.id):
isAdmin = True
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addkick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if not str(member.id) in kickList:
kickList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was added to the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removekick(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removekick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
kickList.remove(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was removed from the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addban(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
if str(aRole['ID']) == str(role.id):
isAdmin = True
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if not str(member.id) in banList:
banList.append(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was added to the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removeban(self, ctx, *, member=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removeban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
banList.remove(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was removed from the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def iskicked(self, ctx, *, member=None):
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
msg = '*{}* is in the kick list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def isbanned(self, ctx, *, member=None):
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
msg = '*{}* is in the ban list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def strikelimit(self, ctx):
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
msg = '*{}* strikes are required to strike out.'.format(strikeout)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setstrikelimit(self, ctx, limit=None):
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
if str(aRole['ID']) == str(role.id):
isAdmin = True
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if not limit:
await ctx.channel.send('Strike limit must be *at least* one.')
return
try:
limit = int(limit)
except Exception:
await ctx.channel.send('Strike limit must be an integer.')
return
self.settings.setServerStat(ctx.message.guild, "StrikeOut", limit)
msg = '*{}* strikes are now required to strike out.'.format(limit)
await ctx.channel.send(msg)
@setstrikelimit.error
async def setstrikelimit_error(self, ctx, error):
msg = 'setstrikelimit Error: {}'.format(ctx)
await error.channel.send(msg)
| true | true |
f70f8da0066e2601d4d7d9e6e313a05b5a080127 | 19,727 | py | Python | src/watchdog/observers/inotify_c.py | rec/watchdog | 0224e4424daea365a41125c6691c3477ee1bf86f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/watchdog/observers/inotify_c.py | rec/watchdog | 0224e4424daea365a41125c6691c3477ee1bf86f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/watchdog/observers/inotify_c.py | rec/watchdog | 0224e4424daea365a41125c6691c3477ee1bf86f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
import ctypes.util
from functools import reduce
from ctypes import c_int, c_char_p, c_uint32
from watchdog.utils import has_attribute
from watchdog.utils import UnsupportedLibc
from watchdog.utils.unicode_paths import decode
def _load_libc():
libc_path = None
try:
libc_path = ctypes.util.find_library('c')
except (OSError, IOError, RuntimeError):
# Note: find_library will on some platforms raise these undocumented
# errors, e.g.on android IOError "No usable temporary directory found"
# will be raised.
pass
if libc_path is not None:
return ctypes.CDLL(libc_path)
# Fallbacks
try:
return ctypes.CDLL('libc.so')
except (OSError, IOError):
pass
try:
return ctypes.CDLL('libc.so.6')
except (OSError, IOError):
pass
# uClibc
try:
return ctypes.CDLL('libc.so.0')
except (OSError, IOError) as err:
raise err
# Verify at import time that the resolved libc actually exposes the
# inotify(7) entry points; libcs that lack them cannot support this module.
libc = _load_libc()
if not has_attribute(libc, 'inotify_init') or \
        not has_attribute(libc, 'inotify_add_watch') or \
        not has_attribute(libc, 'inotify_rm_watch'):
    raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)
# ctypes prototypes for the three inotify syscall wrappers.
# use_errno=True keeps the thread-local errno retrievable via
# ctypes.get_errno() after a failed call (see Inotify._raise_error).
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
    ("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
    ("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
    ("inotify_init", libc))
class InotifyConstants(object):
    """Event masks and flags defined by ``<sys/inotify.h>``.

    See inotify(7) for the authoritative description of each flag.
    """
    # User-space events
    IN_ACCESS = 0x00000001  # File was accessed.
    IN_MODIFY = 0x00000002  # File was modified.
    IN_ATTRIB = 0x00000004  # Meta-data changed.
    IN_CLOSE_WRITE = 0x00000008  # Writable file was closed.
    IN_CLOSE_NOWRITE = 0x00000010  # Unwritable file closed.
    IN_OPEN = 0x00000020  # File was opened.
    IN_MOVED_FROM = 0x00000040  # File was moved from X.
    IN_MOVED_TO = 0x00000080  # File was moved to Y.
    IN_CREATE = 0x00000100  # Subfile was created.
    IN_DELETE = 0x00000200  # Subfile was deleted.
    IN_DELETE_SELF = 0x00000400  # Self was deleted.
    IN_MOVE_SELF = 0x00000800  # Self was moved.
    # Helper user-space events (composites of the above).
    IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # Close.
    IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO  # Moves.
    # Events sent by the kernel to a watch.
    IN_UNMOUNT = 0x00002000  # Backing file system was unmounted.
    IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed.
    IN_IGNORED = 0x00008000  # File was ignored (watch was removed).
    # Special flags.
    IN_ONLYDIR = 0x01000000  # Only watch the path if it's a directory.
    IN_DONT_FOLLOW = 0x02000000  # Do not follow a symbolic link.
    IN_EXCL_UNLINK = 0x04000000  # Exclude events on unlinked objects
    IN_MASK_ADD = 0x20000000  # Add to the mask of an existing watch.
    IN_ISDIR = 0x40000000  # Event occurred against directory.
    IN_ONESHOT = 0x80000000  # Only send event once.
    # All user-space events.
    IN_ALL_EVENTS = reduce(
        lambda x, y: x | y, [
            IN_ACCESS,
            IN_MODIFY,
            IN_ATTRIB,
            IN_CLOSE_WRITE,
            IN_CLOSE_NOWRITE,
            IN_OPEN,
            IN_MOVED_FROM,
            IN_MOVED_TO,
            IN_DELETE,
            IN_CREATE,
            IN_DELETE_SELF,
            IN_MOVE_SELF,
        ])
    # Flags for ``inotify_init1``
    IN_CLOEXEC = 0x02000000
    IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = (
    InotifyConstants.IN_MODIFY
    | InotifyConstants.IN_ATTRIB
    | InotifyConstants.IN_MOVED_FROM
    | InotifyConstants.IN_MOVED_TO
    | InotifyConstants.IN_CREATE
    | InotifyConstants.IN_DELETE
    | InotifyConstants.IN_DELETE_SELF
    | InotifyConstants.IN_DONT_FOLLOW
)
class inotify_event_struct(ctypes.Structure):
    """
    Structure representation of the inotify_event structure
    (used in buffer size calculations)::
        struct inotify_event {
            __s32 wd;       /* watch descriptor */
            __u32 mask;     /* watch mask */
            __u32 cookie;   /* cookie to synchronize two events */
            __u32 len;      /* length (including nulls) of name */
            char name[0];   /* stub for possible name */
        };
    """
    _fields_ = [('wd', c_int),
                ('mask', c_uint32),
                ('cookie', c_uint32),
                ('len', c_uint32),
                ('name', c_char_p)]
# sizeof() of the ctypes struct above (the name field counts as one
# pointer here); only used to size the read buffer generously.
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
# Default read buffer: room for DEFAULT_NUM_EVENTS records, each assumed
# to carry up to 16 bytes of name payload on top of the struct size.
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify(object):
"""
Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:type path:
:class:`bytes`
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
    def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
        # The file descriptor associated with the inotify instance.
        inotify_fd = inotify_init()
        if inotify_fd == -1:
            Inotify._raise_error()
        self._inotify_fd = inotify_fd
        # Guards the watch-descriptor bookkeeping against concurrent
        # add_watch/remove_watch/read_events/close calls.
        self._lock = threading.Lock()
        # Stores the watch descriptor for a given path.
        self._wd_for_path = dict()
        # Reverse mapping: watch descriptor -> watched path.
        self._path_for_wd = dict()
        self._path = path
        self._event_mask = event_mask
        self._is_recursive = recursive
        # Directories may be watched (optionally recursively); a plain
        # file gets a single watch.
        if os.path.isdir(path):
            self._add_dir_watch(path, recursive, event_mask)
        else:
            self._add_watch(path, event_mask)
        # Cache of IN_MOVED_FROM events keyed by cookie, used later to
        # pair them with IN_MOVED_TO events (see source_for_move()).
        self._moved_from_events = dict()
    @property
    def event_mask(self):
        """The event mask for this inotify instance."""
        return self._event_mask
    @property
    def path(self):
        """The path associated with the inotify instance."""
        return self._path
    @property
    def is_recursive(self):
        """Whether we are watching directories recursively."""
        return self._is_recursive
    @property
    def fd(self):
        """The file descriptor associated with the inotify instance."""
        return self._inotify_fd
    def clear_move_records(self):
        """Clear cached records of MOVED_FROM events."""
        self._moved_from_events = dict()
    def source_for_move(self, destination_event):
        """
        The source path corresponding to the given MOVED_TO event.
        If the source path is outside the monitored directories, None
        is returned instead.

        The lookup is done via the event cookie, which the kernel uses to
        pair the two halves of a rename.
        """
        if destination_event.cookie in self._moved_from_events:
            return self._moved_from_events[destination_event.cookie].src_path
        else:
            return None
    def remember_move_from_event(self, event):
        """
        Save this event as the source event for future MOVED_TO events to
        reference.
        """
        self._moved_from_events[event.cookie] = event
    def add_watch(self, path):
        """
        Adds a watch for the given path.
        :param path:
            Path to begin monitoring.
        """
        with self._lock:
            self._add_watch(path, self._event_mask)
    def remove_watch(self, path):
        """
        Removes a watch for the given path.
        :param path:
            Path string for which the watch will be removed.
        :raises KeyError:
            If *path* is not currently being watched.
        """
        with self._lock:
            # Drop both directions of the bookkeeping, then tell the
            # kernel to remove the watch.
            wd = self._wd_for_path.pop(path)
            del self._path_for_wd[wd]
            if inotify_rm_watch(self._inotify_fd, wd) == -1:
                Inotify._raise_error()
    def close(self):
        """
        Closes the inotify instance and removes all associated watches.
        """
        with self._lock:
            # Explicitly remove only the root watch; closing the fd below
            # releases the remaining watches in the kernel.
            if self._path in self._wd_for_path:
                wd = self._wd_for_path[self._path]
                inotify_rm_watch(self._inotify_fd, wd)
            os.close(self._inotify_fd)
    def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
        """
        Reads events from inotify and yields them.
        """
        # HACK: We need to traverse the directory path
        # recursively and simulate events for newly
        # created subdirectories/files. This will handle
        # mkdir -p foobar/blah/bar; touch foobar/afile
        def _recursive_simulate(src_path):
            # Synthesize IN_CREATE events (and register watches) for
            # everything already inside a directory that was populated
            # before we could start watching it.
            events = []
            for root, dirnames, filenames in os.walk(src_path):
                for dirname in dirnames:
                    try:
                        full_path = os.path.join(root, dirname)
                        wd_dir = self._add_watch(full_path, self._event_mask)
                        e = InotifyEvent(
                            wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
                        events.append(e)
                    except OSError:
                        # The directory may already have vanished; skip it.
                        pass
                for filename in filenames:
                    full_path = os.path.join(root, filename)
                    wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
                    e = InotifyEvent(
                        wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
                    events.append(e)
            return events
        event_buffer = None
        while True:
            try:
                event_buffer = os.read(self._inotify_fd, event_buffer_size)
            except OSError as e:
                # Retry reads interrupted by a signal; any other OSError
                # falls through and ends the read attempt.
                if e.errno == errno.EINTR:
                    continue
            break
        with self._lock:
            event_list = []
            for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
                if wd == -1:
                    # Records with wd == -1 carry no usable path mapping
                    # (per inotify(7), e.g. IN_Q_OVERFLOW); skip them.
                    continue
                wd_path = self._path_for_wd[wd]
                src_path = os.path.join(wd_path, name) if name else wd_path  # avoid trailing slash
                inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
                if inotify_event.is_moved_from:
                    # Stash the source half of a rename; a matching
                    # IN_MOVED_TO with the same cookie may follow.
                    self.remember_move_from_event(inotify_event)
                elif inotify_event.is_moved_to:
                    move_src_path = self.source_for_move(inotify_event)
                    if move_src_path in self._wd_for_path:
                        # The rename stayed inside the watched tree:
                        # re-point the existing watch bookkeeping (and,
                        # when recursive, that of every path underneath
                        # it) at the new location instead of re-adding
                        # watches.
                        moved_wd = self._wd_for_path[move_src_path]
                        del self._wd_for_path[move_src_path]
                        self._wd_for_path[inotify_event.src_path] = moved_wd
                        self._path_for_wd[moved_wd] = inotify_event.src_path
                        if self.is_recursive:
                            for _path, _wd in self._wd_for_path.copy().items():
                                if _path.startswith(move_src_path + os.path.sep.encode()):
                                    moved_wd = self._wd_for_path.pop(_path)
                                    _move_to_path = _path.replace(move_src_path, inotify_event.src_path)
                                    self._wd_for_path[_move_to_path] = moved_wd
                                    self._path_for_wd[moved_wd] = _move_to_path
                    src_path = os.path.join(wd_path, name)
                    inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
                if inotify_event.is_ignored:
                    # Clean up book-keeping for deleted watches.
                    path = self._path_for_wd.pop(wd)
                    if self._wd_for_path[path] == wd:
                        del self._wd_for_path[path]
                    continue
                event_list.append(inotify_event)
                if (self.is_recursive and inotify_event.is_directory
                        and inotify_event.is_create):
                    # TODO: When a directory from another part of the
                    # filesystem is moved into a watched directory, this
                    # will not generate events for the directory tree.
                    # We need to coalesce IN_MOVED_TO events and those
                    # IN_MOVED_TO events which don't pair up with
                    # IN_MOVED_FROM events should be marked IN_CREATE
                    # instead relative to this directory.
                    try:
                        self._add_watch(src_path, self._event_mask)
                    except OSError:
                        continue
                    event_list.extend(_recursive_simulate(src_path))
        return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
"""
Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
if not os.path.isdir(path):
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
    def _add_watch(self, path, mask):
        """
        Adds a watch for the given path to monitor events specified by the
        mask.
        :param path:
            Path to monitor
        :param mask:
            Event bit mask.
        :returns:
            The kernel-assigned watch descriptor.
        """
        wd = inotify_add_watch(self._inotify_fd, path, mask)
        if wd == -1:
            Inotify._raise_error()
        # Record the bidirectional wd <-> path mapping so events (which
        # carry only the wd) can be resolved back to paths.
        self._wd_for_path[path] = wd
        self._path_for_wd[wd] = path
        return wd
    @staticmethod
    def _raise_error():
        """
        Raises errors for inotify failures.
        """
        err = ctypes.get_errno()
        # Translate the two common resource-limit errnos into friendlier
        # messages; everything else gets the standard strerror text.
        if err == errno.ENOSPC:
            raise OSError(errno.ENOSPC, "inotify watch limit reached")
        elif err == errno.EMFILE:
            raise OSError(errno.EMFILE, "inotify instance limit reached")
        else:
            raise OSError(err, os.strerror(err))
    @staticmethod
    def _parse_event_buffer(event_buffer):
        """
        Parses an event buffer of ``inotify_event`` structs returned by
        inotify::
            struct inotify_event {
                __s32 wd;       /* watch descriptor */
                __u32 mask;     /* watch mask */
                __u32 cookie;   /* cookie to synchronize two events */
                __u32 len;      /* length (including nulls) of name */
                char name[0];   /* stub for possible name */
            };
        The ``cookie`` member of this struct is used to pair two related
        events, for example, it pairs an IN_MOVED_FROM event with an
        IN_MOVED_TO event.

        Yields ``(wd, mask, cookie, name)`` tuples, with the NUL padding
        stripped from ``name``.
        """
        i = 0
        # Each record is a fixed 16-byte header (wd, mask, cookie, len)
        # followed by ``len`` bytes of NUL-padded name.
        while i + 16 <= len(event_buffer):
            wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
            name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
            i += 16 + length
            yield wd, mask, cookie, name
class InotifyEvent(object):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param name:
Base name of the event source path.
:param src_path:
Full event source path.
"""
    def __init__(self, wd, mask, cookie, name, src_path):
        self._wd = wd
        self._mask = mask
        self._cookie = cookie
        self._name = name
        self._src_path = src_path
    # --- raw struct accessors -------------------------------------------
    @property
    def src_path(self):
        """Full event source path."""
        return self._src_path
    @property
    def wd(self):
        """Watch descriptor the event was reported against."""
        return self._wd
    @property
    def mask(self):
        """Raw event mask (bitwise OR of IN_* flags)."""
        return self._mask
    @property
    def cookie(self):
        """Cookie pairing the two halves of a rename."""
        return self._cookie
    @property
    def name(self):
        """Base name of the event source path."""
        return self._name
    # --- convenience predicates, one per IN_* flag ----------------------
    @property
    def is_modify(self):
        return self._mask & InotifyConstants.IN_MODIFY > 0
    @property
    def is_close_write(self):
        return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
    @property
    def is_close_nowrite(self):
        return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
    @property
    def is_access(self):
        return self._mask & InotifyConstants.IN_ACCESS > 0
    @property
    def is_delete(self):
        return self._mask & InotifyConstants.IN_DELETE > 0
    @property
    def is_delete_self(self):
        return self._mask & InotifyConstants.IN_DELETE_SELF > 0
    @property
    def is_create(self):
        return self._mask & InotifyConstants.IN_CREATE > 0
    @property
    def is_moved_from(self):
        return self._mask & InotifyConstants.IN_MOVED_FROM > 0
    @property
    def is_moved_to(self):
        return self._mask & InotifyConstants.IN_MOVED_TO > 0
    @property
    def is_move(self):
        return self._mask & InotifyConstants.IN_MOVE > 0
    @property
    def is_move_self(self):
        return self._mask & InotifyConstants.IN_MOVE_SELF > 0
    @property
    def is_attrib(self):
        return self._mask & InotifyConstants.IN_ATTRIB > 0
    @property
    def is_ignored(self):
        return self._mask & InotifyConstants.IN_IGNORED > 0
    @property
    def is_directory(self):
        # It looks like the kernel does not provide this information for
        # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
        # See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
        return (self.is_delete_self or self.is_move_self
                or self._mask & InotifyConstants.IN_ISDIR > 0)
    @property
    def key(self):
        """Tuple of all identifying fields; basis for equality and hashing."""
        return self._src_path, self._wd, self._mask, self._cookie, self._name
def __eq__(self, inotify_event):
return self.key == inotify_event.key
def __ne__(self, inotify_event):
return self.key == inotify_event.key
def __hash__(self):
return hash(self.key)
    @staticmethod
    def _get_mask_string(mask):
        # Build a human-readable "A|B|C" string from the IN_* constant
        # names whose bits are set in ``mask``; the composite helpers are
        # excluded so each bit is reported only once.
        masks = []
        for c in dir(InotifyConstants):
            if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
                c_val = getattr(InotifyConstants, c)
                if mask & c_val:
                    masks.append(c)
        mask_string = '|'.join(masks)
        return mask_string
    def __repr__(self):
        # Debug representation including the decoded name and symbolic mask.
        mask_string = self._get_mask_string(self.mask)
        s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>'
        return s % (type(self).__name__, self.src_path, self.wd, mask_string,
                    self.cookie, decode(self.name))
| 33.54932 | 114 | 0.596441 |
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
import ctypes.util
from functools import reduce
from ctypes import c_int, c_char_p, c_uint32
from watchdog.utils import has_attribute
from watchdog.utils import UnsupportedLibc
from watchdog.utils.unicode_paths import decode
def _load_libc():
libc_path = None
try:
libc_path = ctypes.util.find_library('c')
except (OSError, IOError, RuntimeError):
pass
if libc_path is not None:
return ctypes.CDLL(libc_path)
try:
return ctypes.CDLL('libc.so')
except (OSError, IOError):
pass
try:
return ctypes.CDLL('libc.so.6')
except (OSError, IOError):
pass
try:
return ctypes.CDLL('libc.so.0')
except (OSError, IOError) as err:
raise err
libc = _load_libc()
if not has_attribute(libc, 'inotify_init') or \
not has_attribute(libc, 'inotify_add_watch') or \
not has_attribute(libc, 'inotify_rm_watch'):
raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
("inotify_init", libc))
class InotifyConstants(object):
IN_ACCESS = 0x00000001
IN_MODIFY = 0x00000002
IN_ATTRIB = 0x00000004
IN_CLOSE_WRITE = 0x00000008
IN_CLOSE_NOWRITE = 0x00000010
IN_OPEN = 0x00000020
IN_MOVED_FROM = 0x00000040
IN_MOVED_TO = 0x00000080
IN_CREATE = 0x00000100
IN_DELETE = 0x00000200
IN_DELETE_SELF = 0x00000400
IN_MOVE_SELF = 0x00000800
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO
IN_UNMOUNT = 0x00002000
IN_Q_OVERFLOW = 0x00004000
IN_IGNORED = 0x00008000
IN_ONLYDIR = 0x01000000
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
class inotify_event_struct(ctypes.Structure):
_fields_ = [('wd', c_int),
('mask', c_uint32),
('cookie', c_uint32),
('len', c_uint32),
('name', c_char_p)]
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify(object):
def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
inotify_fd = inotify_init()
if inotify_fd == -1:
Inotify._raise_error()
self._inotify_fd = inotify_fd
self._lock = threading.Lock()
self._wd_for_path = dict()
self._path_for_wd = dict()
self._path = path
self._event_mask = event_mask
self._is_recursive = recursive
if os.path.isdir(path):
self._add_dir_watch(path, recursive, event_mask)
else:
self._add_watch(path, event_mask)
self._moved_from_events = dict()
@property
def event_mask(self):
return self._event_mask
@property
def path(self):
return self._path
@property
def is_recursive(self):
return self._is_recursive
@property
def fd(self):
return self._inotify_fd
def clear_move_records(self):
self._moved_from_events = dict()
def source_for_move(self, destination_event):
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
else:
return None
def remember_move_from_event(self, event):
self._moved_from_events[event.cookie] = event
def add_watch(self, path):
with self._lock:
self._add_watch(path, self._event_mask)
def remove_watch(self, path):
with self._lock:
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
def close(self):
with self._lock:
if self._path in self._wd_for_path:
wd = self._wd_for_path[self._path]
inotify_rm_watch(self._inotify_fd, wd)
os.close(self._inotify_fd)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
def _recursive_simulate(src_path):
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
try:
full_path = os.path.join(root, dirname)
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
events.append(e)
except OSError:
pass
for filename in filenames:
full_path = os.path.join(root, filename)
wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
e = InotifyEvent(
wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
events.append(e)
return events
event_buffer = None
while True:
try:
event_buffer = os.read(self._inotify_fd, event_buffer_size)
except OSError as e:
if e.errno == errno.EINTR:
continue
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = self._path_for_wd[wd]
src_path = os.path.join(wd_path, name) if name else wd_path
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
if self.is_recursive:
for _path, _wd in self._wd_for_path.copy().items():
if _path.startswith(move_src_path + os.path.sep.encode()):
moved_wd = self._wd_for_path.pop(_path)
_move_to_path = _path.replace(move_src_path, inotify_event.src_path)
self._wd_for_path[_move_to_path] = moved_wd
self._path_for_wd[moved_wd] = _move_to_path
src_path = os.path.join(wd_path, name)
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
path = self._path_for_wd.pop(wd)
if self._wd_for_path[path] == wd:
del self._wd_for_path[path]
continue
event_list.append(inotify_event)
if (self.is_recursive and inotify_event.is_directory
and inotify_event.is_create):
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
if not os.path.isdir(path):
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path, mask):
wd = inotify_add_watch(self._inotify_fd, path, mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
@staticmethod
def _raise_error():
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
else:
raise OSError(err, os.strerror(err))
@staticmethod
def _parse_event_buffer(event_buffer):
i = 0
while i + 16 <= len(event_buffer):
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
i += 16 + length
yield wd, mask, cookie, name
class InotifyEvent(object):
def __init__(self, wd, mask, cookie, name, src_path):
self._wd = wd
self._mask = mask
self._cookie = cookie
self._name = name
self._src_path = src_path
@property
def src_path(self):
return self._src_path
@property
def wd(self):
return self._wd
@property
def mask(self):
return self._mask
@property
def cookie(self):
return self._cookie
@property
def name(self):
return self._name
@property
def is_modify(self):
return self._mask & InotifyConstants.IN_MODIFY > 0
@property
def is_close_write(self):
return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
@property
def is_close_nowrite(self):
return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
@property
def is_access(self):
return self._mask & InotifyConstants.IN_ACCESS > 0
@property
def is_delete(self):
return self._mask & InotifyConstants.IN_DELETE > 0
@property
def is_delete_self(self):
return self._mask & InotifyConstants.IN_DELETE_SELF > 0
@property
def is_create(self):
return self._mask & InotifyConstants.IN_CREATE > 0
@property
def is_moved_from(self):
return self._mask & InotifyConstants.IN_MOVED_FROM > 0
@property
def is_moved_to(self):
return self._mask & InotifyConstants.IN_MOVED_TO > 0
@property
def is_move(self):
return self._mask & InotifyConstants.IN_MOVE > 0
@property
def is_move_self(self):
return self._mask & InotifyConstants.IN_MOVE_SELF > 0
@property
def is_attrib(self):
return self._mask & InotifyConstants.IN_ATTRIB > 0
@property
def is_ignored(self):
return self._mask & InotifyConstants.IN_IGNORED > 0
@property
def is_directory(self):
# It looks like the kernel does not provide this information for
# IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
return (self.is_delete_self or self.is_move_self
or self._mask & InotifyConstants.IN_ISDIR > 0)
@property
def key(self):
return self._src_path, self._wd, self._mask, self._cookie, self._name
def __eq__(self, inotify_event):
return self.key == inotify_event.key
def __ne__(self, inotify_event):
return self.key == inotify_event.key
def __hash__(self):
return hash(self.key)
@staticmethod
def _get_mask_string(mask):
masks = []
for c in dir(InotifyConstants):
if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
c_val = getattr(InotifyConstants, c)
if mask & c_val:
masks.append(c)
mask_string = '|'.join(masks)
return mask_string
def __repr__(self):
mask_string = self._get_mask_string(self.mask)
s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>'
return s % (type(self).__name__, self.src_path, self.wd, mask_string,
self.cookie, decode(self.name))
| true | true |
f70f8e73132e3e8bee5eadba32f1938ebaba2aa7 | 319 | py | Python | example/routes.py | fitahol/aiohttprest | b9f1a386b22ad03e53f2f0e74ed3b29da5bcc220 | [
"Apache-2.0"
] | 1 | 2017-03-14T23:39:55.000Z | 2017-03-14T23:39:55.000Z | example/routes.py | fitahol/aiohttprest | b9f1a386b22ad03e53f2f0e74ed3b29da5bcc220 | [
"Apache-2.0"
] | null | null | null | example/routes.py | fitahol/aiohttprest | b9f1a386b22ad03e53f2f0e74ed3b29da5bcc220 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
__created__ = '06/01/2017'
__author__ = 'deling.ma'
"""
from aio_rest.routes import RouteCollector, Route
from example.views import publish, IndexView
# Route table for the example app; every route is mounted under '/app'.
routes = RouteCollector(prefix='/app', routes=[
    Route('/', IndexView),
    Route('/publish', publish, method='GET'),
])
| 19.9375 | 49 | 0.689655 |
from aio_rest.routes import RouteCollector, Route
from example.views import publish, IndexView
routes = RouteCollector(prefix='/app', routes=[
Route('/', IndexView),
Route('/publish', publish, method='GET'),
])
| true | true |
f70f8ec2299360b366ee181be4fd170341ad6326 | 869 | py | Python | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | import tensorflow as tf
#placeholder variable(scalar)
X=tf.placeholder(tf.float32,shape=[None])
Y=tf.placeholder(tf.float32,shape=[None])
W=tf.Variable(tf.random_normal([1]),name='weight')
b=tf.Variable(tf.random_normal([1]),name='bias')
hypothesis=X*W+b
#average
cost=tf.reduce_mean(tf.square(hypothesis-Y))
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)
#minimize cost
train=optimizer.minimize(cost)
sess=tf.Session()
#initialize var
sess.run(tf.global_variables_initializer())
#learning
for step in range(2001):
cost_val,W_val,b_val,_=sess.run([cost,W,b,train],
feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})
if step%20==0:
print(step,cost_val,W_val,b_val)
#evlauation
print(sess.run(hypothesis,feed_dict={X:[5]}))
print(sess.run(hypothesis,feed_dict={X:[2.5]}))
print(sess.run(hypothesis,feed_dict={X:[1.5,3.5]})) | 27.15625 | 63 | 0.727273 | import tensorflow as tf
X=tf.placeholder(tf.float32,shape=[None])
Y=tf.placeholder(tf.float32,shape=[None])
W=tf.Variable(tf.random_normal([1]),name='weight')
b=tf.Variable(tf.random_normal([1]),name='bias')
hypothesis=X*W+b
cost=tf.reduce_mean(tf.square(hypothesis-Y))
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)
train=optimizer.minimize(cost)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val,W_val,b_val,_=sess.run([cost,W,b,train],
feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})
if step%20==0:
print(step,cost_val,W_val,b_val)
print(sess.run(hypothesis,feed_dict={X:[5]}))
print(sess.run(hypothesis,feed_dict={X:[2.5]}))
print(sess.run(hypothesis,feed_dict={X:[1.5,3.5]})) | true | true |
f70f8f560c5f80a9cb4578a691c38108667a8423 | 1,211 | py | Python | deel/model/lstm.py | ghelia/deel | 6ff67d7246daf12d1884357010dd82842fbc31d1 | [
"MIT"
] | null | null | null | deel/model/lstm.py | ghelia/deel | 6ff67d7246daf12d1884357010dd82842fbc31d1 | [
"MIT"
] | null | null | null | deel/model/lstm.py | ghelia/deel | 6ff67d7246daf12d1884357010dd82842fbc31d1 | [
"MIT"
] | null | null | null | import chainer
import chainer.functions as F
import chainer.links as L
"""
Based on chainer official example
https://github.com/pfnet/chainer/tree/master/examples/ptb
Modified by shi3z March 28,2016
"""
class RNNLM(chainer.Chain):
    """Recurrent neural net language model for the Penn Treebank corpus.

    A two-layer LSTM over either word-id embeddings (mode 0) or an
    arbitrary dense input vector projected by a linear layer (mode 1).

    :param n_input_units: dimensionality of raw input vectors (mode 1).
    :param n_vocab: vocabulary size for the embedding table (mode 0).
    :param n_units: hidden-state size of both LSTM layers.
    :param train: when True, dropout is active between layers.
    """
    def __init__(self, n_input_units=1000, n_vocab=100, n_units=100, train=True):
        super(RNNLM, self).__init__(
            inputVector=L.Linear(n_input_units, n_units),
            embed=L.EmbedID(n_vocab, n_units),
            l1=L.LSTM(n_units, n_units),
            l2=L.LSTM(n_units, n_units),
            l3=L.Linear(n_units, n_vocab),
        )
        self.train = train
    def reset_state(self):
        """Clear the LSTMs' hidden state before feeding a new sequence.

        Bug fix: the output layer ``l3`` is a ``Linear`` link with no
        ``reset_state`` method, so the previous ``self.l3.reset_state()``
        call raised AttributeError; only the stateful LSTM layers are
        reset.
        """
        self.l1.reset_state()
        self.l2.reset_state()
    def __call__(self, x, mode=0):
        """One step; mode 1 treats ``x`` as a dense vector, else word ids."""
        if mode == 1:
            h0 = self.inputVector(x)
        else:
            h0 = self.embed(x)
        # Dropout between every pair of layers; active only in training.
        h1 = self.l1(F.dropout(h0, train=self.train))
        h2 = self.l2(F.dropout(h1, train=self.train))
        y = self.l3(F.dropout(h2, train=self.train))
        return y
| 28.162791 | 80 | 0.618497 | import chainer
import chainer.functions as F
import chainer.links as L
class RNNLM(chainer.Chain):
def __init__(self, n_input_units=1000,n_vocab=100, n_units=100, train=True):
super(RNNLM, self).__init__(
inputVector= L.Linear(n_input_units, n_units),
embed=L.EmbedID(n_vocab, n_units),
l1=L.LSTM(n_units, n_units),
l2=L.LSTM(n_units, n_units),
l3=L.Linear(n_units, n_vocab),
)
self.train = train
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
self.l3.reset_state()
def __call__(self, x,mode=0):
if mode == 1:
h0 = self.inputVector(x)
else:
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0, train=self.train))
h2 = self.l2(F.dropout(h1, train=self.train))
y = self.l3(F.dropout(h2, train=self.train))
return y
| true | true |
f70f910ce1791081915a783482feb7b0db02c894 | 5,716 | py | Python | src/films/tests/test_hdrfilm.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | [
"CNRI-Python"
] | 7 | 2020-07-24T03:19:59.000Z | 2022-03-30T10:56:12.000Z | src/films/tests/test_hdrfilm.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | [
"CNRI-Python"
] | 1 | 2021-04-07T22:30:23.000Z | 2021-04-08T00:55:36.000Z | src/films/tests/test_hdrfilm.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | [
"CNRI-Python"
] | 2 | 2020-06-08T08:25:09.000Z | 2021-04-05T22:13:08.000Z | import mitsuba
import pytest
import os
import enoki as ek
def test01_construct(variant_scalar_rgb):
from mitsuba.core.xml import load_string
# With default reconstruction filter
film = load_string("""<film version="2.0.0" type="hdrfilm"></film>""")
assert film is not None
assert film.reconstruction_filter() is not None
# With a provided reconstruction filter
film = load_string("""<film version="2.0.0" type="hdrfilm">
<rfilter type="gaussian">
<float name="stddev" value="18.5"/>
</rfilter>
</film>""")
assert film is not None
assert film.reconstruction_filter().radius() == (4 * 18.5)
# Certain parameter values are not allowed
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="component_format" value="uint8"/>
</film>""")
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="pixel_format" value="brga"/>
</film>""")
def test02_crops(variant_scalar_rgb):
from mitsuba.core.xml import load_string
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_width" value="11"/>
<integer name="crop_height" value="5"/>
<integer name="crop_offset_x" value="2"/>
<integer name="crop_offset_y" value="3"/>
<boolean name="high_quality_edges" value="true"/>
<string name="pixel_format" value="rgba"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [11, 5])
assert ek.all(film.crop_offset() == [2, 3])
assert film.has_high_quality_edges()
# Crop size doesn't adjust its size, so an error should be raised if the
# resulting crop window goes out of bounds.
incomplete = """<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_offset_x" value="30"/>
<integer name="crop_offset_y" value="20"/>"""
with pytest.raises(RuntimeError):
film = load_string(incomplete + "</film>")
film = load_string(incomplete + """
<integer name="crop_width" value="2"/>
<integer name="crop_height" value="1"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [2, 1])
assert ek.all(film.crop_offset() == [30, 20])
@pytest.mark.parametrize('file_format', ['exr', 'rgbe', 'pfm'])
def test03_develop(variant_scalar_rgb, file_format, tmpdir):
from mitsuba.core.xml import load_string
from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype
from mitsuba.render import ImageBlock
import numpy as np
"""Create a test image. Develop it to a few file format, each time reading
it back and checking that contents are unchanged."""
np.random.seed(12345 + ord(file_format[0]))
# Note: depending on the file format, the alpha channel may be automatically removed.
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="41"/>
<integer name="height" value="37"/>
<string name="file_format" value="{}"/>
<string name="pixel_format" value="rgba"/>
<string name="component_format" value="float32"/>
<rfilter type="box"/>
</film>""".format(file_format))
# Regardless of the output file format, values are stored as XYZAW (5 channels).
contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))
# RGBE and will only reconstruct well images that have similar scales on
# all channel (because exponent is shared between channels).
if file_format is "rgbe":
contents = 1 + 0.1 * contents
# Use unit weights.
contents[:, :, 4] = 1.0
block = ImageBlock(film.size(), 5, film.reconstruction_filter())
block.clear()
for x in range(film.size()[1]):
for y in range(film.size()[0]):
block.put([y+0.5, x+0.5], contents[x, y, :])
film.prepare(['X', 'Y', 'Z', 'A', 'W'])
film.put(block)
with pytest.raises(RuntimeError):
# Should raise when the destination file hasn't been specified.
film.develop()
filename = str(tmpdir.join('test_image.' + file_format))
film.set_destination_file(filename)
film.develop()
# Read back and check contents
other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW, Struct.Type.Float32, srgb_gamma=False)
img = np.array(other, copy=False)
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(1, 3, 1)
plt.imshow(contents[:, :, :3])
plt.subplot(1, 3, 2)
plt.imshow(img[:, :, :3])
plt.subplot(1, 3, 3)
plt.imshow(ek.sum(ek.abs(img[:, :, :3] - contents[:, :, :3]), axis=2), cmap='coolwarm')
plt.colorbar()
plt.show()
if file_format == "exr":
assert ek.allclose(img, contents, atol=1e-5)
else:
if file_format == "rgbe":
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \
'\n{}\nvs\n{}\n'.format(img[:4, :4, :3], contents[:4, :4, :3])
else:
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)
# Alpha channel was ignored, alpha and weights should default to 1.0.
assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)
| 39.42069 | 101 | 0.603569 | import mitsuba
import pytest
import os
import enoki as ek
def test01_construct(variant_scalar_rgb):
from mitsuba.core.xml import load_string
film = load_string("""<film version="2.0.0" type="hdrfilm"></film>""")
assert film is not None
assert film.reconstruction_filter() is not None
film = load_string("""<film version="2.0.0" type="hdrfilm">
<rfilter type="gaussian">
<float name="stddev" value="18.5"/>
</rfilter>
</film>""")
assert film is not None
assert film.reconstruction_filter().radius() == (4 * 18.5)
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="component_format" value="uint8"/>
</film>""")
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="pixel_format" value="brga"/>
</film>""")
def test02_crops(variant_scalar_rgb):
from mitsuba.core.xml import load_string
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_width" value="11"/>
<integer name="crop_height" value="5"/>
<integer name="crop_offset_x" value="2"/>
<integer name="crop_offset_y" value="3"/>
<boolean name="high_quality_edges" value="true"/>
<string name="pixel_format" value="rgba"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [11, 5])
assert ek.all(film.crop_offset() == [2, 3])
assert film.has_high_quality_edges()
# resulting crop window goes out of bounds.
incomplete = """<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_offset_x" value="30"/>
<integer name="crop_offset_y" value="20"/>"""
with pytest.raises(RuntimeError):
film = load_string(incomplete + "</film>")
film = load_string(incomplete + """
<integer name="crop_width" value="2"/>
<integer name="crop_height" value="1"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [2, 1])
assert ek.all(film.crop_offset() == [30, 20])
@pytest.mark.parametrize('file_format', ['exr', 'rgbe', 'pfm'])
def test03_develop(variant_scalar_rgb, file_format, tmpdir):
from mitsuba.core.xml import load_string
from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype
from mitsuba.render import ImageBlock
import numpy as np
np.random.seed(12345 + ord(file_format[0]))
# Note: depending on the file format, the alpha channel may be automatically removed.
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="41"/>
<integer name="height" value="37"/>
<string name="file_format" value="{}"/>
<string name="pixel_format" value="rgba"/>
<string name="component_format" value="float32"/>
<rfilter type="box"/>
</film>""".format(file_format))
# Regardless of the output file format, values are stored as XYZAW (5 channels).
contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))
# RGBE and will only reconstruct well images that have similar scales on
# all channel (because exponent is shared between channels).
if file_format is "rgbe":
contents = 1 + 0.1 * contents
# Use unit weights.
contents[:, :, 4] = 1.0
block = ImageBlock(film.size(), 5, film.reconstruction_filter())
block.clear()
for x in range(film.size()[1]):
for y in range(film.size()[0]):
block.put([y+0.5, x+0.5], contents[x, y, :])
film.prepare(['X', 'Y', 'Z', 'A', 'W'])
film.put(block)
with pytest.raises(RuntimeError):
# Should raise when the destination file hasn't been specified.
film.develop()
filename = str(tmpdir.join('test_image.' + file_format))
film.set_destination_file(filename)
film.develop()
other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW, Struct.Type.Float32, srgb_gamma=False)
img = np.array(other, copy=False)
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(1, 3, 1)
plt.imshow(contents[:, :, :3])
plt.subplot(1, 3, 2)
plt.imshow(img[:, :, :3])
plt.subplot(1, 3, 3)
plt.imshow(ek.sum(ek.abs(img[:, :, :3] - contents[:, :, :3]), axis=2), cmap='coolwarm')
plt.colorbar()
plt.show()
if file_format == "exr":
assert ek.allclose(img, contents, atol=1e-5)
else:
if file_format == "rgbe":
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \
'\n{}\nvs\n{}\n'.format(img[:4, :4, :3], contents[:4, :4, :3])
else:
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)
assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)
| true | true |
f70f925d50a5f908572efb96b2f9609c818088a6 | 2,959 | py | Python | tests/test_inline_functions/test_query.py | pyansys/pyansys | adf51893be746c632f40a9dc8c9247dbee138dda | [
"MIT"
] | 1 | 2021-08-19T14:08:34.000Z | 2021-08-19T14:08:34.000Z | tests/test_inline_functions/test_query.py | pyansys/pyansys | adf51893be746c632f40a9dc8c9247dbee138dda | [
"MIT"
] | null | null | null | tests/test_inline_functions/test_query.py | pyansys/pyansys | adf51893be746c632f40a9dc8c9247dbee138dda | [
"MIT"
] | null | null | null | import pytest
class TestParseParameter:
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4.0),
("PARAMETER=4", 4.0),
("PARAMETER WARNING = 4", 4.0),
("PARAMETER = _=4", 4.0),
("WARNING = PARAMETER = 4", 4.0),
("PARAMETER = .4", 0.4),
],
)
def test_parse_float(self, values, query):
input_, output = values
assert query._parse_parameter_float_response(input_) == output
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4),
("PARAMETER=4", 4),
("PARAMETER WARNING = 4", 4),
("PARAMETER = _=4", 4),
("WARNING = PARAMETER = 4", 4),
("PARAMETER = .4", 0),
],
)
def test_parse_int(self, values, query):
input_, output = values
assert query._parse_parameter_integer_response(input_) == output
def test_parse_float_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_float_response(input_)
def test_parse_int_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_integer_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_float_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_float_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_int_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_integer_response(input_)
class TestRunQuery:
@pytest.mark.parametrize('command', [('KX(1)', float), ('KP(1,1,1)', int)])
def test_run_query_returned_type(self, line_geometry, command):
q, kps, l0 = line_geometry
cmd, type_ = command
integer = False if type_ == float else True
v = q._run_query(cmd, integer=integer)
assert isinstance(v, type_)
def test_interactive_mode_error(self, mapdl, line_geometry):
q, kps, l0 = line_geometry
with mapdl.non_interactive:
with pytest.raises(RuntimeError):
v = q.kx(1)
@pytest.mark.skip_grpc # only works in gRPC mode
def test_nopr_mode(self, mapdl, line_geometry):
try:
# enter no printout mode
mapdl._run('/NOPR', mute=True)
assert mapdl.prep7() is None
# verify that queries still work
q, kps, l0 = line_geometry
assert q.kx(2) == 1.0
finally:
# always return printing
mapdl._run('/GOPR', mute=True)
| 32.516484 | 79 | 0.57756 | import pytest
class TestParseParameter:
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4.0),
("PARAMETER=4", 4.0),
("PARAMETER WARNING = 4", 4.0),
("PARAMETER = _=4", 4.0),
("WARNING = PARAMETER = 4", 4.0),
("PARAMETER = .4", 0.4),
],
)
def test_parse_float(self, values, query):
input_, output = values
assert query._parse_parameter_float_response(input_) == output
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4),
("PARAMETER=4", 4),
("PARAMETER WARNING = 4", 4),
("PARAMETER = _=4", 4),
("WARNING = PARAMETER = 4", 4),
("PARAMETER = .4", 0),
],
)
def test_parse_int(self, values, query):
input_, output = values
assert query._parse_parameter_integer_response(input_) == output
def test_parse_float_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_float_response(input_)
def test_parse_int_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_integer_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_float_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_float_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_int_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_integer_response(input_)
class TestRunQuery:
@pytest.mark.parametrize('command', [('KX(1)', float), ('KP(1,1,1)', int)])
def test_run_query_returned_type(self, line_geometry, command):
q, kps, l0 = line_geometry
cmd, type_ = command
integer = False if type_ == float else True
v = q._run_query(cmd, integer=integer)
assert isinstance(v, type_)
def test_interactive_mode_error(self, mapdl, line_geometry):
q, kps, l0 = line_geometry
with mapdl.non_interactive:
with pytest.raises(RuntimeError):
v = q.kx(1)
@pytest.mark.skip_grpc
def test_nopr_mode(self, mapdl, line_geometry):
try:
mapdl._run('/NOPR', mute=True)
assert mapdl.prep7() is None
q, kps, l0 = line_geometry
assert q.kx(2) == 1.0
finally:
mapdl._run('/GOPR', mute=True)
| true | true |
f70f94b36a95bfd5abb974ec2563582a58d0197b | 169 | py | Python | app/src/imports/requests.py | jkulak/spotify-grabtrack | e6cd16709195ca6d2e186a3b8cc7ce1419b6aace | [
"MIT"
] | null | null | null | app/src/imports/requests.py | jkulak/spotify-grabtrack | e6cd16709195ca6d2e186a3b8cc7ce1419b6aace | [
"MIT"
] | 13 | 2022-02-10T20:07:49.000Z | 2022-03-27T20:07:21.000Z | app/src/imports/requests.py | jkulak/spotify-grabtrack | e6cd16709195ca6d2e186a3b8cc7ce1419b6aace | [
"MIT"
] | null | null | null | import requests_cache
from requests_cache import SQLiteCache
requests_cache.install_cache(
"grabtrack_sqlite_cache", SQLiteCache("spotify_api_cache", timeout=30)
)
| 24.142857 | 74 | 0.83432 | import requests_cache
from requests_cache import SQLiteCache
requests_cache.install_cache(
"grabtrack_sqlite_cache", SQLiteCache("spotify_api_cache", timeout=30)
)
| true | true |
f70f95230083e964837c7ccf662a8d3e815a9abf | 3,469 | py | Python | gui/mon/views.py | erigones/esdc-ce | 2e39211a8f5132d66e574d3a657906c7d3c406fe | [
"Apache-2.0"
] | 97 | 2016-11-15T14:44:23.000Z | 2022-03-13T18:09:15.000Z | gui/mon/views.py | erigones/esdc-ce | 2e39211a8f5132d66e574d3a657906c7d3c406fe | [
"Apache-2.0"
] | 334 | 2016-11-17T19:56:57.000Z | 2022-03-18T10:45:53.000Z | gui/mon/views.py | erigones/esdc-ce | 2e39211a8f5132d66e574d3a657906c7d3c406fe | [
"Apache-2.0"
] | 33 | 2017-01-02T16:04:13.000Z | 2022-02-07T19:20:24.000Z | import json
from re import match
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.shortcuts import redirect, render
from gui.mon.forms import BaseAlertFilterForm
from gui.utils import collect_view_data
from gui.decorators import ajax_required, profile_required, admin_required
from api.decorators import setting_required
from api.utils.views import call_api_view
from api.mon.alerting.views import mon_alert_list
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def mon_server_redirect(request):
"""
Monitoring management.
"""
if match("^http", request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL):
return redirect(request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL)
else:
return redirect(request.dc.settings.MON_ZABBIX_SERVER)
@login_required
@admin_required
@ajax_required
@require_POST
def alert_list_table(request):
context = collect_view_data(request, 'mon_alert_list')
try:
api_data = json.loads(request.POST.get('alert_filter', None))
except (ValueError, TypeError):
context['error'] = 'Unexpected error: could not parse alert filter.'
else:
context['alert_filter'] = api_data
res = call_api_view(request, 'GET', mon_alert_list, data=api_data)
if res.status_code == 200:
context['alerts'] = res.data['result']
elif res.status_code == 201:
context['error'] = 'Unexpected error: got into an API loop.'
else:
context['error'] = res.data.get('result', {}).get('error', res.data)
return render(request, 'gui/mon/alert_table.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def alert_list(request):
context = collect_view_data(request, 'mon_alert_list')
data = request.GET.copy()
data.pop('_', None)
if not data and request.user.is_staff and request.dc.is_default():
data['show_nodes'] = True
context['filters'] = form = BaseAlertFilterForm(request, data)
context['init'] = True
if form.is_valid() and form.api_data is not None: # new visit, or form submission
context['alert_filter'] = form.api_data
context['alert_filter_ok'] = True
else:
context['alert_filter_ok'] = False # Do not run javascript API TASKs!
return render(request, 'gui/mon/alert_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def hostgroup_list(request):
context = collect_view_data(request, 'mon_hostgroup_list')
return render(request, 'gui/mon/hostgroup_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def template_list(request):
context = collect_view_data(request, 'mon_template_list')
return render(request, 'gui/mon/template_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def action_list(request):
context = collect_view_data(request, 'mon_action_list')
return render(request, 'gui/mon/action_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def webcheck_list(request):
context = collect_view_data(request, 'mon_webcheck_list')
return render(request, 'gui/mon/webcheck_list.html', context)
| 29.649573 | 86 | 0.742289 | import json
from re import match
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.shortcuts import redirect, render
from gui.mon.forms import BaseAlertFilterForm
from gui.utils import collect_view_data
from gui.decorators import ajax_required, profile_required, admin_required
from api.decorators import setting_required
from api.utils.views import call_api_view
from api.mon.alerting.views import mon_alert_list
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def mon_server_redirect(request):
if match("^http", request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL):
return redirect(request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL)
else:
return redirect(request.dc.settings.MON_ZABBIX_SERVER)
@login_required
@admin_required
@ajax_required
@require_POST
def alert_list_table(request):
context = collect_view_data(request, 'mon_alert_list')
try:
api_data = json.loads(request.POST.get('alert_filter', None))
except (ValueError, TypeError):
context['error'] = 'Unexpected error: could not parse alert filter.'
else:
context['alert_filter'] = api_data
res = call_api_view(request, 'GET', mon_alert_list, data=api_data)
if res.status_code == 200:
context['alerts'] = res.data['result']
elif res.status_code == 201:
context['error'] = 'Unexpected error: got into an API loop.'
else:
context['error'] = res.data.get('result', {}).get('error', res.data)
return render(request, 'gui/mon/alert_table.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def alert_list(request):
context = collect_view_data(request, 'mon_alert_list')
data = request.GET.copy()
data.pop('_', None)
if not data and request.user.is_staff and request.dc.is_default():
data['show_nodes'] = True
context['filters'] = form = BaseAlertFilterForm(request, data)
context['init'] = True
if form.is_valid() and form.api_data is not None:
context['alert_filter'] = form.api_data
context['alert_filter_ok'] = True
else:
context['alert_filter_ok'] = False
return render(request, 'gui/mon/alert_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def hostgroup_list(request):
context = collect_view_data(request, 'mon_hostgroup_list')
return render(request, 'gui/mon/hostgroup_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def template_list(request):
context = collect_view_data(request, 'mon_template_list')
return render(request, 'gui/mon/template_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def action_list(request):
context = collect_view_data(request, 'mon_action_list')
return render(request, 'gui/mon/action_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def webcheck_list(request):
context = collect_view_data(request, 'mon_webcheck_list')
return render(request, 'gui/mon/webcheck_list.html', context)
| true | true |
f70f9523490f01a422da6c46e57bf7055ca504f2 | 515 | py | Python | ucp_intro/07_mad_lib_game.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | ucp_intro/07_mad_lib_game.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | ucp_intro/07_mad_lib_game.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | # Primer juego...
print("Mi poesia:")
print("Las rosas son Rojas")
print("Las violetas son Azules")
print("Y yo te amo a ti")
# Mad Libs
# ingresar palabras random, adjetivos, verbos, sustantivos.
print("Ahora te toca a vos")
print("")
color = input("Ingrese un color: ")
sustantivo_plular = input("Ingrese un sustantivo en plural: ")
celebridad = input("Ingrese el nombre de una celebridad: ")
print("Las rosas son " + color)
print( sustantivo_plular + " son Azules")
print("Y yo te amo a ti " + celebridad )
| 24.52381 | 62 | 0.702913 |
print("Mi poesia:")
print("Las rosas son Rojas")
print("Las violetas son Azules")
print("Y yo te amo a ti")
print("Ahora te toca a vos")
print("")
color = input("Ingrese un color: ")
sustantivo_plular = input("Ingrese un sustantivo en plural: ")
celebridad = input("Ingrese el nombre de una celebridad: ")
print("Las rosas son " + color)
print( sustantivo_plular + " son Azules")
print("Y yo te amo a ti " + celebridad )
| true | true |
f70f96b662e9909e240adb12a588cfe7baf1df63 | 14,338 | py | Python | src/pytorch_metric_learning/utils/logging_presets.py | kvzhao/pytorch-metric-learning | 9c8a94bd1a906317d5834f26d8a94e59d578b825 | [
"MIT"
] | 2 | 2020-08-11T03:42:15.000Z | 2022-01-11T07:25:30.000Z | src/pytorch_metric_learning/utils/logging_presets.py | FadouaKhm/pytorch-metric-learning | 9eb792bcfc1616b599e6ee457514e3cb3a7235dd | [
"MIT"
] | null | null | null | src/pytorch_metric_learning/utils/logging_presets.py | FadouaKhm/pytorch-metric-learning | 9eb792bcfc1616b599e6ee457514e3cb3a7235dd | [
"MIT"
] | 1 | 2021-03-15T04:24:52.000Z | 2021-03-15T04:24:52.000Z | import logging
from . import common_functions as c_f
import os
import torch
from collections import defaultdict
import sqlite3
# You can write your own hooks for logging.
# But if you'd like something that just works, then use this HookContainer.
# You'll need to install record-keeper and tensorboard.
# pip install record-keeper tensorboard
class HookContainer:
def __init__(self, record_keeper,
record_group_name_prefix=None,
primary_metric="mean_average_precision_at_r",
validation_split_name="val"):
self.record_keeper = record_keeper
self.record_group_name_prefix = record_group_name_prefix
self.saveable_trainer_objects = ["models", "optimizers", "lr_schedulers", "loss_funcs", "mining_funcs"]
self.primary_metric = primary_metric
self.validation_split_name = validation_split_name
############################################
############################################
################## HOOKS #################
############################################
############################################
### Define the end_of_iteration hook. This will be executed at the end of every iteration. ###
def end_of_iteration_hook(self, trainer):
record_these = [[trainer.loss_tracker.losses, {"input_group_name_for_non_objects": "loss_histories"}],
[trainer.loss_tracker.loss_weights, {"input_group_name_for_non_objects": "loss_weights"}],
[trainer.loss_funcs, {"recursive_types": [torch.nn.Module]}],
[trainer.mining_funcs, {}],
[trainer.models, {}],
[trainer.optimizers, {"custom_attr_func": self.optimizer_custom_attr_func}]]
for record, kwargs in record_these:
self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)
# This hook will be passed into the trainer and will be executed at the end of every epoch.
def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, test_collate_fn=None):
if not self.primary_metric in tester.accuracy_calculator.get_curr_metrics():
raise ValueError("HookContainer `primary_metric` must be one of: {}".format(tester.accuracy_calculator.get_curr_metrics()))
if not os.path.exists(model_folder): os.makedirs(model_folder)
def actual_hook(trainer):
continue_training = True
if trainer.epoch % test_interval == 0:
best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, test_collate_fn)
continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)
return continue_training
return actual_hook
def end_of_testing_hook(self, tester):
for split_name, accuracies in tester.all_accuracies.items():
epoch = accuracies["epoch"]
self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
_, _, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, split_name, epoch)
best = {"best_epoch":best_epoch, "best_accuracy": best_accuracy}
self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
for split_name, u in tester.dim_reduced_embeddings.items():
for k, (dim_reduced, labels) in u.items():
tag = '%s/%s'%(self.record_group_name(tester, split_name), k)
self.record_keeper.add_embedding_plot(dim_reduced, labels, tag, epoch)
############################################
############################################
######### MODEL LOADING AND SAVING #########
############################################
############################################
def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):
if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
resume_epoch, model_suffix = c_f.latest_version(model_folder, "trunk_*.pth", best=best)
if resume_epoch > 0:
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)
return resume_epoch + 1
def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)
if prev_suffix is not None:
c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder)
def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, collate_fn):
epoch = trainer.epoch
tester.test(dataset_dict, epoch, trainer.models["trunk"], trainer.models["embedder"], list(dataset_dict.keys()), collate_fn)
prev_best_epoch, _ = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)
is_new_best, curr_accuracy, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)
self.record_keeper.save_records()
trainer.step_lr_plateau_schedulers(curr_accuracy)
self.save_models(trainer, model_folder, epoch, epoch-test_interval) # save latest model
if is_new_best:
logging.info("New best accuracy! {}".format(curr_accuracy))
curr_suffix = "best%d"%best_epoch
prev_suffix = "best%d"%prev_best_epoch if prev_best_epoch is not None else None
self.save_models(trainer, model_folder, curr_suffix, prev_suffix) # save best model
return best_epoch
def is_new_best_accuracy(self, tester, split_name, epoch):
curr_accuracy = self.get_curr_primary_metric(tester, split_name)
best_epoch, best_accuracy = self.get_best_epoch_and_accuracy(tester, split_name)
is_new_best = False
if (curr_accuracy > best_accuracy) or (best_epoch is None):
best_epoch, best_accuracy = epoch, curr_accuracy
is_new_best = True
return is_new_best, curr_accuracy, best_epoch, best_accuracy
############################################
############################################
##### BEST EPOCH AND ACCURACY TRACKING #####
############################################
############################################
def get_loss_history(self, loss_names=()):
columns = "*" if len(loss_names) == 0 else ", ".join(loss_names)
table_name = "loss_histories"
if not self.record_keeper.table_exists(table_name):
return {}
output = self.record_keeper.query("SELECT {} FROM {}".format(columns, table_name), return_dict=True)
output.pop("id", None)
return output
def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return {}
def get_accuracies(keys):
keys = "*" if return_all_metrics else "epoch, %s"%keys
query = "SELECT {} FROM {}".format(keys, table_name)
return self.record_keeper.query(query, return_dict=True)
keys = metrics if len(metrics) > 0 else [self.primary_metric]
output = self.try_keys(keys, tester, get_accuracies)
output.pop("id", None)
return output
def get_curr_primary_metric(self, tester, split_name):
def get_curr(key):
return tester.all_accuracies[split_name][key]
return self.try_primary_metric(tester, get_curr)
def try_keys(self, input_keys, tester, input_func):
for average in [True, False]:
keys = ", ".join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])
try:
return input_func(keys)
except (KeyError, sqlite3.OperationalError):
pass
raise KeyError
def try_primary_metric(self, tester, input_func):
return self.try_keys([self.primary_metric], tester, input_func)
# returns accuracies of a specified epoch
def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return []
def get_accuracies(key):
columns = "*" if select_all else "epoch, %s"%key
query = "SELECT %s FROM %s WHERE epoch=?"%(columns, table_name)
return self.record_keeper.query(query, (epoch, ))
return self.try_primary_metric(tester, get_accuracies)
    # returns accuracies of best epoch and the metric name used to determine best accuracy
    def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=(-1,)):
        """Return (rows, metric_key) for the epoch with the best primary metric.

        Epochs listed in ``ignore_epoch`` are excluded both from the max() search
        and from the returned rows. Returns ([], None) when the table is missing.
        """
        table_name = self.record_group_name(tester, split_name)
        if not self.record_keeper.table_exists(table_name):
            return [], None
        def get_accuracies(key):
            columns = "*" if select_all else "epoch, %s"%key
            # One "?" placeholder per ignored epoch, reused in both NOT IN clauses.
            params = ", ".join(["?"]*len(ignore_epoch))
            query = """SELECT {0} FROM {1} WHERE {2}=
                        (SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))
                        AND epoch NOT IN ({3})""".format(columns, table_name, key, params)
            # ignore_epoch is bound twice because the placeholder list appears twice.
            output = self.record_keeper.query(query, ignore_epoch+ignore_epoch)
            return output, key
        return self.try_primary_metric(tester, get_accuracies)
    def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=(-1,)):
        """Return (best_epoch, best_accuracy), or (None, 0) if no records exist."""
        accuracies, key = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)
        if len(accuracies) > 0:
            # Multiple rows can tie on the max; the first one is used.
            return accuracies[0]["epoch"], accuracies[0][key]
        return None, 0
def patience_remaining(self, epoch, best_epoch, patience):
if patience is not None and best_epoch is not None:
if epoch - best_epoch > patience:
logging.info("Validation accuracy has plateaued. Exiting.")
return False
return True
    def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):
        """Run tester.test on the given models, optionally skipping finished splits.

        Returns:
            True if the tester was run, False if every requested split already has
            stored accuracies for this epoch.
        """
        if skip_eval_if_already_done:
            splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)
            if len(splits_to_eval) == 0:
                logging.info("Already evaluated")
                return False
        tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)
        return True
def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):
input_splits_to_eval = list(dataset_dict.keys()) if input_splits_to_eval is None else input_splits_to_eval
splits_to_eval = []
for split in input_splits_to_eval:
if len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0:
splits_to_eval.append(split)
return splits_to_eval
    def base_record_group_name(self, tester):
        """Build the table-name prefix for this tester's accuracy records."""
        # Optional user-supplied prefix, then the tester-specific suffix.
        base_record_group_name = "%s_"%self.record_group_name_prefix if self.record_group_name_prefix else ''
        base_record_group_name += tester.description_suffixes("accuracies")
        return base_record_group_name
    def record_group_name(self, tester, split_name):
        """Return the full record-group (table) name for a tester/split pair."""
        base_record_group_name = self.base_record_group_name(tester)
        # Split name is upper-cased, e.g. "..._VAL".
        return "%s_%s"%(base_record_group_name, split_name.upper())
def optimizer_custom_attr_func(self, optimizer):
return {"lr": optimizer.param_groups[0]["lr"]}
class EmptyContainer:
    """Inert stand-in returned when no record keeper is available.

    Every hook is a no-op: the epoch hook accepts anything and returns None,
    and the remaining hook attributes are simply None.
    """

    # Hooks that callers may look up; deliberately inert.
    end_of_iteration_hook = None
    end_of_testing_hook = None

    def end_of_epoch_hook(self, *args):
        """Accept any arguments and do nothing."""
        return None
def get_record_keeper(csv_folder, tensorboard_folder, global_db_path=None, experiment_name=None, is_new_experiment=True, save_figures=False, save_lists=False):
    """Build a RecordKeeper with CSV and tensorboard writers.

    Returns:
        (record_keeper, record_writer, tensorboard_writer), or (None, None, None)
        when the optional ``record_keeper`` / ``tensorboard`` packages are not
        installed (a warning is logged and training proceeds without logging).
    """
    try:
        # Imported lazily so the rest of the module works without these extras.
        import record_keeper as record_keeper_package
        from torch.utils.tensorboard import SummaryWriter
        record_writer = record_keeper_package.RecordWriter(folder = csv_folder,
                                            global_db_path = global_db_path,
                                            experiment_name = experiment_name,
                                            is_new_experiment = is_new_experiment,
                                            save_lists = save_lists)
        tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)
        record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer = tensorboard_writer,
                                            record_writer = record_writer,
                                            attributes_to_search_for = c_f.list_of_recordable_attributes_list_names(),
                                            save_figures=save_figures)
        return record_keeper, record_writer, tensorboard_writer
    except ModuleNotFoundError as e:
        # Fix: logging.warn is a deprecated alias; use logging.warning.
        logging.warning(e)
        logging.warning("There won't be any logging or model saving.")
        logging.warning("To fix this, pip install record-keeper tensorboard")
        return None, None, None
def get_hook_container(record_keeper, **kwargs):
    """Return a HookContainer wrapping record_keeper, or an EmptyContainer when it is falsy.

    kwargs are forwarded to the HookContainer constructor.
    """
    if record_keeper:
        return HookContainer(record_keeper, **kwargs)
    else:
        # Fix: logging.warn is a deprecated alias; use logging.warning.
        logging.warning("No record_keeper, so no preset hooks are being returned.")
        return EmptyContainer()
| 52.328467 | 160 | 0.625122 | import logging
from . import common_functions as c_f
import os
import torch
from collections import defaultdict
import sqlite3
# You'll need to install record-keeper and tensorboard.
class HookContainer:
def __init__(self, record_keeper,
record_group_name_prefix=None,
primary_metric="mean_average_precision_at_r",
validation_split_name="val"):
self.record_keeper = record_keeper
self.record_group_name_prefix = record_group_name_prefix
self.saveable_trainer_objects = ["models", "optimizers", "lr_schedulers", "loss_funcs", "mining_funcs"]
self.primary_metric = primary_metric
self.validation_split_name = validation_split_name
| true | true |
f70f96f0024b1699103a63385d86facba9fae422 | 9,937 | py | Python | xfer/contrib/xfer_leap/synthetic_data.py | apaleyes/xfer | 99cd83424bc7e76a2c2def9d5b1dacd06f6e9eb5 | [
"Apache-2.0"
] | null | null | null | xfer/contrib/xfer_leap/synthetic_data.py | apaleyes/xfer | 99cd83424bc7e76a2c2def9d5b1dacd06f6e9eb5 | [
"Apache-2.0"
] | null | null | null | xfer/contrib/xfer_leap/synthetic_data.py | apaleyes/xfer | 99cd83424bc7e76a2c2def9d5b1dacd06f6e9eb5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from mxnet.gluon.data import ArrayDataset
import mxnet
from .data import MetaTaskDataContainer, TaskDataContainer
from .config import DEFAULT_CONFIG_SYNTHETIC
class MetaTaskSynthetic(MetaTaskDataContainer):
    """Meta-task container of synthetic linear-regression tasks.

    Tasks are split into train/test/val groups according to the config; each
    task is a TaskSynthetic built from one row of the weights matrix and one
    entry of the bias vector.
    """

    def __init__(self, config=None, weights=None, bias=None, seed=1, context=None):
        """
        :param config: If None, DEFAULT_CONFIG_SYNTHETIC is loaded.
        :param weights: Tasks' weights matrix. Row k corresponds to the weight parameters of task k. If None,
            weights are sampled from a N(0,1) shifted by config["global_bias"].
        :param bias: Tasks' biases vector. Entry k corresponds to the bias of task k. If None, it is sampled
            from a N(0,1) (or set to zeros when config["task_bias"] is falsy).
        :param seed: Seed for the ``random`` module.
            NOTE(review): mxnet.nd.random_normal is not seeded by random.seed — confirm if
            reproducible sampling is required.
        :param context: mxnet context; defaults to CPU.
        """
        if config is None:
            config = DEFAULT_CONFIG_SYNTHETIC
        self.config = config
        self.weights = weights
        self.bias = bias
        if context is None:
            context = mxnet.cpu()
        self.context = context
        self.seed = seed
        random.seed(self.seed)
        num_tasks_train = config["num_tasks_train"]
        num_tasks_test = config["num_tasks_test"]
        num_tasks_val = config["num_tasks_val"]
        num_tasks = num_tasks_train + num_tasks_test + num_tasks_val
        self.num_tasks = num_tasks
        self._generate_parameters()
        self._validate_parameters()
        num_examples = config["num_examples_per_task"]
        std_x = config["std_x"]
        hold_out = config["hold_out"]
        noise = config["std_noise"]
        # Generate the training/test/val datasets.
        # Each dataset is a list of TaskSynthetic objects (one per task).
        data_train = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
                                    context=context)
                      for t in np.arange(0, num_tasks_train)]
        data_test = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
                                   context=context)
                     for t in np.arange(num_tasks_train, num_tasks_train + num_tasks_test)]
        data_val = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
                                  context=context)
                    for t in np.arange(num_tasks_train + num_tasks_test, num_tasks)]
        super().__init__(data_train, data_test, data_val, context=context)

    def plot_sample(self, root="./sample_synth"):
        """Plot the training (and, if held out, validation) samples of every task group into ``root``."""
        if self.weights.shape[1] != 2:
            raise ValueError("Only 2D datasets can be plot.")
        if not os.path.exists(root):
            os.makedirs(root)
        fig_train = self._plot([dd._train_dataset for dd in self.train_tasks],
                               "Training Samples for Training Tasks")
        fig_train.savefig(os.path.join(root, "sample_train_train_tasks.png"))
        del fig_train
        fig_test = self._plot([dd._train_dataset for dd in self.test_tasks],
                              "Training Samples for Test Tasks")
        fig_test.savefig(os.path.join(root, "sample_train_test_tasks.png"))
        del fig_test
        fig_val = self._plot([dd._train_dataset for dd in self.val_tasks],
                             "Training Samples for Validation Tasks")
        fig_val.savefig(os.path.join(root, "sample_train_val_tasks.png"))
        del fig_val
        if self.config["hold_out"] > 0:
            # Held-out validation samples exist only when hold_out was requested.
            fig_train = self._plot([dd._val_dataset for dd in self.train_tasks],
                                   "Validation Samples for Training Tasks")
            fig_train.savefig(os.path.join(root, "sample_val_train_tasks.png"))
            del fig_train
            fig_test = self._plot([dd._val_dataset for dd in self.test_tasks],
                                  "Validation Samples for Test Tasks")
            fig_test.savefig(os.path.join(root, "sample_val_test_tasks.png"))
            del fig_test
            fig_val = self._plot([dd._val_dataset for dd in self.val_tasks],
                                 "Validation Samples for Validation Tasks")
            fig_val.savefig(os.path.join(root, "sample_val_val_tasks.png"))
            del fig_val

    def _plot(self, data, title):
        """Helper: scatter-plot each task's (X, y) side by side and return the figure."""
        num_tasks = len(data)
        fig, ax = plt.subplots(1, num_tasks, figsize=(num_tasks*5, 5))
        fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
        for mm in range(num_tasks):
            X, y = data[mm][:]
            X = X.asnumpy()
            y = y.asnumpy()
            ax[mm].scatter(X[:, 0], X[:, 1], c=y.flatten())
        fig.suptitle(title, size=18)
        return fig

    def _validate_parameters(self):
        """Check that weights/bias dimensions agree with the total number of tasks."""
        if self.weights.shape[0] != self.num_tasks:
            raise ValueError("Number of rows in w must be equal to the total number of tasks")
        if len(self.bias) != self.num_tasks:
            raise ValueError("Length of b must be equal to the total number of tasks")

    def _generate_parameters(self):
        """Sample weights/bias for all tasks whenever they were not supplied."""
        if self.weights is None:
            dim = self.config["dim"]
            self.weights = self.config["global_bias"] + mxnet.nd.random_normal(shape=(self.num_tasks, dim),
                                                                               ctx=self.context)
        if self.bias is None:
            if self.config["task_bias"]:
                self.bias = mxnet.nd.random_normal(shape=self.num_tasks, ctx=self.context)
            else:
                # Bug fix: was `num_tasks`, an undefined local name here (NameError
                # whenever config["task_bias"] is falsy). Use the stored attribute.
                self.bias = mxnet.nd.zeros(self.num_tasks, ctx=self.context)
class TaskSynthetic(TaskDataContainer):
    """
    Synthetic Task Container: Linear Regression.

    Generates (X, y) pairs with y = X.w + b + Gaussian noise.
    """
    def __init__(self, w, b, num_examples, std_x, noise, hold_out=None, seed=None, context=None):
        """
        :param w: Task's weights vector.
        :param b: Task's bias.
        :param num_examples: Total number of examples per task.
        :param std_x: The covariates are sampled from a zero mean normal distribution with
        standard deviation equal to std_x.
        :param noise: Standard deviation of the additive Gaussian observation noise (0 disables it).
        :param hold_out: Number of examples to hold out for validation
        :param seed: seed for the random generator
            NOTE(review): random.seed does not seed mxnet.nd.random_normal — confirm if
            reproducibility is expected here.
        :param context: mxnet context; defaults to CPU.
        """
        self.w = w
        self.b = b
        self.num_examples = num_examples
        self.seed = seed
        if context is None:
            context = mxnet.cpu()
        self.context = context
        if seed:
            random.seed(seed)
        if hold_out and hold_out < num_examples:
            # Split: (num_examples - hold_out) training points plus hold_out validation points.
            Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples - hold_out, len(w)),
                                                                    ctx=context), noise)
            train_dataset = ArrayDataset(Xtr, Ytr)
            Xval, Yval = self._real_fn(std_x * mxnet.nd.random_normal(shape=(hold_out, len(w)), ctx=context), noise)
            val_dataset = ArrayDataset(Xval, Yval)
        else:
            # No hold-out: every example goes to training; no validation dataset.
            Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples, len(w)), ctx=context), noise)
            train_dataset = ArrayDataset(Xtr, Ytr)
            val_dataset = None
        super().__init__(train_dataset, val_dataset, context=context)

    def _real_fn(self, X, noise):
        """Return (X, y) where y = X.w + b, plus N(0, noise^2) observation noise when noise > 0."""
        y = mxnet.nd.dot(X, mxnet.nd.expand_dims(self.w, axis=1)) + self.b
        if noise > 0.0:
            # NOTE(review): the noise sample has no ctx argument, so it is drawn on the
            # default (CPU) context regardless of self.context — confirm intended.
            y += mxnet.nd.expand_dims(noise * mxnet.nd.random_normal(shape=(X.shape[0],)), axis=1)
        return X, y
if __name__ == '__main__':
    # Smoke test 1: default config — parameters sampled internally.
    s1 = MetaTaskSynthetic()
    s1.plot_sample()
    batch_size = 20
    train_tasks = s1.train_tasks
    assert len(s1.train_tasks) == 3
    for task in train_tasks:
        # Check one batch from each iterator: shapes and dtype must match the 2D config.
        tr_iterator = task.get_train_iterator(batch_size)
        for data in tr_iterator:
            assert (data[0].shape == (batch_size, 2))
            assert (data[1].shape == (batch_size, 1))
            assert (data[1].asnumpy().dtype == np.float32)
            break
        val_iterator = task.get_val_iterator(batch_size)
        for data in val_iterator:
            assert (data[0].shape == (batch_size, 2))
            assert (data[1].shape == (batch_size, 1))
            assert (data[1].asnumpy().dtype == np.float32)
            break
    # Smoke test 2: explicitly supplied weights and biases.
    dim = 2
    num_tasks = 15
    w = mxnet.nd.random_normal(shape=(num_tasks, dim))
    b = mxnet.nd.random_normal(shape=num_tasks)
    s2 = MetaTaskSynthetic(weights=w, bias=b)
    s2.plot_sample(root="./sample_synth_w_b_given")
    batch_size = 20
    train_tasks = s2.train_tasks
    assert len(train_tasks) == 3
    for task in train_tasks:
        tr_iterator = task.get_train_iterator(batch_size)
        for data in tr_iterator:
            assert (data[0].shape == (batch_size, 2))
            assert (data[1].shape == (batch_size, 1))
            assert (data[1].asnumpy().dtype == np.float32)
            break
        val_iterator = task.get_val_iterator(batch_size)
        for data in val_iterator:
            assert (data[0].shape == (batch_size, 2))
            assert (data[1].shape == (batch_size, 1))
            assert (data[1].asnumpy().dtype == np.float32)
            break
| 39.748 | 118 | 0.603301 |
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from mxnet.gluon.data import ArrayDataset
import mxnet
from .data import MetaTaskDataContainer, TaskDataContainer
from .config import DEFAULT_CONFIG_SYNTHETIC
class MetaTaskSynthetic(MetaTaskDataContainer):
def __init__(self, config=None, weights=None, bias=None, seed=1, context=None):
if config is None:
config = DEFAULT_CONFIG_SYNTHETIC
self.config = config
self.weights = weights
self.bias = bias
if context is None:
context = mxnet.cpu()
self.context = context
self.seed = seed
random.seed(self.seed)
num_tasks_train = config["num_tasks_train"]
num_tasks_test = config["num_tasks_test"]
num_tasks_val = config["num_tasks_val"]
num_tasks = num_tasks_train + num_tasks_test + num_tasks_val
self.num_tasks = num_tasks
self._generate_parameters()
self._validate_parameters()
num_examples = config["num_examples_per_task"]
std_x = config["std_x"]
hold_out = config["hold_out"]
noise = config["std_noise"]
data_train = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(0, num_tasks_train)]
data_test = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(num_tasks_train, num_tasks_train + num_tasks_test)]
data_val = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(num_tasks_train + num_tasks_test, num_tasks)]
super().__init__(data_train, data_test, data_val, context=context)
def plot_sample(self, root="./sample_synth"):
if self.weights.shape[1] != 2:
raise ValueError("Only 2D datasets can be plot.")
if not os.path.exists(root):
os.makedirs(root)
fig_train = self._plot([dd._train_dataset for dd in self.train_tasks],
"Training Samples for Training Tasks")
fig_train.savefig(os.path.join(root, "sample_train_train_tasks.png"))
del fig_train
fig_test = self._plot([dd._train_dataset for dd in self.test_tasks],
"Training Samples for Test Tasks")
fig_test.savefig(os.path.join(root, "sample_train_test_tasks.png"))
del fig_test
fig_val = self._plot([dd._train_dataset for dd in self.val_tasks],
"Training Samples for Validation Tasks")
fig_val.savefig(os.path.join(root, "sample_train_val_tasks.png"))
del fig_val
if self.config["hold_out"] > 0:
fig_train = self._plot([dd._val_dataset for dd in self.train_tasks],
"Validation Samples for Training Tasks")
fig_train.savefig(os.path.join(root, "sample_val_train_tasks.png"))
del fig_train
fig_test = self._plot([dd._val_dataset for dd in self.test_tasks],
"Validation Samples for Test Tasks")
fig_test.savefig(os.path.join(root, "sample_val_test_tasks.png"))
del fig_test
fig_val = self._plot([dd._val_dataset for dd in self.val_tasks],
"Validation Samples for Validation Tasks")
fig_val.savefig(os.path.join(root, "sample_val_val_tasks.png"))
del fig_val
def _plot(self, data, title):
num_tasks = len(data)
fig, ax = plt.subplots(1, num_tasks, figsize=(num_tasks*5, 5))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
for mm in range(num_tasks):
X, y = data[mm][:]
X = X.asnumpy()
y = y.asnumpy()
ax[mm].scatter(X[:, 0], X[:, 1], c=y.flatten())
fig.suptitle(title, size=18)
return fig
def _validate_parameters(self):
if self.weights.shape[0] != self.num_tasks:
raise ValueError("Number of rows in w must be equal to the total number of tasks")
if len(self.bias) != self.num_tasks:
raise ValueError("Length of b must be equal to the total number of tasks")
def _generate_parameters(self):
if self.weights is None:
dim = self.config["dim"]
self.weights = self.config["global_bias"] + mxnet.nd.random_normal(shape=(self.num_tasks, dim),
ctx=self.context)
if self.bias is None:
if self.config["task_bias"]:
self.bias = mxnet.nd.random_normal(shape=self.num_tasks, ctx=self.context)
else:
self.bias = mxnet.nd.zeros(num_tasks, ctx=self.context)
class TaskSynthetic(TaskDataContainer):
def __init__(self, w, b, num_examples, std_x, noise, hold_out=None, seed=None, context=None):
self.w = w
self.b = b
self.num_examples = num_examples
self.seed = seed
if context is None:
context = mxnet.cpu()
self.context = context
if seed:
random.seed(seed)
if hold_out and hold_out < num_examples:
Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples - hold_out, len(w)),
ctx=context), noise)
train_dataset = ArrayDataset(Xtr, Ytr)
Xval, Yval = self._real_fn(std_x * mxnet.nd.random_normal(shape=(hold_out, len(w)), ctx=context), noise)
val_dataset = ArrayDataset(Xval, Yval)
else:
Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples, len(w)), ctx=context), noise)
train_dataset = ArrayDataset(Xtr, Ytr)
val_dataset = None
super().__init__(train_dataset, val_dataset, context=context)
def _real_fn(self, X, noise):
y = mxnet.nd.dot(X, mxnet.nd.expand_dims(self.w, axis=1)) + self.b
if noise > 0.0:
y += mxnet.nd.expand_dims(noise * mxnet.nd.random_normal(shape=(X.shape[0],)), axis=1)
return X, y
if __name__ == '__main__':
s1 = MetaTaskSynthetic()
s1.plot_sample()
batch_size = 20
train_tasks = s1.train_tasks
assert len(s1.train_tasks) == 3
for task in train_tasks:
tr_iterator = task.get_train_iterator(batch_size)
for data in tr_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
val_iterator = task.get_val_iterator(batch_size)
for data in val_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
dim = 2
num_tasks = 15
w = mxnet.nd.random_normal(shape=(num_tasks, dim))
b = mxnet.nd.random_normal(shape=num_tasks)
s2 = MetaTaskSynthetic(weights=w, bias=b)
s2.plot_sample(root="./sample_synth_w_b_given")
batch_size = 20
train_tasks = s2.train_tasks
assert len(train_tasks) == 3
for task in train_tasks:
tr_iterator = task.get_train_iterator(batch_size)
for data in tr_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
val_iterator = task.get_val_iterator(batch_size)
for data in val_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
| true | true |
f70f977c652da15ff9173d2b3951c11786d65299 | 6,282 | py | Python | solution/10. regular-expression-match.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | solution/10. regular-expression-match.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | solution/10. regular-expression-match.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | '''
A: suffix solution
1. subproblems: define dp(i, j) = is_match(s[i:], p[j:]), suffix
2. guess,
2.1 the current char in p is a '*'
- use '*', repeat the char before it
- do not use '*', skip to next char after '*'
2.2 current char in s and p are match, s[i] == p[j] or p[j] == '.'
3. relate subproblems:
dp(i, j) = match(s[i:], s[j:])
dp(i, j) =
a. if j + 1 is in bound and p[j + 1] == '*', then
dp(i, j + 2) or (s[i] = p[j] or p[j] = '.' and dp(i + 1, j))
b. if s[i] == p[j] or p[j] == '.', then dp(i + 1, j + 1)
        c. else false
B: prefix solution
1. subproblems: define dp(i, j) = is_match(s[:i], p[:j]), prefix
2. guess,
2.1 current char in s and p are match, s[i] == p[j] or p[j] == '.'
2.2 the current char in p is a '*'
- use '*', repeat the char before it
- do not use '*', skip to next char after '*'
3. relate subproblems:
dp(i, j) = match(s[:i], s[:j])
dp(i, j) =
a. if s[i] == p[j] or p[j] == '.', then dp(i - 1, j - 1)
b. if p[j] == '*', then
dp(i, j - 2) or (s[i] = p[j - 1] or p[j - 1] = '.' and dp(i - 1, j))
c. else false
reference:
1. https://www.youtube.com/watch?v=HAA8mgxlov8 (use * or no use)
2. https://www.youtube.com/watch?v=l3hda49XcDE (dp solution)
'''
class Solution:
    """Regular-expression matching supporting '.' (any char) and '*' (zero or more of the prior char).

    Four equivalent implementations are provided: top-down memoized DFS and
    bottom-up DP, each over either suffixes or prefixes of s and p.
    """

    def isMatch(self, s: str, p: str) -> bool:
        """Return True if pattern p matches the entire string s."""
        # Any of the implementations below is interchangeable here.
        return self.dp_bottom_up_suffix(s, p)

    def dfs_suffix(self, s, p, i, j, memo):
        """Top-down memoized match of s[i:] against p[j:].

        Note: s may be exhausted while p still matches (e.g. s="a", p="a*b*"),
        but if p is exhausted with s remaining there is no match.
        """
        if (i, j) in memo:
            return memo[(i, j)]
        # Both exhausted: complete match.
        if i >= len(s) and j >= len(p):
            return True
        # Pattern exhausted but string remains.
        if j >= len(p):
            return False
        # Current characters match (guard i against running past s).
        match = i < len(s) and (s[i] == p[j] or p[j] == '.')
        if j + 1 < len(p) and p[j + 1] == '*':
            # Either consume one char of s (keep the star), or skip "x*" entirely.
            # Bug fix: these recursive calls previously used the stale name self.dfs_td,
            # which no longer exists on this class.
            memo[(i, j)] = (match and self.dfs_suffix(s, p, i + 1, j, memo)) or self.dfs_suffix(s, p, i, j + 2, memo)
            return memo[(i, j)]
        if match:
            memo[(i, j)] = self.dfs_suffix(s, p, i + 1, j + 1, memo)
            return memo[(i, j)]
        # No match and no star to rescue us.
        memo[(i, j)] = False
        return False

    def dp_bottom_up_suffix(self, s, p):
        """Bottom-up DP where dp[i][j] == match(s[i:], p[j:]); answer is dp[0][0]."""
        s_len = len(s)
        p_len = len(p)
        dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
        dp[s_len][p_len] = True
        # Last row: patterns like a*b*c* can match the empty suffix of s.
        for j in range(p_len - 2, -1, -1):
            if p[j + 1] == '*':
                dp[s_len][j] = dp[s_len][j + 2]
        for i in range(s_len - 1, -1, -1):
            for j in range(p_len - 1, -1, -1):
                # For suffixes, the '*' lookahead check goes first.
                if j <= p_len - 2 and p[j + 1] == '*':
                    if s[i] == p[j] or p[j] == '.':
                        dp[i][j] = dp[i + 1][j]
                    dp[i][j] = (dp[i][j] or dp[i][j + 2])
                    continue
                if s[i] == p[j] or p[j] == '.':
                    dp[i][j] = dp[i + 1][j + 1]
        # (Debug prints of the dp table removed.)
        return dp[0][0]

    def dfs_prefix(self, s, p, i, j):
        """Top-down (unmemoized) match of s[:i+1] against p[:j+1]; start at (len(s)-1, len(p)-1)."""
        if i < 0 and j < 0:
            return True
        # Pattern exhausted but string remains.
        if j < 0:
            return False
        if j >= 0 and p[j] == '*':
            # Does the char before '*' match the current char of s?
            match = (i >= 0) and (s[i] == p[j - 1] or p[j - 1] == '.')
            # Consume one char of s, or drop the "x*" pair.
            return (match and self.dfs_prefix(s, p, i - 1, j)) or self.dfs_prefix(s, p, i, j - 2)
        if i >= 0 and (s[i] == p[j] or p[j] == '.'):
            return self.dfs_prefix(s, p, i - 1, j - 1)
        return False

    def dp_bottome_up_prefix(self, s, p):
        """Bottom-up DP where dp[i][j] == match(s[:i], p[:j]); answer is dp[len(s)][len(p)].

        (Method name keeps its original spelling for caller compatibility.)
        """
        s_len, p_len = len(s), len(p)
        dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
        dp[0][0] = True
        # Row 0: patterns like a*, a*b*, a*b*c* can match the empty prefix of s.
        for j in range(1, p_len + 1):
            if p[j - 1] == '*':
                dp[0][j] = dp[0][j - 2]
        for i in range(1, s_len + 1):
            for j in range(1, p_len + 1):
                if s[i - 1] == p[j - 1] or p[j - 1] == '.':
                    dp[i][j] = dp[i - 1][j - 1]
                    continue
                if p[j - 1] == '*':
                    if s[i - 1] == p[j - 2] or p[j - 2] == '.':
                        dp[i][j] = dp[i - 1][j]
                    dp[i][j] = (dp[i][j] or dp[i][j - 2])
        # (Debug prints of the dp table removed.)
        return dp[s_len][p_len]
# Ad-hoc manual checks; only the last assignments of s/p are actually tested.
s = 'aab'
p = 'c*a*b'
# s = 'aaa'
# p = 'aaaa'
# s = "a"
# p = ".*..a*"
s = 'aa'
p = 'a*'
sol = Solution()
print(sol.isMatch(s, p))
x = 'abc'
print(x[1:1]) | 32.381443 | 109 | 0.446832 | class Solution:
def isMatch(self, s: str, p: str) -> bool:
return self.dp_bottom_up_suffix(s, p)
def dfs_suffix(self, s, p, i, j, memo):
if (i, j) in memo:
return memo[(i, j)]
if i >= len(s) and j >= len(p):
return True
if j >= len(p):
return False
match = i < len(s) and (s[i] == p[j] or p[j] == '.')
if j + 1 < len(p) and p[j + 1] == '*':
memo[(i, j)] = (match and self.dfs_td(s, p, i + 1, j, memo)) or self.dfs_td(s, p, i, j + 2, memo)
return memo[(i, j)]
if match:
memo[(i, j)] = self.dfs_td(s, p, i + 1, j + 1, memo)
return memo[(i, j)]
memo[(i, j)] = False
return False
def dp_bottom_up_suffix(self, s, p):
s_len = len(s)
p_len = len(p)
dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
dp[s_len][p_len] = True
for j in range(p_len - 2, -1, -1):
if p[j + 1] == '*':
dp[s_len][j] = dp[s_len][j + 2]
for i in range(s_len - 1, -1, -1):
for j in range(p_len - 1, -1, -1):
if j <= p_len - 2 and p[j + 1] == '*':
if s[i] == p[j] or p[j] == '.':
dp[i][j] = dp[i + 1][j]
dp[i][j] = (dp[i][j] or dp[i][j + 2])
continue
if s[i] == p[j] or p[j] == '.':
dp[i][j] = dp[i + 1][j + 1]
for i in dp:
print(i)
print()
return dp[0][0]
def dfs_prefix(self, s, p, i, j):
if i < 0 and j < 0:
return True
if j < 0:
return False
if j >= 0 and p[j] == '*':
match = (i >= 0) and (s[i] == p[j - 1] or p[j - 1] == '.')
return (match and self.dfs_prefix(s, p, i - 1, j)) or self.dfs_prefix(s, p, i, j - 2)
if i >= 0 and (s[i] == p[j] or p[j] == '.'):
return self.dfs_prefix(s, p, i - 1, j - 1)
return False
def dp_bottome_up_prefix(self, s, p):
s_len, p_len = len(s), len(p)
dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
dp[0][0] = True
for j in range(1, p_len + 1):
if p[j - 1] == '*':
dp[0][j] = dp[0][j - 2]
for i in range(1, s_len + 1):
for j in range(1, p_len + 1):
if s[i - 1] == p[j - 1] or p[j - 1] == '.':
dp[i][j] = dp[i - 1][j - 1]
continue
if p[j - 1] == '*':
if s[i - 1] == p[j - 2] or p[j - 2] == '.':
dp[i][j] = dp[i - 1][j]
dp[i][j] = (dp[i][j] or dp[i][j - 2])
for i in dp:
print(i)
print()
return dp[s_len][p_len]
s = 'aab'
p = 'c*a*b'
s = 'aa'
p = 'a*'
sol = Solution()
print(sol.isMatch(s, p))
x = 'abc'
print(x[1:1]) | true | true |
f70f988e2189afa8f9091db3302eee4536752431 | 5,172 | py | Python | opentamp/src/policy_hooks/vae/run_training.py | Algorithmic-Alignment-Lab/openTAMP-legacy | 3b7c3be164cc968ad77a928286d6460cd70a670e | [
"MIT"
] | 2 | 2022-03-09T19:48:20.000Z | 2022-03-26T17:31:07.000Z | opentamp/src/policy_hooks/vae/run_training.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | opentamp/src/policy_hooks/vae/run_training.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | import argparse
import imp
import importlib
import random
from opentamp.src.policy_hooks.vae.vae_main import MultiProcessMain
def load_config(args, reload_module=None):
    """Build the run configuration dict from parsed CLI args.

    If args.config names a module under policy_hooks, it is imported (or
    reloaded when reload_module is given) and its ``config`` dict is extended;
    otherwise a fresh dict is built purely from the CLI flags.

    Returns:
        (config, config_module) — config_module is None when no --config was given.
    """
    config_file = args.config
    if config_file != '':
        if reload_module is not None:
            config_module = reload_module
            # Fix: the imp module is deprecated and removed in Python 3.12;
            # importlib.reload is the supported equivalent (importlib is already imported).
            importlib.reload(config_module)
        else:
            config_module = importlib.import_module('policy_hooks.'+config_file)
        config = config_module.config
    else:
        config_module = None
        config = {}
    config['use_local'] = not args.remote
    # CLI values override the config module's defaults when positive/non-empty.
    config['num_conds'] = args.nconds if args.nconds > 0 else config['num_conds'] if 'num_conds' in config else 1
    if 'common' in config:
        config['common']['num_conds'] = config['num_conds']
    config['num_objs'] = args.nobjs if args.nobjs > 0 else config['num_objs'] if 'num_objs' in config else 1
    config['weight_dir'] = config['base_weight_dir'] + str(config['num_objs']) if 'base_weight_dir' in config else args.weight_dir
    config['log_timing'] = args.timing
    config['hl_timeout'] = 0
    config['rollout_server'] = args.rollout_server or args.all_servers
    config['vae_server'] = args.vae_server or args.all_servers
    config['viewer'] = args.viewer
    # Random id keeps concurrently launched servers distinct when none is given.
    config['server_id'] = args.server_id if args.server_id != '' else str(random.randint(0,2**32))
    config['n_rollout_servers'] = args.n_rollout_servers
    config['no_child_process'] = args.no_child_process
    config['rollout_len'] = args.rollout_len
    config['train_vae'] = args.train_vae
    config['unconditional'] = args.unconditional
    config['train_reward'] = args.train_reward
    config['load_step'] = args.load_step
    config['train_params'] = {
        'use_recurrent_dynamics': args.use_recurrent_dynamics,
        'use_overshooting': args.use_overshooting,
        'data_limit': args.train_samples if args.train_samples > 0 else None,
        'beta': args.beta,
        'overshoot_beta': args.overshoot_beta,
        'dist_constraint': args.dist_constraint,
    }
    return config, config_module
def load_env(args, reload_module=None):
    """Import (or reload) the environment module and return the named environment attribute.

    args.environment_path is the module path; args.environment is the attribute name.
    """
    env_path = args.environment_path
    if reload_module is not None:
        module = reload_module
        # Fix: the imp module is deprecated and removed in Python 3.12;
        # importlib.reload is the supported equivalent (importlib is already imported).
        importlib.reload(module)
    else:
        module = importlib.import_module(env_path)
    env = args.environment
    return getattr(module, env)
def main():
    """Parse CLI flags, build the run config, and start the MultiProcessMain driver."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default='')
    parser.add_argument('-wd', '--weight_dir', type=str, default='')
    parser.add_argument('-nf', '--nofull', action='store_true', default=False)
    parser.add_argument('-n', '--nconds', type=int, default=0)
    parser.add_argument('-o', '--nobjs', type=int, default=0)
    # parser.add_argument('-ptt', '--pretrain_timeout', type=int, default=300)
    parser.add_argument('-hlt', '--hl_timeout', type=int, default=0)
    parser.add_argument('-k', '--killall', action='store_true', default=True)
    parser.add_argument('-r', '--remote', action='store_true', default=False)
    parser.add_argument('-t', '--timing', action='store_true', default=False)
    parser.add_argument('-vae', '--vae_server', action='store_true', default=False)
    parser.add_argument('-sim', '--rollout_server', action='store_true', default=False)
    parser.add_argument('-all', '--all_servers', action='store_true', default=False)
    parser.add_argument('-v', '--viewer', action='store_true', default=False)
    parser.add_argument('-id', '--server_id', type=str, default='')
    parser.add_argument('-env_path', '--environment_path', type=str, default='')
    parser.add_argument('-env', '--environment', type=str, default='')
    parser.add_argument('-tamp', '--use_tamp', type=str, default='')
    parser.add_argument('-nrs', '--n_rollout_servers', type=int, default=1)
    parser.add_argument('-ncp', '--no_child_process', action='store_true', default=False)
    parser.add_argument('-rl', '--rollout_len', type=int, default=0)
    parser.add_argument('-tv', '--train_vae', action='store_true', default=False)
    parser.add_argument('-uncond', '--unconditional', action='store_true', default=False)
    parser.add_argument('-tr', '--train_reward', action='store_true', default=False)
    parser.add_argument('-loadstep', '--load_step', type=int, default=-1)
    parser.add_argument('-beta', '--beta', type=int, default=1)
    parser.add_argument('-beta_d', '--overshoot_beta', type=int, default=1)
    parser.add_argument('-nts', '--train_samples', type=int, default=-1)
    parser.add_argument('-rnn', '--use_recurrent_dynamics', action='store_true', default=False)
    parser.add_argument('-over', '--use_overshooting', action='store_true', default=False)
    parser.add_argument('-dist', '--dist_constraint', action='store_true', default=False)

    args = parser.parse_args()
    config, config_module = load_config(args)
    # With a config module: standard construction; otherwise build from an env class.
    if args.config != '':
        main = MultiProcessMain(config)
    else:
        env_cls = load_env(args)
        main = MultiProcessMain.no_config_load(env_cls, args.environment, config)
    main.start(kill_all=args.killall)
| 45.769912 | 130 | 0.683875 | import argparse
import imp
import importlib
import random
from opentamp.src.policy_hooks.vae.vae_main import MultiProcessMain
def load_config(args, reload_module=None):
    """Build the experiment configuration dict from parsed CLI arguments.

    When ``args.config`` names a module, loads ``policy_hooks.<args.config>``
    (or hot-reloads ``reload_module`` when supplied) and starts from its
    ``config`` dict; otherwise starts from an empty dict. The command-line
    options are then overlaid on top.

    Returns:
        Tuple ``(config, config_module)`` where ``config_module`` is ``None``
        when no config module name was given.
    """
    if args.config != '':
        if reload_module is not None:
            config_module = reload_module
            imp.reload(config_module)
        else:
            config_module = importlib.import_module('policy_hooks.' + args.config)
        config = config_module.config
    else:
        config_module = None
        config = {}

    # CLI values win over the module's values; fall back to 1 when neither
    # source provides a count.
    if args.nconds > 0:
        num_conds = args.nconds
    else:
        num_conds = config.get('num_conds', 1)
    config['use_local'] = not args.remote
    config['num_conds'] = num_conds
    if 'common' in config:
        config['common']['num_conds'] = num_conds
    config['num_objs'] = args.nobjs if args.nobjs > 0 else config.get('num_objs', 1)

    # Derive the weight directory from the config's base dir when present,
    # otherwise take the one given on the command line.
    if 'base_weight_dir' in config:
        config['weight_dir'] = config['base_weight_dir'] + str(config['num_objs'])
    else:
        config['weight_dir'] = args.weight_dir

    run_all = args.all_servers
    # A fresh random id is only generated when none was supplied.
    server_id = args.server_id or str(random.randint(0, 2**32))
    config.update({
        'log_timing': args.timing,
        'hl_timeout': 0,
        'rollout_server': args.rollout_server or run_all,
        'vae_server': args.vae_server or run_all,
        'viewer': args.viewer,
        'server_id': server_id,
        'n_rollout_servers': args.n_rollout_servers,
        'no_child_process': args.no_child_process,
        'rollout_len': args.rollout_len,
        'train_vae': args.train_vae,
        'unconditional': args.unconditional,
        'train_reward': args.train_reward,
        'load_step': args.load_step,
        'train_params': {
            'use_recurrent_dynamics': args.use_recurrent_dynamics,
            'use_overshooting': args.use_overshooting,
            # Non-positive sample counts mean "no limit".
            'data_limit': args.train_samples if args.train_samples > 0 else None,
            'beta': args.beta,
            'overshoot_beta': args.overshoot_beta,
            'dist_constraint': args.dist_constraint,
        },
    })
    return config, config_module
def load_env(args, reload_module=None):
    """Resolve the environment class named by the CLI arguments.

    Imports the module at ``args.environment_path`` (or hot-reloads
    ``reload_module`` when one is given) and returns the attribute named
    ``args.environment`` from it.
    """
    if reload_module is None:
        module = importlib.import_module(args.environment_path)
    else:
        module = reload_module
        imp.reload(module)
    return getattr(module, args.environment)
def main():
    """Command-line entry point: parse options, build the configuration via
    `load_config`, and start the multi-process VAE training servers."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default='')
    parser.add_argument('-wd', '--weight_dir', type=str, default='')
    parser.add_argument('-nf', '--nofull', action='store_true', default=False)
    parser.add_argument('-n', '--nconds', type=int, default=0)
    parser.add_argument('-o', '--nobjs', type=int, default=0)
    parser.add_argument('-hlt', '--hl_timeout', type=int, default=0)
    parser.add_argument('-k', '--killall', action='store_true', default=True)
    parser.add_argument('-r', '--remote', action='store_true', default=False)
    parser.add_argument('-t', '--timing', action='store_true', default=False)
    parser.add_argument('-vae', '--vae_server', action='store_true', default=False)
    parser.add_argument('-sim', '--rollout_server', action='store_true', default=False)
    parser.add_argument('-all', '--all_servers', action='store_true', default=False)
    parser.add_argument('-v', '--viewer', action='store_true', default=False)
    parser.add_argument('-id', '--server_id', type=str, default='')
    parser.add_argument('-env_path', '--environment_path', type=str, default='')
    parser.add_argument('-env', '--environment', type=str, default='')
    parser.add_argument('-tamp', '--use_tamp', type=str, default='')
    parser.add_argument('-nrs', '--n_rollout_servers', type=int, default=1)
    parser.add_argument('-ncp', '--no_child_process', action='store_true', default=False)
    parser.add_argument('-rl', '--rollout_len', type=int, default=0)
    parser.add_argument('-tv', '--train_vae', action='store_true', default=False)
    parser.add_argument('-uncond', '--unconditional', action='store_true', default=False)
    parser.add_argument('-tr', '--train_reward', action='store_true', default=False)
    parser.add_argument('-loadstep', '--load_step', type=int, default=-1)
    parser.add_argument('-beta', '--beta', type=int, default=1)
    parser.add_argument('-beta_d', '--overshoot_beta', type=int, default=1)
    parser.add_argument('-nts', '--train_samples', type=int, default=-1)
    parser.add_argument('-rnn', '--use_recurrent_dynamics', action='store_true', default=False)
    parser.add_argument('-over', '--use_overshooting', action='store_true', default=False)
    parser.add_argument('-dist', '--dist_constraint', action='store_true', default=False)
    args = parser.parse_args()
    config, config_module = load_config(args)
    # A named config module is assumed to carry everything the servers need;
    # without one, the environment class must be imported explicitly.
    # NOTE: the local name `main` shadows this function — harmless, since the
    # function is only invoked from the __main__ guard below.
    if args.config != '':
        main = MultiProcessMain(config)
    else:
        env_cls = load_env(args)
        main = MultiProcessMain.no_config_load(env_cls, args.environment, config)
    main.start(kill_all=args.killall)


if __name__ == '__main__':
    main()
| true | true |
f70f9b444405e0c9fb473ff45e8dc0b8422e10c7 | 2,325 | py | Python | tests/test_action_tag_category_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | null | null | null | tests/test_action_tag_category_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | 2 | 2019-03-25T18:03:02.000Z | 2019-03-26T13:13:59.000Z | tests/test_action_tag_category_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | 1 | 2021-03-05T10:12:21.000Z | 2021-03-05T10:12:21.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import mock
from tag_category_create import CategoryCreate
from vsphere_base_action_test_case import VsphereBaseActionTestCase
__all__ = [
'CategoryCreateTestCase'
]
class CategoryCreateTestCase(VsphereBaseActionTestCase):
    """Unit tests for the ``tag_category_create`` action."""
    __test__ = True
    action_cls = CategoryCreate

    @mock.patch("vmwarelib.actions.BaseAction.connect_rest")
    def test_run(self, connect_rest_mock):
        action = self.get_action_instance(self.new_config)

        # Stub out the tagging client so no real vSphere endpoint is needed.
        fake_result = "result"
        action.tagging = mock.Mock()
        action.tagging.category_create.return_value = fake_result

        kwargs = {
            "category_name": "name",
            "category_description": "test description",
            "category_cardinality": "SINGLE",
            "category_types": [],
            "vsphere": "default"
        }

        # The action should pass the result through untouched and forward the
        # parameters to the tagging client / REST connection verbatim.
        self.assertEqual(action.run(**kwargs), fake_result)
        action.tagging.category_create.assert_called_with(
            kwargs["category_name"],
            kwargs["category_description"],
            kwargs["category_cardinality"],
            kwargs["category_types"])
        connect_rest_mock.assert_called_with(kwargs["vsphere"])
| 38.114754 | 79 | 0.669247 |
import mock
from tag_category_create import CategoryCreate
from vsphere_base_action_test_case import VsphereBaseActionTestCase
__all__ = [
'CategoryCreateTestCase'
]
class CategoryCreateTestCase(VsphereBaseActionTestCase):
    """Unit tests for the ``tag_category_create`` action."""
    __test__ = True
    action_cls = CategoryCreate

    @mock.patch("vmwarelib.actions.BaseAction.connect_rest")
    def test_run(self, mock_connect):
        """The action forwards its parameters to the tagging client and
        returns the client's result unchanged."""
        action = self.get_action_instance(self.new_config)
        # Stub the tagging client so no real vSphere endpoint is contacted.
        expected_result = "result"
        action.tagging = mock.Mock()
        action.tagging.category_create.return_value = expected_result
        # Test inputs.
        category_name = "name"
        category_description = "test description"
        category_cardinality = "SINGLE"
        category_types = []
        vsphere = "default"
        test_kwargs = {
            "category_name": category_name,
            "category_description": category_description,
            "category_cardinality": category_cardinality,
            "category_types": category_types,
            "vsphere": vsphere
        }
        # Invoke the action with valid parameters and verify pass-through.
        result = action.run(**test_kwargs)
        self.assertEqual(result, expected_result)
        action.tagging.category_create.assert_called_with(category_name,
                                                          category_description,
                                                          category_cardinality,
                                                          category_types)
        mock_connect.assert_called_with(vsphere)
| true | true |
f70f9c881df168564cbf2431bbc2ebdf7e7f7ded | 18,480 | py | Python | tensorflow/contrib/data/python/ops/readers.py | idharmateja/tensorflow | 4e108ef30d7cd7ae5e1c550ec5ae27e79b8c6e39 | [
"Apache-2.0"
] | 13 | 2018-07-23T18:53:35.000Z | 2021-11-18T19:56:45.000Z | tensorflow/contrib/data/python/ops/readers.py | DandelionCN/tensorflow | 1712002ad02f044f7569224bf465e0ea00e6a6c4 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/data/python/ops/readers.py | DandelionCN/tensorflow | 1712002ad02f044f7569224bf465e0ea00e6a6c4 | [
"Apache-2.0"
] | 13 | 2018-09-07T13:28:38.000Z | 2020-07-17T15:06:24.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import deprecation
# dtypes usable in `column_defaults`: a bare dtype entry is converted by
# `make_csv_dataset` into an empty constant of that dtype.
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
                         dtypes.int64, dtypes.string)
def make_csv_dataset(
    file_pattern,
    batch_size,
    column_keys,
    column_defaults,
    label_key=None,
    field_delim=",",
    use_quote_delim=True,
    skip=0,
    filter_fn=None,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=10000,
    shuffle_seed=None,
    prefetch_buffer_size=1,
):
  """Reads CSV files into a dataset of batched (features, labels) elements.

  Each element of the returned dataset is a batch of `batch_size` CSV rows.
  Features are a dict mapping each name in `column_keys` to a `Tensor` of
  column data; when `label_key` is given, that column is split out and the
  elements become (features, labels) tuples, matching the format expected by
  `tf.Estimator.train` / `tf.Estimator.evaluate` input functions.

  Args:
    file_pattern: List of files or patterns of file paths containing CSV
      records. See @{tf.gfile.Glob} for pattern rules.
    batch_size: An int, the number of consecutive rows to combine per batch.
    column_keys: A list of strings naming the CSV columns, in order. One per
      column of the input record.
    column_defaults: A list with one entry per CSV column: either a scalar
      default value used when the column is optional, or — for a required
      column — an empty `Tensor` or one of the dtypes float32, float64,
      int32, int64, string.
    label_key: An optional string naming the label column.
    field_delim: An optional `string`. Defaults to `","`. Char delimiter to
      separate fields in a record.
    use_quote_delim: An optional bool. Defaults to `True`. If false, treats
      double quotation marks as regular characters inside string fields.
    skip: Number of leading lines to skip in each CSV file. Defaults to 0.
    filter_fn: An optional callable mapping a CSV line tensor to a boolean
      keep/drop decision. If None, no records are filtered.
    num_epochs: An int, number of times the dataset is repeated. If None,
      cycles through the dataset forever.
    shuffle: A bool indicating whether the input should be shuffled.
    shuffle_buffer_size: Buffer size used for shuffling; a larger buffer
      shuffles better at the cost of memory and startup time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: Number of feature batches to prefetch; the
      recommended value is the number of batches consumed per training step.

  Returns:
    A dataset of batched feature dicts, or (features, labels) tuples when
    `label_key` is set.
  """
  filenames = _get_file_names(file_pattern, False)
  # Bare dtypes in `column_defaults` mark required columns: represent them
  # as empty constants of that dtype for `decode_csv`.
  column_defaults = [
      constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
      for x in column_defaults
  ]
  if label_key is not None:
    assert label_key in column_keys

  def filename_to_dataset(filename):
    # One line-oriented dataset per file, minus skipped header lines and
    # filtered records.
    lines = core_readers.TextLineDataset(filename)
    if skip > 0:
      lines = lines.skip(skip)
    if filter_fn is not None:
      lines = lines.filter(filter_fn)
    return lines

  def decode_csv(line):
    """Parse a (batched) CSV line tensor into named feature columns,
    optionally splitting out the label column."""
    columns = parsing_ops.decode_csv(
        line,
        column_defaults,
        field_delim=field_delim,
        use_quote_delim=use_quote_delim)
    features = dict(zip(column_keys, columns))
    if label_key is not None:
      label = features.pop(label_key)
      return features, label
    return features

  dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
  # TODO(rachelim): interleave records from files for better shuffling
  dataset = dataset.flat_map(filename_to_dataset)
  # TODO(rachelim): use fused shuffle_and_repeat for perf
  if shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
  if num_epochs != 1:
    dataset = dataset.repeat(num_epochs)

  # Batch the raw lines first so `decode_csv` parses whole batches at once.
  return (dataset
          .batch(batch_size)
          .map(decode_csv)
          .prefetch(prefetch_buffer_size))
def make_batched_features_dataset(file_pattern,
                                  batch_size,
                                  features,
                                  reader=core_readers.TFRecordDataset,
                                  reader_args=None,
                                  num_epochs=None,
                                  shuffle=True,
                                  shuffle_buffer_size=10000,
                                  shuffle_seed=None,
                                  prefetch_buffer_size=1,
                                  reader_num_threads=1,
                                  parser_num_threads=2,
                                  sloppy_ordering=False):
  """Returns a `Dataset` of feature dictionaries from `Example` protos.

  Files matching `file_pattern` are read with `reader` (possibly several in
  parallel), optionally shuffled and repeated, batched into groups of
  `batch_size`, and parsed with `tf.parse_example` using the `features`
  spec.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int, the number of consecutive elements to combine per
      batch.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. See `tf.parse_example`.
    reader: A function or class called with a `filenames` tensor and
      (optional) `reader_args`, returning a `Dataset` of `Example` tensors.
      Defaults to `tf.data.TFRecordDataset`.
    reader_args: Additional arguments to pass to the reader class.
    num_epochs: Integer number of passes over the data; if None, cycles
      through the dataset forever. Defaults to `None`.
    shuffle: A boolean indicating whether the input should be shuffled.
      Defaults to `True`.
    shuffle_buffer_size: Shuffle buffer capacity; a large capacity shuffles
      better but increases memory usage and startup time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: Number of parsed feature batches to prefetch;
      the recommended value is the number of batches consumed per training
      step (default is 1).
    reader_num_threads: Number of threads used to read `Example` records.
      If >1, the results will be interleaved.
    parser_num_threads: Number of parallel `parse_example` invocations.
    sloppy_ordering: If `True`, trades deterministic element ordering for
      reading throughput; if `False`, ordering prior to shuffling is
      deterministic. Defaults to `False`.

  Returns:
    A dataset of `dict` elements. Each `dict` maps feature keys to `Tensor`
    or `SparseTensor` objects.
  """
  # Build the dataset of matching file names.
  if shuffle:
    # list_files shuffles the file order for us.
    dataset = dataset_ops.Dataset.list_files(file_pattern, shuffle=True)
  else:
    # TODO(b/73959787): Use Dataset.list_files() once ordering is
    # deterministic.
    dataset = dataset_ops.Dataset.from_tensor_slices(
        _get_file_names(file_pattern, shuffle))

  extra_reader_args = reader_args if reader_args is not None else []
  # Read files sequentially (reader_num_threads=1) or interleaved in
  # parallel.
  dataset = dataset.apply(
      interleave_ops.parallel_interleave(
          lambda filename: reader(filename, *extra_reader_args),
          cycle_length=reader_num_threads,
          sloppy=sloppy_ordering))

  # Readers that yield (key, value) tuples: keep only the serialized proto.
  if dataset.output_types == (dtypes.string, dtypes.string):
    dataset = dataset.map(lambda _, v: v)

  # Apply repeat/shuffle, fusing them when both are requested.
  should_repeat = (num_epochs != 1)
  if should_repeat and shuffle:
    # The fused op performs better than chaining shuffle and repeat.
    dataset = dataset.apply(
        shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
                                       shuffle_seed))
  elif should_repeat:
    dataset = dataset.repeat(num_epochs)
  elif shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)

  dataset = dataset.batch(batch_size)
  # Parse whole batches of serialized protos at once.
  dataset = dataset.map(
      lambda x: parsing_ops.parse_example(x, features),
      num_parallel_calls=parser_num_threads)
  # TODO(rachelim): Add an optional label_key argument for extracting the
  # label from the features dictionary, to comply with the type expected by
  # the input_fn to a `tf.Estimator.train` or `tf.Estimator.evaluate`
  # function.
  return dataset.prefetch(prefetch_buffer_size)
@deprecation.deprecated(None,
                        "Use `tf.contrib.data.make_batched_features_dataset`")
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader=core_readers.TFRecordDataset,
                        reader_args=None,
                        randomize_input=True,
                        num_epochs=None,
                        capacity=10000):
  """Reads batches of parsed `Example` features (deprecated).

  Thin wrapper around `make_batched_features_dataset` that returns the
  `get_next()` tensors of a one-shot iterator instead of the dataset
  itself.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int, the number of consecutive elements to combine per
      batch.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. See `tf.parse_example`.
    reader: A function or class called with a `filenames` tensor and
      (optional) `reader_args`, returning a `Dataset` of `Example` tensors.
      Defaults to `tf.data.TFRecordDataset`.
    reader_args: Additional arguments to pass to the reader class.
    randomize_input: Whether the input should be shuffled.
    num_epochs: Integer number of passes over the data; if None, cycles
      through the dataset forever.
    capacity: Shuffle buffer capacity; a large capacity shuffles better but
      increases memory usage and startup time.

  Returns:
    A dict from keys in `features` to `Tensor` or `SparseTensor` objects.
  """
  dataset = make_batched_features_dataset(
      file_pattern,
      batch_size,
      features,
      reader=reader,
      reader_args=reader_args,
      shuffle=randomize_input,
      num_epochs=num_epochs,
      shuffle_buffer_size=capacity)
  return dataset.make_one_shot_iterator().get_next()
def _get_file_names(file_pattern, shuffle):
  """Expand a glob pattern (or list of patterns) into file names.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    shuffle: Whether the caller intends to shuffle; when False the names
      are sorted for a deterministic order.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or the pattern matches no files.
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError("File pattern is empty.")
    file_names = [name for pattern in file_pattern
                  for name in gfile.Glob(pattern)]
  else:
    file_names = list(gfile.Glob(file_pattern))
  if not file_names:
    raise ValueError("No files match %s." % file_pattern)
  # Deterministic order (e.g. for unit tests) unless the caller shuffles
  # anyway.
  return file_names if shuffle else sorted(file_names)
class SqlDataset(dataset_ops.Dataset):
  """A `Dataset` consisting of the results from a SQL query."""

  def __init__(self, driver_name, data_source_name, query, output_types):
    """Creates a `SqlDataset`.

    `SqlDataset` allows a user to read data from the result set of a SQL query.
    For example:

    ```python
    dataset = tf.contrib.data.SqlDataset("sqlite", "/foo/bar.sqlite3",
                                         "SELECT name, age FROM people",
                                         (tf.string, tf.int32))
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    # Prints the rows of the result set of the above query.
    while True:
      try:
        print(sess.run(next_element))
      except tf.errors.OutOfRangeError:
        break
    ```

    Args:
      driver_name: A 0-D `tf.string` tensor containing the database type.
        Currently, the only supported value is 'sqlite'.
      data_source_name: A 0-D `tf.string` tensor containing a connection string
        to connect to the database.
      query: A 0-D `tf.string` tensor containing the SQL query to execute.
      output_types: A tuple of `tf.DType` objects representing the types of the
        columns returned by `query`.
    """
    super(SqlDataset, self).__init__()
    self._driver_name = ops.convert_to_tensor(
        driver_name, dtype=dtypes.string, name="driver_name")
    self._data_source_name = ops.convert_to_tensor(
        data_source_name, dtype=dtypes.string, name="data_source_name")
    self._query = ops.convert_to_tensor(
        query, dtype=dtypes.string, name="query")
    self._output_types = output_types

  def _as_variant_tensor(self):
    # Constructs the underlying dataset op; types and shapes are flattened
    # per the tf.data structure convention.
    return gen_dataset_ops.sql_dataset(self._driver_name,
                                       self._data_source_name, self._query,
                                       nest.flatten(self.output_types),
                                       nest.flatten(self.output_shapes))

  @property
  def output_classes(self):
    return nest.map_structure(lambda _: ops.Tensor, self._output_types)

  @property
  def output_shapes(self):
    # Each column of a result row is a scalar.
    return nest.map_structure(lambda _: tensor_shape.TensorShape([]),
                              self._output_types)

  @property
  def output_types(self):
    return self._output_types
| 39.152542 | 80 | 0.676082 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import deprecation
# dtypes usable in `column_defaults`: a bare dtype entry is converted by
# `make_csv_dataset` into an empty constant of that dtype.
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
                         dtypes.int64, dtypes.string)
def make_csv_dataset(
    file_pattern,
    batch_size,
    column_keys,
    column_defaults,
    label_key=None,
    field_delim=",",
    use_quote_delim=True,
    skip=0,
    filter_fn=None,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=10000,
    shuffle_seed=None,
    prefetch_buffer_size=1,
):
  """Reads CSV files into a dataset of batched feature dicts.

  Each element is a batch of `batch_size` CSV rows as a dict mapping names
  in `column_keys` to `Tensor`s of column data; when `label_key` is given
  the elements are (features, labels) tuples instead.
  """
  filenames = _get_file_names(file_pattern, False)
  # Bare dtypes in `column_defaults` become empty constants of that dtype.
  column_defaults = [
      constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
      for x in column_defaults
  ]
  dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
  if label_key is not None:
    assert label_key in column_keys
  def filename_to_dataset(filename):
    # One line-oriented dataset per file, minus skipped and filtered lines.
    ds = core_readers.TextLineDataset(filename)
    if skip > 0:
      ds = ds.skip(skip)
    if filter_fn is not None:
      ds = ds.filter(filter_fn)
    return ds
  def decode_csv(line):
    """Parse a (batched) CSV line tensor into named feature columns."""
    columns = parsing_ops.decode_csv(
        line,
        column_defaults,
        field_delim=field_delim,
        use_quote_delim=use_quote_delim)
    features = dict(zip(column_keys, columns))
    if label_key is not None:
      label = features.pop(label_key)
      return features, label
    return features
  dataset = dataset.flat_map(filename_to_dataset)
  if shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
  if num_epochs != 1:
    dataset = dataset.repeat(num_epochs)
  # Batch raw lines first so `decode_csv` parses whole batches at once.
  dataset = dataset.batch(batch_size)
  dataset = dataset.map(decode_csv)
  dataset = dataset.prefetch(prefetch_buffer_size)
  return dataset
def make_batched_features_dataset(file_pattern,
                                  batch_size,
                                  features,
                                  reader=core_readers.TFRecordDataset,
                                  reader_args=None,
                                  num_epochs=None,
                                  shuffle=True,
                                  shuffle_buffer_size=10000,
                                  shuffle_seed=None,
                                  prefetch_buffer_size=1,
                                  reader_num_threads=1,
                                  parser_num_threads=2,
                                  sloppy_ordering=False):
  """Returns a `Dataset` of feature dicts parsed from `Example` protos.

  Reads files matching `file_pattern` with `reader`, optionally shuffles
  and repeats them, batches `batch_size` records, and parses each batch
  with `tf.parse_example` using the `features` spec.
  """
  if shuffle:
    # list_files shuffles the file order for us.
    dataset = dataset_ops.Dataset.list_files(file_pattern, shuffle=True)
  else:
    filenames = _get_file_names(file_pattern, shuffle)
    dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
  if reader_args is None:
    reader_args = []
  # Read files sequentially (reader_num_threads=1) or interleaved in
  # parallel.
  dataset = dataset.apply(
      interleave_ops.parallel_interleave(
          lambda filename: reader(filename, *reader_args),
          cycle_length=reader_num_threads,
          sloppy=sloppy_ordering))
  # Readers that yield (key, value) tuples: keep only the serialized value.
  if dataset.output_types == (dtypes.string, dtypes.string):
    dataset = dataset.map(lambda _, v: v)
  repeat_dataset = (num_epochs != 1)
  if repeat_dataset and shuffle:
    # The fused op performs better than chaining shuffle and repeat.
    dataset = dataset.apply(
        shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
                                       shuffle_seed))
  elif repeat_dataset:
    dataset = dataset.repeat(num_epochs)
  elif shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
  dataset = dataset.batch(batch_size)
  # Parse whole batches of serialized protos at once.
  dataset = dataset.map(
      lambda x: parsing_ops.parse_example(x, features),
      num_parallel_calls=parser_num_threads)
  dataset = dataset.prefetch(prefetch_buffer_size)
  return dataset
@deprecation.deprecated(None,
                        "Use `tf.contrib.data.make_batched_features_dataset`")
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader=core_readers.TFRecordDataset,
                        reader_args=None,
                        randomize_input=True,
                        num_epochs=None,
                        capacity=10000):
  """Reads batches of parsed `Example` features (deprecated).

  Wrapper around `make_batched_features_dataset` that returns the
  `get_next()` tensors of a one-shot iterator instead of the dataset.
  """
  dataset = make_batched_features_dataset(
      file_pattern,
      batch_size,
      features,
      reader=reader,
      reader_args=reader_args,
      shuffle=randomize_input,
      num_epochs=num_epochs,
      shuffle_buffer_size=capacity)
  iterator = dataset.make_one_shot_iterator()
  outputs = iterator.get_next()
  return outputs
def _get_file_names(file_pattern, shuffle):
  """Expand a glob pattern (or list of patterns) into file names.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    shuffle: Whether the caller shuffles; when False the names are sorted.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or the pattern matches no files.
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError("File pattern is empty.")
    file_names = []
    for entry in file_pattern:
      file_names.extend(gfile.Glob(entry))
  else:
    file_names = list(gfile.Glob(file_pattern))
  if not file_names:
    raise ValueError("No files match %s." % file_pattern)
  # Sort for a deterministic order (e.g. in unit tests) when not shuffling.
  if not shuffle:
    file_names = sorted(file_names)
  return file_names
class SqlDataset(dataset_ops.Dataset):
  """A `Dataset` consisting of the results from a SQL query."""

  def __init__(self, driver_name, data_source_name, query, output_types):
    """Creates a `SqlDataset`.

    Args:
      driver_name: A 0-D `tf.string` tensor with the database type
        (currently only 'sqlite' is supported).
      data_source_name: A 0-D `tf.string` tensor with a connection string.
      query: A 0-D `tf.string` tensor with the SQL query to execute.
      output_types: Tuple of `tf.DType`s for the columns returned by `query`.
    """
    super(SqlDataset, self).__init__()
    self._driver_name = ops.convert_to_tensor(
        driver_name, dtype=dtypes.string, name="driver_name")
    self._data_source_name = ops.convert_to_tensor(
        data_source_name, dtype=dtypes.string, name="data_source_name")
    self._query = ops.convert_to_tensor(
        query, dtype=dtypes.string, name="query")
    self._output_types = output_types

  def _as_variant_tensor(self):
    # Constructs the underlying dataset op; types and shapes are flattened
    # per the tf.data structure convention.
    return gen_dataset_ops.sql_dataset(self._driver_name,
                                       self._data_source_name, self._query,
                                       nest.flatten(self.output_types),
                                       nest.flatten(self.output_shapes))

  @property
  def output_classes(self):
    return nest.map_structure(lambda _: ops.Tensor, self._output_types)

  @property
  def output_shapes(self):
    # Each column of a result row is a scalar.
    return nest.map_structure(lambda _: tensor_shape.TensorShape([]),
                              self._output_types)

  @property
  def output_types(self):
    return self._output_types
| true | true |
f70f9fbd792e49e3bb17519f8daf955fc3b614b8 | 3,041 | py | Python | projects/api/main.py | lborro/projects | c971c2fc65cdb09310d0b3782cd7119203aa4db3 | [
"Apache-2.0"
] | null | null | null | projects/api/main.py | lborro/projects | c971c2fc65cdb09310d0b3782cd7119203aa4db3 | [
"Apache-2.0"
] | null | null | null | projects/api/main.py | lborro/projects | c971c2fc65cdb09310d0b3782cd7119203aa4db3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""WSGI server."""
import argparse
import sys
from flask import Flask, jsonify
from flask_cors import CORS
from werkzeug.exceptions import BadRequest, NotFound, MethodNotAllowed, \
Forbidden, InternalServerError
from projects.api.compare_results import bp as compare_results_blueprint
from projects.api.experiments import bp as experiments_blueprint
from projects.api.json_encoder import CustomJSONEncoder
from projects.api.operators import bp as operators_blueprint
from projects.api.parameters import bp as parameters_blueprint
from projects.api.projects import bp as projects_blueprint
from projects.api.tasks import bp as tasks_blueprint
from projects.api.templates import bp as templates_blueprint
from projects.database import db_session, init_db
from projects.samples import init_tasks
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder

# REST routing: one blueprint per resource family, nested under its parent
# resource where applicable (e.g. experiments under a project).
app.register_blueprint(projects_blueprint, url_prefix="/projects")
app.register_blueprint(compare_results_blueprint, url_prefix="/projects/<project_id>/comparisons")
app.register_blueprint(experiments_blueprint, url_prefix="/projects/<project_id>/experiments")
app.register_blueprint(tasks_blueprint, url_prefix="/tasks")
app.register_blueprint(parameters_blueprint, url_prefix="/tasks/<task_id>/parameters")
app.register_blueprint(operators_blueprint,
                       url_prefix="/projects/<project_id>/experiments/<experiment_id>/operators")
app.register_blueprint(templates_blueprint, url_prefix="/templates")
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Remove the scoped DB session when the app context tears down."""
    db_session.remove()
@app.route("/", methods=["GET"])
def ping():
    """Handles GET requests to /. Health-check endpoint; always returns "pong"."""
    return "pong"
@app.errorhandler(BadRequest)
@app.errorhandler(NotFound)
@app.errorhandler(MethodNotAllowed)
@app.errorhandler(Forbidden)
@app.errorhandler(InternalServerError)
def handle_errors(e):
    """Handles exceptions raised by the API.

    Converts the registered HTTP exceptions into a JSON body of the form
    {"message": <description>} with the exception's own status code.
    """
    return jsonify({"message": e.description}), e.code
def parse_args(args):
    """
    Parse command-line options for the Projects API.

    :param args: list of argument strings (typically ``sys.argv[1:]``).
    :return: ``argparse.Namespace`` with ``port``, ``enable_cors``,
             ``debug``, ``init_db`` and ``samples_config`` attributes.
    """
    parser = argparse.ArgumentParser(description="Projects API")
    parser.add_argument("--port", type=int, default=8080,
                        help="Port for HTTP server (default: 8080)")
    # "count" flags are None when absent and truthy when given, which is
    # exactly how the __main__ block tests them.
    parser.add_argument("--enable-cors", action="count")
    parser.add_argument("--debug", action="count", help="Enable debug")
    parser.add_argument("--init-db", action="count",
                        help="Create database and tables before the HTTP server starts")
    parser.add_argument("--samples-config",
                        help="Path to sample tasks config file.")
    return parser.parse_args(args)
# Script entry point: parse CLI flags, run optional setup steps, then start
# the development server (binds all interfaces so containers can reach it).
if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    # Enable CORS if required
    if args.enable_cors:
        CORS(app)
    # Initializes DB if required
    if args.init_db:
        init_db()
    # Install sample tasks if required
    if args.samples_config:
        init_tasks(args.samples_config)
    app.run(host="0.0.0.0", port=args.port, debug=args.debug)
| 33.054348 | 100 | 0.74515 |
import argparse
import sys
from flask import Flask, jsonify
from flask_cors import CORS
from werkzeug.exceptions import BadRequest, NotFound, MethodNotAllowed, \
Forbidden, InternalServerError
from projects.api.compare_results import bp as compare_results_blueprint
from projects.api.experiments import bp as experiments_blueprint
from projects.api.json_encoder import CustomJSONEncoder
from projects.api.operators import bp as operators_blueprint
from projects.api.parameters import bp as parameters_blueprint
from projects.api.projects import bp as projects_blueprint
from projects.api.tasks import bp as tasks_blueprint
from projects.api.templates import bp as templates_blueprint
from projects.database import db_session, init_db
from projects.samples import init_tasks
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
app.register_blueprint(projects_blueprint, url_prefix="/projects")
app.register_blueprint(compare_results_blueprint, url_prefix="/projects/<project_id>/comparisons")
app.register_blueprint(experiments_blueprint, url_prefix="/projects/<project_id>/experiments")
app.register_blueprint(tasks_blueprint, url_prefix="/tasks")
app.register_blueprint(parameters_blueprint, url_prefix="/tasks/<task_id>/parameters")
app.register_blueprint(operators_blueprint,
url_prefix="/projects/<project_id>/experiments/<experiment_id>/operators")
app.register_blueprint(templates_blueprint, url_prefix="/templates")
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
@app.route("/", methods=["GET"])
def ping():
return "pong"
@app.errorhandler(BadRequest)
@app.errorhandler(NotFound)
@app.errorhandler(MethodNotAllowed)
@app.errorhandler(Forbidden)
@app.errorhandler(InternalServerError)
def handle_errors(e):
return jsonify({"message": e.description}), e.code
def parse_args(args):
parser = argparse.ArgumentParser(
description="Projects API"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--enable-cors", action="count")
parser.add_argument(
"--debug", action="count", help="Enable debug"
)
parser.add_argument(
"--init-db", action="count", help="Create database and tables before the HTTP server starts"
)
parser.add_argument(
"--samples-config", help="Path to sample tasks config file."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
if args.enable_cors:
CORS(app)
if args.init_db:
init_db()
if args.samples_config:
init_tasks(args.samples_config)
app.run(host="0.0.0.0", port=args.port, debug=args.debug)
| true | true |
f70fa0d1d8aad41a969517b69bc42c6dd87cbd53 | 2,411 | py | Python | codes/preprocess/collect_noise.py | yicrane/Real-SR | a6e380b791129b80fe58bf282089c0cfd9159b36 | [
"Apache-2.0"
] | null | null | null | codes/preprocess/collect_noise.py | yicrane/Real-SR | a6e380b791129b80fe58bf282089c0cfd9159b36 | [
"Apache-2.0"
] | null | null | null | codes/preprocess/collect_noise.py | yicrane/Real-SR | a6e380b791129b80fe58bf282089c0cfd9159b36 | [
"Apache-2.0"
] | 1 | 2021-07-07T13:56:30.000Z | 2021-07-07T13:56:30.000Z | from PIL import Image
import numpy as np
import os.path as osp
import glob
import os
import argparse
import yaml
# Command-line options for the noise-collection script.
parser = argparse.ArgumentParser(description='create a dataset')
parser.add_argument('--dataset', default='df2k', type=str, help='selecting different datasets')
parser.add_argument('--artifacts', default='', type=str, help='selecting different artifacts type')
parser.add_argument('--cleanup_factor', default=2, type=int, help='downscaling factor for image cleanup')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[4], help='super resolution upscale factor')
opt = parser.parse_args()
# define input and target directories
with open('./preprocess/paths.yml', 'r') as stream:
    # safe_load avoids arbitrary Python object construction and the
    # PyYAML >= 5.1 warning for load() without an explicit Loader.
    PATHS = yaml.safe_load(stream)
def noise_patch(rgb_img, sp, max_var, min_mean):
    """
    Scan ``rgb_img`` on a grid of ``sp`` x ``sp`` tiles and collect the RGB
    tiles that look like flat, noise-dominated regions.

    A tile qualifies when the variance of its grayscale pixels is below
    ``max_var`` (little structure) and their mean is above ``min_mean``
    (not too dark).

    :param rgb_img: RGB image object (PIL-style, with a ``convert`` method).
    :param sp: tile size in pixels; also the scan stride.
    :param max_var: exclusive upper bound on the grayscale variance.
    :param min_mean: exclusive lower bound on the grayscale mean.
    :return: list of ``sp`` x ``sp`` x 3 numpy arrays (RGB tiles).
    """
    gray = np.array(rgb_img.convert('L'))
    rgb = np.array(rgb_img)
    rows, cols = gray.shape
    patches = []
    for top in range(0, rows - sp, sp):
        for left in range(0, cols - sp, sp):
            tile = gray[top:top + sp, left:left + sp]
            if np.var(tile) < max_var and np.mean(tile) > min_mean:
                patches.append(rgb[top:top + sp, left:left + sp, :])
    return patches
# Script entry point: pick per-dataset paths and thresholds, then extract
# and save flat "noise" patches from every PNG in the source directory.
if __name__ == '__main__':
    if opt.dataset == 'df2k':
        img_dir = PATHS[opt.dataset][opt.artifacts]['source']
        noise_dir = PATHS['datasets']['df2k'] + '/Corrupted_noise'
        sp = 256  # patch size / scan stride in pixels
        max_var = 20  # grayscale variance threshold (flatness)
        min_mean = 0  # grayscale mean threshold (brightness)
    else:
        # Any other dataset is treated as DPED-style; note the output dir
        # name mentions sp32 although sp is 256 here.
        img_dir = PATHS[opt.dataset][opt.artifacts]['hr']['train']
        noise_dir = PATHS['datasets']['dped'] + '/DPEDiphone_noise_sp32v20m50'
        sp = 256
        max_var = 20
        min_mean = 50
    # Refuse to overwrite a previous run's output directory.
    assert not os.path.exists(noise_dir)
    os.mkdir(noise_dir)
    img_paths = sorted(glob.glob(osp.join(img_dir, '*.png')))
    cnt = 0  # running count of saved patches, for progress output only
    for path in img_paths:
        img_name = osp.splitext(osp.basename(path))[0]
        print('**********', img_name, '**********')
        img = Image.open(path).convert('RGB')
        patchs = noise_patch(img, sp, max_var, min_mean)
        for idx, patch in enumerate(patchs):
            # Saved as <image>_<patch index>.png in the output directory.
            save_path = osp.join(noise_dir, '{}_{:03}.png'.format(img_name, idx))
            cnt += 1
            print('collect:', cnt, save_path)
            Image.fromarray(patch).save(save_path)
| 33.957746 | 113 | 0.623393 | from PIL import Image
import numpy as np
import os.path as osp
import glob
import os
import argparse
import yaml
parser = argparse.ArgumentParser(description='create a dataset')
parser.add_argument('--dataset', default='df2k', type=str, help='selecting different datasets')
parser.add_argument('--artifacts', default='', type=str, help='selecting different artifacts type')
parser.add_argument('--cleanup_factor', default=2, type=int, help='downscaling factor for image cleanup')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[4], help='super resolution upscale factor')
opt = parser.parse_args()
with open('./preprocess/paths.yml', 'r') as stream:
PATHS = yaml.load(stream)
def noise_patch(rgb_img, sp, max_var, min_mean):
img = rgb_img.convert('L')
rgb_img = np.array(rgb_img)
img = np.array(img)
w, h = img.shape
collect_patchs = []
for i in range(0, w - sp, sp):
for j in range(0, h - sp, sp):
patch = img[i:i + sp, j:j + sp]
var_global = np.var(patch)
mean_global = np.mean(patch)
if var_global < max_var and mean_global > min_mean:
rgb_patch = rgb_img[i:i + sp, j:j + sp, :]
collect_patchs.append(rgb_patch)
return collect_patchs
if __name__ == '__main__':
if opt.dataset == 'df2k':
img_dir = PATHS[opt.dataset][opt.artifacts]['source']
noise_dir = PATHS['datasets']['df2k'] + '/Corrupted_noise'
sp = 256
max_var = 20
min_mean = 0
else:
img_dir = PATHS[opt.dataset][opt.artifacts]['hr']['train']
noise_dir = PATHS['datasets']['dped'] + '/DPEDiphone_noise_sp32v20m50'
sp = 256
max_var = 20
min_mean = 50
assert not os.path.exists(noise_dir)
os.mkdir(noise_dir)
img_paths = sorted(glob.glob(osp.join(img_dir, '*.png')))
cnt = 0
for path in img_paths:
img_name = osp.splitext(osp.basename(path))[0]
print('**********', img_name, '**********')
img = Image.open(path).convert('RGB')
patchs = noise_patch(img, sp, max_var, min_mean)
for idx, patch in enumerate(patchs):
save_path = osp.join(noise_dir, '{}_{:03}.png'.format(img_name, idx))
cnt += 1
print('collect:', cnt, save_path)
Image.fromarray(patch).save(save_path)
| true | true |
f70fa0e5384e444c718b501e35ff39db46d5b99a | 6,872 | py | Python | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 539 | 2018-11-13T08:45:42.000Z | 2020-07-27T18:17:16.000Z | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 588 | 2018-11-14T10:21:47.000Z | 2020-07-28T06:27:14.000Z | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 165 | 2018-11-13T18:58:56.000Z | 2020-07-27T17:18:17.000Z | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This subpackage contains QNode, quantum function, device, and tape transforms.
.. currentmodule:: pennylane
Transforms
----------
Transforms that act on QNodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept QNodes, and return new transformed functions
that compute the desired quantity.
.. autosummary::
:toctree: api
~transforms.classical_jacobian
~batch_params
~batch_input
~metric_tensor
~adjoint_metric_tensor
~specs
~transforms.mitigate_with_zne
~transforms.split_non_commuting
Transforms that act on quantum functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum functions (Python functions
containing quantum operations) that are used to construct QNodes.
.. autosummary::
:toctree: api
~adjoint
~ctrl
~transforms.cond
~defer_measurements
~apply_controlled_Q
~quantum_monte_carlo
~transforms.insert
Transforms for circuit compilation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This set of transforms accept quantum functions, and perform basic circuit compilation tasks.
.. autosummary::
:toctree: api
~compile
~transforms.cancel_inverses
~transforms.commute_controlled
~transforms.merge_rotations
~transforms.single_qubit_fusion
~transforms.unitary_to_rot
~transforms.merge_amplitude_embedding
~transforms.remove_barrier
~transforms.undo_swaps
~transforms.pattern_matching_optimization
~transforms.transpile
There are also utility functions and decompositions available that assist with
both transforms, and decompositions within the larger PennyLane codebase.
.. autosummary::
:toctree: api
~transforms.zyz_decomposition
~transforms.two_qubit_decomposition
~transforms.set_decomposition
~transforms.simplify
~transforms.pattern_matching
There are also utility functions that take a circuit and return a DAG.
.. autosummary::
:toctree: api
~transforms.commutation_dag
~transforms.CommutationDAG
~transforms.CommutationDAGNode
Transform for circuit cutting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :func:`~.cut_circuit` transform accepts a QNode and returns a new function that cuts the original circuit,
allowing larger circuits to be split into smaller circuits that are compatible with devices that
have a restricted number of qubits.
.. autosummary::
:toctree: api
~cut_circuit
The :func:`~.cut_circuit_mc` transform is designed to be used for cutting circuits which contain :func:`~.sample`
measurements and is implemented using a Monte Carlo method. Similarly to the :func:`~.cut_circuit`
transform, this transform accepts a QNode and returns a new function that cuts the original circuit.
This transform can also accept an optional classical processing function to calculate an
expectation value.
.. autosummary::
:toctree: api
~cut_circuit_mc
There are also low-level functions that can be used to build up the circuit cutting functionalities:
.. autosummary::
:toctree: api
~transforms.qcut.tape_to_graph
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.expand_fragment_tapes_mc
~transforms.qcut.qcut_processing_fn
~transforms.qcut.qcut_processing_fn_sample
~transforms.qcut.qcut_processing_fn_mc
~transforms.qcut.CutStrategy
~transforms.qcut.kahypar_cut
~transforms.qcut.place_wire_cuts
~transforms.qcut.find_and_place_cuts
Transforms that act on tapes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum tapes, and return one or
more tapes as well as a classical processing function.
.. autosummary::
:toctree: api
~transforms.measurement_grouping
~transforms.hamiltonian_expand
Decorators and utility functions
--------------------------------
The following decorators and convenience functions are provided
to help build custom QNode, quantum function, and tape transforms:
.. autosummary::
:toctree: api
~single_tape_transform
~batch_transform
~qfunc_transform
~op_transform
~transforms.make_tape
~transforms.map_batch_transform
~transforms.create_expand_fn
~transforms.create_decomp_expand_fn
~transforms.expand_invalid_trainable
~transforms.expand_multipar
~transforms.expand_trainable_multipar
~transforms.expand_nonunitary_gen
"""
# Import the decorators first to prevent circular imports when used in other transforms
from .batch_transform import batch_transform, map_batch_transform
from .qfunc_transforms import make_tape, single_tape_transform, qfunc_transform
from .op_transforms import op_transform
from .adjoint import adjoint
from .batch_params import batch_params
from .batch_input import batch_input
from .classical_jacobian import classical_jacobian
from .condition import cond, Conditional
from .compile import compile
from .control import ControlledOperation, ctrl
from .decompositions import zyz_decomposition, two_qubit_decomposition
from .defer_measurements import defer_measurements
from .hamiltonian_expand import hamiltonian_expand
from .split_non_commuting import split_non_commuting
from .measurement_grouping import measurement_grouping
from .metric_tensor import metric_tensor
from .adjoint_metric_tensor import adjoint_metric_tensor
from .insert_ops import insert
from .mitigate import mitigate_with_zne
from .optimization import (
cancel_inverses,
commute_controlled,
merge_rotations,
single_qubit_fusion,
merge_amplitude_embedding,
remove_barrier,
undo_swaps,
pattern_matching,
pattern_matching_optimization,
)
from .specs import specs
from .qmc import apply_controlled_Q, quantum_monte_carlo
from .unitary_to_rot import unitary_to_rot
from .commutation_dag import (
commutation_dag,
is_commuting,
CommutationDAG,
CommutationDAGNode,
simplify,
)
from .tape_expand import (
expand_invalid_trainable,
expand_multipar,
expand_nonunitary_gen,
expand_trainable_multipar,
create_expand_fn,
create_decomp_expand_fn,
set_decomposition,
)
from .transpile import transpile
from . import qcut
from .qcut import cut_circuit, cut_circuit_mc
| 30.008734 | 113 | 0.766589 |
from .batch_transform import batch_transform, map_batch_transform
from .qfunc_transforms import make_tape, single_tape_transform, qfunc_transform
from .op_transforms import op_transform
from .adjoint import adjoint
from .batch_params import batch_params
from .batch_input import batch_input
from .classical_jacobian import classical_jacobian
from .condition import cond, Conditional
from .compile import compile
from .control import ControlledOperation, ctrl
from .decompositions import zyz_decomposition, two_qubit_decomposition
from .defer_measurements import defer_measurements
from .hamiltonian_expand import hamiltonian_expand
from .split_non_commuting import split_non_commuting
from .measurement_grouping import measurement_grouping
from .metric_tensor import metric_tensor
from .adjoint_metric_tensor import adjoint_metric_tensor
from .insert_ops import insert
from .mitigate import mitigate_with_zne
from .optimization import (
cancel_inverses,
commute_controlled,
merge_rotations,
single_qubit_fusion,
merge_amplitude_embedding,
remove_barrier,
undo_swaps,
pattern_matching,
pattern_matching_optimization,
)
from .specs import specs
from .qmc import apply_controlled_Q, quantum_monte_carlo
from .unitary_to_rot import unitary_to_rot
from .commutation_dag import (
commutation_dag,
is_commuting,
CommutationDAG,
CommutationDAGNode,
simplify,
)
from .tape_expand import (
expand_invalid_trainable,
expand_multipar,
expand_nonunitary_gen,
expand_trainable_multipar,
create_expand_fn,
create_decomp_expand_fn,
set_decomposition,
)
from .transpile import transpile
from . import qcut
from .qcut import cut_circuit, cut_circuit_mc
| true | true |
f70fa191b2427b7e00ffd7084a2710be0b35e10c | 10,735 | py | Python | src/m3_calling_functions_returning_values.py | DavidMutchler/03-AccumulatorsAndFunctionsWithParameters | 548b9a527357e4a18c6ab3e0cc84c907c6e33d87 | [
"MIT"
] | null | null | null | src/m3_calling_functions_returning_values.py | DavidMutchler/03-AccumulatorsAndFunctionsWithParameters | 548b9a527357e4a18c6ab3e0cc84c907c6e33d87 | [
"MIT"
] | null | null | null | src/m3_calling_functions_returning_values.py | DavidMutchler/03-AccumulatorsAndFunctionsWithParameters | 548b9a527357e4a18c6ab3e0cc84c907c6e33d87 | [
"MIT"
] | 66 | 2018-03-08T12:57:23.000Z | 2020-11-09T18:59:08.000Z | """
This module demonstrates and practices:
-- using ARGUMENTs in function CALLs,
-- having PARAMETERs in function DEFINITIONs, and
-- RETURNING a value from a function,
possibly CAPTURING the RETURNED VALUE in a VARIABLE.
-- UNIT TESTING.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import m3t_tester
def main():
    """ Calls the TEST functions in this module, one per exercise. """
    run_test_sum_of_digits()
    run_test_digits_in_cube()
    run_test_digits_in_power()
    run_test_fancy_sums_of_digits()
    # ------------------------------------------------------------------
    # TODO: 9. DO THIS LAST!
    # -- Uncomment the line of code below to run the main function
    # in m3t_tester.py (do not make changes to it).
    # It runs OUR tests on your code.
    # -- Check to see whether all test cases indicate they
    # "COMPLETED SUCCESSFULLY!"
    # -- If your code fails any of OUR tests but passes YOUR tests,
    # then you are likely not TESTING the methods correctly.
    # ** Ask a TA or your professor for help in that case. **
    # ------------------------------------------------------------------
    # m3t_tester.main()
def run_test_sum_of_digits():
    """ Tests the sum_of_digits function.

    Prints the EXPECTED and ACTUAL answer for each test case so they
    can be compared by eye (per the course's testing pattern).
    """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_of_digits function:')
    print('--------------------------------------------------')
    # Test 1: 8 + 2 + 6 = 16.
    expected = 16
    answer = sum_of_digits(826)
    print('Test 1 expected:', expected)
    print(' actual: ', answer)
    # Test 2: negative number -- the sign is ignored, so 5 + 3 = 8.
    expected = 8
    answer = sum_of_digits(-53)
    print('Test 2 expected:', expected)
    print(' actual: ', answer)
    # Test 3: zero has digit sum 0.
    expected = 0
    answer = sum_of_digits(0)
    print('Test 3 expected:', expected)
    print(' actual: ', answer)
    # Test 4: a single-digit number is its own digit sum.
    expected = 7
    answer = sum_of_digits(7)
    print('Test 4 expected:', expected)
    print(' actual: ', answer)
def sum_of_digits(number):
    """
    What comes in: An integer.
    What goes out: The sum of the digits in the given integer.
    Side effects: None.
    Example:
        If the integer is 83135,
        this function returns (8 + 3 + 1 + 3 + 5), which is 20.
    """
    # The sign of the number does not affect its digit sum.
    remaining = abs(number)
    total = 0
    while remaining != 0:
        total += remaining % 10  # peel off the lowest-order digit
        remaining //= 10
    return total
def run_test_digits_in_cube():
    """ Tests the digits_in_cube function.

    Prints EXPECTED and ACTUAL answers side by side for 3 test cases.
    """
    print()
    print('-----------------------------------------------------')
    print('Testing the digits_in_cube function:')
    print('-----------------------------------------------------')
    # Test 1: 5 cubed is 125, whose digits sum to 8.
    expected = 8
    answer = digits_in_cube(5)
    print('Test 1 expected:', expected)
    print(' actual: ', answer)
    # Test 2: 1 cubed is 1.
    expected = 1
    answer = digits_in_cube(1)
    print('Test 2 expected:', expected)
    print(' actual: ', answer)
    # Test 3: 12 cubed is 1728, whose digits sum to 18.
    expected = 18
    answer = digits_in_cube(12)
    print('Test 3 expected:', expected)
    print(' actual: ', answer)
def digits_in_cube(n):
    """
    What comes in: A positive integer.
    What goes out: The sum of the digits in the CUBE of the integer.
    Side effects: None.
    Example:
        If the integer (n) is 5 (so n cubed is 125),
        this function returns (1 + 2 + 5), which is 8.
    """
    # The original body was an unimplemented stub that returned None.
    # Reuse the existing sum_of_digits helper rather than re-implementing it.
    return sum_of_digits(n ** 3)
def run_test_digits_in_power():
    """ Tests the digits_in_power function.

    Prints EXPECTED and ACTUAL answers side by side for 3 test cases.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the digits_in_power function:')
    print('--------------------------------------------------')
    # Test 1: 12 ** 3 = 1728, whose digits sum to 18.
    expected = 18
    answer = digits_in_power(12, 3)
    print('Test 1 expected:', expected)
    print(' actual: ', answer)
    # Test 2: 2 ** 10 = 1024, whose digits sum to 7.
    expected = 7
    answer = digits_in_power(2, 10)
    print('Test 2 expected:', expected)
    print(' actual: ', answer)
    # Test 3: 5 ** 1 = 5, whose digit sum is 5.
    expected = 5
    answer = digits_in_power(5, 1)
    print('Test 3 expected:', expected)
    print(' actual: ', answer)
def digits_in_power(n, k):
    """
    What comes in: Two positive integers, n and k.
    What goes out:
      The sum of the digits in x, where x is n raised to the kth power.
    Side effects: None.
    Example:
      If the arguments are 12 and 3, respectively,
      this function returns 18
      since 12 to the 3rd power is 1728 (whose digits sum to 18).
    """
    # The original body was an unimplemented stub that returned None.
    # Reuse the existing sum_of_digits helper rather than re-implementing it.
    return sum_of_digits(n ** k)
def run_test_fancy_sums_of_digits():
    """ Tests the fancy_sums_of_digits function.

    Test 1 is computed by hand (n = 10); tests 2 and 3 use the values
    given in the fancy_sums_of_digits doc-string (n = 2 and n = 35).
    """
    print()
    print('--------------------------------------------------')
    print('Testing the fancy_sums_of_digits function:')
    print('--------------------------------------------------')
    # Test 1: for n = 10, every power of 10 has digit sum 1,
    # so X = 1, Y = 1, and the digit sum of X ** Y (= 1) is 1.
    expected = 1
    answer = fancy_sums_of_digits(10)
    print('Test 1 expected:', expected)
    print(' actual: ', answer)
    # Test 2: for n = 2 the doc-string gives the answer 19084.
    expected = 19084
    answer = fancy_sums_of_digits(2)
    print('Test 2 expected:', expected)
    print(' actual: ', answer)
    # Test 3: for n = 35 the doc-string gives the answer 124309.
    expected = 124309
    answer = fancy_sums_of_digits(35)
    print('Test 3 expected:', expected)
    print(' actual: ', answer)
def fancy_sums_of_digits(n):
    """
    What comes in: A positive integer n.
    What goes out:
      -- Let X denote the sum of the digits in (n ** 1000).
      -- Let Y denote the sum of the digits in (n ** 999).
      This function RETURNs the sum of the digits in (X ** Y).
    Side effects: None.
    Examples:
      -- If n is 2, this function returns 19084.
      -- If n is 35, this function returns 124309.
    """
    # The original body was an unimplemented stub that returned None.
    x = sum_of_digits(n ** 1000)
    y = sum_of_digits(n ** 999)
    # x ** y is a VERY LARGE integer -- compute its digit sum, never print it.
    return sum_of_digits(x ** y)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# This unusual form is necessary for the special testing we provided.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
| 41.447876 | 77 | 0.469213 |
import m3t_tester
def main():
run_test_sum_of_digits()
run_test_digits_in_cube()
run_test_digits_in_power()
run_test_fancy_sums_of_digits()
def run_test_sum_of_digits():
print()
print('--------------------------------------------------')
print('Testing the sum_of_digits function:')
print('--------------------------------------------------')
expected = 16
answer = sum_of_digits(826)
print('Test 1 expected:', expected)
print(' actual: ', answer)
def sum_of_digits(number):
if number < 0:
number = -number
digit_sum = 0
while True:
if number == 0:
break
digit_sum = digit_sum + (number % 10)
number = number // 10
return digit_sum
def run_test_digits_in_cube():
print()
print('-----------------------------------------------------')
print('Testing the digits_in_cube function:')
print('-----------------------------------------------------')
def digits_in_cube(n):
| true | true |
f70fa2944e0e3fcde401f3381a70b6633f36db93 | 5,384 | py | Python | indieauth/views.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | indieauth/views.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | indieauth/views.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
from urllib.parse import urlencode, unquote
import requests
from bs4 import BeautifulSoup
from django.utils.crypto import get_random_string
from django.contrib import messages
from urllib.parse import urlparse, urljoin
from django.contrib.auth import get_user_model
from django.contrib.auth import login as login_auth
def redirect_logged_in_users(function):
    """
    View decorator: authenticated users are bounced to the feed index;
    anonymous users fall through to the wrapped view unchanged.
    """
    def wrapper(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return function(request, *args, **kwargs)
        # Already logged in -- no reason to show the auth pages.
        return HttpResponseRedirect(reverse("feed:index"))
    return wrapper
@redirect_logged_in_users
def index(request):
    """Login landing page; clears any stale IndieAuth session keys first."""
    cleanup(request)
    return render(request, 'indieauth/index.html', {})
def login(request):
    """
    Handle the IndieAuth login form POST.

    Fetches the submitted site, discovers its ``authorization_endpoint``
    (from <link> tags in the HTML, then the HTTP Link header), stores the
    login state in the session and redirects the user to that endpoint.
    On any failure, redirects back to the login page with an error message.
    """
    try:
        if request.method == 'POST':
            site = request.POST.get("site", None)
            url_data = urlparse(site)
            if site and url_data.netloc != '' and (url_data.scheme == 'http' or url_data.scheme == 'https'):
                if url_data.path == '':
                    site = site + '/'
                print(site)
                r = requests.get(site)
                soup = BeautifulSoup(r.text, 'html.parser')
                unique_id = get_random_string(length=32)
                # BUG FIX: initialize so that when no endpoint is advertised the
                # "No authorization_endpoint found." branch below runs, instead
                # of an unbound-name NameError being swallowed by the broad
                # except and reported as 'Error in retrieving url.'.
                authorization_endpoint = None
                for link in soup.find_all('link'):
                    if link.get('rel')[0] == "authorization_endpoint":
                        authorization_endpoint = link.get('href')
                        # if relative URL, this will attach it to the end of the redirected url
                        authorization_endpoint = urljoin(r.url, authorization_endpoint)
                if r.headers.get('Link', None):
                    # An HTTP Link header overrides any <link> tag found above.
                    links = r.headers['Link']
                    print(links)
                    for link in links.split(","):
                        possible_url = link.split(";")[0].strip()
                        # Strip the surrounding angle brackets: <url> -> url.
                        possible_url = possible_url[1:len(possible_url) - 1]
                        possible_rel = link.split(";")[1].strip()
                        if possible_rel == "rel=authorization_endpoint":
                            authorization_endpoint = urljoin(r.url, possible_url)
                # after redirects, the final URL will be contained in the response
                site = r.url
                print(r.history)
                searchHistory = True
                i = -1
                # ensure that if there's temp redirects that the "me" url is always the last permanent redirect
                while searchHistory and (i * -1) <= len(r.history):
                    history_piece = r.history[i]
                    if history_piece.status_code == 301:
                        site = history_piece.url
                    i -= 1
                # If ALL of them are temporary redirects then use the initial value
                # (generator variable renamed so it no longer shadows the loop index above).
                if all(h.status_code == 302 for h in r.history):
                    site = request.POST.get("site", None)
                if authorization_endpoint:
                    request.session['authorization_endpoint'] = authorization_endpoint
                    request.session['client_id'] = site
                    request.session['state'] = unique_id
                    payload = {
                        'me': site,
                        'redirect_uri': request.build_absolute_uri(reverse('indieauth:redirect')),
                        'client_id': f'{request.scheme}://{request.get_host()}/indieauth/application_info',
                        'state': unique_id,
                        'response_type': 'id',
                    }
                    redirect_site = authorization_endpoint + "?" + urlencode(payload)
                    return HttpResponseRedirect(redirect_site)
                else:
                    cleanup(request)
                    messages.error(request, 'No authorization_endpoint found.')
                    return HttpResponseRedirect(reverse('indieauth:index'))
    except Exception as e:
        print(e)
        messages.error(request, 'Error in retrieving url.')
        return HttpResponseRedirect(reverse('indieauth:index'))
    messages.error(request, 'No site submitted or the URL submitted was not valid.')
    return HttpResponseRedirect(reverse('indieauth:index'))
def redirect(request):
    """IndieAuth callback: verify the returned code with the authorization
    endpoint, then log the user in (creating the account on first login).

    Relies on ``state``, ``client_id`` and ``authorization_endpoint`` having
    been stashed in the session by ``login()``.
    """
    # The echoed state must match ours, and an absent state must never
    # match an absent session entry.
    if request.GET.get('state', None) == request.session.get('state', None) and request.session.get('state', None) != None:
        client_id = request.session['client_id']
        authorization_endpoint = request.session['authorization_endpoint']
        redirect_uri = request.build_absolute_uri(reverse('indieauth:redirect'))
        code = request.GET.get('code')
        r = requests.post(authorization_endpoint, data={'code': code, 'client_id': client_id, 'redirect_uri': redirect_uri})
        # Content-Type may carry parameters ("; charset=utf-8"), so match on
        # the media-type prefix instead of exact equality.
        content_type = r.headers.get('content-type', '')
        if content_type.startswith("application/x-www-form-urlencoded"):
            user_site = unquote(r.text)[3:]  # strip the leading "me="
        elif content_type.startswith("application/json"):
            # BUG FIX: r.text is a plain string; the 'me' member lives in the
            # parsed JSON body, so decode it rather than indexing the raw text.
            try:
                user_site = r.json().get('me')
            except ValueError:
                user_site = None
        else:
            user_site = None
        # BUG FIX: only compare domains when a URL was actually extracted;
        # urlparse(None) raises.
        user_site_matches_domain = bool(user_site) and urlparse(client_id).netloc == urlparse(user_site).netloc
        if r.status_code == 200 and user_site and user_site_matches_domain:
            messages.success(request, 'Your URL is: ' + user_site)
            user_model = get_user_model()
            user = user_model.objects.filter(site=user_site)
            if user:
                login_auth(request, user[0])
            else:
                # First login: provision a password-less account keyed to the site.
                user = user_model.objects.create_user(username=user_site, site=user_site)
                user.set_unusable_password()
                login_auth(request, user)
            cleanup(request)
            return HttpResponseRedirect(reverse('feed:index'))
        else:
            messages.error(request, 'Error in URL. Please try again.')
            cleanup(request)
            return HttpResponseRedirect(reverse('indieauth:index'))
    else:
        messages.error(request, 'Major error. Likely timeout. Please try again.')
        cleanup(request)
        return HttpResponseRedirect(reverse('indieauth:index'))
def cleanup(request):
    """Remove all IndieAuth handshake keys from the session.

    BUG FIX: the original deleted the three keys inside a single
    try/except KeyError, so the first missing key aborted the whole
    cleanup and left stale state behind. Each key is now removed
    independently.
    """
    for key in ('authorization_endpoint', 'state', 'client_id'):
        request.session.pop(key, None)
def application_info(request):
    """Serve the static page describing this IndieAuth client application."""
    template_name = "indieauth/application_info.html"
    return render(request, template_name)
from django.urls import reverse
from urllib.parse import urlencode, unquote
import requests
from bs4 import BeautifulSoup
from django.utils.crypto import get_random_string
from django.contrib import messages
from urllib.parse import urlparse, urljoin
from django.contrib.auth import get_user_model
from django.contrib.auth import login as login_auth
def redirect_logged_in_users(function):
    """View decorator: send already-authenticated users to the feed
    instead of running the wrapped view."""
    def wrapper(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return function(request, *args, **kwargs)
        return HttpResponseRedirect(reverse("feed:index"))
    return wrapper
@redirect_logged_in_users
def index(request):
    """Login landing page; also clears any half-finished handshake state."""
    cleanup(request)
    context = {}
    return render(request, 'indieauth/index.html', context)
def login(request):
    """Start an IndieAuth handshake for the URL submitted via POST.

    Discovers the site's authorization_endpoint (from <link> tags and the
    HTTP ``Link`` header), canonicalises the profile URL across permanent
    redirects, stores the handshake state in the session, and sends the
    browser to the authorization endpoint.
    """
    try:
        if request.method == 'POST':
            site = request.POST.get("site", None)
            url_data = urlparse(site)
            if site and url_data.netloc != '' and (url_data.scheme == 'http' or url_data.scheme == 'https'):
                # Profile URLs are normalised to always carry a path.
                if url_data.path == '':
                    site = site + '/'
                print(site)
                r = requests.get(site)
                soup = BeautifulSoup(r.text, 'html.parser')
                unique_id = get_random_string(length=32)
                # BUG FIX: always bind the name. Previously a page without an
                # endpoint raised NameError at the `if authorization_endpoint:`
                # check below, which the broad except reported as a misleading
                # "Error in retrieving url".
                authorization_endpoint = None
                for link in soup.find_all('link'):
                    # BUG FIX: <link> tags without a rel attribute made
                    # link.get('rel')[0] raise a TypeError.
                    rel = link.get('rel') or []
                    if rel and rel[0] == "authorization_endpoint":
                        authorization_endpoint = link.get('href')
                        # if relative, resolve it against the redirected url
                        authorization_endpoint = urljoin(r.url, authorization_endpoint)
                # The HTTP Link header wins over <link> tags when both exist.
                if r.headers.get('Link', None):
                    links = r.headers['Link']
                    print(links)
                    for link in links.split(","):
                        possible_url = link.split(";")[0].strip()
                        possible_url = possible_url[1:len(possible_url) - 1]  # strip the <> wrapper
                        possible_rel = link.split(";")[1].strip()
                        if possible_rel == "rel=authorization_endpoint":
                            authorization_endpoint = urljoin(r.url, possible_url)
                # after redirects, the final URL will be contained in the response
                site = r.url
                print(r.history)
                i = -1
                # ensure that if there are temp redirects, the "me" url is
                # always the last permanent (301) redirect
                while (i * -1) <= len(r.history):
                    history_piece = r.history[i]
                    if history_piece.status_code == 301:
                        site = history_piece.url
                    i -= 1
                # If ALL redirects were temporary, fall back to the submitted
                # value. BUG FIX: guard on a non-empty history, otherwise all()
                # is vacuously True and the normalised URL was thrown away.
                if r.history and all(h.status_code == 302 for h in r.history):
                    site = request.POST.get("site", None)
                if authorization_endpoint:
                    request.session['authorization_endpoint'] = authorization_endpoint
                    request.session['client_id'] = site
                    request.session['state'] = unique_id
                    payload = {'me': site,
                               'redirect_uri': request.build_absolute_uri(reverse('indieauth:redirect')),
                               'client_id': f'{request.scheme}://{ request.get_host() }/indieauth/application_info',
                               'state': unique_id,
                               'response_type': 'id'}
                    redirect_site = authorization_endpoint + "?" + urlencode(payload)
                    return HttpResponseRedirect(redirect_site)
                else:
                    cleanup(request)
                    messages.error(request, 'No authorization_endpoint found.')
                    return HttpResponseRedirect(reverse('indieauth:index'))
    except Exception as e:
        print(e)
        messages.error(request, 'Error in retrieving url.')
        return HttpResponseRedirect(reverse('indieauth:index'))
    messages.error(request, 'No site submitted or the URL submitted was not valid.')
    return HttpResponseRedirect(reverse('indieauth:index'))
def redirect(request):
    """IndieAuth callback: verify the returned code with the authorization
    endpoint, then log the user in (creating the account on first login).

    Relies on ``state``, ``client_id`` and ``authorization_endpoint`` having
    been stashed in the session by ``login()``.
    """
    # The echoed state must match ours, and an absent state must never
    # match an absent session entry.
    if request.GET.get('state', None) == request.session.get('state', None) and request.session.get('state', None) != None:
        client_id = request.session['client_id']
        authorization_endpoint = request.session['authorization_endpoint']
        redirect_uri = request.build_absolute_uri(reverse('indieauth:redirect'))
        code = request.GET.get('code')
        r = requests.post(authorization_endpoint, data={'code': code, 'client_id': client_id, 'redirect_uri': redirect_uri})
        # Content-Type may carry parameters ("; charset=utf-8"), so match on
        # the media-type prefix instead of exact equality.
        content_type = r.headers.get('content-type', '')
        if content_type.startswith("application/x-www-form-urlencoded"):
            user_site = unquote(r.text)[3:]  # strip the leading "me="
        elif content_type.startswith("application/json"):
            # BUG FIX: r.text is a plain string; the 'me' member lives in the
            # parsed JSON body, so decode it rather than indexing the raw text.
            try:
                user_site = r.json().get('me')
            except ValueError:
                user_site = None
        else:
            user_site = None
        # BUG FIX: only compare domains when a URL was actually extracted;
        # urlparse(None) raises.
        user_site_matches_domain = bool(user_site) and urlparse(client_id).netloc == urlparse(user_site).netloc
        if r.status_code == 200 and user_site and user_site_matches_domain:
            messages.success(request, 'Your URL is: ' + user_site)
            user_model = get_user_model()
            user = user_model.objects.filter(site=user_site)
            if user:
                login_auth(request, user[0])
            else:
                # First login: provision a password-less account keyed to the site.
                user = user_model.objects.create_user(username=user_site, site=user_site)
                user.set_unusable_password()
                login_auth(request, user)
            cleanup(request)
            return HttpResponseRedirect(reverse('feed:index'))
        else:
            messages.error(request, 'Error in URL. Please try again.')
            cleanup(request)
            return HttpResponseRedirect(reverse('indieauth:index'))
    else:
        messages.error(request, 'Major error. Likely timeout. Please try again.')
        cleanup(request)
        return HttpResponseRedirect(reverse('indieauth:index'))
def cleanup(request):
    """Remove all IndieAuth handshake keys from the session.

    BUG FIX: the original deleted the three keys inside a single
    try/except KeyError, so the first missing key aborted the whole
    cleanup and left stale state behind. Each key is now removed
    independently.
    """
    for key in ('authorization_endpoint', 'state', 'client_id'):
        request.session.pop(key, None)
def application_info(request):
    """Serve the static page describing this IndieAuth client application."""
    template_name = "indieauth/application_info.html"
    return render(request, template_name)
f70fa2ec4e8ddf2e3938910c093b9fd6af4215c2 | 8,957 | py | Python | mpf/devices/switch.py | cloudjor/mpf | 1cf6bf18b0d81120383b0b128b0ebbfa1c62717c | [
"MIT"
] | null | null | null | mpf/devices/switch.py | cloudjor/mpf | 1cf6bf18b0d81120383b0b128b0ebbfa1c62717c | [
"MIT"
] | null | null | null | mpf/devices/switch.py | cloudjor/mpf | 1cf6bf18b0d81120383b0b128b0ebbfa1c62717c | [
"MIT"
] | null | null | null | """Contains the Switch parent class."""
import asyncio
from functools import partial
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.machine import MachineController
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
from mpf.core.platform import SwitchConfig
from mpf.devices.device_mixins import DevicePositionMixin
MYPY = False
if MYPY: # pragma: no cover
from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface
from mpf.core.platform import SwitchPlatform
@DeviceMonitor("state", "recycle_jitter_count")
class Switch(SystemWideDevice, DevicePositionMixin):
    """A switch in a pinball machine."""
    config_section = 'switches'
    collection = 'switches'
    class_label = 'switch'
    __slots__ = ["hw_switch", "platform", "state", "hw_state", "invert", "recycle_secs", "recycle_clear_time",
                 "recycle_jitter_count", "_events_to_post", "last_change"]
    def __init__(self, machine: MachineController, name: str) -> None:
        """Initialise switch."""
        self.hw_switch = None  # type: SwitchPlatformInterface
        self.platform = None  # type: SwitchPlatform
        super().__init__(machine, name)
        self.state = 0
        """ The logical state of a switch. 1 = active, 0 = inactive. This takes
        into consideration the NC or NO settings for the switch."""
        self.hw_state = 0
        """ The physical hardware state of the switch. 1 = active,
        0 = inactive. This is what the actual hardware is reporting and does
        not consider whether a switch is NC or NO."""
        self.invert = 0
        self.recycle_secs = 0
        self.recycle_clear_time = None
        self.recycle_jitter_count = 0
        # per-state lists of event names posted on transitions to that state
        self._events_to_post = {0: [], 1: []}
        self.last_change = -100000
        # register switch so other devices can add handlers to it
        self.machine.switch_controller.register_switch(self)
    @classmethod
    def device_class_init(cls, machine: MachineController):
        """Register handler for duplicate switch number checks."""
        machine.events.add_handler("init_phase_4",
                                   cls._check_duplicate_switch_numbers,
                                   machine=machine)
    @staticmethod
    def _check_duplicate_switch_numbers(machine, **kwargs):
        """Raise AssertionError when two switches share one platform/number pair."""
        del kwargs  # event-handler kwargs are unused
        check_set = set()
        for switch in machine.switches:
            key = (switch.platform, switch.hw_switch.number)
            if key in check_set:
                raise AssertionError(
                    "Duplicate switch number {} for switch {}".format(
                        switch.hw_switch.number, switch))
            check_set.add(key)
    def validate_and_parse_config(self, config, is_mode_config, debug_prefix: str = None):
        """Validate switch config."""
        config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
        # NOTE(review): config is subscriptable elsewhere (self.config['platform']
        # in _initialize); getattr() on a dict-like config always returns the
        # default -- confirm whether config.get("platform", None) was intended.
        platform = self.machine.get_platform_sections(
            'switches', getattr(config, "platform", None))
        config['platform_settings'] = platform.validate_switch_section(
            self, config.get('platform_settings', None))
        self._configure_device_logging(config)
        return config
    def _create_activation_event(self, event_str: str, state: int):
        """Register event_str to be posted when this switch reaches state.

        An "event_name|500ms"-style suffix posts the event only after the
        switch has held the state that long; plain names are posted on each
        transition via _post_events.
        """
        if "|" in event_str:
            event, ev_time = event_str.split("|")
            ms = Util.string_to_ms(ev_time)
            self.machine.switch_controller.add_switch_handler(
                switch_name=self.name,
                state=state,
                callback=partial(self.machine.events.post, event=event),
                ms=ms
            )
        else:
            self._events_to_post[state].append(event_str)
    def _recycle_passed(self, state):
        """End the recycle window; post events if the switch toggled meanwhile."""
        self.recycle_clear_time = None
        # only post event if the switch toggled
        if self.state != state:
            self._post_events(self.state)
    def _post_events_with_recycle(self, state):
        """Post state events, suppressing re-posts inside the recycle window."""
        # if recycle is ongoing do nothing
        if not self.recycle_clear_time:
            # calculate clear time
            self.recycle_clear_time = self.machine.clock.get_time() + self.recycle_secs
            self.machine.clock.loop.call_at(self.recycle_clear_time, partial(self._recycle_passed, state))
            # post event
            self._post_events(state)
    def _post_events(self, state):
        """Post all events registered for the given state, if defined."""
        for event in self._events_to_post[state]:
            if self.machine.events.does_event_exist(event):
                self.machine.events.post(event)
    @asyncio.coroutine
    def _initialize(self):
        """Configure the switch in the hardware platform and wire up its events."""
        yield from super()._initialize()
        self.platform = self.machine.get_platform_sections(
            'switches', self.config['platform'])
        # normally-closed switches report inverted hardware state
        if self.config['type'].upper() == 'NC':
            self.invert = 1
        self.recycle_secs = self.config['ignore_window_ms'] / 1000.0
        config = SwitchConfig(invert=self.invert,
                              debounce=self.config['debounce'])
        try:
            self.hw_switch = self.platform.configure_switch(
                self.config['number'], config, self.config['platform_settings'])
        except AssertionError as e:
            raise AssertionError("Failed to configure switch {} in platform. See error above".format(self.name)) from e
        # recycle handlers debounce repeat events; plain handlers post directly
        if self.recycle_secs:
            self.add_handler(state=1, callback=self._post_events_with_recycle, callback_kwargs={"state": 1})
            self.add_handler(state=0, callback=self._post_events_with_recycle, callback_kwargs={"state": 0})
        else:
            self.add_handler(state=1, callback=self._post_events, callback_kwargs={"state": 1})
            self.add_handler(state=0, callback=self._post_events, callback_kwargs={"state": 0})
        # auto-create "<name>_active"/"<name>_inactive" and per-tag events
        if self.machine.config['mpf']['auto_create_switch_events']:
            self._create_activation_event(
                self.machine.config['mpf']['switch_event_active'].replace(
                    '%', self.name), 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_event_inactive'].replace(
                    '%', self.name), 0)
        for tag in self.tags:
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag), 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag) + "_active", 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag) + "_inactive", 0)
        for event in Util.string_to_lowercase_list(
                self.config['events_when_activated']):
            self._create_activation_event(event, 1)
        for event in Util.string_to_lowercase_list(
                self.config['events_when_deactivated']):
            self._create_activation_event(event, 0)
    # pylint: disable-msg=too-many-arguments
    def add_handler(self, callback, state=1, ms=0, return_info=False,
                    callback_kwargs=None):
        """Add switch handler (callback) for this switch which is called when this switch state changes.

        Note that this method just calls the
        :doc:`Switch Controller's <self.machine.switch_controller>`
        ``add_switch_handler()`` method behind the scenes.

        Args:
            callback: A callable method that will be called when the switch
                state changes.
            state: The state that the switch which change into which triggers
                the callback to be called. Values are 0 or 1, with 0 meaning
                the switch changed to inactive, and 1 meaning the switch
                changed to an active state.
            ms: How many milliseconds the switch needs to be in the new state
                before the callback is called. Default is 0 which means that
                the callback will be called immediately. You can use this
                setting as a form of software debounce, as the switch needs to
                be in the state consistently before the callback is called.
            return_info: If True, the switch controller will pass the
                parameters of the switch handler as arguments to the callback,
                including switch_name, state, and ms.
            callback_kwargs: Additional kwargs that will be passed with the
                callback.
        """
        return self.machine.switch_controller.add_switch_handler(
            self.name, callback, state, ms, return_info, callback_kwargs)
    def remove_handler(self, callback, state=1, ms=0):
        """Remove switch handler for this switch."""
        return self.machine.switch_controller.remove_switch_handler(
            self.name, callback, state, ms)
| 43.692683 | 119 | 0.635927 | import asyncio
from functools import partial
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.machine import MachineController
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
from mpf.core.platform import SwitchConfig
from mpf.devices.device_mixins import DevicePositionMixin
MYPY = False
if MYPY:
from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface
from mpf.core.platform import SwitchPlatform
@DeviceMonitor("state", "recycle_jitter_count")
class Switch(SystemWideDevice, DevicePositionMixin):
    """A switch in a pinball machine."""
    config_section = 'switches'
    collection = 'switches'
    class_label = 'switch'
    __slots__ = ["hw_switch", "platform", "state", "hw_state", "invert", "recycle_secs", "recycle_clear_time",
                 "recycle_jitter_count", "_events_to_post", "last_change"]
    def __init__(self, machine: MachineController, name: str) -> None:
        """Initialise switch and register it with the switch controller."""
        self.hw_switch = None  # type: SwitchPlatformInterface
        self.platform = None  # type: SwitchPlatform
        super().__init__(machine, name)
        # logical state (1 = active, 0 = inactive), after NC/NO inversion
        self.state = 0
        # raw hardware state, before NC/NO inversion
        self.hw_state = 0
        self.invert = 0
        self.recycle_secs = 0
        self.recycle_clear_time = None
        self.recycle_jitter_count = 0
        # per-state lists of event names posted on transitions to that state
        self._events_to_post = {0: [], 1: []}
        self.last_change = -100000
        # register switch so other devices can add handlers to it
        self.machine.switch_controller.register_switch(self)
    @classmethod
    def device_class_init(cls, machine: MachineController):
        """Register the duplicate-switch-number check for init_phase_4."""
        machine.events.add_handler("init_phase_4",
                                   cls._check_duplicate_switch_numbers,
                                   machine=machine)
    @staticmethod
    def _check_duplicate_switch_numbers(machine, **kwargs):
        """Raise AssertionError when two switches share one platform/number pair."""
        del kwargs  # event-handler kwargs are unused
        check_set = set()
        for switch in machine.switches:
            key = (switch.platform, switch.hw_switch.number)
            if key in check_set:
                raise AssertionError(
                    "Duplicate switch number {} for switch {}".format(
                        switch.hw_switch.number, switch))
            check_set.add(key)
    def validate_and_parse_config(self, config, is_mode_config, debug_prefix: str = None):
        """Validate the switch config and attach platform settings."""
        config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
        # NOTE(review): config is subscriptable elsewhere (self.config['platform']
        # in _initialize); getattr() on a dict-like config always returns the
        # default -- confirm whether config.get("platform", None) was intended.
        platform = self.machine.get_platform_sections(
            'switches', getattr(config, "platform", None))
        config['platform_settings'] = platform.validate_switch_section(
            self, config.get('platform_settings', None))
        self._configure_device_logging(config)
        return config
    def _create_activation_event(self, event_str: str, state: int):
        """Register event_str to be posted when this switch reaches state.

        An "event_name|500ms"-style suffix posts the event only after the
        switch has held the state that long; plain names are posted on each
        transition via _post_events.
        """
        if "|" in event_str:
            event, ev_time = event_str.split("|")
            ms = Util.string_to_ms(ev_time)
            self.machine.switch_controller.add_switch_handler(
                switch_name=self.name,
                state=state,
                callback=partial(self.machine.events.post, event=event),
                ms=ms
            )
        else:
            self._events_to_post[state].append(event_str)
    def _recycle_passed(self, state):
        """End the recycle window; post events if the switch toggled meanwhile."""
        self.recycle_clear_time = None
        if self.state != state:
            self._post_events(self.state)
    def _post_events_with_recycle(self, state):
        """Post state events, suppressing re-posts inside the recycle window."""
        if not self.recycle_clear_time:
            self.recycle_clear_time = self.machine.clock.get_time() + self.recycle_secs
            self.machine.clock.loop.call_at(self.recycle_clear_time, partial(self._recycle_passed, state))
            self._post_events(state)
    def _post_events(self, state):
        """Post all events registered for the given state, if defined."""
        for event in self._events_to_post[state]:
            if self.machine.events.does_event_exist(event):
                self.machine.events.post(event)
    @asyncio.coroutine
    def _initialize(self):
        """Configure the switch in the hardware platform and wire up its events."""
        yield from super()._initialize()
        self.platform = self.machine.get_platform_sections(
            'switches', self.config['platform'])
        # normally-closed switches report inverted hardware state
        if self.config['type'].upper() == 'NC':
            self.invert = 1
        self.recycle_secs = self.config['ignore_window_ms'] / 1000.0
        config = SwitchConfig(invert=self.invert,
                              debounce=self.config['debounce'])
        try:
            self.hw_switch = self.platform.configure_switch(
                self.config['number'], config, self.config['platform_settings'])
        except AssertionError as e:
            raise AssertionError("Failed to configure switch {} in platform. See error above".format(self.name)) from e
        # recycle handlers debounce repeat events; plain handlers post directly
        if self.recycle_secs:
            self.add_handler(state=1, callback=self._post_events_with_recycle, callback_kwargs={"state": 1})
            self.add_handler(state=0, callback=self._post_events_with_recycle, callback_kwargs={"state": 0})
        else:
            self.add_handler(state=1, callback=self._post_events, callback_kwargs={"state": 1})
            self.add_handler(state=0, callback=self._post_events, callback_kwargs={"state": 0})
        # auto-create "<name>_active"/"<name>_inactive" and per-tag events
        if self.machine.config['mpf']['auto_create_switch_events']:
            self._create_activation_event(
                self.machine.config['mpf']['switch_event_active'].replace(
                    '%', self.name), 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_event_inactive'].replace(
                    '%', self.name), 0)
        for tag in self.tags:
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag), 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag) + "_active", 1)
            self._create_activation_event(
                self.machine.config['mpf']['switch_tag_event'].replace(
                    '%', tag) + "_inactive", 0)
        for event in Util.string_to_lowercase_list(
                self.config['events_when_activated']):
            self._create_activation_event(event, 1)
        for event in Util.string_to_lowercase_list(
                self.config['events_when_deactivated']):
            self._create_activation_event(event, 0)
    def add_handler(self, callback, state=1, ms=0, return_info=False,
                    callback_kwargs=None):
        """Add a handler called when this switch changes to ``state``.

        Delegates to the switch controller's ``add_switch_handler()``.
        ``ms`` delays the callback until the state has been held that long.
        """
        return self.machine.switch_controller.add_switch_handler(
            self.name, callback, state, ms, return_info, callback_kwargs)
    def remove_handler(self, callback, state=1, ms=0):
        """Remove a previously added switch handler for this switch."""
        return self.machine.switch_controller.remove_switch_handler(
            self.name, callback, state, ms)
| true | true |
f70fa4716a8ad9de69464eb03d1b8acc667bcf25 | 8,629 | py | Python | lucida/speechrecognition/kaldi_gstreamer_asr/kaldigstserver/decoder2.py | extremenelson/sirius | 0bad428bb763fe404d01db5d9e08ee33a8f3776c | [
"BSD-3-Clause"
] | 1,808 | 2015-12-23T09:38:57.000Z | 2022-03-24T05:55:03.000Z | lucida/speechrecognition/kaldi_gstreamer_asr/kaldigstserver/decoder2.py | extremenelson/sirius | 0bad428bb763fe404d01db5d9e08ee33a8f3776c | [
"BSD-3-Clause"
] | 164 | 2015-12-22T17:32:16.000Z | 2022-01-30T16:19:28.000Z | lucida/speechrecognition/kaldi_gstreamer_asr/kaldigstserver/decoder2.py | mrinformatics/lucida1604 | f17fba20be9765c3464437f40e97278bba29b9d5 | [
"BSD-3-Clause"
] | 554 | 2015-12-23T11:29:34.000Z | 2022-02-08T05:31:49.000Z | """
Created on May 17, 2013
@author: tanel
"""
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
import logging
import thread
import os
logger = logging.getLogger(__name__)
import pdb
class DecoderPipeline2(object):
    """GStreamer decoding pipeline wrapping the Kaldi nnet2 online decoder.

    Builds appsrc -> decodebin -> audioconvert -> audioresample -> tee, with
    one tee branch dumping raw audio to a filesink and the other feeding the
    kaldinnet2onlinedecoder element. Recognition results and EOS/error
    notifications are delivered through caller-registered handlers.
    """
    def __init__(self, conf=None):
        """Create and configure the pipeline.

        Args:
            conf: configuration dict. ``decoder`` holds properties for the
                kaldinnet2onlinedecoder element; ``out-dir`` (optional) names
                a directory where per-request raw audio is written.
        """
        # Avoid the shared-mutable-default pitfall of ``conf={}`` while
        # staying call-compatible.
        if conf is None:
            conf = {}
        logger.info("Creating decoder using conf: %s" % conf)
        self.create_pipeline(conf)
        self.outdir = conf.get("out-dir", None)
        # BUG FIX: "out-dir" is optional (every later use checks
        # ``if self.outdir:``), but os.path.exists(None) raises, so only
        # create/validate the directory when one was configured.
        if self.outdir:
            if not os.path.exists(self.outdir):
                os.makedirs(self.outdir)
            elif not os.path.isdir(self.outdir):
                raise Exception("Output directory %s already exists as a file" % self.outdir)
        self.result_handler = None
        self.full_result_handler = None
        self.eos_handler = None
        self.error_handler = None
        self.request_id = "<undefined>"
    def create_pipeline(self, conf):
        """Instantiate, configure, link and watch all GStreamer elements."""
        self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
        self.audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
        self.audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
        self.tee = Gst.ElementFactory.make("tee", "tee")
        self.queue1 = Gst.ElementFactory.make("queue", "queue1")
        self.filesink = Gst.ElementFactory.make("filesink", "filesink")
        self.queue2 = Gst.ElementFactory.make("queue", "queue2")
        self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
        self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
        # This needs to be set first, before the other decoder properties
        if "use-threaded-decoder" in conf["decoder"]:
            self.asr.set_property("use-threaded-decoder", conf["decoder"]["use-threaded-decoder"])
        for (key, val) in conf.get("decoder", {}).iteritems():
            if key != "use-threaded-decoder":
                logger.info("Setting decoder property: %s = %s" % (key, val))
                self.asr.set_property(key, val)
        self.appsrc.set_property("is-live", True)
        # real per-request location is set in init_request()
        self.filesink.set_property("location", "/dev/null")
        logger.info('Created GStreamer elements')
        self.pipeline = Gst.Pipeline()
        for element in [self.appsrc, self.decodebin, self.audioconvert, self.audioresample, self.tee,
                        self.queue1, self.filesink,
                        self.queue2, self.asr, self.fakesink]:
            logger.debug("Adding %s to the pipeline" % element)
            self.pipeline.add(element)
        logger.info('Linking GStreamer elements')
        self.appsrc.link(self.decodebin)
        # decodebin pads appear dynamically once the stream type is known
        self.decodebin.connect('pad-added', self._connect_decoder)
        self.audioconvert.link(self.audioresample)
        self.audioresample.link(self.tee)
        self.tee.link(self.queue1)
        self.queue1.link(self.filesink)
        self.tee.link(self.queue2)
        self.queue2.link(self.asr)
        self.asr.link(self.fakesink)
        # Create bus and connect several handlers
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.enable_sync_message_emission()
        self.bus.connect('message::eos', self._on_eos)
        self.bus.connect('message::error', self._on_error)
        self.asr.connect('partial-result', self._on_partial_result)
        self.asr.connect('final-result', self._on_final_result)
        self.asr.connect('full-final-result', self._on_full_final_result)
        logger.info("Setting pipeline to READY")
        self.pipeline.set_state(Gst.State.READY)
        logger.info("Set pipeline to READY")
    def _connect_decoder(self, element, pad):
        """Link a dynamically created decodebin pad to audioconvert."""
        logger.info("%s: Connecting audio decoder" % self.request_id)
        pad.link(self.audioconvert.get_static_pad("sink"))
        logger.info("%s: Connected audio decoder" % self.request_id)
    def _on_partial_result(self, asr, hyp):
        """Forward a partial (non-final) hypothesis to the result handler."""
        logger.info("%s: Got partial result: %s" % (self.request_id, hyp.decode('utf8')))
        if self.result_handler:
            self.result_handler(hyp, False)
    def _on_final_result(self, asr, hyp):
        """Forward a final hypothesis to the result handler."""
        logger.info("%s: Got final result: %s" % (self.request_id, hyp.decode('utf8')))
        if self.result_handler:
            self.result_handler(hyp, True)
    def _on_full_final_result(self, asr, result_json):
        """Forward the full (JSON) final result to its handler."""
        logger.info("%s: Got full final result: %s" % (self.request_id, result_json.decode('utf8')))
        if self.full_result_handler:
            self.full_result_handler(result_json)
    def _on_error(self, bus, msg):
        """Log a pipeline error, reset state and notify the error handler."""
        self.error = msg.parse_error()
        logger.error(self.error)
        self.finish_request()
        if self.error_handler:
            self.error_handler(self.error[0].message)
    def _on_eos(self, bus, msg):
        """Reset state at end-of-stream and notify the EOS handler."""
        logger.info('%s: Pipeline received eos signal' % self.request_id)
        self.finish_request()
        if self.eos_handler:
            self.eos_handler[0](self.eos_handler[1])
    def get_adaptation_state(self):
        """Return the decoder's current speaker-adaptation state."""
        return self.asr.get_property("adaptation-state")
    def set_adaptation_state(self, adaptation_state):
        """Sets the adaptation state to a certain value, previously retrieved using get_adaptation_state().

        Should be called after init_request(..)
        """
        return self.asr.set_property("adaptation-state", adaptation_state)
    def finish_request(self):
        """Reset the pipeline and per-request state after a request ends."""
        logger.info("%s: Resetting decoder state" % self.request_id)
        if self.outdir:
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location', "/dev/null")
            self.filesink.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.NULL)
        self.request_id = "<undefined>"
    def init_request(self, id, caps_str):
        """Prepare the pipeline for a new request.

        Args:
            id: identifier used in logs and the raw-audio dump filename.
            caps_str: GStreamer caps string describing the incoming audio,
                or empty/None to let decodebin detect the format.
        """
        self.request_id = id
        logger.info("%s: Initializing request" % (self.request_id))
        if caps_str and len(caps_str) > 0:
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            self.appsrc.set_property("caps", None)
        if self.outdir:
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
        # reset adaptation state for the new speaker/request
        self.set_adaptation_state("")
    def process_data(self, data):
        """Push a chunk of raw audio bytes into the pipeline."""
        logger.debug('%s: Pushing buffer of size %d to pipeline' % (self.request_id, len(data)))
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        self.appsrc.emit("push-buffer", buf)
        logger.debug('%s: Pushing buffer done' % self.request_id)
    def end_request(self):
        """Signal that no more audio will arrive for this request."""
        logger.info("%s: Pushing EOS to pipeline" % self.request_id)
        self.appsrc.emit("end-of-stream")
    def set_result_handler(self, handler):
        """Set callback(hyp, is_final) for partial/final hypotheses."""
        self.result_handler = handler
    def set_full_result_handler(self, handler):
        """Set callback(result_json) for the full final result."""
        self.full_result_handler = handler
    def set_eos_handler(self, handler, user_data=None):
        """Set callback(user_data) invoked at end-of-stream."""
        self.eos_handler = (handler, user_data)
    def set_error_handler(self, handler):
        """Set callback(message) invoked on pipeline errors."""
        self.error_handler = handler
    def cancel(self):
        """Abort the current request by sending EOS through the pipeline."""
        logger.info("%s: Sending EOS to pipeline in order to cancel processing" % self.request_id)
        self.appsrc.emit("end-of-stream")
        logger.info("%s: Cancelled pipeline" % self.request_id)
| 38.013216 | 106 | 0.644918 | import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
import logging
import thread
import os
logger = logging.getLogger(__name__)
import pdb
class DecoderPipeline2(object):
def __init__(self, conf={}):
logger.info("Creating decoder using conf: %s" % conf)
self.create_pipeline(conf)
self.outdir = conf.get("out-dir", None)
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
elif not os.path.isdir(self.outdir):
raise Exception("Output directory %s already exists as a file" % self.outdir)
self.result_handler = None
self.full_result_handler = None
self.eos_handler = None
self.error_handler = None
self.request_id = "<undefined>"
def create_pipeline(self, conf):
self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
self.audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
self.audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
self.tee = Gst.ElementFactory.make("tee", "tee")
self.queue1 = Gst.ElementFactory.make("queue", "queue1")
self.filesink = Gst.ElementFactory.make("filesink", "filesink")
self.queue2 = Gst.ElementFactory.make("queue", "queue2")
self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if "use-threaded-decoder" in conf["decoder"]:
self.asr.set_property("use-threaded-decoder", conf["decoder"]["use-threaded-decoder"])
for (key, val) in conf.get("decoder", {}).iteritems():
if key != "use-threaded-decoder":
logger.info("Setting decoder property: %s = %s" % (key, val))
self.asr.set_property(key, val)
self.appsrc.set_property("is-live", True)
self.filesink.set_property("location", "/dev/null")
logger.info('Created GStreamer elements')
self.pipeline = Gst.Pipeline()
for element in [self.appsrc, self.decodebin, self.audioconvert, self.audioresample, self.tee,
self.queue1, self.filesink,
self.queue2, self.asr, self.fakesink]:
logger.debug("Adding %s to the pipeline" % element)
self.pipeline.add(element)
logger.info('Linking GStreamer elements')
self.appsrc.link(self.decodebin)
self.decodebin.connect('pad-added', self._connect_decoder)
self.audioconvert.link(self.audioresample)
self.audioresample.link(self.tee)
self.tee.link(self.queue1)
self.queue1.link(self.filesink)
self.tee.link(self.queue2)
self.queue2.link(self.asr)
self.asr.link(self.fakesink)
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.enable_sync_message_emission()
self.bus.connect('message::eos', self._on_eos)
self.bus.connect('message::error', self._on_error)
self.asr.connect('partial-result', self._on_partial_result)
self.asr.connect('final-result', self._on_final_result)
self.asr.connect('full-final-result', self._on_full_final_result)
logger.info("Setting pipeline to READY")
self.pipeline.set_state(Gst.State.READY)
logger.info("Set pipeline to READY")
def _connect_decoder(self, element, pad):
logger.info("%s: Connecting audio decoder" % self.request_id)
pad.link(self.audioconvert.get_static_pad("sink"))
logger.info("%s: Connected audio decoder" % self.request_id)
def _on_partial_result(self, asr, hyp):
logger.info("%s: Got partial result: %s" % (self.request_id, hyp.decode('utf8')))
if self.result_handler:
self.result_handler(hyp, False)
def _on_final_result(self, asr, hyp):
logger.info("%s: Got final result: %s" % (self.request_id, hyp.decode('utf8')))
if self.result_handler:
self.result_handler(hyp, True)
def _on_full_final_result(self, asr, result_json):
logger.info("%s: Got full final result: %s" % (self.request_id, result_json.decode('utf8')))
if self.full_result_handler:
self.full_result_handler(result_json)
def _on_error(self, bus, msg):
self.error = msg.parse_error()
logger.error(self.error)
self.finish_request()
if self.error_handler:
self.error_handler(self.error[0].message)
def _on_eos(self, bus, msg):
logger.info('%s: Pipeline received eos signal' % self.request_id)
self.finish_request()
if self.eos_handler:
self.eos_handler[0](self.eos_handler[1])
def get_adaptation_state(self):
return self.asr.get_property("adaptation-state")
def set_adaptation_state(self, adaptation_state):
return self.asr.set_property("adaptation-state", adaptation_state)
def finish_request(self):
    # Return the pipeline to a pristine state after a request ends.
    # (Reconstructed: the pasted source had lost all indentation.)
    logger.info("%s: Resetting decoder state" % self.request_id)
    if self.outdir:
        # Point the raw-audio dump at /dev/null between requests.
        self.filesink.set_state(Gst.State.NULL)
        self.filesink.set_property('location', "/dev/null")
        self.filesink.set_state(Gst.State.PLAYING)
    self.pipeline.set_state(Gst.State.NULL)
    self.request_id = "<undefined>"

def init_request(self, id, caps_str):
    # Prepare the pipeline for a new utterance identified by `id`,
    # optionally fixing the audio caps from `caps_str`.
    self.request_id = id
    logger.info("%s: Initializing request" % (self.request_id))
    if caps_str and len(caps_str) > 0:
        logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
        caps = Gst.caps_from_string(caps_str)
        self.appsrc.set_property("caps", caps)
    else:
        self.appsrc.set_property("caps", None)
    if self.outdir:
        # Dump this request's raw audio to <outdir>/<id>.raw.
        self.pipeline.set_state(Gst.State.PAUSED)
        self.filesink.set_state(Gst.State.NULL)
        self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id))
        self.filesink.set_state(Gst.State.PLAYING)
    self.pipeline.set_state(Gst.State.PLAYING)
    # NOTE(review): filesink is set to PLAYING even when outdir is unset
    # (it then points at /dev/null) -- confirm this matches intent.
    self.filesink.set_state(Gst.State.PLAYING)
    # Each request starts with an empty speaker-adaptation state.
    self.set_adaptation_state("")
def process_data(self, data):
    # Feed one chunk of raw audio bytes into the pipeline via appsrc.
    # (Reconstructed: the pasted source had lost all indentation.)
    logger.debug('%s: Pushing buffer of size %d to pipeline' % (self.request_id, len(data)))
    buf = Gst.Buffer.new_allocate(None, len(data), None)
    buf.fill(0, data)
    self.appsrc.emit("push-buffer", buf)
    logger.debug('%s: Pushing buffer done' % self.request_id)

def end_request(self):
    # Signal that no more audio will arrive for this request.
    logger.info("%s: Pushing EOS to pipeline" % self.request_id)
    self.appsrc.emit("end-of-stream")

def set_result_handler(self, handler):
    # handler(hypothesis, is_final) -- called on partial/final results.
    self.result_handler = handler

def set_full_result_handler(self, handler):
    # handler(result_json) -- called with the full structured result.
    self.full_result_handler = handler

def set_eos_handler(self, handler, user_data=None):
    # Stored as (callable, user_data); invoked as handler(user_data) on EOS.
    self.eos_handler = (handler, user_data)

def set_error_handler(self, handler):
    # handler(message_string) -- called when the bus reports an error.
    self.error_handler = handler

def cancel(self):
    # Cancel by sending EOS; the pipeline drains and fires the EOS handler.
    logger.info("%s: Sending EOS to pipeline in order to cancel processing" % self.request_id)
    self.appsrc.emit("end-of-stream")
    logger.info("%s: Cancelled pipeline" % self.request_id)
| true | true |
f70fa47cf19e266644db800bdfe629f660dc9d2e | 419 | py | Python | Mundo 2 - Exercicios/61Exercicio.py | andrezzadede/Curso_Python_Guanabara_Mundo_2 | e4ebf171f74809f8a65e846c59978db95c5d3b1b | [
"MIT"
] | null | null | null | Mundo 2 - Exercicios/61Exercicio.py | andrezzadede/Curso_Python_Guanabara_Mundo_2 | e4ebf171f74809f8a65e846c59978db95c5d3b1b | [
"MIT"
] | null | null | null | Mundo 2 - Exercicios/61Exercicio.py | andrezzadede/Curso_Python_Guanabara_Mundo_2 | e4ebf171f74809f8a65e846c59978db95c5d3b1b | [
"MIT"
] | null | null | null | print('---------- Bem vindo ao exercicio 61 ------')
print('\033[32m Reçaca o desafio 51. Lendo o primeiro termo e a razao de uma PA. Mostrando os 10 primeiros termos da progressa usando a estrutura while\033[m')
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
termo = primeiro
c = 1
while c <= 10:
print('{} -> '.format(termo), end='')
termo += razao
c += 1
print('Fim')
| 18.217391 | 159 | 0.618138 | print('---------- Bem vindo ao exercicio 61 ------')
print('\033[32m Reçaca o desafio 51. Lendo o primeiro termo e a razao de uma PA. Mostrando os 10 primeiros termos da progressa usando a estrutura while\033[m')
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
termo = primeiro
c = 1
while c <= 10:
print('{} -> '.format(termo), end='')
termo += razao
c += 1
print('Fim')
| true | true |
f70fa4df361d80f9ecacef1088f93b318fd49fe6 | 2,056 | py | Python | docs/conf.py | Moody-Tunes/spotify-client | 496c72d915d92c29795a31a18cc9af26a9015b4b | [
"MIT"
] | 1 | 2020-12-21T02:35:18.000Z | 2020-12-21T02:35:18.000Z | docs/conf.py | Moody-Tunes/spotify-client | 496c72d915d92c29795a31a18cc9af26a9015b4b | [
"MIT"
] | 9 | 2020-09-04T15:35:23.000Z | 2021-04-24T02:10:56.000Z | docs/conf.py | Moody-Tunes/spotify-client | 496c72d915d92c29795a31a18cc9af26a9015b4b | [
"MIT"
] | 2 | 2020-12-21T02:35:24.000Z | 2020-12-29T07:38:16.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'spotify-client'
copyright = '2020, MoodyTunes'
author = 'MoodyTunes'
# The full version, including alpha/beta/rc tags
with open("../VERSION", "r") as version_file:
version = version_file.read().strip()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
pygments_style = 'sphinx'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 35.448276 | 79 | 0.669747 |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
project = 'spotify-client'
copyright = '2020, MoodyTunes'
author = 'MoodyTunes'
with open("../VERSION", "r") as version_file:
version = version_file.read().strip()
release = version
extensions = ['sphinx.ext.autodoc']
pygments_style = 'sphinx'
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'alabaster'
html_static_path = ['_static']
| true | true |
f70fa5c21d44574a534aa9a438155cd0428d003d | 946 | py | Python | decorator/send_commands_netmiko.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-05T09:30:23.000Z | 2022-03-09T13:27:56.000Z | decorator/send_commands_netmiko.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | null | null | null | decorator/send_commands_netmiko.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-06T03:44:35.000Z | 2022-03-04T21:20:40.000Z | from netmiko import ConnectHandler
import yaml
from pprint import pprint
def send_show_command(device, show_command):
    """Open an SSH session to *device*, enter enable mode, run one show
    command and return its output."""
    with ConnectHandler(**device) as connection:
        connection.enable()
        output = connection.send_command(show_command)
    return output
def send_config_commands(device, config_commands):
    """Open an SSH session to *device*, enter enable mode, push a batch of
    configuration commands and return the combined device output."""
    with ConnectHandler(**device) as connection:
        connection.enable()
        output = connection.send_config_set(config_commands)
    return output
def send_commands(device, config=None, show=None):
    """Dispatch to the show- or config-sending helper for *device*.

    Exactly one of *show* (a show command string) or *config* (a list of
    configuration commands) should be given; *show* wins if both are set.

    Fix: the original referenced an undefined name ``device_list`` instead
    of the ``device`` parameter, raising NameError on every call.
    """
    if show:
        return send_show_command(device, show)
    elif config:
        return send_config_commands(device, config)
if __name__ == "__main__":
    # Demo run: push logging configuration to the inventory, then collect
    # a brief interface summary.
    # NOTE(review): send_commands() takes a single device dict, while the
    # YAML inventory presumably holds a list -- confirm against devices.yaml.
    config_commands = ["logging 10.255.255.1", "logging buffered 20010", "no logging console"]
    status_command = "sh ip int br"
    with open("devices.yaml") as inventory:
        devices = yaml.safe_load(inventory)
    send_commands(devices, config=config_commands)
    send_commands(devices, show=status_command)
| 27.028571 | 87 | 0.707188 | from netmiko import ConnectHandler
import yaml
from pprint import pprint
def send_show_command(device, show_command):
with ConnectHandler(**device) as ssh:
ssh.enable()
result = ssh.send_command(show_command)
return result
def send_config_commands(device, config_commands):
with ConnectHandler(**device) as ssh:
ssh.enable()
result = ssh.send_config_set(config_commands)
return result
def send_commands(device, config=None, show=None):
if show:
return send_show_command(device_list, show)
elif config:
return send_config_commands(device_list, config)
if __name__ == "__main__":
commands = ["logging 10.255.255.1", "logging buffered 20010", "no logging console"]
show_command = "sh ip int br"
with open("devices.yaml") as f:
dev_list = yaml.safe_load(f)
send_commands(dev_list, config=commands)
send_commands(dev_list, show=show_command)
| true | true |
f70fa6b243087d007e8f52414d27fa83ded92b20 | 2,335 | py | Python | venv/Lib/site-packages/IPython/html/widgets/widget_container.py | Tyranicangel/dtrans | a5e23d200a310701bb357bff09e35a5629a3f7a3 | [
"BSD-3-Clause"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | IPython/html/widgets/widget_container.py | khinsen/ipython | dfd5cb1d3e34048593ba537dacdbef08fe766624 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | IPython/html/widgets/widget_container.py | khinsen/ipython | dfd5cb1d3e34048593ba537dacdbef08fe766624 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | """ContainerWidget class.
Represents a container that can be used to group other widgets.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ContainerWidget(DOMWidget):
    _view_name = Unicode('ContainerView', sync=True)

    # Child widgets held by the container. Reassign the whole tuple to
    # trigger the change handler; `_children` mirrors the validated,
    # de-duplicated value that is synced to the front end.
    children = Tuple()
    _children = Tuple(sync=True)

    def __init__(self, **kwargs):
        super(ContainerWidget, self).__init__(**kwargs)
        self.on_displayed(ContainerWidget._fire_children_displayed)

    def _fire_children_displayed(self):
        # Propagate the displayed event to every synced child widget.
        for child_widget in self._children:
            child_widget._handle_displayed()

    def _children_changed(self, name, old, new):
        """Validate the children list.

        Keeps only the first occurrence of each model id, preserving
        order, so a given widget model appears at most once.
        """
        if new is not None:
            seen_ids = set()
            unique_children = []
            for child_widget in new:
                if child_widget.model_id not in seen_ids:
                    seen_ids.add(child_widget.model_id)
                    unique_children.append(child_widget)
            self._children = unique_children
class PopupWidget(ContainerWidget):
    # Container rendered by the 'PopupView' front-end view.
    _view_name = Unicode('PopupView', sync=True)
    description = Unicode(sync=True)  # synced text; presumably the popup title -- confirm in JS view
    button_text = Unicode(sync=True)  # synced text; presumably the trigger-button label -- confirm in JS view
| 37.063492 | 82 | 0.544325 |
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, TraitError
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
children = Tuple()
_children = Tuple(sync=True)
def __init__(self, **kwargs):
super(ContainerWidget, self).__init__(**kwargs)
self.on_displayed(ContainerWidget._fire_children_displayed)
def _fire_children_displayed(self):
for child in self._children:
child._handle_displayed()
def _children_changed(self, name, old, new):
if new is not None:
seen = {}
def add_item(i):
seen[i.model_id] = True
return i
self._children = [add_item(i) for i in new if not i.model_id in seen]
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
description = Unicode(sync=True)
button_text = Unicode(sync=True)
| true | true |
f70fa6ee938b2345b273950c3d615a8cb65e74f1 | 3,036 | py | Python | inst/CnaAnnotator.py | jalavery/gnomeR | b0031bd5eb1c8c5636910d0b779a8808947245f5 | [
"MIT"
] | 15 | 2020-02-20T18:20:15.000Z | 2021-12-23T08:49:09.000Z | inst/CnaAnnotator.py | jalavery/gnomeR | b0031bd5eb1c8c5636910d0b779a8808947245f5 | [
"MIT"
] | 36 | 2020-02-21T20:23:41.000Z | 2022-03-04T21:12:44.000Z | inst/CnaAnnotator.py | MSKCC-Epi-Bio/gnomeR | 4f165774eb3c5f442881a915ee70e18a5f33b387 | [
"MIT"
] | 9 | 2020-02-17T23:43:35.000Z | 2022-03-21T12:01:36.000Z | import argparse
# from AnnotatorCore import *
# NOTE(review): the AnnotatorCore star-import above is commented out, yet
# main() calls helpers such as setoncokbapitoken()/processcnagisticdata()
# that normally come from it -- confirm how those names reach this module.
import sys
import csv
import requests
import os.path
import logging
import re
import matplotlib
# Force a non-interactive backend for headless servers; must run before
# pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import date
import logging  # NOTE(review): duplicate of the earlier `import logging` (harmless)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('CnaAnnotator')
def main(argv):
    """Annotate a GISTIC-style CNA file with OncoKB, driven by parsed CLI args.

    Relies on helpers (setoncokbapitoken, processcnagisticdata, ...) that
    normally come from AnnotatorCore -- see the NOTE at the top of the file.
    """
    # -h: print usage and exit.
    if argv.help:
        log.info('\n'
                 'CnaAnnotator.py -i <input CNA file> -o <output CNA file> [-p previous results] [-c <input clinical file>] [-s sample list filter] [-t <default tumor type>] [-u oncokb-base-url] [-b oncokb_api_bear_token] [-z annotate_gain_loss]\n'
                 '  Input CNA file should follow the GISTIC output (https://docs.cbioportal.org/5.1-data-loading/data-loading/file-formats#data-file-1)\n'
                 '  Essential clinical columns:\n'
                 '    SAMPLE_ID: sample ID\n'
                 '  Cancer type will be assigned based on the following priority:\n'
                 '     1) ONCOTREE_CODE in clinical data file\n'
                 '     2) ONCOTREE_CODE exist in MAF\n'
                 '     3) default tumor type (-t)\n'
                 '  We do not annotate Gain and Loss by default, add -z to include the analysis. See https://github.com/oncokb/oncokb-annotator/issues/51 for more information.\n'
                 '  Default OncoKB base url is https://www.oncokb.org')
        sys.exit()
    # Input, output and API token are mandatory.
    if argv.input_file == '' or argv.output_file == '' or argv.oncokb_api_bearer_token == '':
        log.info('for help: python CnaAnnotator.py -h')
        sys.exit(2)
    if argv.sample_ids_filter:
        setsampleidsfileterfile(argv.sample_ids_filter)
    if argv.oncokb_api_url:
        setoncokbbaseurl(argv.oncokb_api_url)
    setoncokbapitoken(argv.oncokb_api_bearer_token)

    # Optional clinical file supplies per-sample cancer types (ONCOTREE codes).
    cancertypemap = {}
    if argv.input_clinical_file:
        readCancerTypes(argv.input_clinical_file, cancertypemap)

    log.info('annotating %s ...' % argv.input_file)
    processcnagisticdata(argv.input_file, argv.output_file, argv.previous_result_file, argv.default_cancer_type,
                         cancertypemap, argv.annotate_gain_loss)

    log.info('done!')
if __name__ == "__main__":
    # add_help=False: -h is handled manually inside main() to print the
    # custom usage text.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h', dest='help', action="store_true", default=False)
    parser.add_argument('-i', dest='input_file', default='', type=str)            # input CNA (GISTIC) file
    parser.add_argument('-o', dest='output_file', default='', type=str)           # annotated output file
    parser.add_argument('-p', dest='previous_result_file', default='', type=str)  # reuse previous annotations
    parser.add_argument('-c', dest='input_clinical_file', default='', type=str)   # clinical file with SAMPLE_ID/ONCOTREE_CODE
    parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)     # restrict to listed sample IDs
    parser.add_argument('-t', dest='default_cancer_type', default='', type=str)   # fallback tumor type
    parser.add_argument('-u', dest='oncokb_api_url', default='', type=str)        # override OncoKB base URL
    parser.add_argument('-b', dest='oncokb_api_bearer_token', default='', type=str)  # OncoKB API token (required)
    parser.add_argument('-z', dest='annotate_gain_loss', action="store_true", default=False)  # also annotate Gain/Loss
    parser.set_defaults(func=main)

    args = parser.parse_args()
    args.func(args)
| 44.647059 | 239 | 0.693676 | import argparse
import sys
import csv
import requests
import os.path
import logging
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import date
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('CnaAnnotator')
def main(argv):
if argv.help:
log.info('\n'
'CnaAnnotator.py -i <input CNA file> -o <output CNA file> [-p previous results] [-c <input clinical file>] [-s sample list filter] [-t <default tumor type>] [-u oncokb-base-url] [-b oncokb_api_bear_token] [-z annotate_gain_loss]\n'
' Input CNA file should follow the GISTIC output (https://docs.cbioportal.org/5.1-data-loading/data-loading/file-formats#data-file-1)\n'
' Essential clinical columns:\n'
' SAMPLE_ID: sample ID\n'
' Cancer type will be assigned based on the following priority:\n'
' 1) ONCOTREE_CODE in clinical data file\n'
' 2) ONCOTREE_CODE exist in MAF\n'
' 3) default tumor type (-t)\n'
' We do not annotate Gain and Loss by default, add -z to include the analysis. See https://github.com/oncokb/oncokb-annotator/issues/51 for more information.\n'
' Default OncoKB base url is https://www.oncokb.org')
sys.exit()
if argv.input_file == '' or argv.output_file == '' or argv.oncokb_api_bearer_token == '':
log.info('for help: python CnaAnnotator.py -h')
sys.exit(2)
if argv.sample_ids_filter:
setsampleidsfileterfile(argv.sample_ids_filter)
if argv.oncokb_api_url:
setoncokbbaseurl(argv.oncokb_api_url)
setoncokbapitoken(argv.oncokb_api_bearer_token)
cancertypemap = {}
if argv.input_clinical_file:
readCancerTypes(argv.input_clinical_file, cancertypemap)
log.info('annotating %s ...' % argv.input_file)
processcnagisticdata(argv.input_file, argv.output_file, argv.previous_result_file, argv.default_cancer_type,
cancertypemap, argv.annotate_gain_loss)
log.info('done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='help', action="store_true", default=False)
parser.add_argument('-i', dest='input_file', default='', type=str)
parser.add_argument('-o', dest='output_file', default='', type=str)
parser.add_argument('-p', dest='previous_result_file', default='', type=str)
parser.add_argument('-c', dest='input_clinical_file', default='', type=str)
parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)
parser.add_argument('-t', dest='default_cancer_type', default='', type=str)
parser.add_argument('-u', dest='oncokb_api_url', default='', type=str)
parser.add_argument('-b', dest='oncokb_api_bearer_token', default='', type=str)
parser.add_argument('-z', dest='annotate_gain_loss', action="store_true", default=False)
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
| true | true |
f70fa803280ab9f216b7ca2a38b01efb67cd1f5e | 3,693 | py | Python | tmot/matching.py | JunweiLiang/Object_Detection_Tracking | f86caaec97669a6da56f1b402cca4e179a85d2f0 | [
"MIT"
] | 328 | 2019-05-27T03:09:02.000Z | 2022-03-31T05:12:04.000Z | tmot/matching.py | AnjaliPC/Object_Detection_Tracking | f86caaec97669a6da56f1b402cca4e179a85d2f0 | [
"MIT"
] | 43 | 2019-06-05T14:04:09.000Z | 2022-01-25T03:16:39.000Z | tmot/matching.py | AnjaliPC/Object_Detection_Tracking | f86caaec97669a6da56f1b402cca4e179a85d2f0 | [
"MIT"
] | 107 | 2019-05-27T06:26:38.000Z | 2022-03-25T03:32:58.000Z | import numpy as np
import scipy
from scipy.spatial.distance import cdist
import lap # 0.4.0
from cython_bbox import bbox_overlaps as bbox_ious
from . import kalman_filter
def merge_matches(m1, m2, shape):
    """Chain two pairwise match lists (O->P and P->Q) into O->Q matches.

    Returns the composed matches plus the O and Q indices left unmatched.
    """
    n_o, n_p, n_q = shape
    a = np.asarray(m1)
    b = np.asarray(m2)

    # Sparse incidence matrices; their product links O directly to Q.
    link_op = scipy.sparse.coo_matrix((np.ones(len(a)), (a[:, 0], a[:, 1])), shape=(n_o, n_p))
    link_pq = scipy.sparse.coo_matrix((np.ones(len(b)), (b[:, 0], b[:, 1])), shape=(n_p, n_q))

    rows, cols = (link_op * link_pq).nonzero()
    match = list(zip(rows, cols))

    matched_o = {i for i, _ in match}
    matched_q = {j for _, j in match}
    unmatched_O = tuple(set(range(n_o)) - matched_o)
    unmatched_Q = tuple(set(range(n_q)) - matched_q)

    return match, unmatched_O, unmatched_Q
def linear_assignment(cost_matrix, thresh):
    """Solve the assignment problem on *cost_matrix* with cost cap *thresh*.

    Returns (matches as an (k, 2) array of [row, col], unmatched row
    indices, unmatched column indices).
    """
    if cost_matrix.size == 0:
        no_matches = np.empty((0, 2), dtype=int)
        return no_matches, tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))

    _, row_to_col, col_to_row = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
    paired = [[row, col] for row, col in enumerate(row_to_col) if col >= 0]
    unmatched_a = np.where(row_to_col < 0)[0]
    unmatched_b = np.where(col_to_row < 0)[0]
    return np.asarray(paired), unmatched_a, unmatched_b
def ious(atlbrs, btlbrs):
    """
    Compute cost based on IoU
    :type atlbrs: list[tlbr] | np.ndarray
    :type btlbrs: list[tlbr] | np.ndarray
    :rtype ious np.ndarray
    """
    # Fix: `np.float` was removed in NumPy 1.24; np.float64 is the exact
    # replacement (the old alias pointed at the builtin float / float64).
    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float64)
    if ious.size == 0:
        # Early return avoids calling the Cython kernel on empty input.
        return ious

    ious = bbox_ious(
        np.ascontiguousarray(atlbrs, dtype=np.float64),
        np.ascontiguousarray(btlbrs, dtype=np.float64)
    )

    return ious
def iou_distance(atracks, btracks):
    """Compute a cost matrix of (1 - IoU) between two track lists.

    Either argument may already be a sequence of tlbr boxes (ndarrays);
    if *either* one is, both are used as-is, matching the original joint
    check.
    """
    boxes_given = (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or \
                  (len(btracks) > 0 and isinstance(btracks[0], np.ndarray))
    if boxes_given:
        aboxes, bboxes = atracks, btracks
    else:
        aboxes = [track.tlbr for track in atracks]
        bboxes = [track.tlbr for track in btracks]

    return 1 - ious(aboxes, bboxes)
def embedding_distance(tracks, detections, metric='cosine'):
    """
    :param tracks: list[STrack]
    :param detections: list[BaseTrack]
    :param metric: distance metric name understood by scipy cdist (default 'cosine')
    :return: cost_matrix np.ndarray
    """
    # Fix: `np.float` was removed in NumPy 1.24; use np.float64.
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float64)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float64)
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float64)
    # Fix: forward `metric` to cdist; the original ignored it and always
    # computed Euclidean distance despite the 'cosine' default.
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Nomalized features
    return cost_matrix
def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
    """Blend appearance cost with Kalman gating (Mahalanobis) distance.

    Entries whose gating distance exceeds the chi-square 95% threshold are
    set to infinity; the rest are mixed as
    lambda_ * appearance + (1 - lambda_) * gating.
    """
    if cost_matrix.size == 0:
        return cost_matrix

    dof = 2 if only_position else 4
    gate = kalman_filter.chi2inv95[dof]
    measurements = np.asarray([det.to_xyah() for det in detections])

    for idx, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position, metric='maha')
        cost_matrix[idx, gating_distance > gate] = np.inf
        cost_matrix[idx] = lambda_ * cost_matrix[idx] + (1 - lambda_) * gating_distance

    return cost_matrix
| 33.572727 | 125 | 0.638776 | import numpy as np
import scipy
from scipy.spatial.distance import cdist
import lap
from cython_bbox import bbox_overlaps as bbox_ious
from . import kalman_filter
def merge_matches(m1, m2, shape):
O,P,Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1*M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def linear_assignment(cost_matrix, thresh):
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
matches, unmatched_a, unmatched_b = [], [], []
cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
for ix, mx in enumerate(x):
if mx >= 0:
matches.append([ix, mx])
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
matches = np.asarray(matches)
return matches, unmatched_a, unmatched_b
def ious(atlbrs, btlbrs):
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
if ious.size == 0:
return ious
ious = bbox_ious(
np.ascontiguousarray(atlbrs, dtype=np.float),
np.ascontiguousarray(btlbrs, dtype=np.float)
)
return ious
def iou_distance(atracks, btracks):
if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def embedding_distance(tracks, detections, metric='cosine'):
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)
cost_matrix = np.maximum(0.0, cdist(track_features, det_features))
return cost_matrix
def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position, metric='maha')
cost_matrix[row, gating_distance > gating_threshold] = np.inf
cost_matrix[row] = lambda_ * cost_matrix[row] + (1-lambda_)* gating_distance
return cost_matrix
| true | true |
f70fa88268e7f2a8c55619f7c4dd3c7747d1770a | 149 | py | Python | tardis/io/setup_package.py | chvogl/tardis | e444ffeebef92811165ec982a5c23785932a7f8e | [
"BSD-3-Clause"
] | 1 | 2016-03-24T13:14:25.000Z | 2016-03-24T13:14:25.000Z | tardis/io/setup_package.py | chvogl/tardis | e444ffeebef92811165ec982a5c23785932a7f8e | [
"BSD-3-Clause"
] | 6 | 2015-03-16T10:31:40.000Z | 2019-02-21T17:56:55.000Z | tardis/io/setup_package.py | chvogl/tardis | e444ffeebef92811165ec982a5c23785932a7f8e | [
"BSD-3-Clause"
] | 5 | 2015-03-17T18:56:20.000Z | 2019-02-12T12:53:15.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
    """Declare non-Python data files bundled with the tardis.io test suite."""
    data_patterns = ['data/*.dat', 'data/*.yml']
    return {'tardis.io.tests': data_patterns}
| 29.8 | 63 | 0.677852 |
def get_package_data():
return {'tardis.io.tests':['data/*.dat', 'data/*.yml']}
| true | true |
f70fa8b722e119309bc970211e06b4c03d657183 | 1,320 | py | Python | tests/test_stat.py | cailab-tamu/scTenifoldXct | d25ded8dfb7f2951217a30ab71eccd6b060178f6 | [
"MIT"
] | null | null | null | tests/test_stat.py | cailab-tamu/scTenifoldXct | d25ded8dfb7f2951217a30ab71eccd6b060178f6 | [
"MIT"
] | null | null | null | tests/test_stat.py | cailab-tamu/scTenifoldXct | d25ded8dfb7f2951217a30ab71eccd6b060178f6 | [
"MIT"
] | null | null | null | import pytest
import itertools
import pandas as pd
import numpy as np
from scTenifoldXct.core import null_test
def generate_fake_df_nn(n_ligand=3000, n_receptors=3000, n_cands=200):
    """Build a synthetic ligand-receptor distance table plus a random set
    of candidate pair names drawn (without replacement) from its index."""
    genes = [f"GENE{i}" for i in range(max(n_ligand, n_receptors))]

    pair_names = []
    ligand_col = []
    receptor_col = []
    for lig, rec in itertools.product(genes, genes):
        pair_names.append(f"{lig}_{rec}")
        ligand_col.append(lig)
        receptor_col.append(rec)

    n_pairs = n_ligand * n_receptors
    df = pd.DataFrame(
        {
            "ligand": ligand_col,
            "receptor": receptor_col,
            "dist": np.random.chisquare(1, (n_pairs,)),
            "correspondence": np.random.lognormal(0, 4, size=(n_pairs,)),
        },
        index=pair_names,
    )
    return df, np.random.choice(df.index, size=(n_cands,), replace=False)
# Smoke-test null_test on two randomly generated nearest-neighbour tables
# of different sizes; filter_zeros is currently only exercised with True.
@pytest.mark.parametrize("df_nn,candidates", [
    generate_fake_df_nn(3000, 3000, 200),
    generate_fake_df_nn(1000, 1000, 200),
])
@pytest.mark.parametrize("filter_zeros", [True])
def test_null_test(df_nn, candidates, filter_zeros):
    # Passes as long as null_test runs without raising.
    null_test(df_nn=df_nn, candidates=candidates, filter_zeros=filter_zeros)
def test_chi2_test(xct_skin):
    """Smoke-test neural-net training followed by the chi-square test.

    ``xct_skin`` is a pytest fixture providing a prepared scTenifoldXct
    object. (Fix: dataset metadata that had been fused onto the final
    statement line was stripped.)
    """
    xct_skin.train_nn(n_steps=1000, lr=0.001)
    xct_skin.chi2_test(dof=3, pval=0.05, cal_FDR=True, plot_result=True)
import itertools
import pandas as pd
import numpy as np
from scTenifoldXct.core import null_test
def generate_fake_df_nn(n_ligand=3000, n_receptors=3000, n_cands=200):
gene_names = [f"GENE{i}" for i in range(max(n_ligand, n_receptors))]
iteration = itertools.product(gene_names, gene_names)
inds, ligands, receptors = [], [], []
for i, j in iteration:
inds.append(f"{i}_{j}")
ligands.append(i)
receptors.append(j)
df = pd.DataFrame({"ligand": ligands,
"receptor": receptors,
"dist": np.random.chisquare(1, (n_ligand * n_receptors,)),
"correspondence": np.random.lognormal(0, 4, size=(n_ligand * n_receptors,))},
index=inds)
return df, np.random.choice(df.index, size=(n_cands,), replace=False)
@pytest.mark.parametrize("df_nn,candidates", [
generate_fake_df_nn(3000, 3000, 200),
generate_fake_df_nn(1000, 1000, 200),
])
@pytest.mark.parametrize("filter_zeros", [True])
def test_null_test(df_nn, candidates, filter_zeros):
null_test(df_nn=df_nn, candidates=candidates, filter_zeros=filter_zeros)
def test_chi2_test(xct_skin):
xct_skin.train_nn(n_steps= 1000, lr = 0.001)
xct_skin.chi2_test(dof=3, pval=0.05, cal_FDR=True, plot_result=True) | true | true |
f70fa8cb9d7f1e49ef9b56b7615433e16e26661d | 852 | py | Python | dataExtractor.py | bdburak/AmazonReviewSentimentAnalysis | 8e68d27f5ecd6c5e1b0c153f79c8b3ea1767ea50 | [
"MIT"
] | null | null | null | dataExtractor.py | bdburak/AmazonReviewSentimentAnalysis | 8e68d27f5ecd6c5e1b0c153f79c8b3ea1767ea50 | [
"MIT"
] | 1 | 2021-04-28T18:26:41.000Z | 2021-04-28T18:26:41.000Z | dataExtractor.py | bdburak/AmazonReviewSentimentAnalysis | 8e68d27f5ecd6c5e1b0c153f79c8b3ea1767ea50 | [
"MIT"
] | null | null | null | #Review Seperator
def reviewToList(strDataLocation): #reviewToList(str_DataLocation)
file = open(strDataLocation)
listFile=(file.readlines())
firstReviewItem=0
lastReviewItem=0
listReviews = []
reviewText =""
for item in range(len(listFile)):
if('<review_text>\n'==listFile[item]):
firstReviewItem = item+1
if('</review_text>\n'==listFile[item]):
ReviewItemRange = item - firstReviewItem
for i in range(ReviewItemRange):
reviewText = reviewText + (listFile[firstReviewItem])
firstReviewItem = firstReviewItem + 1
reviewText = reviewText.rstrip('\n')
listReviews.append(reviewText)
reviewText =""
return listReviews
| 25.818182 | 84 | 0.564554 |
def reviewToList(strDataLocation):
file = open(strDataLocation)
listFile=(file.readlines())
firstReviewItem=0
lastReviewItem=0
listReviews = []
reviewText =""
for item in range(len(listFile)):
if('<review_text>\n'==listFile[item]):
firstReviewItem = item+1
if('</review_text>\n'==listFile[item]):
ReviewItemRange = item - firstReviewItem
for i in range(ReviewItemRange):
reviewText = reviewText + (listFile[firstReviewItem])
firstReviewItem = firstReviewItem + 1
reviewText = reviewText.rstrip('\n')
listReviews.append(reviewText)
reviewText =""
return listReviews
| true | true |
f70fa8ec2ca8e6448f8e67c80257b049471e9c70 | 6,343 | py | Python | lab_4/main.py | SoullessDark/2020-2-level-labs | 9555eb5a86a0f330b2f99e991928b0337f519b7a | [
"MIT"
] | null | null | null | lab_4/main.py | SoullessDark/2020-2-level-labs | 9555eb5a86a0f330b2f99e991928b0337f519b7a | [
"MIT"
] | null | null | null | lab_4/main.py | SoullessDark/2020-2-level-labs | 9555eb5a86a0f330b2f99e991928b0337f519b7a | [
"MIT"
] | null | null | null | """
Lab 4
"""
import re
from ngrams.ngram_trie import NGramTrie
def tokenize_by_sentence(text: str) -> tuple:
    """Split raw text into lower-case word tokens, appending an ``<END>``
    marker after every non-empty sentence.

    Raises ValueError if *text* is not a string.
    """
    if not isinstance(text, str):
        raise ValueError

    result = []
    for raw_sentence in re.split(r'[.?!]', text):
        # Keep only lower-case letters, spaces and newlines before splitting.
        words = re.sub(r'[^a-z \n]', '', raw_sentence.lower()).split()
        if words:
            result.extend(words)
            result.append('<END>')
    return tuple(result)
class WordStorage:
    """Bidirectional word <-> integer-id registry; ids are assigned
    sequentially starting from 1."""

    def __init__(self):
        self.storage = {}

    def _put_word(self, word: str):
        """Register *word* if unseen and return its id.

        Raises ValueError for a non-string or empty word.
        """
        if not isinstance(word, str) or not word:
            raise ValueError
        # setdefault assigns the next free id only when the word is new.
        return self.storage.setdefault(word, len(self.storage) + 1)

    def get_id(self, word: str) -> int:
        """Return the id of a known word; KeyError if unknown,
        ValueError for invalid input."""
        if not isinstance(word, str) or not word:
            raise ValueError
        if word not in self.storage:
            raise KeyError
        return self.storage[word]

    def get_word(self, word_id: int) -> str:
        """Reverse lookup: the word registered under *word_id*;
        KeyError if no word has that id."""
        if not isinstance(word_id, int):
            raise ValueError
        for word, idx in self.storage.items():
            if idx == word_id:
                return word
        raise KeyError

    def update(self, corpus: tuple):
        """Register every token of *corpus* (must be a tuple)."""
        if not isinstance(corpus, tuple):
            raise ValueError
        for token in corpus:
            self._put_word(token)
def encode_text(storage: WordStorage, text: tuple) -> tuple:
    """Map every token of *text* to its id in *storage*.

    Raises ValueError on wrong argument types; KeyError propagates from
    the storage for unknown words.
    """
    if not isinstance(storage, WordStorage) or not isinstance(text, tuple):
        raise ValueError
    return tuple(storage.get_id(token) for token in text)
class NGramTextGenerator:
    """Greedy text generator: always picks the most frequent n-gram
    continuation of the current context."""

    def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie):
        self._word_storage = word_storage
        self._n_gram_trie = n_gram_trie

    def _generate_next_word(self, context: tuple) -> int:
        """Return the id of the most frequent word following *context*.

        Context length must be trie size - 1. Falls back to the overall
        most frequent unigram when no n-gram starts with the context.
        """
        if not isinstance(context, tuple) or len(context) + 1 != self._n_gram_trie.size:
            raise ValueError
        # top_word holds word ids (ints); '' is just a falsy sentinel for
        # "nothing found yet" checked below.
        top_word = ''
        word_freq = 0
        for n_gram, n_gram_freq in self._n_gram_trie.n_gram_frequencies.items():
            if context == n_gram[:-1] and n_gram_freq > word_freq:
                top_word = n_gram[-1]
                word_freq = n_gram_freq
        if not top_word:
            # No continuation found: take the globally most frequent
            # unigram (uni_gram keys are 1-tuples, hence the [0]).
            top_word = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
        return top_word

    def _generate_sentence(self, context: tuple) -> tuple:
        """Generate one sentence (word ids) from *context*, stopping at
        '<END>' or after 20 generated words (then '<END>' is forced)."""
        if not isinstance(context, tuple):
            raise ValueError
        sent = self.sent_is(context)
        for _ in range(20):
            sent.append(self._generate_next_word(context))
            # Slide the context window over everything generated so far.
            # NOTE(review): when sent already starts with the context
            # words, they are concatenated twice here before slicing --
            # confirm this is the intended windowing.
            context = tuple(list(context) + sent)[-len(context):]
            if sent[-1] == self._word_storage.get_id('<END>'):
                return tuple(sent)
        sent.append(self._word_storage.get_id('<END>'))
        return tuple(sent)

    def sent_is(self, context):
        """Seed the sentence: empty if the context ends a sentence
        ('<END>'), otherwise the context words themselves."""
        if context[-1] == self._word_storage.get_id('<END>'):
            sent = []
        else:
            sent = list(context)
        return sent

    def generate_text(self, context: tuple, number_of_sentences: int) -> tuple:
        """Generate *number_of_sentences* sentences, chaining the context
        from one sentence into the next. Returns all word ids as one tuple."""
        if not isinstance(context, tuple) or not isinstance(number_of_sentences, int) \
                or isinstance(number_of_sentences, bool):
            raise ValueError
        text = []
        for _ in range(number_of_sentences):
            sentence = self._generate_sentence(context)
            text.extend(sentence)
            # Next context: the tail of everything generated so far.
            context = tuple(text[-len(context):])
        return tuple(text)
class LikelihoodBasedTextGenerator(NGramTextGenerator):
    """Generator that scores candidate words by maximum-likelihood estimation."""
    def _calculate_maximum_likelihood(self, word: int, context: tuple) -> float:
        """Return freq(context + word) / freq(context), or 0.0 for an unseen context.

        Raises:
            ValueError: if *word*/*context* are mistyped or contain unknown ids.
        """
        type_check = [isinstance(word, int),
                      isinstance(context, tuple)]
        if not all(type_check) or word not in self._word_storage.storage.values() or \
                len([wrd for wrd in context if wrd in self._word_storage.storage.values()]) != len(context):
            raise ValueError
        wrd_freq = 0
        avrg_freq = 0
        length = self._n_gram_trie.size - 1
        for n_gram in self._n_gram_trie.n_grams:
            # count n-grams starting with the context; of those, count ones ending in word
            if context == n_gram[:length]:
                avrg_freq += 1
                if word == n_gram[-1]:
                    wrd_freq += 1
        try:
            likelihood = wrd_freq / avrg_freq
        except ZeroDivisionError:
            # context never observed -> probability 0
            likelihood = 0.0
        return likelihood
    def _generate_next_word(self, context: tuple) -> int:
        """Return the id of the word with the highest likelihood after *context*."""
        if not isinstance(context, tuple) or \
                len([w for w in context if w in self._word_storage.storage.values()]) != len(context):
            raise ValueError
        next_wrd = 0
        word_freq = 0.0
        for word in self._word_storage.storage.values():
            frequency = self._calculate_maximum_likelihood(word, context)
            if frequency > word_freq:
                word_freq = frequency
                next_wrd = word
        next_word = self.if_not_freq(next_wrd, word_freq)
        return next_word
    def if_not_freq(self, next_wrd, word_freq):
        # fall back to the most frequent uni-gram when every likelihood was 0
        if not word_freq:
            next_wrd = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
        return next_wrd
class BackOffGenerator(NGramTextGenerator):
    """Generator intended to back off to lower-order tries (not implemented yet)."""
    def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie, *args):
        # *args would carry the lower-order tries; currently unused
        super().__init__(word_storage, n_gram_trie)
    def _generate_next_word(self, context: tuple) -> int:
        # TODO: implement back-off word selection
        pass
def decode_text(storage: WordStorage, encoded_text: tuple) -> tuple:
    """Turn a tuple of word ids back into capitalized sentence strings.

    '<END>' markers delimit sentences.  Each returned sentence starts with a
    capital letter; the first word is always followed by a single space
    (even when it is the only word), matching the original join behaviour.

    Raises:
        ValueError: on wrong argument types or an empty *encoded_text*.
    """
    if not isinstance(storage, WordStorage) or not isinstance(encoded_text, tuple) or not encoded_text:
        raise ValueError
    sentences = [[]]
    for word_id in encoded_text:
        word = storage.get_word(word_id)
        if word == '<END>':
            sentences.append([])
        else:
            sentences[-1].append(word)
    result = []
    for words in sentences:
        if not words:
            continue
        first = words[0]
        result.append(first[0].upper() + first[1:] + ' ' + ' '.join(words[1:]))
    return tuple(result)
def save_model(model: NGramTextGenerator, path_to_saved_model: str):
    """Persist *model* to *path_to_saved_model* (not implemented yet)."""
    pass
def load_model(path_to_saved_model: str) -> NGramTextGenerator:
    """Load a previously saved generator from disk (not implemented yet)."""
    pass
| 29.365741 | 108 | 0.612329 | import re
from ngrams.ngram_trie import NGramTrie
def tokenize_by_sentence(text: str) -> tuple:
if not isinstance(text, str):
raise ValueError
sents = re.split(r'[.?!]', text)
tokenized_sent = []
for sent in sents:
tokens = re.sub(r'[^a-z \n]', '', sent.lower()).split()
if tokens:
tokenized_sent += tokens + ['<END>']
return tuple(tokenized_sent)
class WordStorage:
def __init__(self):
self.storage = {}
def _put_word(self, word: str):
if not isinstance(word, str) or not word:
raise ValueError
if word not in self.storage:
self.storage[word] = len(self.storage) + 1
return self.storage[word]
def get_id(self, word: str) -> int:
if not isinstance(word, str) or not word:
raise ValueError
if word not in self.storage:
raise KeyError
return self.storage[word]
def get_word(self, word_id: int) -> str:
if not isinstance(word_id, int):
raise ValueError
for key, value in self.storage.items():
if value == word_id:
return key
raise KeyError
def update(self, corpus: tuple):
if not isinstance(corpus, tuple):
raise ValueError
for word in corpus:
self._put_word(word)
def encode_text(storage: WordStorage, text: tuple) -> tuple:
if not isinstance(storage, WordStorage) or not isinstance(text, tuple):
raise ValueError
encoded_text = [storage.get_id(word) for word in text]
return tuple(encoded_text)
class NGramTextGenerator:
def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie):
self._word_storage = word_storage
self._n_gram_trie = n_gram_trie
def _generate_next_word(self, context: tuple) -> int:
if not isinstance(context, tuple) or len(context) + 1 != self._n_gram_trie.size:
raise ValueError
top_word = ''
word_freq = 0
for n_gram, n_gram_freq in self._n_gram_trie.n_gram_frequencies.items():
if context == n_gram[:-1] and n_gram_freq > word_freq:
top_word = n_gram[-1]
word_freq = n_gram_freq
if not top_word:
top_word = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
return top_word
def _generate_sentence(self, context: tuple) -> tuple:
if not isinstance(context, tuple):
raise ValueError
sent = self.sent_is(context)
for _ in range(20):
sent.append(self._generate_next_word(context))
context = tuple(list(context) + sent)[-len(context):]
if sent[-1] == self._word_storage.get_id('<END>'):
return tuple(sent)
sent.append(self._word_storage.get_id('<END>'))
return tuple(sent)
def sent_is(self, context):
if context[-1] == self._word_storage.get_id('<END>'):
sent = []
else:
sent = list(context)
return sent
def generate_text(self, context: tuple, number_of_sentences: int) -> tuple:
if not isinstance(context, tuple) or not isinstance(number_of_sentences, int) \
or isinstance(number_of_sentences, bool):
raise ValueError
text = []
for _ in range(number_of_sentences):
sentence = self._generate_sentence(context)
text.extend(sentence)
context = tuple(text[-len(context):])
return tuple(text)
class LikelihoodBasedTextGenerator(NGramTextGenerator):
def _calculate_maximum_likelihood(self, word: int, context: tuple) -> float:
type_check = [isinstance(word, int),
isinstance(context, tuple)]
if not all(type_check) or word not in self._word_storage.storage.values() or \
len([wrd for wrd in context if wrd in self._word_storage.storage.values()]) != len(context):
raise ValueError
wrd_freq = 0
avrg_freq = 0
length = self._n_gram_trie.size - 1
for n_gram in self._n_gram_trie.n_grams:
if context == n_gram[:length]:
avrg_freq += 1
if word == n_gram[-1]:
wrd_freq += 1
try:
likelihood = wrd_freq / avrg_freq
except ZeroDivisionError:
likelihood = 0.0
return likelihood
def _generate_next_word(self, context: tuple) -> int:
if not isinstance(context, tuple) or \
len([w for w in context if w in self._word_storage.storage.values()]) != len(context):
raise ValueError
next_wrd = 0
word_freq = 0.0
for word in self._word_storage.storage.values():
frequency = self._calculate_maximum_likelihood(word, context)
if frequency > word_freq:
word_freq = frequency
next_wrd = word
next_word = self.if_not_freq(next_wrd, word_freq)
return next_word
def if_not_freq(self, next_wrd, word_freq):
if not word_freq:
next_wrd = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
return next_wrd
class BackOffGenerator(NGramTextGenerator):
def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie, *args):
super().__init__(word_storage, n_gram_trie)
def _generate_next_word(self, context: tuple) -> int:
pass
def decode_text(storage: WordStorage, encoded_text: tuple) -> tuple:
if not isinstance(storage, WordStorage) or not isinstance(encoded_text, tuple) or not encoded_text:
raise ValueError
decoded_text = [[]]
for encoded_word in encoded_text:
decoded_word = storage.get_word(encoded_word)
if decoded_word == '<END>':
decoded_text.append([])
else:
decoded_text[-1].append(decoded_word)
decoded_text = [sentence[0][0].upper() + sentence[0][1:] + ' ' + ' '.join(sentence[1:])
for sentence in decoded_text if sentence]
return tuple(decoded_text)
def save_model(model: NGramTextGenerator, path_to_saved_model: str):
pass
def load_model(path_to_saved_model: str) -> NGramTextGenerator:
pass
| true | true |
f70fa93743ec576b39568159579d16544248a1ec | 83 | py | Python | conf/apps.py | invernoa/Conferences | 9e821948311dc9c28323ede8a26764899fc05255 | [
"MIT"
] | 41 | 2019-01-02T09:36:54.000Z | 2022-02-20T13:13:05.000Z | conf/apps.py | invernoa/Conferences | 9e821948311dc9c28323ede8a26764899fc05255 | [
"MIT"
] | 15 | 2019-09-30T05:40:20.000Z | 2022-02-17T19:28:41.000Z | conf/apps.py | invernoa/Conferences | 9e821948311dc9c28323ede8a26764899fc05255 | [
"MIT"
] | 23 | 2019-02-18T10:50:10.000Z | 2022-01-06T07:53:18.000Z | from django.apps import AppConfig
class ConfConfig(AppConfig):
    """Django application configuration for the 'conf' app."""
    name = 'conf'
| 13.833333 | 33 | 0.73494 | from django.apps import AppConfig
class ConfConfig(AppConfig):
name = 'conf'
| true | true |
f70facb47d95871efcad46d686499cc18bc105f0 | 937 | py | Python | Rendering/Core/Testing/Python/TestCgShader.py | collects/VTK | 004944f0d54df673c38b3d4016a4bee74fa7d813 | [
"BSD-3-Clause"
] | null | null | null | Rendering/Core/Testing/Python/TestCgShader.py | collects/VTK | 004944f0d54df673c38b3d4016a4bee74fa7d813 | [
"BSD-3-Clause"
] | null | null | null | Rendering/Core/Testing/Python/TestCgShader.py | collects/VTK | 004944f0d54df673c38b3d4016a4bee74fa7d813 | [
"BSD-3-Clause"
] | 2 | 2019-09-09T22:42:12.000Z | 2020-10-22T07:10:08.000Z | #!/usr/bin/env python
# NOTE(review): this script references the `vtk` module but no `import vtk`
# line is visible in this chunk -- presumably the VTK test harness provides
# it; confirm before running standalone.
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
# Sphere geometry feeding a standard polydata render pipeline.
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src1.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Load the material. Here, we are loading a material
# defined in the Vtk Library. One can also specify
# a filename to a material description xml.
actor.GetProperty().LoadMaterial("CgTwisted")
# Turn shading on. Otherwise, shaders are not used.
actor.GetProperty().ShadingOn()
# Pass a shader variable needed by CgTwisted.
actor.GetProperty().AddShaderVariable("Rate",1.0)
renderer.AddActor(actor)
renWin.Render()
# Rotate the camera and render again for the final view.
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
# --- end of script --
| 31.233333 | 52 | 0.781217 |
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src1.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().LoadMaterial("CgTwisted")
actor.GetProperty().ShadingOn()
actor.GetProperty().AddShaderVariable("Rate",1.0)
renderer.AddActor(actor)
renWin.Render()
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
| true | true |
f70fad1a401b33614f837e3bfd9c10fa8a5570aa | 1,451 | py | Python | tests/lib/utils.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 532 | 2016-11-07T22:01:00.000Z | 2022-03-30T17:11:40.000Z | tests/lib/utils.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 528 | 2016-11-22T01:42:19.000Z | 2022-03-24T02:27:15.000Z | tests/lib/utils.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 169 | 2016-11-18T15:12:26.000Z | 2022-03-24T01:22:08.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module holds util functions that are used in more than one test module.
from mobly import records
def validate_test_result(result):
    """Validate basic properties of a test result.

    Every record in each bucket of the test result must carry the result
    enum that corresponds to its bucket.

    Args:
        result: The `records.TestResult` object to validate.
    """
    expected_by_bucket = (
        (records.TestResultEnums.TEST_RESULT_PASS, result.passed),
        (records.TestResultEnums.TEST_RESULT_FAIL, result.failed),
        (records.TestResultEnums.TEST_RESULT_ERROR, result.error),
        (records.TestResultEnums.TEST_RESULT_SKIP, result.skipped),
    )
    for expected_enum, bucket in expected_by_bucket:
        for record in bucket:
            if record.result != expected_enum:
                raise AssertionError('Expected result %s, got %s.' %
                                     (expected_enum, record.result))
| 36.275 | 78 | 0.731909 |
from mobly import records
def validate_test_result(result):
buckets = [
(result.passed, records.TestResultEnums.TEST_RESULT_PASS),
(result.failed, records.TestResultEnums.TEST_RESULT_FAIL),
(result.error, records.TestResultEnums.TEST_RESULT_ERROR),
(result.skipped, records.TestResultEnums.TEST_RESULT_SKIP),
]
for bucket_list, expected_enum in buckets:
for record in bucket_list:
if record.result != expected_enum:
raise AssertionError('Expected result %s, got %s.' %
(expected_enum, record.result))
| true | true |
f70fad449b902120499a1dec1a4d6c495074a31f | 607 | py | Python | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | 1 | 2020-06-28T11:47:47.000Z | 2020-06-28T11:47:47.000Z | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""ResNet models for Keras.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
del _print_function
| 35.705882 | 82 | 0.84514 |
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
del _print_function
| true | true |
f70faf3f1a8e372280b0a707e88d882149db4909 | 3,167 | py | Python | batch_netmeta.py | gesiscss/Homophilic_Directed_ScaleFree_Networks | a0d27b44eaafdda46b6d3379859fa428398ef476 | [
"Apache-2.0"
] | 1 | 2022-03-23T15:34:38.000Z | 2022-03-23T15:34:38.000Z | batch_netmeta.py | gesiscss/Homophilic_Directed_ScaleFree_Networks | a0d27b44eaafdda46b6d3379859fa428398ef476 | [
"Apache-2.0"
] | 2 | 2019-02-02T13:54:53.000Z | 2019-02-04T09:15:51.000Z | batch_netmeta.py | gesiscss/Homophilic_Directed_ScaleFree_Networks | a0d27b44eaafdda46b6d3379859fa428398ef476 | [
"Apache-2.0"
] | null | null | null | ################################################################
# System's dependencies
################################################################
import os
import sys
import time
import argparse
################################################################
# Local dependencies
################################################################
from org.gesis.lib import io
from org.gesis.lib import graph
from org.gesis.lib import homophily
################################################################
# Constants
################################################################
DATASETS = ['aps','hate','blogs','wikipedia']
################################################################
# Main
################################################################
def run(datapath, dataset, steps, njobs, output):
    """Compute network/homophily metadata for *dataset* and write it to CSV.

    Args:
        datapath: folder containing the .gpickle network files.
        dataset: one of DATASETS.
        steps: granularity (e.g. 0.01, 0.05) used to search homophily values.
        njobs: number of parallel jobs.
        output: folder where <dataset>/network_metadata.csv is written.
    """
    if dataset not in DATASETS:
        raise Exception("dataset " + dataset +" does not exist.")
    print(dataset, steps, njobs)
    g = graph.get_graph(datapath, dataset)
    # N: nodes, fm: minority fraction, d: density, plo_*/pli_*: out/in power-law
    # exponents per class, E**: class-mixing edge counts, h**: homophily estimates
    # (see homophily.get_metadata for the exact semantics).
    N, fm, d, plo_M, plo_m, pli_M, pli_m, EMM, EMm, EmM, Emm, hMM, hmm, _N, _d, _mindiff = homophily.get_metadata(g, steps,
                                                                                          njobs=njobs, verbose=True, seed=None)
    print("N:{}".format(N))
    print("fm:{}".format(fm))
    print("d:{}".format(d))
    print("plo_M:{}".format(plo_M))
    print("plo_m:{}".format(plo_m))
    print("pli_M:{}".format(pli_M))
    print("pli_m:{}".format(pli_m))
    print("EMM:{}".format(EMM))
    print("EMm:{}".format(EMm))
    print("EmM:{}".format(EmM))
    print("Emm:{}".format(Emm))
    print("hMM:{}".format(hMM))
    print("hmm:{}".format(hmm))
    print("_N:{}".format(_N))
    print("_d:{}".format(_d))
    print("_mindiff:{}".format(_mindiff))
    ### Storing metadata info into .csv file
    t1 = "dataset,N,fm,d,plo_M,plo_m,pli_M,pli_m,EMM,EMm,EmM,Emm,hMM,hmm,_N,_d,_mindiff"
    t2 = ",".join([dataset, str(N), str(fm), str(d), str(plo_M), str(plo_m), str(pli_M), str(pli_m),
                   str(EMM), str(EMm), str(EmM), str(Emm), str(hMM), str(hmm), str(_N), str(_d), str(_mindiff)])
    path = os.path.join(output,dataset,"network_metadata.csv")
    io.save_text("{}\n{}".format(t1,t2), path)
################################################################
# Main
################################################################
if __name__ == "__main__":
    # Parse CLI arguments, run the computation, and report wall-clock time.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", help=",".join(DATASETS), type=str, required=True)
    parser.add_argument("--steps", help="decimals (eg. 0.01, 0.05) to compute homophily", type=float, required=True)
    parser.add_argument("--njobs", help="parallel jobs", type=int, default=1)
    parser.add_argument("--datapath", help="path/folder where the .gpickle files are.", type=str, required=True)
    parser.add_argument("--output", help="path/folder where to store csv file", type=str, default='.')
    args = parser.parse_args()
    start_time = time.time()
    run(args.datapath, args.dataset, args.steps, args.njobs, args.output)
    print("--- %s seconds ---" % (time.time() - start_time))
path = os.path.join(output,dataset,"network_metadata.csv")
io.save_text("{}\n{}".format(t1,t2), path)
################################################################
# Main
################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", help=",".join(DATASETS), type=str, required=True)
parser.add_argument("--steps", help="decimals (eg. 0.01, 0.05) to compute homophily", type=float, required=True)
parser.add_argument("--njobs", help="parallel jobs", type=int, default=1)
parser.add_argument("--datapath", help="path/folder where the .gpickle files are.", type=str, required=True)
parser.add_argument("--output", help="path/folder where to store csv file", type=str, default='.')
args = parser.parse_args()
start_time = time.time()
run(args.datapath, args.dataset, args.steps, args.njobs, args.output)
print("--- %s seconds ---" % (time.time() - start_time)) | true | true |
f70fb01bb8bfa089b4844ff0143d97c5770f6f22 | 2,502 | py | Python | scripts/rpc/__init__.py | kacperg/spdk | 4906323d47b1bf5290152e85b9a6fac1970cdfed | [
"BSD-3-Clause"
] | null | null | null | scripts/rpc/__init__.py | kacperg/spdk | 4906323d47b1bf5290152e85b9a6fac1970cdfed | [
"BSD-3-Clause"
] | null | null | null | scripts/rpc/__init__.py | kacperg/spdk | 4906323d47b1bf5290152e85b9a6fac1970cdfed | [
"BSD-3-Clause"
] | null | null | null | import json
import sys
from . import app
from . import bdev
from . import iscsi
from . import log
from . import lvol
from . import nbd
from . import net
from . import nvmf
from . import pmem
from . import subsystem
from . import vhost
def start_subsystem_init(client):
    """Tell the SPDK target to proceed with subsystem initialization."""
    return client.call('start_subsystem_init')
def get_rpc_methods(client, args):
    """Return the list of RPC methods exposed by the target.

    When ``args.current`` is truthy, only methods callable in the target's
    current state are requested.
    """
    params = {'current': args.current} if args.current else {}
    return client.call('get_rpc_methods', params)
def save_config(client, args):
    """Dump the full SPDK configuration as JSON.

    Queries each registered subsystem for its configuration and writes the
    combined result to ``args.filename`` (or stdout when it is None), with a
    trailing newline.  ``args.indent`` controls JSON indentation: a negative
    value means compact output, and None defaults to 2 only when printing to
    stdout.
    """
    config = {
        'subsystems': [
            {
                'subsystem': elem['subsystem'],
                'config': client.call('get_subsystem_config', {"name": elem['subsystem']}),
            }
            for elem in client.call('get_subsystems')
        ]
    }
    indent = args.indent
    if args.filename is None:
        # stdout: default to a readable indent of 2
        indent = 2 if indent is None else (None if indent < 0 else indent)
        json.dump(config, sys.stdout, indent=indent)
        sys.stdout.write('\n')
    else:
        if indent is None or indent < 0:
            indent = None
        with open(args.filename, 'w') as out:
            json.dump(config, out, indent=indent)
            out.write('\n')
def load_config(client, args):
    """Replay a JSON configuration (from ``args.filename`` or stdin) against the target.

    Loops: query the currently allowed RPC methods, issue every pending config
    entry whose method is allowed, and repeat until all subsystems are done.
    Raises if no progress can be made in a full pass.
    """
    if not args.filename or args.filename == '-':
        json_config = json.load(sys.stdin)
    else:
        with open(args.filename, 'r') as file:
            json_config = json.load(file)
    subsystems = json_config['subsystems']
    while subsystems:
        allowed_methods = client.call('get_rpc_methods', {'current': True})
        allowed_found = False
        for subsystem in list(subsystems):
            if not subsystem['config']:
                subsystems.remove(subsystem)
                continue
            config = subsystem['config']
            for elem in list(config):
                # skip entries whose method cannot run in the target's current state
                if not elem or 'method' not in elem or elem['method'] not in allowed_methods:
                    continue
                client.call(elem['method'], elem['params'])
                config.remove(elem)
                allowed_found = True
            if not config:
                subsystems.remove(subsystem)
        if 'start_subsystem_init' in allowed_methods:
            client.call('start_subsystem_init')
            allowed_found = True
        if subsystems and not allowed_found:
            # NOTE(review): JSONRPCException is not imported anywhere in this
            # chunk -- presumably provided by this package's client module; verify.
            raise JSONRPCException("Some config left but did not found any allowed method to execute")
| 26.903226 | 102 | 0.593525 | import json
import sys
from . import app
from . import bdev
from . import iscsi
from . import log
from . import lvol
from . import nbd
from . import net
from . import nvmf
from . import pmem
from . import subsystem
from . import vhost
def start_subsystem_init(client):
return client.call('start_subsystem_init')
def get_rpc_methods(client, args):
params = {}
if args.current:
params['current'] = args.current
return client.call('get_rpc_methods', params)
def save_config(client, args):
config = {
'subsystems': []
}
for elem in client.call('get_subsystems'):
cfg = {
'subsystem': elem['subsystem'],
'config': client.call('get_subsystem_config', {"name": elem['subsystem']})
}
config['subsystems'].append(cfg)
indent = args.indent
if args.filename is None:
if indent is None:
indent = 2
elif indent < 0:
indent = None
json.dump(config, sys.stdout, indent=indent)
sys.stdout.write('\n')
else:
if indent is None or indent < 0:
indent = None
with open(args.filename, 'w') as file:
json.dump(config, file, indent=indent)
file.write('\n')
def load_config(client, args):
if not args.filename or args.filename == '-':
json_config = json.load(sys.stdin)
else:
with open(args.filename, 'r') as file:
json_config = json.load(file)
subsystems = json_config['subsystems']
while subsystems:
allowed_methods = client.call('get_rpc_methods', {'current': True})
allowed_found = False
for subsystem in list(subsystems):
if not subsystem['config']:
subsystems.remove(subsystem)
continue
config = subsystem['config']
for elem in list(config):
if not elem or 'method' not in elem or elem['method'] not in allowed_methods:
continue
client.call(elem['method'], elem['params'])
config.remove(elem)
allowed_found = True
if not config:
subsystems.remove(subsystem)
if 'start_subsystem_init' in allowed_methods:
client.call('start_subsystem_init')
allowed_found = True
if subsystems and not allowed_found:
raise JSONRPCException("Some config left but did not found any allowed method to execute")
| true | true |
f70fb0261784177b0d3669369a858adf97f9b9a9 | 5,250 | py | Python | practices/week6/assignment_exercise_3.py | andreyyec/Texas_Tech_AI | e4e8e41c65b41a1a684f1f65d21cf5427abdb046 | [
"MIT"
] | null | null | null | practices/week6/assignment_exercise_3.py | andreyyec/Texas_Tech_AI | e4e8e41c65b41a1a684f1f65d21cf5427abdb046 | [
"MIT"
] | 5 | 2020-01-28T22:57:31.000Z | 2022-02-10T00:37:58.000Z | practices/week6/assignment_exercise_3.py | andreyyec/Texas_Tech_AI | e4e8e41c65b41a1a684f1f65d21cf5427abdb046 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import sklearn.metrics
import matplotlib.pyplot as plt
# Load the training and test data from the Pickle file
with open("../datasets/credit_card_default_dataset.pickle", "rb") as f:
train_data, train_labels, test_data, test_labels = pickle.load(f)
# Get some lengths
n_inputs = train_data.shape[1]
nsamples = train_data.shape[0]
# Training constants
n_nodes_l1 = 5
batch_size = 32
learning_rate = .001 # Initial rate for Adam
n_epochs = 1000
eval_step = 5
n_batches = int(np.ceil(nsamples / batch_size))
# Print the configuration
print("Batch size: {} Num batches: {} Num epochs: {} Learning rate: {}".format(batch_size, n_batches, n_epochs, learning_rate))
print("Num nodes in L1: {} Activation function: ELU".format(n_nodes_l1))
# TensorFlow constants
# Input vector placeholders. Length is unspecified.
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
Y = tf.placeholder(tf.float32, shape=(None, 1), name="Y")
# Hidden layer 1:
# Inputs: n_inputs
# Outputs: n_nodes_l1
# Activation: ELU
W_L1 = tf.Variable(tf.truncated_normal([n_inputs, n_nodes_l1], stddev=2/np.sqrt(n_inputs)))
b_L1 = tf.Variable(tf.zeros(n_nodes_l1))
Y_L1 = tf.nn.elu(tf.add(tf.matmul(X, W_L1), b_L1))
#Y_L1 = tf.nn.relu(tf.add(tf.matmul(X, W_L1), b_L1))
# Output layer:
# Inputs: n_nodes_l1
# Outputs: 1
# Activation: logistic
W_L2 = tf.Variable(tf.truncated_normal([n_nodes_l1, 1], stddev=1/np.sqrt(n_nodes_l1)))
b_L2 = tf.Variable(tf.zeros(1))
Y_L2_linear = tf.add(tf.matmul(Y_L1, W_L2), b_L2)
# Cost function, plus the sigmoid part of the prediction
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = Y_L2_linear, labels = Y))
# Optimize cost through gradient descent
#optimizer = tf.train.GradientDescentOptimizer(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
update_op = optimizer.minimize(cost)
# Prediction probability values
Y_pred_proba_calc = tf.nn.sigmoid(Y_L2_linear)
# Create TensorFlow session and initialize it
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Initialize lists to hold the history of metrics per epoch
trn_cost_hist = []
test_cost_hist = []
trn_auroc_hist = []
test_auroc_hist = []
epoch = 0
while epoch < n_epochs:
batch = 0
# Save a vector of cost values per batch
cost_vals = np.zeros(n_batches)
while batch < n_batches:
# Select the data for the next batch
dataidx = batch * batch_size
X_batch = train_data[dataidx:(dataidx+batch_size)]
Y_batch = train_labels[dataidx:(dataidx+batch_size)].values.reshape(-1,1)
feed_dict = {X: X_batch, Y: Y_batch}
# Run one iteration of the computation session to update coefficients
_, cost_vals[batch] = sess.run([update_op, cost], feed_dict=feed_dict)
batch += 1
# Evaluate and print the results so far
if (epoch % eval_step == 0):
# Compute the average cost for all mini-batches in this epoch
trn_cost_avg = np.mean(cost_vals)
# Compute the ROC AUC against the full training data
feed_dict = {X: train_data, Y: train_labels.values.reshape(-1,1)}
Y_pred_proba_train = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
train_auroc = sklearn.metrics.roc_auc_score(train_labels, Y_pred_proba_train)
# Compute the cost and ROC AUC against the test data
feed_dict = {X: test_data, Y: test_labels.values.reshape(-1,1)}
Y_pred_proba_test = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
test_cost = sess.run(cost, feed_dict=feed_dict)
test_auroc = sklearn.metrics.roc_auc_score(test_labels, Y_pred_proba_test)
print("Epoch: {:4d} trn_cost: {:.5f} test_cost: {:.5f} trn_auroc: {:.4f} test_auroc: {:.4f}".\
format(epoch, trn_cost_avg, test_cost, train_auroc, test_auroc))
# Save the metrics to the history
trn_cost_hist.append(trn_cost_avg)
test_cost_hist.append(test_cost)
trn_auroc_hist.append(train_auroc)
test_auroc_hist.append(test_auroc)
epoch += 1
# Print the best results (as if we had done early stopping)
epoch_hist = [i for i in range(0, n_epochs, eval_step)]
best_idx = test_auroc_hist.index(max(test_auroc_hist))
print("Max test ROC AUC: {:.4f} at epoch: {}".format(test_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_auroc_hist.index(max(trn_auroc_hist))
print("Max train ROC AUC: {:.4f} at epoch: {}".format(trn_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = test_cost_hist.index(min(test_cost_hist))
print("Min test cost: {:.5f} at epoch: {}".format(test_cost_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_cost_hist.index(min(trn_cost_hist))
print("Min train cost: {:.5f} at epoch: {}".format(trn_cost_hist[best_idx], epoch_hist[best_idx]))
# Plot the metrics history
plt.plot(epoch_hist, trn_cost_hist, "b")
plt.plot(epoch_hist, test_cost_hist, "r")
plt.xlabel("epoch")
plt.ylabel("cost")
plt.title("Cost vs. epoch")
plt.figure()
plt.plot(epoch_hist, trn_auroc_hist, "b")
plt.plot(epoch_hist, test_auroc_hist, "r")
plt.xlabel("epoch")
plt.ylabel("ROC AUC")
plt.title("ROC AUC vs. epoch")
plt.show()
| 35 | 127 | 0.719238 | import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import sklearn.metrics
import matplotlib.pyplot as plt
with open("../datasets/credit_card_default_dataset.pickle", "rb") as f:
train_data, train_labels, test_data, test_labels = pickle.load(f)
n_inputs = train_data.shape[1]
nsamples = train_data.shape[0]
n_nodes_l1 = 5
batch_size = 32
learning_rate = .001
n_epochs = 1000
eval_step = 5
n_batches = int(np.ceil(nsamples / batch_size))
print("Batch size: {} Num batches: {} Num epochs: {} Learning rate: {}".format(batch_size, n_batches, n_epochs, learning_rate))
print("Num nodes in L1: {} Activation function: ELU".format(n_nodes_l1))
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
Y = tf.placeholder(tf.float32, shape=(None, 1), name="Y")
W_L1 = tf.Variable(tf.truncated_normal([n_inputs, n_nodes_l1], stddev=2/np.sqrt(n_inputs)))
b_L1 = tf.Variable(tf.zeros(n_nodes_l1))
Y_L1 = tf.nn.elu(tf.add(tf.matmul(X, W_L1), b_L1))
W_L2 = tf.Variable(tf.truncated_normal([n_nodes_l1, 1], stddev=1/np.sqrt(n_nodes_l1)))
b_L2 = tf.Variable(tf.zeros(1))
Y_L2_linear = tf.add(tf.matmul(Y_L1, W_L2), b_L2)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = Y_L2_linear, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate)
update_op = optimizer.minimize(cost)
Y_pred_proba_calc = tf.nn.sigmoid(Y_L2_linear)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
trn_cost_hist = []
test_cost_hist = []
trn_auroc_hist = []
test_auroc_hist = []
epoch = 0
while epoch < n_epochs:
batch = 0
cost_vals = np.zeros(n_batches)
while batch < n_batches:
dataidx = batch * batch_size
X_batch = train_data[dataidx:(dataidx+batch_size)]
Y_batch = train_labels[dataidx:(dataidx+batch_size)].values.reshape(-1,1)
feed_dict = {X: X_batch, Y: Y_batch}
_, cost_vals[batch] = sess.run([update_op, cost], feed_dict=feed_dict)
batch += 1
if (epoch % eval_step == 0):
trn_cost_avg = np.mean(cost_vals)
feed_dict = {X: train_data, Y: train_labels.values.reshape(-1,1)}
Y_pred_proba_train = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
train_auroc = sklearn.metrics.roc_auc_score(train_labels, Y_pred_proba_train)
feed_dict = {X: test_data, Y: test_labels.values.reshape(-1,1)}
Y_pred_proba_test = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
test_cost = sess.run(cost, feed_dict=feed_dict)
test_auroc = sklearn.metrics.roc_auc_score(test_labels, Y_pred_proba_test)
print("Epoch: {:4d} trn_cost: {:.5f} test_cost: {:.5f} trn_auroc: {:.4f} test_auroc: {:.4f}".\
format(epoch, trn_cost_avg, test_cost, train_auroc, test_auroc))
trn_cost_hist.append(trn_cost_avg)
test_cost_hist.append(test_cost)
trn_auroc_hist.append(train_auroc)
test_auroc_hist.append(test_auroc)
epoch += 1
epoch_hist = [i for i in range(0, n_epochs, eval_step)]
best_idx = test_auroc_hist.index(max(test_auroc_hist))
print("Max test ROC AUC: {:.4f} at epoch: {}".format(test_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_auroc_hist.index(max(trn_auroc_hist))
print("Max train ROC AUC: {:.4f} at epoch: {}".format(trn_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = test_cost_hist.index(min(test_cost_hist))
print("Min test cost: {:.5f} at epoch: {}".format(test_cost_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_cost_hist.index(min(trn_cost_hist))
print("Min train cost: {:.5f} at epoch: {}".format(trn_cost_hist[best_idx], epoch_hist[best_idx]))
plt.plot(epoch_hist, trn_cost_hist, "b")
plt.plot(epoch_hist, test_cost_hist, "r")
plt.xlabel("epoch")
plt.ylabel("cost")
plt.title("Cost vs. epoch")
plt.figure()
plt.plot(epoch_hist, trn_auroc_hist, "b")
plt.plot(epoch_hist, test_auroc_hist, "r")
plt.xlabel("epoch")
plt.ylabel("ROC AUC")
plt.title("ROC AUC vs. epoch")
plt.show()
| true | true |
f70fb188e60fc3f2ee8fa90f4379d87b3fd93cae | 169 | py | Python | src/user/views/__init__.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/user/views/__init__.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/user/views/__init__.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | # flake8: noqa
from user.views.user_views import *
from user.views.gatekeeper_view import GatekeeperViewSet
from user.views.organization_view import OrganizationViewSet
| 33.8 | 60 | 0.857988 |
from user.views.user_views import *
from user.views.gatekeeper_view import GatekeeperViewSet
from user.views.organization_view import OrganizationViewSet
| true | true |
f70fb191003034cfa869b208b1b4a32aa36da6d7 | 4,198 | py | Python | python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | 1 | 2020-09-19T16:35:59.000Z | 2020-09-19T16:35:59.000Z | python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | null | null | null | import pytest
from dagster import DagsterInvalidConfigDefinitionError, Noneable, Selector, execute_solid, solid
def test_kitchen_sink():
    """Round-trip two full-coverage configs through a solid and check the
    solid sees exactly what was passed in."""

    @solid(
        config_schema={
            'str_field': str,
            'int_field': int,
            'list_int': [int],
            'list_list_int': [[int]],
            'dict_field': {'a_string': str},
            'list_dict_field': [{'an_int': int}],
            'selector_of_things': Selector(
                {'select_list_dict_field': [{'an_int': int}], 'select_int': int}
            ),
            # this is a good argument to use () instead of [] for type
            # parameterization in the config system
            'optional_list_of_optional_string': Noneable([Noneable(str)]),
        }
    )
    def kitchen_sink(context):
        return context.solid_config

    def echo_config(config):
        # Execute the solid with `config` and return whatever it echoed back.
        result = execute_solid(
            kitchen_sink, run_config={'solids': {'kitchen_sink': {'config': config}}},
        )
        return result.output_value()

    config_int_selector = {
        'str_field': 'kjf',
        'int_field': 2,
        'list_int': [3],
        'list_list_int': [[1], [2, 3]],
        'dict_field': {'a_string': 'kdjfkd'},
        'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
        'selector_of_things': {'select_int': 3},
        'optional_list_of_optional_string': ['foo', None],
    }
    assert echo_config(config_int_selector) == config_int_selector

    config_list_selector = {
        'str_field': 'kjf',
        'int_field': 2,
        'list_int': [3],
        'list_list_int': [[1], [2, 3]],
        'dict_field': {'a_string': 'kdjfkd'},
        'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
        'selector_of_things': {'select_list_dict_field': [{'an_int': 5}]},
        'optional_list_of_optional_string': None,
    }
    assert echo_config(config_list_selector) == config_list_selector
def test_bad_solid_config_argument():
    """A bare string is not a valid config declaration."""
    expected_prefix = (
        "Error defining config. Original value passed: 'dkjfkd'. 'dkjfkd' cannot be resolved."
    )
    with pytest.raises(DagsterInvalidConfigDefinitionError) as err:

        @solid(config='dkjfkd')
        def _bad_config(_):
            pass

    assert str(err.value).startswith(expected_prefix)
def test_bad_solid_config_argument_nested():
    """An invalid value nested in a dict is reported with its stack path."""
    expected_prefix = (
        "Error defining config. Original value passed: {'field': 'kdjkfjd'}. "
        "Error at stack path :field. 'kdjkfjd' cannot be resolved."
    )
    with pytest.raises(DagsterInvalidConfigDefinitionError) as err:

        @solid(config={'field': 'kdjkfjd'})
        def _bad_config(_):
            pass

    assert str(err.value).startswith(expected_prefix)
def test_bad_solid_config_argument_list_wrong_length():
    """A list type must be parameterized by exactly one element."""
    expected_prefix = (
        "Error defining config. Original value passed: {'bad_list': []}. "
        "Error at stack path :bad_list. [] cannot be resolved. "
        "Reason: List must be of length 1."
    )
    with pytest.raises(DagsterInvalidConfigDefinitionError) as err:

        @solid(config={'bad_list': []})
        def _bad_list_config(_):
            pass

    assert str(err.value).startswith(expected_prefix)
def test_bad_solid_config_argument_list_bad_item():
    """The single element of a list type must itself be a valid type."""
    expected_prefix = (
        "Error defining config. Original value passed: {'bad_list': ['kdjfkd']}. "
        "Error at stack path :bad_list. ['kdjfkd'] cannot be resolved. "
        "Reason: List have a single item and contain a valid type i.e. [int]. "
        "Got item 'kdjfkd'."
    )
    with pytest.raises(DagsterInvalidConfigDefinitionError) as err:

        @solid(config={'bad_list': ['kdjfkd']})
        def _bad_list_config(_):
            pass

    assert str(err.value).startswith(expected_prefix)
def test_bad_solid_config_argument_list_bad_nested_item():
    """Invalid values inside list-of-dict types report the full stack path."""
    expected_prefix = (
        "Error defining config. Original value passed: {'bad_nested_list': "
        "[{'bad_field': 'kjdkfd'}]}. Error at stack path "
        ":bad_nested_list:bad_field. 'kjdkfd' cannot be resolved."
    )
    with pytest.raises(DagsterInvalidConfigDefinitionError) as err:

        @solid(config={'bad_nested_list': [{'bad_field': 'kjdkfd'}]})
        def _bad_list_config(_):
            pass

    assert str(err.value).startswith(expected_prefix)
| 32.045802 | 97 | 0.610052 | import pytest
from dagster import DagsterInvalidConfigDefinitionError, Noneable, Selector, execute_solid, solid
def test_kitchen_sink():
@solid(
config_schema={
'str_field': str,
'int_field': int,
'list_int': [int],
'list_list_int': [[int]],
'dict_field': {'a_string': str},
'list_dict_field': [{'an_int': int}],
'selector_of_things': Selector(
{'select_list_dict_field': [{'an_int': int}], 'select_int': int}
),
'optional_list_of_optional_string': Noneable([Noneable(str)]),
}
)
def kitchen_sink(context):
return context.solid_config
solid_config_one = {
'str_field': 'kjf',
'int_field': 2,
'list_int': [3],
'list_list_int': [[1], [2, 3]],
'dict_field': {'a_string': 'kdjfkd'},
'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
'selector_of_things': {'select_int': 3},
'optional_list_of_optional_string': ['foo', None],
}
assert (
execute_solid(
kitchen_sink, run_config={'solids': {'kitchen_sink': {'config': solid_config_one}}},
).output_value()
== solid_config_one
)
solid_config_two = {
'str_field': 'kjf',
'int_field': 2,
'list_int': [3],
'list_list_int': [[1], [2, 3]],
'dict_field': {'a_string': 'kdjfkd'},
'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
'selector_of_things': {'select_list_dict_field': [{'an_int': 5}]},
'optional_list_of_optional_string': None,
}
assert (
execute_solid(
kitchen_sink, run_config={'solids': {'kitchen_sink': {'config': solid_config_two}}},
).output_value()
== solid_config_two
)
def test_bad_solid_config_argument():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config='dkjfkd')
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: 'dkjfkd'. 'dkjfkd' cannot be resolved."
)
def test_bad_solid_config_argument_nested():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'field': 'kdjkfjd'})
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'field': 'kdjkfjd'}. "
"Error at stack path :field. 'kdjkfjd' cannot be resolved."
)
def test_bad_solid_config_argument_list_wrong_length():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_list': []})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': []}. "
"Error at stack path :bad_list. [] cannot be resolved. "
"Reason: List must be of length 1."
)
def test_bad_solid_config_argument_list_bad_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_list': ['kdjfkd']})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': ['kdjfkd']}. "
"Error at stack path :bad_list. ['kdjfkd'] cannot be resolved. "
"Reason: List have a single item and contain a valid type i.e. [int]. "
"Got item 'kdjfkd'."
)
def test_bad_solid_config_argument_list_bad_nested_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_nested_list': [{'bad_field': 'kjdkfd'}]})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_nested_list': "
"[{'bad_field': 'kjdkfd'}]}. Error at stack path "
":bad_nested_list:bad_field. 'kjdkfd' cannot be resolved."
)
| true | true |
f70fb27cfab9dd0b2e6b5adae0010670d24a0187 | 1,579 | py | Python | test_Calculator/testing/test_cal_plus.py | XuXuClassMate/My_Test_PyProject | 5822455af47f5855d1db4c388c2c973c440a4d3f | [
"Apache-2.0"
] | null | null | null | test_Calculator/testing/test_cal_plus.py | XuXuClassMate/My_Test_PyProject | 5822455af47f5855d1db4c388c2c973c440a4d3f | [
"Apache-2.0"
] | null | null | null | test_Calculator/testing/test_cal_plus.py | XuXuClassMate/My_Test_PyProject | 5822455af47f5855d1db4c388c2c973c440a4d3f | [
"Apache-2.0"
] | null | null | null | """
1、case顺序:加-除-减-乘
2、fixture方法在case前打印【开始计算】,结束后打印【计算结束】
3、fixture方法存在在conftest.py,设置scope=module
4、控制case只执行顺序为:加-减-乘-除
5、结合allure生成本地测试报告
"""
import allure
import pytest
import yaml
from test_Calculator.src.calculator import Calculator
def get_data():
    """Load the parametrize fixtures from ./data.yml.

    Returns:
        list: ``[datas, ids]`` where ``datas`` maps each operation to its
        ``(a, b, result)`` cases and ``ids`` maps it to the case labels.
    """
    # Explicit UTF-8: the id labels in data.yml are written in Chinese and
    # must not depend on the platform default encoding (e.g. cp1252).
    with open('./data.yml', encoding='utf-8') as data_file:
        data = yaml.safe_load(data_file)
    return [data['datas'], data['ids']]
data = get_data()
get_cal = Calculator()
# NOTE(review): allure is imported at module level but these are plain pytest
# marks -- presumably @allure.feature / @allure.story were intended so the
# allure report is populated; confirm before relying on them.
@pytest.mark.feature("测试方法")
class Test_Calculator:
    """Data-driven Calculator tests; @pytest.mark.run (pytest-ordering)
    forces execution order add -> sub -> mul -> div."""
    @pytest.mark.story('加法测试')
    @pytest.mark.run(order=0)
    # "prints" fixture lives in conftest.py (scope=module) and logs
    # start/end of the calculation run.
    @pytest.mark.usefixtures("prints")
    @pytest.mark.parametrize("a, b, result", data[0]['data_add'], ids=data[1]['ids_add'])
    def test_add(self, a, b, result):
        # Addition cases loaded from data.yml.
        assert get_cal.add(a, b) == result
    @pytest.mark.story('除法测试')
    @pytest.mark.run(order=3)  # division runs last
    @pytest.mark.parametrize("a, b, result", data[0]['data_div'], ids=data[1]['ids_div'])
    def test_div(self, a, b, result):
        assert get_cal.div(a, b) == result
    @pytest.mark.story('减法测试')
    @pytest.mark.run(order=1)  # subtraction runs second
    @pytest.mark.parametrize("a, b, result", data[0]['data_sub'], ids=data[1]['ids_sub'])
    def test_sub(self, a, b, result):
        assert get_cal.sub(a, b) == result
    @pytest.mark.story('乘法测试')
    @pytest.mark.run(order=2)  # multiplication runs third
    @pytest.mark.parametrize("a, b, result", data[0]['data_mul'], ids=data[1]['ids_mul'])
    def test_mul(self, a, b, result):
        assert get_cal.mul(a, b) == result
if __name__ == '__main__':
    # pytest.main takes a *list* of CLI arguments; its second positional
    # parameter is `plugins`, so the old call
    # pytest.main('test_cal_plus.py', '-vs') passed '-vs' as a plugin object
    # instead of as an option.
    pytest.main(['-vs', 'test_cal_plus.py'])
| 27.701754 | 89 | 0.645345 | import allure
import pytest
import yaml
from test_Calculator.src.calculator import Calculator
def get_data():
with open('./data.yml') as data_x:
data = yaml.safe_load(data_x)
data_data = data['datas']
data_name = data['ids']
return [data_data, data_name]
data = get_data()
get_cal = Calculator()
@pytest.mark.feature("测试方法")
class Test_Calculator:
@pytest.mark.story('加法测试')
@pytest.mark.run(order=0)
@pytest.mark.usefixtures("prints")
@pytest.mark.parametrize("a, b, result", data[0]['data_add'], ids=data[1]['ids_add'])
def test_add(self, a, b, result):
assert get_cal.add(a, b) == result
@pytest.mark.story('除法测试')
@pytest.mark.run(order=3)
@pytest.mark.parametrize("a, b, result", data[0]['data_div'], ids=data[1]['ids_div'])
def test_div(self, a, b, result):
assert get_cal.div(a, b) == result
@pytest.mark.story('减法测试')
@pytest.mark.run(order=1)
@pytest.mark.parametrize("a, b, result", data[0]['data_sub'], ids=data[1]['ids_sub'])
def test_sub(self, a, b, result):
assert get_cal.sub(a, b) == result
@pytest.mark.story('乘法测试')
@pytest.mark.run(order=2)
@pytest.mark.parametrize("a, b, result", data[0]['data_mul'], ids=data[1]['ids_mul'])
def test_mul(self, a, b, result):
assert get_cal.mul(a, b) == result
if __name__ == '__main__':
pytest.main('test_cal_plus.py', '-vs')
| true | true |
f70fb2c5d6c94d72c11d58d67c3da8ca3e2648c3 | 2,641 | py | Python | siamesenetwork/siamesePreTrainedEmbeddings.py | pengfei99/openfood | 2b65af02ce34bf8193d357ef3661da749d2d9671 | [
"MIT"
] | 2 | 2021-09-13T14:46:24.000Z | 2021-09-13T14:46:35.000Z | siamesenetwork/siamesePreTrainedEmbeddings.py | pengfei99/openfood | 2b65af02ce34bf8193d357ef3661da749d2d9671 | [
"MIT"
] | null | null | null | siamesenetwork/siamesePreTrainedEmbeddings.py | pengfei99/openfood | 2b65af02ce34bf8193d357ef3661da749d2d9671 | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define the siamese network for one-shot learning,
for french short labels
02/06/2021
@author: milena-git, from jeremylhour courtesy
"""
import torch
import torch.nn as nn
def _createEmbeddingLayer(weights_matrix, non_trainable=False):
"""
_createEmbeddingLayer:
create a layer from pre-trained embeddings
@param weights_matrix (np.array):
@param non_trainable (bool):
"""
weights_matrix = torch.tensor(weights_matrix)
num_embeddings, embedding_dim = weights_matrix.size()
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({'weight': weights_matrix})
if non_trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
class SiamesePreTrainedQuadruplet(nn.Module):
    """Quadruplet siamese network on top of pre-trained embeddings.

    All four branches share the same weights: each input sequence of token
    ids is embedded, flattened and projected into a ``dim``-dimensional
    space by a small fully-connected head.
    """

    def __init__(self, weights_matrix, length, dim=100):
        """
        @param weights_matrix (torch.tensor): pre-trained embedding weights
        @param length (int): number of token ids per input sequence
        @param dim (int): dimension of the output embedding space
        """
        super(SiamesePreTrainedQuadruplet, self).__init__()
        self.dim = dim
        self.length = length
        # index 0 is reserved for padding; from_pretrained freezes by default
        self.embedding = nn.Embedding.from_pretrained(weights_matrix, padding_idx=0)
        self.fc1 = nn.Sequential(
            nn.Linear(self.length * weights_matrix.size()[1], 1000),
            nn.ReLU(inplace=True),
            nn.Linear(1000, 800),
            nn.Dropout(0.2),
            nn.Linear(800, 500),
            nn.Dropout(0.2),
            nn.Linear(500, self.dim),
        )

    def forward_once(self, x):
        """Embed one batch of token-id sequences and project it.

        @param x: LongTensor of shape (batch, length)
        """
        looked_up = self.embedding(x)
        # (batch, length, emb_dim) -> (batch, length * emb_dim)
        flat = looked_up.flatten(start_dim=1)
        return self.fc1(flat)

    def forward(self, anchor, positive, negative1, negative2):
        """Apply the shared branch to all four quadruplet members.

        Main forward used during training; wraps forward_once().
        """
        return (
            self.forward_once(anchor),
            self.forward_once(positive),
            self.forward_once(negative1),
            self.forward_once(negative2),
        )
if __name__ == '__main__':
pass
| 33.43038 | 105 | 0.658463 |
import torch
import torch.nn as nn
def _createEmbeddingLayer(weights_matrix, non_trainable=False):
weights_matrix = torch.tensor(weights_matrix)
num_embeddings, embedding_dim = weights_matrix.size()
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({'weight': weights_matrix})
if non_trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
class SiamesePreTrainedQuadruplet(nn.Module):
def __init__(self, weights_matrix, length, dim=100):
super(SiamesePreTrainedQuadruplet, self).__init__()
self.dim = dim
self.length = length
self.embedding = nn.Embedding.from_pretrained(weights_matrix, padding_idx=0)
self.fc1 = nn.Sequential(
nn.Linear(self.length * weights_matrix.size()[1], 1000),
nn.ReLU(inplace=True),
nn.Linear(1000, 800),
nn.Dropout(0.2),
nn.Linear(800, 500),
nn.Dropout(0.2),
nn.Linear(500, self.dim)
)
def forward_once(self, x):
embedded = self.embedding(x)
embedded = torch.reshape(embedded, (embedded.size()[0], embedded.size()[1] * embedded.size()[2]))
output = self.fc1(embedded)
return output
def forward(self, anchor, positive, negative1, negative2):
anchor_o, positive_o, negative1_o, negative2_o = self.forward_once(anchor), self.forward_once(
positive), self.forward_once(negative1), self.forward_once(negative2)
return anchor_o, positive_o, negative1_o, negative2_o
if __name__ == '__main__':
pass
| true | true |
f70fb3476d36e16a6599d538e8c7c982416ef57c | 2,165 | py | Python | test/PR_test/unit_test/op/numpyop/univariate/test_autocontrast.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | test/PR_test/unit_test/op/numpyop/univariate/test_autocontrast.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | test/PR_test/unit_test/op/numpyop/univariate/test_autocontrast.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
from fastestimator.op.numpyop.univariate import AutoContrast
class TestAutoContrast(unittest.TestCase):
    """Unit tests for the AutoContrast numpy op (single and multi input)."""

    @classmethod
    def setUpClass(cls):
        # Random uint8 "images" of shape (28, 28, 3).
        def make_img():
            return np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)

        cls.single_input = [make_img()]
        cls.single_output_shape = (28, 28, 3)
        cls.multi_input = [make_img(), make_img()]
        cls.multi_output_shape = (28, 28, 3)

    def test_single_input(self):
        op = AutoContrast(inputs='x', outputs='x')
        outputs = op.forward(data=self.single_input, state={})
        with self.subTest('Check output type'):
            self.assertEqual(type(outputs), list)
        with self.subTest('Check output image shape'):
            self.assertEqual(outputs[0].shape, self.single_output_shape)

    def test_multi_input(self):
        op = AutoContrast(inputs='x', outputs='x')
        outputs = op.forward(data=self.multi_input, state={})
        with self.subTest('Check output type'):
            self.assertEqual(type(outputs), list)
        with self.subTest('Check output list length'):
            self.assertEqual(len(outputs), 2)
        for image in outputs:
            with self.subTest('Check output image shape'):
                self.assertEqual(image.shape, self.multi_output_shape)
| 42.45098 | 89 | 0.655427 |
import unittest
import numpy as np
from fastestimator.op.numpyop.univariate import AutoContrast
class TestAutoContrast(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.single_input = [np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)]
cls.single_output_shape = (28, 28, 3)
cls.multi_input = [
np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8),
np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)
]
cls.multi_output_shape = (28, 28, 3)
def test_single_input(self):
autocontrast = AutoContrast(inputs='x', outputs='x')
output = autocontrast.forward(data=self.single_input, state={})
with self.subTest('Check output type'):
self.assertEqual(type(output), list)
with self.subTest('Check output image shape'):
self.assertEqual(output[0].shape, self.single_output_shape)
def test_multi_input(self):
autocontrast = AutoContrast(inputs='x', outputs='x')
output = autocontrast.forward(data=self.multi_input, state={})
with self.subTest('Check output type'):
self.assertEqual(type(output), list)
with self.subTest('Check output list length'):
self.assertEqual(len(output), 2)
for img_output in output:
with self.subTest('Check output image shape'):
self.assertEqual(img_output.shape, self.multi_output_shape)
| true | true |
f70fb44b8b726f2718970f214122633936106d39 | 3,774 | py | Python | pythonsdk/face/face_pb2_grpc.py | jjrobotcn/andy4 | 4a0cb57aa5f318a3099fbfe6198620555b3a45af | [
"MIT"
] | null | null | null | pythonsdk/face/face_pb2_grpc.py | jjrobotcn/andy4 | 4a0cb57aa5f318a3099fbfe6198620555b3a45af | [
"MIT"
] | null | null | null | pythonsdk/face/face_pb2_grpc.py | jjrobotcn/andy4 | 4a0cb57aa5f318a3099fbfe6198620555b3a45af | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import face_pb2 as face__pb2
class FaceServiceStub(object):
    """faceRecognition.FaceService -- client stub for the face service.

    NOTE: generated by the gRPC protoc plugin; regenerate from the .proto
    rather than hand-editing.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: compare two face images.
        self.Compare = channel.unary_unary(
                '/faceRecognition.FaceService/Compare',
                request_serializer=face__pb2.CompareRequest.SerializeToString,
                response_deserializer=face__pb2.CompareResponse.FromString,
                )
        # Unary-unary RPC: search a FaceSet for a similar face.
        self.Search = channel.unary_unary(
                '/faceRecognition.FaceService/Search',
                request_serializer=face__pb2.SearchRequest.SerializeToString,
                response_deserializer=face__pb2.SearchResponse.FromString,
                )
class FaceServiceServicer(object):
    """faceRecognition.FaceService -- server-side service interface.

    NOTE: generated by the gRPC protoc plugin; regenerate from the .proto
    rather than hand-editing. Subclass and override the methods below.
    """

    def Compare(self, request, context):
        """Compare two face images and return the confidence that they
        show the same person.
        Admin platform reference: http://10.10.10.2/face/compare
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Search(self, request, context):
        """Search the FaceSet for a similar face.
        Returns a FaceDetail plus a confidence score when a match exists.
        Admin platform reference: http://10.10.10.2/face/compare
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_FaceServiceServicer_to_server(servicer, server):
    """Register a FaceServiceServicer implementation with a grpc server.

    NOTE: generated by the gRPC protoc plugin; do not hand-edit.
    """
    # Map each RPC name to its handler plus the protobuf (de)serializers.
    rpc_method_handlers = {
            'Compare': grpc.unary_unary_rpc_method_handler(
                    servicer.Compare,
                    request_deserializer=face__pb2.CompareRequest.FromString,
                    response_serializer=face__pb2.CompareResponse.SerializeToString,
            ),
            'Search': grpc.unary_unary_rpc_method_handler(
                    servicer.Search,
                    request_deserializer=face__pb2.SearchRequest.FromString,
                    response_serializer=face__pb2.SearchResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'faceRecognition.FaceService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class FaceService(object):
    """faceRecognition.FaceService -- experimental one-shot RPC helpers.

    Part of the EXPERIMENTAL gRPC API: each static method opens a channel
    to `target` and issues a single RPC without building a stub first.
    NOTE: generated by the gRPC protoc plugin; do not hand-edit.
    """

    @staticmethod
    def Compare(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot Compare RPC: compare two face images."""
        return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Compare',
            face__pb2.CompareRequest.SerializeToString,
            face__pb2.CompareResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Search(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot Search RPC: look up a similar face in the FaceSet."""
        return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Search',
            face__pb2.SearchRequest.SerializeToString,
            face__pb2.SearchResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
import grpc
from . import face_pb2 as face__pb2
class FaceServiceStub(object):
def __init__(self, channel):
self.Compare = channel.unary_unary(
'/faceRecognition.FaceService/Compare',
request_serializer=face__pb2.CompareRequest.SerializeToString,
response_deserializer=face__pb2.CompareResponse.FromString,
)
self.Search = channel.unary_unary(
'/faceRecognition.FaceService/Search',
request_serializer=face__pb2.SearchRequest.SerializeToString,
response_deserializer=face__pb2.SearchResponse.FromString,
)
class FaceServiceServicer(object):
def Compare(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Search(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FaceServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Compare': grpc.unary_unary_rpc_method_handler(
servicer.Compare,
request_deserializer=face__pb2.CompareRequest.FromString,
response_serializer=face__pb2.CompareResponse.SerializeToString,
),
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=face__pb2.SearchRequest.FromString,
response_serializer=face__pb2.SearchResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'faceRecognition.FaceService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FaceService(object):
@staticmethod
def Compare(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Compare',
face__pb2.CompareRequest.SerializeToString,
face__pb2.CompareResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Search',
face__pb2.SearchRequest.SerializeToString,
face__pb2.SearchResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
| true | true |
f70fb47f91fd9a5618daafe068d611e8a5784530 | 1,876 | py | Python | setup.py | wbogen/cardio | b8826295b7e27168441e2595e9592aff77cf7722 | [
"Apache-2.0"
] | 250 | 2017-11-22T14:41:57.000Z | 2022-02-02T22:41:28.000Z | setup.py | supertime1/cardio | 58087b21295ebe18fb5a5dfbb68479b39ddb4971 | [
"Apache-2.0"
] | 34 | 2017-11-23T18:27:20.000Z | 2020-09-10T11:55:16.000Z | setup.py | supertime1/cardio | 58087b21295ebe18fb5a5dfbb68479b39ddb4971 | [
"Apache-2.0"
] | 85 | 2017-11-23T13:07:31.000Z | 2021-11-24T08:34:07.000Z | """
CardIO is a library that works with electrocardiograms.
Documentation - https://analysiscenter.github.io/cardio/
"""
from setuptools import setup, find_packages
import re

# Read the version string from cardio/__init__.py so that it is defined in
# exactly one place.
with open('cardio/__init__.py', 'r') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)

# Reuse the documentation landing page as the PyPI long description.
with open('docs/index.rst', 'r') as f:
    long_description = f.read()

setup(
    name='cardio',
    packages=find_packages(exclude=['tutorials', 'examples', 'docs']),
    version=version,
    url='https://github.com/analysiscenter/cardio',
    license='Apache License 2.0',
    author='Data Analysis Center team',
    author_email='cardio@analysiscenter.ru',
    description='A framework for deep research of electrocardiograms',
    long_description=long_description,
    zip_safe=False,
    platforms='any',
    # Core runtime dependencies.
    # NOTE(review): scikit-learn and wfdb are pinned exactly while everything
    # else uses lower bounds -- confirm the pins are intentional.
    install_requires=[
        'numpy>=1.13.1',
        'scipy>=0.19.1',
        'pandas>=0.21.1',
        'scikit-learn==0.19.1',
        'numba>=0.35.0',
        'pywavelets>=0.5.2',
        'matplotlib>=2.1.0',
        'dill>=0.2.7.1',
        'pydicom>=0.9.9',
        'pyedflib>=0.1.11',
        'wfdb==2.2.1',
        'pint>=0.8.1',
    ],
    # Optional model backends, installable as e.g. cardio[tensorflow].
    extras_require={
        'tensorflow': ['tensorflow>=1.4'],
        'tensorflow-gpu': ['tensorflow-gpu>=1.4'],
        'keras': ['keras>=2.0.0'],
        'hmmlearn': ['hmmlearn==0.2.0']
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering'
    ],
)
from setuptools import setup, find_packages
import re
with open('cardio/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
with open('docs/index.rst', 'r') as f:
long_description = f.read()
setup(
name='cardio',
packages=find_packages(exclude=['tutorials', 'examples', 'docs']),
version=version,
url='https://github.com/analysiscenter/cardio',
license='Apache License 2.0',
author='Data Analysis Center team',
author_email='cardio@analysiscenter.ru',
description='A framework for deep research of electrocardiograms',
long_description=long_description,
zip_safe=False,
platforms='any',
install_requires=[
'numpy>=1.13.1',
'scipy>=0.19.1',
'pandas>=0.21.1',
'scikit-learn==0.19.1',
'numba>=0.35.0',
'pywavelets>=0.5.2',
'matplotlib>=2.1.0',
'dill>=0.2.7.1',
'pydicom>=0.9.9',
'pyedflib>=0.1.11',
'wfdb==2.2.1',
'pint>=0.8.1',
],
extras_require={
'tensorflow': ['tensorflow>=1.4'],
'tensorflow-gpu': ['tensorflow-gpu>=1.4'],
'keras': ['keras>=2.0.0'],
'hmmlearn': ['hmmlearn==0.2.0']
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering'
],
)
| true | true |
f70fb4b707686268baecdf75a4c2e0bd818d3206 | 15,820 | py | Python | Password-Locker/run.py | HASSAN1A/Password-Locker | bea88438936fa9ffa174d6f9c7941046d713092b | [
"MIT"
] | 2 | 2021-05-19T12:58:21.000Z | 2021-05-28T14:03:02.000Z | Password-Locker/run.py | HASSAN1A/Password-Locker | bea88438936fa9ffa174d6f9c7941046d713092b | [
"MIT"
] | null | null | null | Password-Locker/run.py | HASSAN1A/Password-Locker | bea88438936fa9ffa174d6f9c7941046d713092b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.8
from account import Account
from credential import Credential
from termcolor import colored, cprint
import os
import time
import pickle
# Functions that implement the behaviours in account class.
def create_account(username, fname, lname, p_word):
    """Build and return a new Account from the given user details."""
    return Account(username, fname, lname, p_word)
def save_account(account):
    """Persist *account* by delegating to its own save method."""
    account.save_account()
def delete_account(account):
    """Remove *account* by delegating to its own delete method."""
    account.delete_account()
def check_account_exists(username):
    """Return True when an account with *username* is already registered."""
    return Account.account_exists(username)
def auth_user(username, password):
    """Return True when *username*/*password* match a stored account."""
    return Account.auth_user(username, password)
# Functions that implement the behaviours in credential class.
def create_credential(page, username, password):
    """Build and return a new Credential for *page*."""
    return Credential(page, username, password)
def save_credential(credential):
    """Persist *credential* by delegating to its own save method."""
    credential.save_credential()
def delete_credential(credential):
    """Remove *credential* by delegating to its own delete method."""
    credential.delete_credential()
def find_cred_by_pagename(pagename):
    """Return the stored credential whose page name equals *pagename*."""
    return Credential.find_by_pagename(pagename)
def copy_cred_pass(pagename):
    """Copy the password stored for *pagename* to the clipboard."""
    return Credential.copy_cred_password(pagename)
def check_credential_exists(pagename):
    """Return True when a credential is already stored for *pagename*."""
    return Credential.credential_exists(pagename)
def display_credentials():
    """Return every credential currently saved."""
    return Credential.display_credentials()
def generate_password(length):
    """Return a randomly generated password of *length* characters."""
    return Credential.generate_password(length)
def main():
    """Run the interactive password-locker console application."""
    login = False  # True once a user has authenticated successfully
    sign_name = ''  # Name of user currently logged in
    logged = True  # False terminates the outer session loop ('xx'/'ex')
def load_pickles():
try:
file_object = open('accounts.pydata', 'rb')
Account.accounts_list = pickle.load(file_object)
file_object.close()
print("\nLOADED PICKLES ACCOUNTS")
except:
print("\nCLDN'T LOAD PICKLES ACCOUNTS")
Account.accounts_list = []
try:
file_objectt = open('credentials.pydata', 'rb')
Credential.credentials_list = pickle.load(file_objectt)
file_object.close()
print("\nLOADED PICKLES CREDENTIALS")
except:
print("\nCLDN'T LOAD PICKLES CREDENTIALS")
Credential.credentials_list = []
def pickle_save():
try:
file_object = open('accounts.pydata', 'wb')
pickle.dump(Account.accounts_list, file_object)
file_object.close()
print("\nSAVED ACCOUNTS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T ACCOUNTS SAVE TO PICKLES.")
try:
file_objectt = open('credentials.pydata', 'wb')
pickle.dump(display_credentials(), file_objectt)
file_objectt.close()
print("\nSAVED CREDENTIALS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T CREDENTIALS SAVE TO PICKLES.")
def display_title():
os.system('clear')
'''
Function to display app title bar
'''
cprint("""
\n\t\t\t\t**********************************************
\t\t**************************************************************************
\t*******************************************************************************************
\n
\t\t\t\t
\t\t\t\t
\t\t\t\t |\ /|
\t\t\t\t | \ / |
\t\t\t\t | \/ |
\n\t\t\t\t*** WELCOME TO PASSWORD LOCKER ***
\n`\t\t\t******************************************************************
""", "magenta")
while logged:
display_title()
load_pickles()
while login == False:
cprint("""
Use the following short codes to manage your password locker account
'ln' - Login
'xx' - Close app
""", "blue")
s_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if s_code == 'ln':
acc_code = input(
colored('\tDo you have an account? Y/N >> ', 'cyan')).upper()
if acc_code == 'Y':
cprint(
'\tEnter your username and password to login >>>\n', 'pink')
login_user_name = input(
colored('\tEnter username >> ', 'cyan'))
login_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in...")
time.sleep(1.5)
if auth_user(login_user_name, login_password):
cprint('\n\t\tLOGIN SUCCESSFUL',
'green', attrs=['bold'])
sign_name = login_user_name
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY',
'red', attrs=['bold'])
elif acc_code == 'N':
cprint(
'\tEnter your username,firstname,lastname and password to register account >>>\n', 'blue')
reg_user_name = input(
colored('\tEnter username >> ', 'cyan'))
reg_f_name = input(
colored('\tEnter firstname >> ', 'cyan'))
reg_l_name = input(colored('\tEnter lastname >> ', 'cyan'))
reg_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tRegistering ...")
time.sleep(1.5)
if check_account_exists(reg_user_name):
cprint(
f"\n\t\tACCOUNT WITH, {reg_user_name.upper()} USERNAME ALREADY CREATED", "red", attrs=['bold'])
else:
new_acc = create_account(
reg_user_name, reg_f_name, reg_l_name, reg_password)
save_account(new_acc)
cprint(
"\n\t\tCONGRATULATIONS, YOUR ACCOUNT HAS BEEN CREATED", "green", attrs=['bold'])
cprint("\n\tSign into your new account", "blue")
sign_username = input(
colored('\n\tEnter username >> ', 'cyan'))
sign_password = input(
colored('\n\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in ...")
time.sleep(1.5)
if auth_user(sign_username, sign_password):
cprint("\n\t\tLOGIN SUCCESSFUL",
"green", attrs=['bold'])
sign_name = sign_username
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY USER',
'red', attrs=['bold'])
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
elif s_code == 'xx':
cprint(f"""\n\t\tTHANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
logged = False
break
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
while login == True:
time.sleep(1.5)
cprint(f"""
{sign_name.upper()}, WELCOME TO YOUR PASSWORD LOCKER:
Use the following commands to navigate the application:
'sc' >> Save existing page credentials
'cc' >> Create new page credentials
'dc' >> Display all credentials saved
'fc' >> Find credential saved by page name
'cp' >> Copy pagename credential password to clipboard
'dl' >> Delete page credential
'lgo' >> Log out
'ex' >> Close App
""", "blue")
app_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if app_code == 'sc':
cprint(
'\tEnter pagename,username and password to save credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tSaving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'cc':
cprint(
'\tEnter pagename,username and password to create and save new page credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
gen_pass_code = input(colored(
'\tWould you like to generate a random password? Y/N >> ', 'cyan')).upper()
pass_word = ''
if gen_pass_code == 'Y':
pass_len = int(input(colored(
'\tHow long would you like your password? Provide numbers only >> ', 'cyan')))
pass_word = generate_password(pass_len)
else:
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tCreating and Saving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'dc':
if len(display_credentials()) > 0:
cprint("\n\t\t"+sign_name.upper() +
", CREDENTIALS", "green", attrs=['bold'])
for credential in display_credentials():
cprint(f'''
-------------------------------------------------------
Page Name >>>> {credential.page_name.upper()}
Page Username >>>> {credential.user_name}
Page Password >>>> {credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint("\n\t\t"+sign_name.upper() +
",HAS NO CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'fc':
search_page = input(
colored('\n\tEnter page name to search credentials >> ', 'cyan')).lower()
print("\n\t\tLoading ...")
time.sleep(1.5)
if check_credential_exists(search_page):
found_credential = find_cred_by_pagename(search_page)
cprint(f'''
-------------------------------------------------------
Page Name >>>> {found_credential.page_name.upper()}
Page Username >>>> {found_credential.user_name}
Page Password >>>> {found_credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'cp':
search_page = input(colored(
'\n\tEnter page name to copy password to clipboard >> ', 'cyan')).lower()
print("\n\t\tSearching ...")
time.sleep(1.5)
if check_credential_exists(search_page):
copy_cred_pass(search_page)
cprint("\n\t\t"+search_page.upper() +
", PASSWORD COPIED TO CLIPBOARD", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'dl':
del_page = input(
colored('\n\tEnter page name you want to delete >> ', 'cyan')).lower()
print("\n\t\tDeleting ...")
time.sleep(1.5)
if check_credential_exists(del_page):
found_page = find_cred_by_pagename(del_page)
found_page.delete_credential()
cprint("\n\t\t"+del_page.upper() +
", CREDENTIALS DELETED", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{del_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'lgo':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tLogin out >>>>>
""", "green", attrs=['bold'])
time.sleep(1.5)
login = False
elif app_code == 'ex':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
login = False
logged = False
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
# Start the interactive app only when executed as a script.
if __name__ == '__main__':
    main()
| 38.77451 | 123 | 0.468078 |
from account import Account
from credential import Credential
from termcolor import colored, cprint
import os
import time
import pickle
def create_account(username, fname, lname, p_word):
new_account = Account(username, fname, lname, p_word)
return new_account
def save_account(account):
account.save_account()
def delete_account(account):
account.delete_account()
def check_account_exists(username):
return Account.account_exists(username)
def auth_user(username, password):
return Account.auth_user(username, password)
def create_credential(page, username, password):
new_credential = Credential(page, username, password)
return new_credential
def save_credential(credential):
credential.save_credential()
def delete_credential(credential):
credential.delete_credential()
def find_cred_by_pagename(pagename):
return Credential.find_by_pagename(pagename)
def copy_cred_pass(pagename):
return Credential.copy_cred_password(pagename)
def check_credential_exists(pagename):
return Credential.credential_exists(pagename)
def display_credentials():
return Credential.display_credentials()
def generate_password(length):
return Credential.generate_password(length)
def main():
login = False
sign_name = ''
logged = True
def load_pickles():
try:
file_object = open('accounts.pydata', 'rb')
Account.accounts_list = pickle.load(file_object)
file_object.close()
print("\nLOADED PICKLES ACCOUNTS")
except:
print("\nCLDN'T LOAD PICKLES ACCOUNTS")
Account.accounts_list = []
try:
file_objectt = open('credentials.pydata', 'rb')
Credential.credentials_list = pickle.load(file_objectt)
file_object.close()
print("\nLOADED PICKLES CREDENTIALS")
except:
print("\nCLDN'T LOAD PICKLES CREDENTIALS")
Credential.credentials_list = []
def pickle_save():
try:
file_object = open('accounts.pydata', 'wb')
pickle.dump(Account.accounts_list, file_object)
file_object.close()
print("\nSAVED ACCOUNTS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T ACCOUNTS SAVE TO PICKLES.")
try:
file_objectt = open('credentials.pydata', 'wb')
pickle.dump(display_credentials(), file_objectt)
file_objectt.close()
print("\nSAVED CREDENTIALS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T CREDENTIALS SAVE TO PICKLES.")
def display_title():
os.system('clear')
cprint("""
\n\t\t\t\t**********************************************
\t\t**************************************************************************
\t*******************************************************************************************
\n
\t\t\t\t
\t\t\t\t
\t\t\t\t |\ /|
\t\t\t\t | \ / |
\t\t\t\t | \/ |
\n\t\t\t\t*** WELCOME TO PASSWORD LOCKER ***
\n`\t\t\t******************************************************************
""", "magenta")
while logged:
display_title()
load_pickles()
while login == False:
cprint("""
Use the following short codes to manage your password locker account
'ln' - Login
'xx' - Close app
""", "blue")
s_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if s_code == 'ln':
acc_code = input(
colored('\tDo you have an account? Y/N >> ', 'cyan')).upper()
if acc_code == 'Y':
cprint(
'\tEnter your username and password to login >>>\n', 'pink')
login_user_name = input(
colored('\tEnter username >> ', 'cyan'))
login_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in...")
time.sleep(1.5)
if auth_user(login_user_name, login_password):
cprint('\n\t\tLOGIN SUCCESSFUL',
'green', attrs=['bold'])
sign_name = login_user_name
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY',
'red', attrs=['bold'])
elif acc_code == 'N':
cprint(
'\tEnter your username,firstname,lastname and password to register account >>>\n', 'blue')
reg_user_name = input(
colored('\tEnter username >> ', 'cyan'))
reg_f_name = input(
colored('\tEnter firstname >> ', 'cyan'))
reg_l_name = input(colored('\tEnter lastname >> ', 'cyan'))
reg_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tRegistering ...")
time.sleep(1.5)
if check_account_exists(reg_user_name):
cprint(
f"\n\t\tACCOUNT WITH, {reg_user_name.upper()} USERNAME ALREADY CREATED", "red", attrs=['bold'])
else:
new_acc = create_account(
reg_user_name, reg_f_name, reg_l_name, reg_password)
save_account(new_acc)
cprint(
"\n\t\tCONGRATULATIONS, YOUR ACCOUNT HAS BEEN CREATED", "green", attrs=['bold'])
cprint("\n\tSign into your new account", "blue")
sign_username = input(
colored('\n\tEnter username >> ', 'cyan'))
sign_password = input(
colored('\n\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in ...")
time.sleep(1.5)
if auth_user(sign_username, sign_password):
cprint("\n\t\tLOGIN SUCCESSFUL",
"green", attrs=['bold'])
sign_name = sign_username
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY USER',
'red', attrs=['bold'])
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
elif s_code == 'xx':
cprint(f"""\n\t\tTHANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
logged = False
break
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
while login == True:
time.sleep(1.5)
cprint(f"""
{sign_name.upper()}, WELCOME TO YOUR PASSWORD LOCKER:
Use the following commands to navigate the application:
'sc' >> Save existing page credentials
'cc' >> Create new page credentials
'dc' >> Display all credentials saved
'fc' >> Find credential saved by page name
'cp' >> Copy pagename credential password to clipboard
'dl' >> Delete page credential
'lgo' >> Log out
'ex' >> Close App
""", "blue")
app_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if app_code == 'sc':
cprint(
'\tEnter pagename,username and password to save credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tSaving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'cc':
cprint(
'\tEnter pagename,username and password to create and save new page credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
gen_pass_code = input(colored(
'\tWould you like to generate a random password? Y/N >> ', 'cyan')).upper()
pass_word = ''
if gen_pass_code == 'Y':
pass_len = int(input(colored(
'\tHow long would you like your password? Provide numbers only >> ', 'cyan')))
pass_word = generate_password(pass_len)
else:
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tCreating and Saving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'dc':
if len(display_credentials()) > 0:
cprint("\n\t\t"+sign_name.upper() +
", CREDENTIALS", "green", attrs=['bold'])
for credential in display_credentials():
cprint(f'''
-------------------------------------------------------
Page Name >>>> {credential.page_name.upper()}
Page Username >>>> {credential.user_name}
Page Password >>>> {credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint("\n\t\t"+sign_name.upper() +
",HAS NO CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'fc':
search_page = input(
colored('\n\tEnter page name to search credentials >> ', 'cyan')).lower()
print("\n\t\tLoading ...")
time.sleep(1.5)
if check_credential_exists(search_page):
found_credential = find_cred_by_pagename(search_page)
cprint(f'''
-------------------------------------------------------
Page Name >>>> {found_credential.page_name.upper()}
Page Username >>>> {found_credential.user_name}
Page Password >>>> {found_credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'cp':
search_page = input(colored(
'\n\tEnter page name to copy password to clipboard >> ', 'cyan')).lower()
print("\n\t\tSearching ...")
time.sleep(1.5)
if check_credential_exists(search_page):
copy_cred_pass(search_page)
cprint("\n\t\t"+search_page.upper() +
", PASSWORD COPIED TO CLIPBOARD", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'dl':
del_page = input(
colored('\n\tEnter page name you want to delete >> ', 'cyan')).lower()
print("\n\t\tDeleting ...")
time.sleep(1.5)
if check_credential_exists(del_page):
found_page = find_cred_by_pagename(del_page)
found_page.delete_credential()
cprint("\n\t\t"+del_page.upper() +
", CREDENTIALS DELETED", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{del_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'lgo':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tLogin out >>>>>
""", "green", attrs=['bold'])
time.sleep(1.5)
login = False
elif app_code == 'ex':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
login = False
logged = False
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
if __name__ == '__main__':
main()
| true | true |
f70fb5030be554ad24d7c738e890f3d047427e38 | 4,209 | py | Python | config/jobs/kubernetes/kops/build-pipeline.py | celestehorgan/test-infra | 3a4d5a94f214381ecca8146aef354bba29b0ac67 | [
"Apache-2.0"
] | null | null | null | config/jobs/kubernetes/kops/build-pipeline.py | celestehorgan/test-infra | 3a4d5a94f214381ecca8146aef354bba29b0ac67 | [
"Apache-2.0"
] | null | null | null | config/jobs/kubernetes/kops/build-pipeline.py | celestehorgan/test-infra | 3a4d5a94f214381ecca8146aef354bba29b0ac67 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
template = """
# Verify the latest-ci version from the {{branch}} branch of kops
# Runs a small subset of the e2e tests.
# Publishes the version to latest-ci-updown-green on success.
- interval: 60m
name: {{name}}
decorate: true
decoration_config:
timeout: 45m
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
spec:
containers:
- image: {{e2e_image}}
command:
- runner.sh
- kubetest
args:
# Generic e2e test args
- --up
- --test
- --down
- --dump=$(ARTIFACTS)
- --timeout=45m
- --gcp-service-account=$(E2E_GOOGLE_APPLICATION_CREDENTIALS)
# kops-specific test args
- --deployment=kops
- --provider=aws
- --cluster={{name}}.test-cncf-aws.k8s.io
- --kops-ssh-user={{ssh_user}}
- --kops-nodes=4
- --extract={{extract}}
- --kops-state=s3://k8s-kops-prow/
- --kops-ssh-key=$(AWS_SSH_PRIVATE_KEY_FILE)
- --kops-ssh-public-key=$(AWS_SSH_PUBLIC_KEY_FILE)
- --kops-publish=gs://k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci-updown-green.txt
- --kops-version=https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci.txt
#- --kops-kubernetes-version should be inferred by kubetest from --extract
#- --kops-zone should be randomized by kubetest
# Specific test args
- --test_args=--ginkgo.focus=\\[k8s.io\\]\\sNetworking.*\\[Conformance\\] --ginkgo.skip=\\[Slow\\]|\\[Serial\\]
- --ginkgo-parallel
annotations:
testgrid-dashboards: sig-cluster-lifecycle-kops, google-aws, kops-misc, kops-k8s-{{k8s_version}}
testgrid-tab-name: {{tab}}
"""
def build_tests(branch, k8s_version, ssh_user):
    """Print one kops pipeline periodic job for *branch* on stdout.

    Fills the module-level ``template`` and prefixes the result with a
    JSON comment describing the scenario.
    """
    def expand(s):
        # Substitute only the values we actually have, so an absent value
        # leaves its {placeholder} untouched instead of raising KeyError.
        subs = {}
        if k8s_version:
            subs['k8s_version'] = k8s_version
        if branch:
            subs['branch'] = branch
        return s.format(**subs)

    if branch == 'master':
        extract = "release/latest-1.19"
        e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.19"
    else:
        extract = expand("release/stable-{k8s_version}")
        # Hack to stop the autobumper getting confused
        e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.18"
        e2e_image = e2e_image[:-4] + k8s_version

    tab = expand('kops-pipeline-updown-{branch}')
    # Names must be valid pod and DNS names, so strip the dots.
    name = expand('e2e-kops-pipeline-updown-kops{branch}').replace('.', '')

    branch_label = "master" if branch == 'master' else "release-" + branch
    # Fill every template placeholder; the markers are distinct and the
    # values contain no markers, so the substitution order is irrelevant.
    replacements = {
        '{{extract}}': extract,
        '{{e2e_image}}': e2e_image,
        '{{k8s_version}}': k8s_version,
        '{{name}}': name,
        '{{ssh_user}}': ssh_user,
        '{{tab}}': tab,
        '{{branch}}': branch_label,
    }
    y = template
    for placeholder, value in replacements.items():
        y = y.replace(placeholder, value)

    spec = {
        'branch': branch,
        'k8s_version': k8s_version,
    }
    print("")
    print("# " + json.dumps(spec, sort_keys=True))
    print(y.strip())
# kops release branches for which pipeline jobs are generated.
branches = [
    "master",
    "1.16",
    "1.17",
    "1.18",
]
def generate():
    """Print the complete generated periodics config file on stdout."""
    print("# Test scenarios generated by build-pipeline.py (do not manually edit)")
    print("periodics:")
    for branch in branches:
        # master builds against the upcoming release; others track themselves.
        if branch == "master":
            k8s_version = "1.19"
        else:
            k8s_version = branch
        # Older Debian-based images log in as 'admin'; newer ones as 'ubuntu'.
        if branch in ("1.16", "1.17"):
            ssh_user = "admin"
        else:
            ssh_user = "ubuntu"
        build_tests(branch=branch, k8s_version=k8s_version, ssh_user=ssh_user)


generate()
| 32.376923 | 117 | 0.628415 |
import json
template = """
# Verify the latest-ci version from the {{branch}} branch of kops
# Runs a small subset of the e2e tests.
# Publishes the version to latest-ci-updown-green on success.
- interval: 60m
name: {{name}}
decorate: true
decoration_config:
timeout: 45m
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
spec:
containers:
- image: {{e2e_image}}
command:
- runner.sh
- kubetest
args:
# Generic e2e test args
- --up
- --test
- --down
- --dump=$(ARTIFACTS)
- --timeout=45m
- --gcp-service-account=$(E2E_GOOGLE_APPLICATION_CREDENTIALS)
# kops-specific test args
- --deployment=kops
- --provider=aws
- --cluster={{name}}.test-cncf-aws.k8s.io
- --kops-ssh-user={{ssh_user}}
- --kops-nodes=4
- --extract={{extract}}
- --kops-state=s3://k8s-kops-prow/
- --kops-ssh-key=$(AWS_SSH_PRIVATE_KEY_FILE)
- --kops-ssh-public-key=$(AWS_SSH_PUBLIC_KEY_FILE)
- --kops-publish=gs://k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci-updown-green.txt
- --kops-version=https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci.txt
#- --kops-kubernetes-version should be inferred by kubetest from --extract
#- --kops-zone should be randomized by kubetest
# Specific test args
- --test_args=--ginkgo.focus=\\[k8s.io\\]\\sNetworking.*\\[Conformance\\] --ginkgo.skip=\\[Slow\\]|\\[Serial\\]
- --ginkgo-parallel
annotations:
testgrid-dashboards: sig-cluster-lifecycle-kops, google-aws, kops-misc, kops-k8s-{{k8s_version}}
testgrid-tab-name: {{tab}}
"""
def build_tests(branch, k8s_version, ssh_user):
def expand(s):
subs = {}
if k8s_version:
subs['k8s_version'] = k8s_version
if branch:
subs['branch'] = branch
return s.format(**subs)
if branch == 'master':
extract = "release/latest-1.19"
e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.19"
else:
extract = expand("release/stable-{k8s_version}")
e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.18"
e2e_image = e2e_image[:-4] + k8s_version
tab = expand('kops-pipeline-updown-{branch}')
name = expand('e2e-kops-pipeline-updown-kops{branch}')
name = name.replace('.', '')
y = template
y = y.replace('{{extract}}', extract)
y = y.replace('{{e2e_image}}', e2e_image)
y = y.replace('{{k8s_version}}', k8s_version)
y = y.replace('{{name}}', name)
y = y.replace('{{ssh_user}}', ssh_user)
y = y.replace('{{tab}}', tab)
if branch == 'master':
y = y.replace('{{branch}}', "master")
else:
y = y.replace('{{branch}}', "release-" + branch)
spec = {
'branch': branch,
'k8s_version': k8s_version,
}
jsonspec = json.dumps(spec, sort_keys=True)
print("")
print("# " + jsonspec)
print(y.strip())
branches = [
"master",
"1.16",
"1.17",
"1.18",
]
def generate():
print("# Test scenarios generated by build-pipeline.py (do not manually edit)")
print("periodics:")
for branch in branches:
k8s_version = "1.19" if branch == "master" else branch
ssh_user = "admin" if branch in ("1.16", "1.17") else "ubuntu"
build_tests(branch=branch, k8s_version=k8s_version, ssh_user=ssh_user)
generate()
| true | true |
f70fb6a161b5ea492c9c47948d6ae0810e774828 | 828 | py | Python | src/utils/api/permissions.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 18 | 2016-02-23T15:34:58.000Z | 2022-02-28T08:15:30.000Z | src/utils/api/permissions.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 66 | 2016-03-15T19:59:09.000Z | 2022-03-11T23:25:41.000Z | src/utils/api/permissions.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 7 | 2016-03-24T09:13:07.000Z | 2018-09-16T17:04:50.000Z | from rest_framework import permissions
class IsAuthenticated(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsAuthenticatedOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS
or request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.is_owner(request.user)
| 27.6 | 60 | 0.678744 | from rest_framework import permissions
class IsAuthenticated(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsAuthenticatedOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS
or request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.is_owner(request.user)
| true | true |
f70fb71ce3042a4cc28deea40be606cfe703d92d | 11,029 | py | Python | ncappzoo/tensorflow/topcoder_andresduque/supporting/inferences.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | 1 | 2018-11-23T01:48:59.000Z | 2018-11-23T01:48:59.000Z | ncappzoo/tensorflow/topcoder_andresduque/supporting/inferences.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | null | null | null | ncappzoo/tensorflow/topcoder_andresduque/supporting/inferences.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | 1 | 2020-10-01T15:38:04.000Z | 2020-10-01T15:38:04.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#~ The MIT License (MIT)
#~ Copyright 2018 ©klo86min
#~ Permission is hereby granted, free of charge, to any person obtaining a copy
#~ of this software and associated documentation files (the "Software"), to deal
#~ in the Software without restriction, including without limitation the rights
#~ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#~ copies of the Software, and to permit persons to whom the Software is
#~ furnished to do so, subject to the following conditions:
#~ The above copyright notice and this permission notice shall be included in
#~ all copies or substantial portions of the Software.
#~ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#~ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#~ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#~ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#~ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#~ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#~ SOFTWARE.
import argparse
import csv
import cv2
import mvnc.mvncapi as mvnc
import numpy as np
import os.path
# image settings
IMAGE_DIM = 299
###############################################################################
#
# Modified code from https://github.com/ashwinvijayakumar/ncappzoo/apps/
# rapid-image-classifier/rapid-image-classifier.py
# also under the MIT License
#
###############################################################################
# ---- Step 1: Open the enumerated device and get a handle to it -------------
def open_ncs_device(verbose=False):
if verbose:
mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
# Look for enumerated NCS device(s); quit program if none found.
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( 'No devices found' )
quit()
# Get a handle to the first enumerated device and open it
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
# ---- Step 2: Load a graph file onto the NCS device -------------------------
def load_graph( device, graph_file):
# Read the graph file into a buffer
with open( graph_file, mode='rb' ) as f:
blob = f.read()
# Load the graph buffer into the NCS
graph = device.AllocateGraph( blob )
return graph
# ---- Step 5: Unload the graph and close the device -------------------------
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
##################### End of ncappzoo code ################################
class MovidiusImage(object):
"""Image metadata and loader for Movidius NCS
Args:
name (str): image reference name as used in CSV files
path (str): image path
class_index (int): 1-based class label index
Attributes:
top_k (list): list of predicted (class_index, proba)
inference_time (float): computation time in ms
"""
def __init__(self, name, path, class_index = None):
self.name = name
self.path = path
self.class_index = class_index
self.top_k = None
self.inference_time = None
def load_BGR(self, dim, dtype=np.float16):
"""Return image data in BGR order
Args:
dim (tuple): image dimensions
dtype (numpy.dtype): new type for the BGR blob
Returns:
numpy.ndarray: the transformed BGR blob
"""
mean = 128
std = 1/128
img = cv2.imread(self.path).astype(np.float32)
dx,dy,dz= img.shape
delta=float(abs(dy-dx))
if dx > dy: #crop the x dimension
img=img[int(0.5*delta):dx-int(0.5*delta),0:dy]
else:
img=img[0:dx,int(0.5*delta):dy-int(0.5*delta)]
img = cv2.resize(img, (dim, dim))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
for i in range(3):
img[:,:,i] = (img[:,:,i] - mean) * std
img = img.astype(dtype)
return img
def save_top_k(self, predictions, labels, k=5):
"""Save the top_k predicted probabilities
Args:
predictions (numpy.ndarray): the probabilities for each class
k (int): Number of top_k probas
"""
order_k = predictions.argsort()[::-1][:k]
# class_index is 1-based
self.top_k = [(labels[pos], np.float(predictions[pos]))
for pos in order_k]
def result_string(self):
""" Return image results with the following fields:
[name, top1, proba1, ... top5, proba5, time]
Returns:
str: formatted CSV string
"""
res = [ self.name, ]
for k, prob in self.top_k:
res += [k, prob]
res += [self.inference_time]
pattern = "%s," + "%d,%.9f," * len(self.top_k) + "%.9f"
return pattern % tuple(res)
def init_images(data_dir, images_file):
"""Parse image_file CSV and create one MovidiusImage per row.
Args:
data_dir (str): path of the folder containing images
image_file (str): CSV file (one image path per row)
Returns:
list: list of MovidiusImage instances
"""
images_dir = {}
images = []
for file in sorted(os.listdir(data_dir)):
if file.endswith(".jpg"):
image = MovidiusImage(file, os.path.realpath(data_dir) + "/" + "/" + file, -1)
images_dir[file] = image
images.append(image)
if os.path.isfile(images_file):
images = []
with open(images_file, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
# skip header
next(reader)
for row_pos, row in enumerate(reader):
name = row[0]
truth = int(row[1])
img = images_dir[name]
img.class_index = truth
images.append(img)
return images
def write_inferences_csv(output_path, images):
""" For each image, retrieve and write results.
Args:
output_path (str): path for the CSV output
images (list): list of processed MovidiusImage instances
"""
with open(output_path, 'w') as output_file:
for image in images:
output_file.write(image.result_string() + '\n')
def score_inferences(images, min_proba = 1e-15, mult = 100, n_classes=200,
log_loss_max=15.0, time_limit=1000.0):
""" Compute the logLoss and reference computation time
Args:
images (list): list of processed MovidiusImage instances
min_proba (float): minimum probability to be used in logLoss
mult (int): number of images used for the reference time
n_classes (int): total number of classes
log_loss_limit (float): minimum log_loss requirement
time_limit (float): maximum time per image (in ms)
Returns:
tuple: LogLoss and reference_time float values
"""
min_proba = np.float(min_proba)
max_proba = 1.0 - min_proba
n_images = len(images)
probas = np.zeros(n_images, dtype=np.float)
image_time = 0.0
top_1_accuracy = 0.0
top_k_accuracy = 0.0
for i, image in enumerate(images):
class_probas = dict(image.top_k)
if image.class_index == image.top_k[0][0]:
top_1_accuracy += 1.0
if image.class_index in class_probas:
top_k_accuracy += 1.0
probas[i] = class_probas[image.class_index]
if probas[i] > 0:
sum_probas = sum(class_probas.values())
probas[i] /= sum_probas
probas[i] = max(min_proba, min(max_proba, probas[i]))
image_time += image.inference_time
log_loss = np.mean(-np.log(probas))
top_1_accuracy /= n_images
top_k_accuracy /= n_images
image_time /= n_images
t = mult * image_time
print("top_1_accuracy = %.9f" % top_1_accuracy)
print("top_k_accuracy = %.9f" % top_k_accuracy )
print("log_loss = %.9f" % log_loss)
print("image_time = %.9f" % image_time)
if image_time > time_limit or log_loss > log_loss_max:
score = 0.0
else:
t_max = mult * time_limit
score = 1e6 * (1.0 - log_loss * np.log(t) / (log_loss_max * np.log(t_max)))
print("score = %.2f" % score)
return score
def main(args):
parser = argparse.ArgumentParser(description='TopCoder Movidius MM')
parser.add_argument(
"-images-dir",
dest="images_dir",
help="""Folder containing images to classify"""
)
parser.add_argument(
"-output-file",
dest="output_file",
default="",
help="""Output CSV file to save inference results"""
)
parser.add_argument(
"-graph-file",
dest="graph_file",
default="",
help="""Movidius graph file path"""
)
parser.add_argument(
"-labels-map-file",
dest="labels_map_file",
default="",
help="""Labels map file"""
)
parser.add_argument(
"-images-file",
dest="images_file",
default="",
help="""CSV file containing list of images filenames to classify in images-dir folder, only filenames listed here will be processed"""
)
args = parser.parse_args()
if not os.path.isdir(args.images_dir):
print("data is not a directory: %s" % args.images_dir)
print("Please use the right path as argument, and/or change the Makefile MOVIDIUSDIR variable")
return 0
print("IMAGE_DIM", IMAGE_DIM)
# start NCS
device = open_ncs_device()
graph = load_graph(device, args.graph_file)
# prepare images
images = init_images(args.images_dir, args.images_file)
n_images = len(images)
info_frequency = 100
print("n_images = %d" % n_images)
# load labels map file
labelsLines = [line.rstrip('\n') for line in open(args.labels_map_file)]
labels = {}
for label in labelsLines:
split = label.split(":")
labels[int(split[0])] = int(split[1])
# process images
for i, image in enumerate(images):
if (i+1) % info_frequency == 0:
print("progess %d/%d ..." % (i+1, n_images), flush=True)
bgr_blob = image.load_BGR(IMAGE_DIM)
graph.LoadTensor(bgr_blob, 'user object')
output, userobj = graph.GetResult()
#print(output)
image.inference_time = np.sum(
graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN ) )
image.save_top_k(output, labels, 5)
# stop NCS
close_ncs_device(device, graph)
# process results
write_inferences_csv(args.output_file, images)
if os.path.isfile(args.images_file):
score_inferences(images)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| 33.625 | 140 | 0.602684 |
import argparse
import csv
import cv2
import mvnc.mvncapi as mvnc
import numpy as np
import os.path
IMAGE_DIM = 299
| true | true |
f70fb7c77d935e9a4bdb46140f8bc3e6d53a17ee | 2,501 | py | Python | radio_bridge/tests/unit/test_dtmf_decoder.py | Kami/raspberry-pi-ham-radio | 7ff9180e3a4d645b92e07ce92cbcbf73c7a0a628 | [
"Apache-2.0"
] | 2 | 2020-10-26T06:16:52.000Z | 2021-11-15T11:05:29.000Z | radio_bridge/tests/unit/test_dtmf_decoder.py | Kami/raspberry-pi-ham-radio | 7ff9180e3a4d645b92e07ce92cbcbf73c7a0a628 | [
"Apache-2.0"
] | null | null | null | radio_bridge/tests/unit/test_dtmf_decoder.py | Kami/raspberry-pi-ham-radio | 7ff9180e3a4d645b92e07ce92cbcbf73c7a0a628 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Tomaz Muraus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from radio_bridge.dtmf import FFTDTMFDecoderImplementation
__all__ = ["TestFFTDTMFDecoder"]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.abspath(os.path.join(BASE_DIR, "../fixtures/dtmf"))
class TestFFTDTMFDecoder(unittest.TestCase):
    """Decode known-good DTMF recordings and verify the detected symbol."""

    # DTMF symbols in keypad order; each fixture file name embeds the symbol.
    _CODES = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "*", "0", "#"]

    def _assert_decodes(self, subdir, filename_pattern):
        # Every fixture contains a single DTMF tone; the decoder must return
        # the symbol embedded in the file name.
        for code in self._CODES:
            path = os.path.join(FIXTURES_DIR, subdir, filename_pattern % code)
            decoder = FFTDTMFDecoderImplementation(file_path=path)
            self.assertEqual(decoder.decode(), code)

    def test_decode_anytone_578_dtmf_data(self):
        self._assert_decodes("anytone_578", "%s.wav")

    def test_decode_audio_check_tone_generator_data(self):
        self._assert_decodes("audiochecknet", "audiocheck.net_dtmf_%s.wav")
| 36.246377 | 79 | 0.594562 |
import os
import unittest
from radio_bridge.dtmf import FFTDTMFDecoderImplementation
__all__ = ["TestFFTDTMFDecoder"]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.abspath(os.path.join(BASE_DIR, "../fixtures/dtmf"))
class TestFFTDTMFDecoder(unittest.TestCase):
def test_decode_anytone_578_dtmf_data(self):
values = [
("1.wav", "1"),
("2.wav", "2"),
("3.wav", "3"),
("4.wav", "4"),
("5.wav", "5"),
("6.wav", "6"),
("7.wav", "7"),
("8.wav", "8"),
("9.wav", "9"),
("*.wav", "*"),
("0.wav", "0"),
("#.wav", "#"),
]
for file_path, expected_code in values:
file_path = os.path.join(FIXTURES_DIR, "anytone_578/", file_path)
decoder = FFTDTMFDecoderImplementation(file_path=file_path)
self.assertEqual(decoder.decode(), expected_code)
def test_decode_audio_check_tone_generator_data(self):
values = [
("audiocheck.net_dtmf_1.wav", "1"),
("audiocheck.net_dtmf_2.wav", "2"),
("audiocheck.net_dtmf_3.wav", "3"),
("audiocheck.net_dtmf_4.wav", "4"),
("audiocheck.net_dtmf_5.wav", "5"),
("audiocheck.net_dtmf_6.wav", "6"),
("audiocheck.net_dtmf_7.wav", "7"),
("audiocheck.net_dtmf_8.wav", "8"),
("audiocheck.net_dtmf_9.wav", "9"),
("audiocheck.net_dtmf_*.wav", "*"),
("audiocheck.net_dtmf_0.wav", "0"),
("audiocheck.net_dtmf_#.wav", "#"),
]
for file_path, expected_code in values:
file_path = os.path.join(FIXTURES_DIR, "audiochecknet/", file_path)
decoder = FFTDTMFDecoderImplementation(file_path=file_path)
self.assertEqual(decoder.decode(), expected_code)
| true | true |
f70fb837d095c7afa802c8994d156aba293785e2 | 4,084 | py | Python | sharinator/settings.py | Technikradio/sharinator | 3aa72d01d3829520c9627320d044af14fda913b8 | [
"BSD-3-Clause"
] | null | null | null | sharinator/settings.py | Technikradio/sharinator | 3aa72d01d3829520c9627320d044af14fda913b8 | [
"BSD-3-Clause"
] | 9 | 2020-05-31T16:24:49.000Z | 2020-06-19T17:42:56.000Z | sharinator/settings.py | Technikradio/sharinator | 3aa72d01d3829520c9627320d044af14fda913b8 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for sharinator project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from django.contrib.messages import constants as messages

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to the repository;
# rotate it and load it from the environment before any deployment.
SECRET_KEY = 'ofj2gu)@$2xahppvk%25217+y!-1d4#@1-*#)c6zssk%&s87ai'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: with DEBUG = True Django only serves localhost-style hosts.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    # Project apps.
    'sharinator.administration',
    'sharinator.dashboard',
    'sharinator.equipment',
    'sharinator.peers',
    'sharinator.shares',
    # Third-party: django-crispy-forms (see CRISPY_TEMPLATE_PACK below).
    'crispy_forms',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    #'sharinator.administration.middleware.ForceLogoutMiddleware',
]

ROOT_URLCONF = 'sharinator.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'sharinator.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file database next to the project root (development setup).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Template pack used by django-crispy-forms when rendering forms.
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Map Django message levels to Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.DEBUG: 'alert alert-dark',
    messages.INFO: 'alert alert-info',
    messages.SUCCESS: 'alert alert-success',
    messages.WARNING: 'alert alert-warning',
    messages.ERROR: 'alert alert-danger',
}

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# collectstatic target; served by the web server in production.
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static.dist')
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "sharinator", "static"),
]
LOGIN_URL = "/admin/dbadmin/login"

# Run the test suite through django-nose with coverage reporting enabled.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
    '--with-coverage',
    '--cover-package=sharinator',
    '--logging-level=WARN'
]
| 26.012739 | 91 | 0.698335 |
import os
from django.contrib.messages import constants as messages
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ofj2gu)@$2xahppvk%25217+y!-1d4#@1-*#)c6zssk%&s87ai'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'sharinator.administration',
'sharinator.dashboard',
'sharinator.equipment',
'sharinator.peers',
'sharinator.shares',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'sharinator.administration.middleware.ForceLogoutMiddleware',
]
ROOT_URLCONF = 'sharinator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sharinator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MESSAGE_TAGS = {
messages.DEBUG: 'alert alert-dark',
messages.INFO: 'alert alert-info',
messages.SUCCESS: 'alert alert-success',
messages.WARNING: 'alert alert-warning',
messages.ERROR: 'alert alert-danger',
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static.dist')
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "sharinator", "static"),
]
LOGIN_URL = "/admin/dbadmin/login"
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage',
'--cover-package=sharinator',
'--logging-level=WARN'
]
| true | true |
f70fb89aca0288cdc68dc3bcc49697d7f2d22348 | 2,530 | py | Python | tests/py3/test_property_map.py | agarwalrounak/qmt | 6fb8ee55fb9d544b72f6dc0c275000914e03af06 | [
"MIT"
] | 1 | 2018-09-30T00:45:53.000Z | 2018-09-30T00:45:53.000Z | tests/py3/test_property_map.py | imagineagents/qmt | 5e8a7001cc020979636e492448abcfd894396038 | [
"MIT"
] | null | null | null | tests/py3/test_property_map.py | imagineagents/qmt | 5e8a7001cc020979636e492448abcfd894396038 | [
"MIT"
] | null | null | null | import numpy as np
from qmt.geometry import PropertyMap, MaterialPropertyMap
from qmt.materials import Materials
class DummyPartMap:
def __init__(self, part_ids):
assert len(part_ids) == 2
self.partIds = part_ids
def __call__(self, x):
assert np.ndim(x) >= 1
x = np.asanyarray(x)
if np.ndim(x) == 1:
return self.partIds[x[0] > 0]
else:
return np.where(x[..., 0] > 0, self.partIds[1], self.partIds[0])
def test_property_map():
    """PropertyMap should look up part ids and map them to property values."""
    int_map = DummyPartMap([0, 1])
    str_map = DummyPartMap(['part1', 'part2'])

    prop_map1 = PropertyMap(int_map, np.vectorize(lambda p: 'yes' if p > 0 else 'no'))
    assert prop_map1.get_part((1., 2.)) == 1
    assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)
    assert prop_map1((1., 2.)) == 'yes'
    assert np.all(prop_map1(-np.ones((2, 3))) == 'no')

    props = {'part1': 'yes', 'part2': 'no'}
    prop_map2 = PropertyMap(str_map, np.vectorize(lambda p: props[p]))
    assert prop_map2.get_part((1., 2.)) == 'part2'
    assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')
    # Bug fix: the last two assertions previously re-checked prop_map1
    # (copy-paste error), leaving prop_map2's __call__ untested.
    assert prop_map2((1., 2.)) == 'no'
    assert np.all(prop_map2(-np.ones((2, 3))) == 'yes')
def test_materials_property_map():
    """MaterialPropertyMap resolves part id -> material -> named property."""
    int_map = DummyPartMap([0, 1])
    str_map = DummyPartMap(['part1', 'part2'])
    part_materials1 = {0: 'InAs', 1: 'GaSb'}
    part_materials2 = {'part1': 'InAs', 'part2': 'Al'}

    # Build a small in-memory material library for the test.
    mat_lib = Materials(matDict={})
    mat_lib.add_material('InAs', 'semi', electronMass=0.026, directBandGap=417.,
                         valenceBandOffset=-590.)
    mat_lib.add_material('GaSb', 'semi', electronMass=.039, directBandGap=812.,
                         valenceBandOffset=-30.)
    mat_lib.add_material('Al', 'metal', workFunction=4280.)

    mass_map = MaterialPropertyMap(int_map, part_materials1, mat_lib, 'electronMass')
    assert mass_map.get_part((1., 2.)) == 1
    assert np.all(mass_map.get_part(-np.ones((2, 3))) == 0)
    assert mass_map((1., 2.)) == mat_lib['GaSb']['electronMass']
    assert np.all(mass_map(-np.ones((2, 3))) == mat_lib['InAs']['electronMass'])

    # 'Al' was added without a directBandGap, so the fill value (0.) is used.
    gap_map = MaterialPropertyMap(str_map, part_materials2, mat_lib, 'directBandGap',
                                  eunit='eV', fill_value=0.)
    assert gap_map.get_part((1., 2.)) == 'part2'
    assert np.all(gap_map.get_part(-np.ones((2, 3))) == 'part1')
    assert gap_map((1., 2.)) == 0.
    assert np.all(gap_map(-np.ones((2, 3))) == mat_lib.find('InAs', 'eV')['directBandGap'])
| 40.15873 | 99 | 0.608696 | import numpy as np
from qmt.geometry import PropertyMap, MaterialPropertyMap
from qmt.materials import Materials
class DummyPartMap:
def __init__(self, part_ids):
assert len(part_ids) == 2
self.partIds = part_ids
def __call__(self, x):
assert np.ndim(x) >= 1
x = np.asanyarray(x)
if np.ndim(x) == 1:
return self.partIds[x[0] > 0]
else:
return np.where(x[..., 0] > 0, self.partIds[1], self.partIds[0])
def test_property_map():
int_map = DummyPartMap([0, 1])
str_map = DummyPartMap(['part1', 'part2'])
prop_map1 = PropertyMap(int_map, np.vectorize(lambda p: 'yes' if p > 0 else 'no'))
assert prop_map1.get_part((1., 2.)) == 1
assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)
assert prop_map1((1., 2.)) == 'yes'
assert np.all(prop_map1(-np.ones((2, 3))) == 'no')
props = {'part1': 'yes', 'part2': 'no'}
prop_map2 = PropertyMap(str_map, np.vectorize(lambda p: props[p]))
assert prop_map2.get_part((1., 2.)) == 'part2'
assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')
assert prop_map1((1., 2.)) == 'yes'
assert np.all(prop_map1(-np.ones((2, 3))) == 'no')
def test_materials_property_map():
int_map = DummyPartMap([0, 1])
str_map = DummyPartMap(['part1', 'part2'])
part_materials1 = {0: 'InAs', 1: 'GaSb'}
part_materials2 = {'part1': 'InAs', 'part2': 'Al'}
mat_lib = Materials(matDict={})
mat_lib.add_material('InAs', 'semi', electronMass=0.026, directBandGap=417.,
valenceBandOffset=-590.)
mat_lib.add_material('GaSb', 'semi', electronMass=.039, directBandGap=812.,
valenceBandOffset=-30.)
mat_lib.add_material('Al', 'metal', workFunction=4280.)
prop_map1 = MaterialPropertyMap(int_map, part_materials1, mat_lib, 'electronMass')
assert prop_map1.get_part((1., 2.)) == 1
assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)
assert prop_map1((1., 2.)) == mat_lib['GaSb']['electronMass']
assert np.all(prop_map1(-np.ones((2, 3))) == mat_lib['InAs']['electronMass'])
prop_map2 = MaterialPropertyMap(str_map, part_materials2, mat_lib, 'directBandGap', eunit='eV',
fill_value=0.)
assert prop_map2.get_part((1., 2.)) == 'part2'
assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')
assert prop_map2((1., 2.)) == 0.
assert np.all(prop_map2(-np.ones((2, 3))) == mat_lib.find('InAs', 'eV')['directBandGap'])
| true | true |
f70fb93da9d51c1f9838f67977dbbd4aef65562e | 4,576 | py | Python | tensorflow/python/kernel_tests/batch_scatter_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/kernel_tests/batch_scatter_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/kernel_tests/batch_scatter_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
# Maps each TensorFlow scatter op under test to its numpy reference
# implementation; _VariableRankTest uses this to compare TF against numpy.
_TF_OPS_TO_NUMPY = {
    state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
  """Tests tf.batch_scatter_update against the numpy reference above."""

  def _VariableRankTest(self,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False,
                        updates_are_scalar=False,
                        method=False):
    """Runs `tf_scatter` over several index/update ranks and compares to numpy.

    Args:
      tf_scatter: scatter op under test (must be a key of _TF_OPS_TO_NUMPY).
      vtype: numpy dtype for the variable values and the updates.
      itype: numpy integer dtype for the indices.
      repeat_indices: NOTE(review): unused in this body — presumably kept for
        signature parity with similar scatter tests; confirm intent.
      updates_are_scalar: NOTE(review): unused here as well; see above.
      method: if True, exercise the `Variable.batch_scatter_update` method
        with an IndexedSlices argument instead of the free-function op.
    """
    np.random.seed(8)
    with self.cached_session(use_gpu=False):
      for indices_shape in (2,), (3, 7), (3, 4, 7):
        for extra_shape in (), (5,), (5, 9):
          # Generate random indices with no duplicates for easy numpy comparison
          sparse_dim = len(indices_shape) - 1
          indices = np.random.randint(
              indices_shape[sparse_dim], size=indices_shape, dtype=itype)
          updates = _AsType(
              np.random.randn(*(indices_shape + extra_shape)), vtype)
          old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
          # Scatter via numpy
          new = old.copy()
          np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
          np_scatter(new, indices, updates)
          # Scatter via tensorflow
          ref = variables.Variable(old)
          ref.initializer.run()
          if method:
            ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
          else:
            tf_scatter(ref, indices, updates).eval()
          # TF result must match the numpy reference element-wise.
          self.assertAllClose(ref.eval(), new)

  @test_util.run_deprecated_v1
  def testVariableRankUpdate(self):
    """Covers float value dtypes crossed with both integer index dtypes."""
    vtypes = [np.float32, np.float64]
    for vtype in vtypes:
      for itype in (np.int32, np.int64):
        self._VariableRankTest(
            state_ops.batch_scatter_update, vtype, itype)

  @test_util.run_deprecated_v1
  def testBooleanScatterUpdate(self):
    """Applies two scatter updates to a boolean variable and checks the result."""
    with self.session(use_gpu=False) as session:
      var = variables.Variable([True, False])
      update0 = state_ops.batch_scatter_update(var, [1], [True])
      update1 = state_ops.batch_scatter_update(
          var, constant_op.constant(
              [0], dtype=dtypes.int64), [False])
      var.initializer.run()
      # Both updates run in one session.run call; each flips one element.
      session.run([update0, update1])
      self.assertAllEqual([False, True], self.evaluate(var))

  @test_util.run_deprecated_v1
  def testScatterOutOfRange(self):
    """In-range indices succeed; out-of-range indices raise an op error."""
    params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
    updates = np.array([-3, -4, -5]).astype(np.float32)
    with self.session(use_gpu=False):
      ref = variables.Variable(params)
      ref.initializer.run()
      # Indices all in range, no problem.
      indices = np.array([2, 0, 5])
      state_ops.batch_scatter_update(ref, indices, updates).eval()
      # Test some out of range errors.
      indices = np.array([-1, 0, 5])
      with self.assertRaisesOpError(
          r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
        state_ops.batch_scatter_update(ref, indices, updates).eval()
      indices = np.array([2, 0, 6])
      with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
                                    r'shape \[6\]'):
        state_ops.batch_scatter_update(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
| 35.472868 | 80 | 0.644231 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
_TF_OPS_TO_NUMPY = {
state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False,
method=False):
np.random.seed(8)
with self.cached_session(use_gpu=False):
for indices_shape in (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
sparse_dim = len(indices_shape) - 1
indices = np.random.randint(
indices_shape[sparse_dim], size=indices_shape, dtype=itype)
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
new = old.copy()
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
ref = variables.Variable(old)
ref.initializer.run()
if method:
ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
else:
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
vtypes = [np.float32, np.float64]
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(
state_ops.batch_scatter_update, vtype, itype)
@test_util.run_deprecated_v1
def testBooleanScatterUpdate(self):
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.batch_scatter_update(var, [1], [True])
update1 = state_ops.batch_scatter_update(
var, constant_op.constant(
[0], dtype=dtypes.int64), [False])
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1
def testScatterOutOfRange(self):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
indices = np.array([2, 0, 5])
state_ops.batch_scatter_update(ref, indices, updates).eval()
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
r'shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
| true | true |
f70fb9ac8a380f47eb700eb0aa4f64af7b5fd5bd | 2,144 | py | Python | nova/scheduler/filters/retry_filter.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/retry_filter.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/retry_filter.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
from oslo_log import log as logging
from nova.i18n import _LI
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
"""Filter out nodes that have already been attempted for scheduling
purposes
"""
# NOTE(danms): This does not affect _where_ an instance lands, so not
# related to rebuild.
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
"""Skip nodes that have already been attempted."""
retry = spec_obj.retry
if not retry:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled")
return True
# TODO(sbauza): Once the HostState is actually a ComputeNode, we could
# easily get this one...
host = [host_state.host, host_state.nodename]
# TODO(sbauza)... and we wouldn't need to primitive the hosts into
# lists
hosts = [[cn.host, cn.hypervisor_hostname] for cn in retry.hosts]
passes = host not in hosts
if not passes:
LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
"%(hosts)s"), {'host': host, 'hosts': hosts})
msg = ('Previously tried: %(hosts)s' % {'hosts': hosts})
self.filter_reject(host_state, spec_obj, msg, append=True)
# Host passes if it's not in the list of previously attempted hosts:
return passes
| 35.147541 | 78 | 0.661381 |
from oslo_log import log as logging
from nova.i18n import _LI
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
retry = spec_obj.retry
if not retry:
LOG.debug("Re-scheduling is disabled")
return True
host = [host_state.host, host_state.nodename]
# lists
hosts = [[cn.host, cn.hypervisor_hostname] for cn in retry.hosts]
passes = host not in hosts
if not passes:
LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
"%(hosts)s"), {'host': host, 'hosts': hosts})
msg = ('Previously tried: %(hosts)s' % {'hosts': hosts})
self.filter_reject(host_state, spec_obj, msg, append=True)
# Host passes if it's not in the list of previously attempted hosts:
return passes
| true | true |
f70fb9ca8e2cc531992c308148f77617cc1fff51 | 10,495 | py | Python | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/identifier_completer_test.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 10 | 2020-07-21T21:59:54.000Z | 2021-07-19T11:01:47.000Z | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/identifier_completer_test.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | null | null | null | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/identifier_completer_test.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 1 | 2021-01-30T18:17:01.000Z | 2021-01-30T18:17:01.000Z | # Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import os
from hamcrest import assert_that, empty, equal_to, contains_exactly
from ycmd.user_options_store import DefaultOptions
from ycmd.completers.all import identifier_completer as ic
from ycmd.completers.all.identifier_completer import IdentifierCompleter
from ycmd.request_wrap import RequestWrap
from ycmd.tests import PathToTestFile
from ycmd.tests.test_utils import BuildRequest
def BuildRequestWrap( contents, column_num, line_num = 1 ):
return RequestWrap( BuildRequest( column_num = column_num,
line_num = line_num,
contents = contents ) )
def GetCursorIdentifier_StartOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
1 ) ) ) )
assert_that( 'fooBar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'fooBar',
1 ) ) ) )
def GetCursorIdentifier_EndOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
3 ) ) ) )
def GetCursorIdentifier_PastEndOfLine_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
11 ) ) ) )
def GetCursorIdentifier_NegativeColumn_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
-10 ) ) ) )
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
1 ) ) ) )
def GetCursorIdentifier_AtNonIdentifier_test():
assert_that( 'goo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
4 ) ) ) )
def GetCursorIdentifier_WalksForwardForIdentifier_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' foo',
1 ) ) ) )
def GetCursorIdentifier_FindsNothingForward_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo ()***()',
5 ) ) ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
assert_that( 'f', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' f ',
1 ) ) ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foobar',
4 ) ) ) )
def GetCursorIdentifier_LineEmpty_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '',
12 ) ) ) )
def GetCursorIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def GetCursorIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def PreviousIdentifier_Simple_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_WholeIdentShouldBeBeforeColumn_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar',
column_num = 4 ) ) ) )
def PreviousIdentifier_DoNotWrap_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar\n bar',
column_num = 4 ) ) ) )
def PreviousIdentifier_IgnoreForwardIdents_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo bar zoo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_DontContinueLooking_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'abcde foo',
10 ) ) ) )
def PreviousIdentifier_WhitespaceAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ',
6 ) ) ) )
def PreviousIdentifier_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;()** ',
13 ) ) ) )
def PreviousIdentifier_IdentInMiddleOfJunk_test():
assert_that( 'aa', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;(aa)** ',
13 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n ',
column_num = 3,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n',
column_num = 1,
line_num = 2 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo **;()\n ',
column_num = 3,
line_num = 2 ) ) ) )
def PreviousIdentifier_NoGoodIdentFound_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 5,
False,
BuildRequestWrap( 'foo\n ',
column_num = 2,
line_num = 2 ) ) ) )
def PreviousIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def PreviousIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def FilterUnchangedTagFiles_NoFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [] ) ),
empty() )
def FilterUnchangedTagFiles_SkipBadFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles(
[ '/some/tags' ] ) ),
empty() )
def FilterUnchangedTagFiles_KeepGoodFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
tag_file = PathToTestFile( 'basic.tags' )
assert_that( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ),
contains_exactly( tag_file ) )
def FilterUnchangedTagFiles_SkipUnchangesFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
# simulate an already open tags file that didn't change in the meantime.
tag_file = PathToTestFile( 'basic.tags' )
ident_completer._tags_file_last_mtime[ tag_file ] = os.path.getmtime(
tag_file )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ) ),
empty() )
| 34.409836 | 80 | 0.536541 |
import os
from hamcrest import assert_that, empty, equal_to, contains_exactly
from ycmd.user_options_store import DefaultOptions
from ycmd.completers.all import identifier_completer as ic
from ycmd.completers.all.identifier_completer import IdentifierCompleter
from ycmd.request_wrap import RequestWrap
from ycmd.tests import PathToTestFile
from ycmd.tests.test_utils import BuildRequest
def BuildRequestWrap( contents, column_num, line_num = 1 ):
return RequestWrap( BuildRequest( column_num = column_num,
line_num = line_num,
contents = contents ) )
def GetCursorIdentifier_StartOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
1 ) ) ) )
assert_that( 'fooBar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'fooBar',
1 ) ) ) )
def GetCursorIdentifier_EndOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
3 ) ) ) )
def GetCursorIdentifier_PastEndOfLine_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
11 ) ) ) )
def GetCursorIdentifier_NegativeColumn_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
-10 ) ) ) )
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
1 ) ) ) )
def GetCursorIdentifier_AtNonIdentifier_test():
assert_that( 'goo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
4 ) ) ) )
def GetCursorIdentifier_WalksForwardForIdentifier_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' foo',
1 ) ) ) )
def GetCursorIdentifier_FindsNothingForward_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo ()***()',
5 ) ) ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
assert_that( 'f', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' f ',
1 ) ) ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foobar',
4 ) ) ) )
def GetCursorIdentifier_LineEmpty_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '',
12 ) ) ) )
def GetCursorIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def GetCursorIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def PreviousIdentifier_Simple_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_WholeIdentShouldBeBeforeColumn_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar',
column_num = 4 ) ) ) )
def PreviousIdentifier_DoNotWrap_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar\n bar',
column_num = 4 ) ) ) )
def PreviousIdentifier_IgnoreForwardIdents_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo bar zoo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_DontContinueLooking_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'abcde foo',
10 ) ) ) )
def PreviousIdentifier_WhitespaceAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ',
6 ) ) ) )
def PreviousIdentifier_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;()** ',
13 ) ) ) )
def PreviousIdentifier_IdentInMiddleOfJunk_test():
assert_that( 'aa', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;(aa)** ',
13 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n ',
column_num = 3,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n',
column_num = 1,
line_num = 2 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo **;()\n ',
column_num = 3,
line_num = 2 ) ) ) )
def PreviousIdentifier_NoGoodIdentFound_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 5,
False,
BuildRequestWrap( 'foo\n ',
column_num = 2,
line_num = 2 ) ) ) )
def PreviousIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def PreviousIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def FilterUnchangedTagFiles_NoFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [] ) ),
empty() )
def FilterUnchangedTagFiles_SkipBadFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles(
[ '/some/tags' ] ) ),
empty() )
def FilterUnchangedTagFiles_KeepGoodFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
tag_file = PathToTestFile( 'basic.tags' )
assert_that( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ),
contains_exactly( tag_file ) )
def FilterUnchangedTagFiles_SkipUnchangesFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
tag_file = PathToTestFile( 'basic.tags' )
ident_completer._tags_file_last_mtime[ tag_file ] = os.path.getmtime(
tag_file )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ) ),
empty() )
| true | true |
f70fba585f35db352b62ef695402806501bee0cf | 5,147 | py | Python | models/eweights.py | RemiBe/Crack | 334df92c5598f2a3249cae022e570ab32068ba79 | [
"Apache-2.0"
] | 1 | 2021-09-16T01:13:51.000Z | 2021-09-16T01:13:51.000Z | models/eweights.py | RemiBe/crack | 334df92c5598f2a3249cae022e570ab32068ba79 | [
"Apache-2.0"
] | null | null | null | models/eweights.py | RemiBe/crack | 334df92c5598f2a3249cae022e570ab32068ba79 | [
"Apache-2.0"
] | null | null | null | """Edge weights.
"""
__author__ = "Rémi Barat"
__version__ = "1.0"
import math
import random
from crack.models.weights import condition_models, format_crit
#####################################################
### Format the models for init_EWeights functions ###
#####################################################
def _init_EWeights(init_fct):
"""Decorator that prepares the [models] to the [init_fct].
"""
def wrapper(models, records, crit=0, key_out="eweights", **kwargs):
condition_models(init_fct, models, records, crit, key_out, "eweights", **kwargs)
return wrapper
######################
### Initialization ###
######################
def init_EWeights_from_args(models, records, wgts, key_in=None, key_out="eweights"):
if isinstance(key_in, str):
key_in = [key_in]
nbr_n = len(wgts)
nbr_c = len(wgts[0])
models[key_out] = {
"entity" : "eweights",
"nbr_n" : nbr_n,
"nbr_c" : nbr_c,
"weights": wgts,
"totals" : [sum(w[c] for w in wgts) for c in range(nbr_c)],
"keys" : key_in,
}
@_init_EWeights
def init_EWeights_from_HWeights(models, records, key_out="eweights", key_graph="graph", key_hypergraph="hypergraph", key_hweights="hweights", f=None, f_args="sum_centers"):
# Arguments #
nbr_e = models[key_graph]["nbr_e"]
edges = models[key_graph]["edges"]
hwgts = models[key_hweights]["weights"]
hedges = models[key_hypergraph]["edges"]
if f is None:
def f(*hwgts): return sum(hwgt[0] for hwgt in hwgts)
#############
if f_args == "sum_centers":
wgts = [f(hwgts[i], hwgts[j]) for i, j in edges]
else:
crack_error(
ValueError, "init_EWeights_from_HWeights",
"Unknown 'f_args'. Possible values are: 'sum_centers'."
)
return wgts
@_init_EWeights
def init_EWeights_from_NWeights(
models, records,
key_out="eweights", key_in="graph",
key_nweights="nweights", nweights_crit=0,
f=None, f_args="all_ends",
):
"""Returns Weights based on the weights of the nodes for a
given criterion.
"""
nbr_e = models[key_in]["nbr_e"]
edges = models[key_in]["edges"]
nwgts = models[key_nweights]["weights"]
crit = format_crit(nweights_crit)
if f is None:
def f(*nwgts): return sum(nwgt[c] for c in crit for nwgt in nwgts)
if f_args == "all_ends":
wgts = [f(*[nwgts[i] for i in edges[e]]) for e in range(nbr_e)]
else:
crack_error(
ValueError, "init_EWeights_from_NWeights",
"Unknown 'f_args'. Possible values are: 'all_ends'."
)
return wgts
@_init_EWeights
def init_EWeights_random(models, records, key_in=None, nbr_e=None, inf=1, sup=100, **kwargs):
"""Generates (uniformly) random eweights.
"""
if nbr_e is None:
nbr_e = models[key_in]["nbr_e"]
return [random.randint(inf, sup) for e in range(nbr_e)]
@_init_EWeights
def init_EWeights_unit(models, records, key_in=None, nbr_e=None):
"""Give a unit weight to every element.
Options:
key_in: str: Key of the entity the weights will correspond to.
"""
if nbr_e is None:
nbr_e = models[key_in]["nbr_e"]
return [1] * nbr_e
@_init_EWeights
def init_EWeights_topologic_mountains(structs, inf=1, sup=100, npeaks=2):
"""Some Edges are picked randomly to serve as peaks. The more an
Edge is close to a peak, the higher is its weight.
"""
# TODO
pass
###############
### Coarsen ###
###############
def coarsen_EWeights(models, records, c_models, key_eweights, aggregation):
"""Add the coarsen edge weights to [c_models], under [key_weights].
"""
nbr_c = models[key_eweights]["nbr_c"]
ewgts = models[key_eweights]["weights"]
key_in = models[key_eweights]["keys"]
key_topo = key_in[0]
edges = models[key_topo]["edges"]
nbr_e_ = c_models[key_topo]["nbr_e"]
edges_ = c_models[key_topo]["edges"]
nodes_ = c_models[key_topo]["nodes"]
ewgts_ = [[0] * nbr_c for _ in range(nbr_e_)]
tots_ = [0] * nbr_c
for e, edge in enumerate(edges):
i = aggregation[edge[0]]
j = aggregation[edge[1]]
if i != j:
e_ = nodes_[i][1][
next(f for f, j_ in enumerate(nodes_[i][0]) if j_ == j)
]
for c in range(nbr_c):
ewgts_[e_][c] += ewgts[e][c]
tots_[c] += ewgts[e][c]
c_models[key_eweights] = {
"entity" : "eweights",
"nbr_n" : nbr_e_,
"nbr_c" : nbr_c,
"weights": ewgts_,
"totals" : models[key_eweights]["totals"],
"keys" : models[key_eweights]["keys"],
}
####################
### Function IDs ###
####################
INIT_EWGT_FCTS = {
"init_EWeights_from_HWeights" : init_EWeights_from_HWeights,
"init_EWeights_from_NWeights" : init_EWeights_from_NWeights,
"init_EWeights_topologic_mountains": init_EWeights_topologic_mountains,
"init_EWeights_random" : init_EWeights_random,
"init_EWeights_unit" : init_EWeights_unit,
}
| 30.099415 | 172 | 0.592967 |
__author__ = "Rémi Barat"
__version__ = "1.0"
import math
import random
from crack.models.weights import condition_models, format_crit
| true | true |
f70fbb21c94acb9d07d8e2e1ca75454e92d0eaf5 | 28,076 | py | Python | game_client.py | wenlianglaw/Tetris-in-Python | d4f0a22c4827e7eeb44c55def3f024e0c6932ebe | [
"MIT"
] | 1 | 2021-06-25T20:43:19.000Z | 2021-06-25T20:43:19.000Z | game_client.py | wenlianglaw/Tetris-in-Python | d4f0a22c4827e7eeb44c55def3f024e0c6932ebe | [
"MIT"
] | null | null | null | game_client.py | wenlianglaw/Tetris-in-Python | d4f0a22c4827e7eeb44c55def3f024e0c6932ebe | [
"MIT"
] | null | null | null | # This file defines the back end of the Tetris game
#
# GameState is the base class of GameClient.
#
# GameClient.Run() will start two threads:
# - _ProcessActions: Process the action list every x seconds
# - _AutoDrop: Auto drops the current piece.
#
# GameClient:
# - current piece
# - held piece
# - piece list
# - color_map: game board
# - InputActions(...): Inputs a list of actions.
# - ProcessActions(...): Lets the game client process a list of actions
# directly
# - ProcessAction(...): Lets the game client process one actions directly
# - PutPiece(...): Puts the current piece if the position is valid.
# - GetState(...): Gets game state, useful to AI
# - CheckValidity(...): Checks if a move is valid
# - SpawnPiece(...): Sets the current piece.
# - Restart(...): Restarts the game.
# - Rotate(...): Alternatively, callers can directly call Rotate to rotate
# current_piece
# - Move(...): Alternatively, callers can directly call Move to move the
# current_piece
#
import copy
import queue
import threading
import time
from threading import Lock
from typing import Tuple, List
import numpy as np
import actions
import shape
# Some global settings
DEFAULT_LENGTH = 20
DEFAULT_WIDTH = 10
MAP_PADDING_SIZE = 4
# When there are less than threshold pieces, spawn a new bag.
REFILL_THRESHOLD = 5
# Disable the auto drop in next few seconds
MAXIMUM_LOCK_TIME = 4
INCREMENTAL_LOCK_TIME = 1
# Scores
SINGLE = 5
DOUBLE = 10
TSS = 20
TRIPLE = 40
QUAD = 50
TSD = 60
TST = 80
PC = 120
# ATTACKS
ATTACK_DOUBLE = 1
ATTACK_TSS = 2
ATTACK_TRIPLE = 2
ATTACK_QUAD = 4
ATTACK_TSD = 4
ATTACK_TST = 6
ATTACK_PC = 10
class InternalError(Exception):
  """Raised when the game client reaches an unexpected internal state."""
class GameState:
  """Snapshot of the observable Tetris game state.

  Holds the board (color_map), the current/held pieces, the upcoming
  piece list, the score and assorted bookkeeping counters.  GameClient
  derives from this class and adds the game logic on top.
  """

  def __init__(self):
    self.height = 0
    self.width = 0
    # (rows x cols) matrix of cell colors; 0 means an empty cell.
    self.color_map = np.array([])
    self.current_piece = None
    self.held_piece = None
    self.score = 0
    self.piece_list = []
    self.is_gameover = False
    self.can_swap = True
    self.accumulated_lines_eliminated = 0
    self.piece_dropped = 0
    self.blevel_increase = False
    self.level = 0
    self.line_sent = 0
    self.line_received = 0

  def __deepcopy__(self, memodict=None):
    """Copies the state, deep-copying the board and the pieces.

    Non-container attributes are taken over by the initial shallow copy;
    color_map, current_piece, held_piece and piece_list get independent
    copies so the clone can be mutated freely.
    """
    if memodict is None:
      memodict = dict()
    another = copy.copy(self)
    another.color_map = self.color_map.copy()
    if self.current_piece is not None:
      another.current_piece = self.current_piece.copy()
    if self.held_piece is not None:
      another.held_piece = self.held_piece.copy()
    another.piece_list = copy.deepcopy(self.piece_list.copy())
    return another

  def copy(self):
    """Returns an independent copy of this state (see __deepcopy__)."""
    return self.__deepcopy__()

  def __str__(self):
    """Returns a human-readable, multi-line dump of the state."""
    # Bug fix: the previous implementation built this string but never
    # returned it, so str(state) raised a TypeError.
    return (f"height: {self.height}\n"
            f"width: {self.width}\n"
            f"color_map: {self.color_map}\n"
            f"current_piece: {self.current_piece}\n"
            f"held_piece: {self.held_piece}\n"
            f"score: {self.score}\n"
            f"piece_list: {self.piece_list}\n"
            f"is_gameover: {self.is_gameover}\n"
            f"can_swap: {self.can_swap}\n"
            f"piece_dropped: {self.piece_dropped}\n"
            f"level: {self.level}\n")
class GameClient(GameState):
def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,
map_side_padding=MAP_PADDING_SIZE):
super().__init__()
self.height = height
self.width = width
self.map_height_padding = map_height_padding
self.map_side_padding = map_side_padding
self.dtype = np.uint8
self.dtype_length = 8
if self.width + 2 * map_side_padding > 8:
self.dtype = np.uint16
self.dtype_length = 16
if self.width + 2 * map_side_padding > 16:
self.dtype = np.uint32
self.dtype_length = 32
if self.width + 2 * map_side_padding > 32:
self.dtype = np.uint64
self.dtype_length = 64
if self.width + 2 * map_side_padding > 64:
self.dtype = np.uint128
self.dtype_length = 128
if self.width + 2 * map_side_padding > 128:
raise InternalError(
"width too long to support bit map. Consider chaning it to a smaller value.")
# Lock time settings
# When the lock is enabled, count the lock time.
# When the accumulated lock time is greater than the current maximum lock time,
# force to perform the auto drop. Otherwise autodop is disabled for this turn.
# When current locktime is reached but an refresh lock time request is genertaed.
# increase the current maximum lock time by incremental lock time.
self.maximum_lock_time = MAXIMUM_LOCK_TIME
self.current_maximum_lock_time = 0
self.incremental_lock_time = INCREMENTAL_LOCK_TIME
self.accumulate_lock_time = 0
# Only when move or rotate at bottom locks the auto drop
self._enable_lock_time = False
# Color map marks the color for each cell.
self.color_map = np.array([[]], dtype=self.dtype)
# Bit map for a better performance in some calculation.
self.bit_map = np.array([], dtype=self.dtype)
# Lock for current_piece
self.mutex_current_piece = Lock()
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500 # 500 ms at level 0
self._current_spawn_interval = 500
# actions.Action
self.last_action = None
self.disable_autodrop = False
self.line_tobesent = 0
# Used when calculate the auto drop interval decrease based on current level.
# Generated from the sigmoid function
# x = np.linspace(0, 40, 40)
# interval_decrease = 110 / (1 + np.exp(0.16 * x))
# interval_decrease = np.cumsum(interval_decrease)
# print(repr(np.cumsum(interval_decrease)))
self.interval_decrease = np.array(
[55., 100.49727968, 150.55179446, 190.28030383,
230.85041422, 260.47244367, 290.38990828, 320.86947489,
345.19115272, 350.63934095, 380.49515164, 400.03022699,
410.5020957, 420.15098155, 430.19789113, 440.8437644,
450.26946046, 455.63636342, 461.08741849, 465.74844074,
469.72957119, 473.12678557, 476.02338748, 478.4914391,
480.59310001, 482.38185737, 483.90364044, 485.19781892,
486.29808909, 487.23325451, 488.02790975, 488.70303602,
489.27651798, 489.76359062, 490.17722443, 490.52845671,
490.82667585, 491.07986489, 491.2948099, 491.47727802])
self._RefillPieces()
self._TakePieceFromList()
self.accumulated_lines_eliminated = 0
# When soft-dropping, temporarily disable auto-drop
self.soft_drop = False
self.piece_dropped = 0
# Must be put after the initializations above
self._InitMap()
def _InitMap(self):
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1
self.bit_map = np.concatenate((
np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),
np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)
self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],
dtype=self.dtype)
def Restart(self):
self._InitMap()
self.piece_list = []
self.held_piece = None
self.current_piece = None
# Lock of the game state
self.mutex_current_piece = Lock()
self.is_gameover = False
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500.0
self._current_spawn_interval = 500.0
# actions.Action
self.last_action = []
self.can_swap = True
self.score = 0
self.accumulate_lock_time = 0
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self.line_sent = 0
self.line_received = 0
self.line_tobesent = 0
self._enable_lock_time = False
self._RefillPieces()
self._TakePieceFromList()
def Run(self):
auto_drop_th = threading.Thread(target=self.AutoDrop, name="auto_drop", daemon=True)
process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)
if not self.disable_autodrop:
auto_drop_th.start()
process_input_th.start()
if not self.disable_autodrop:
auto_drop_th.join()
process_input_th.join()
print("game ends")
def GetState(self) -> GameState:
"""Gets game state.
Returns the objects ref instead of copy For better performance.
"""
return copy.deepcopy(super())
def GetCell(self, i: int, j: int) -> int:
"""Gets cell at [i,j].
Notes: This function doesn't check the index out of boundary error.
"""
return self.color_map[i, j]
def GetMap(self):
"""Gets whole color_map."""
return self.color_map
def GetMapArea(self, corner: Tuple[int, int],
size: Tuple[int, int]) -> np.array:
"""Gets an area of
:param top_left:
:param bottom_right:
:return: The area of the color_map.
"""
size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),
np.min([size[1], self.color_map.shape[1] - corner[1]]))
return self.color_map[corner[0]: corner[0] + size[0],
corner[1]: corner[1] + size[1]]
def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):
"""Sets the cell at [i,j] to value v."""
(i, j) = pos
bit_map = self.bit_map.copy()
if map is None or map is self.color_map:
map = self.color_map
bit_map = self.bit_map
map[i, j] = v
# Set a bit to value: Clear to bit to 0 and then set to value
bit_v = 0 if v == 0 else 1
bit_j_pos = self.width + self.map_side_padding - 1 - j
bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)
def SetWholeMap(self, map: np.array):
if map.shape != self.color_map.shape:
raise InternalError(
f"Map shape {map.shape}"
f" must match the color_map shape: {self.color_map.shape}")
self.color_map = map
# Convert the map to Bollean map
bit_color_map = map != 0
# Revert the order and padding, then call the packbits(..., order="little") fn
bit_color_map = bit_color_map[:, ::-1]
bit_color_map = np.pad(
bit_color_map,
((0, 0), (self.map_side_padding, self.map_side_padding)),
"constant", constant_values=(1,))
padding0_len = self.dtype_length - bit_color_map.shape[1]
bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),
"constant", constant_values=(0,))
int_color_map = np.packbits(bit_color_map, bitorder="little").view(self.dtype)
self.bit_map[0:self.map_height_padding + self.height] = int_color_map
print(int_color_map)
print(self.bit_map)
def copy(self):
another = copy.copy(self)
another.last_action = copy.copy(self.last_action)
if self.last_put_piece is not None:
another.last_put_piece = self.last_put_piece.copy()
another.color_map = np.copy(self.color_map)
another.bit_map = np.copy(self.bit_map)
another.action_list = copy.copy(self.action_list)
another.piece_list = self.piece_list.copy()
another.current_piece = self.current_piece.copy()
if self.held_piece is None:
another.held_piece = None
else:
another.held_piece = self.held_piece.copy()
return another
    def AutoDrop(self):
        """Gravity-thread body: advances the current piece once per spawn
        interval, forever (run as a daemon thread).

        A user soft drop sets self.soft_drop, which makes this loop skip one
        tick so the piece does not fall twice in the same interval.  When
        the piece cannot move down it is locked immediately unless lock
        delay is armed, in which case lock time accumulates until
        current_maximum_lock_time is exhausted.
        """
        while True:
            if self.soft_drop:
                # If it is soft dropping, we don't perform auto drop.
                self.soft_drop = False
            else:
                if self.CheckValidity(self.current_piece, offset=(1, 0)):
                    self.Move(actions.Action(down=True, source_user_or_ai=False))
                else:
                    if (not self._enable_lock_time or
                            self.accumulate_lock_time >= self.current_maximum_lock_time):
                        self.PutPiece()
                    else:
                        # Still within lock delay: count this tick against it.
                        self.accumulate_lock_time += self._current_spawn_interval / 1000
            # _current_spawn_interval is in milliseconds.
            time.sleep(self._current_spawn_interval / 1000)
def InputActions(self, acts: List[actions.Action]):
if self.is_gameover:
return
if len(acts) > 30:
print("len:", len(acts))
acts = acts[-30:]
for act in acts:
if self.action_list.qsize() > 50:
break
self.action_list.put(act)
    def ProcessActions(self, actions: List[actions.Action], post_processing=True):
        """Applies a batch of actions in order via ProcessAction.

        NOTE(review): the parameter name `actions` shadows the imported
        `actions` module inside this body; kept as-is because renaming it
        would break callers that pass it by keyword.
        """
        for a in actions:
            self.ProcessAction(a, post_processing=post_processing)
def ProcessAction(self, action: actions.Action, post_processing=True):
if self.is_gameover:
return
# print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
# self.test += 1
# print(self.test)
if action.swap:
self.Swap()
self.Rotate(action.rotation)
self.Move(action, post_processing=post_processing)
def _ProcessActionsThread(self):
while True:
while not self.action_list.empty():
act = self.action_list.get()
self.ProcessAction(act)
self.action_list.task_done()
time.sleep(0.001)
def SetLevel(self, level: int = 0):
"""Let the front end set!"""
self.level = level
i = min(len(self.interval_decrease), self.level)
self._current_spawn_interval = max(
10, self._init_spawn_interval - self.interval_decrease[i])
def IncreaseLevel(self, inc: int = 1):
"""Let the front end decide!"""
self.level += inc
self.SetLevel(self.level)
    def Move(self, action: actions.Action, post_processing=True) -> bool:
        """Moves the current piece according to `action`.

        :param action: Action carrying the direction and soft-drop flags.
        :param post_processing: if True, a hard drop also puts the piece on
            the color_map and applies line elimination.  Otherwise only the
            current_piece's position is updated.
        :return: True if the piece moved; False otherwise.
        """
        if (action.direction == actions.NONE and
                not action.down):
            return False
        moved = False
        # Each movement kind takes the piece lock on its own so other
        # threads (AutoDrop) can interleave between steps.
        if action.down:
            try:
                self.mutex_current_piece.acquire()
                if self.CheckValidity(self.current_piece, (1, 0)):
                    self.current_piece.x += 1
                    moved = True
                    # Tell AutoDrop to skip its next gravity tick.
                    self.soft_drop = True
            finally:
                self.mutex_current_piece.release()
        if action.direction == actions.LEFT:
            try:
                self.mutex_current_piece.acquire()
                if self.CheckValidity(self.current_piece, (0, -1)):
                    self.current_piece.y += -1
                    moved = True
            finally:
                self.mutex_current_piece.release()
        if action.direction == actions.RIGHT:
            try:
                self.mutex_current_piece.acquire()
                if self.CheckValidity(self.current_piece, (0, 1)):
                    self.current_piece.y += 1
                    moved = True
            finally:
                self.mutex_current_piece.release()
        if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
            try:
                self.mutex_current_piece.acquire()
                while self.CheckValidity(self.current_piece, (1, 0)):
                    self.current_piece.x += 1
                # NOTE(review): moved is reported True even when the piece
                # was already at the bottom (zero loop iterations) — confirm
                # this is intended.
                moved = True
            finally:
                self.mutex_current_piece.release()
            if post_processing and action.direction == actions.HARD_DROP:
                self.PutPiece()
        if moved:
            self.last_action = action
        # A user move that leaves the piece resting on something refreshes
        # the lock delay (hard drops lock immediately instead).
        at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
        if (at_bottom and action.direction != actions.HARD_DROP and
                action.source_user):
            self._RefreshLockTime()
        return moved
def _RefreshLockTime(self):
self._enable_lock_time = True
if self.accumulate_lock_time >= self.current_maximum_lock_time:
self.current_maximum_lock_time = min(
self.current_maximum_lock_time + self.incremental_lock_time,
self.maximum_lock_time)
def _ResetLockTime(self):
self._enable_lock_time = False
self.accumulate_lock_time = 0
self.current_maximum_lock_time = 0
    def Swap(self):
        """Swaps the held piece and the current piece, if allowed.

        can_swap is cleared here and only re-enabled when a piece locks
        down (_PostPutPiece), so at most one swap happens per piece.
        """
        if not self.can_swap:
            return
        try:
            self.mutex_current_piece.acquire()
            t = self.held_piece
            self.held_piece = self.current_piece
            self.current_piece = t
            # First swap of the game: nothing was held yet, draw a new piece.
            if not self.current_piece:
                self._TakePieceFromList()
            # Reset both pieces to their spawn position/orientation.
            self.current_piece.Init()
            self.held_piece.Init()
            self.can_swap = False
        finally:
            self.mutex_current_piece.release()
def CheckGameOver(self):
self.is_gameover = np.any(
self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
return self.is_gameover
def _AnalyzeElimination(self, n_eliminate: int) -> int:
ret = 0
is_last_put_t = isinstance(self.last_put_piece, shape.T)
if n_eliminate == 1:
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSS")
ret += TSS
self.line_tobesent += ATTACK_TSS
else:
ret += SINGLE
if n_eliminate == 2:
# TSD
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSD")
ret += TSD
self.line_tobesent += ATTACK_TSD
# Normal Double
else:
ret += DOUBLE
self.line_tobesent += ATTACK_DOUBLE
if n_eliminate == 3:
# TST
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TST")
ret += TST
self.line_tobesent += ATTACK_TST
else:
ret += TRIPLE
self.line_tobesent += ATTACK_TRIPLE
if n_eliminate == 4:
ret += QUAD
self.line_tobesent += ATTACK_QUAD
# Checks for PC
if np.all(self.color_map == 0):
print("PC")
ret += PC
self.line_tobesent += ATTACK_PC
return ret * (self.level + 3)
    def _LineClear(self):
        """Removes full rows around the last locked piece and scores them.

        Only the 4 rows starting at last_put_piece.x are inspected — enough
        for the standard tetromino set.  color_map and bit_map are updated
        in lockstep: cleared rows are deleted and the same number of empty
        rows are prepended at the top.
        """
        elimated_lines = []
        elimated_cnt = 0
        # Checks the 4 lines... This is not adapt to shape with higher than 4 lines
        # but that's not a part of this game. I don't have plan to support custom
        # shapes.
        for row in range(4):
            if not (self.last_put_piece.x + row >= 0 and
                    self.last_put_piece.x + row < self.height + self.map_height_padding):
                continue
            if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
                elimated_lines.append(row + self.last_put_piece.x)
                elimated_cnt += 1
        # Drop the full rows and pad fresh empty rows on top.
        self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
                                             dtype=self.dtype),
                                    np.delete(self.color_map, elimated_lines, axis=0)))
        # Updates the bit_map the same way; new top rows carry only the
        # wall bits.
        side_padding = (1 << self.map_side_padding) - 1
        init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
        self.bit_map = np.concatenate((elimated_cnt * [init_row],
                                       np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
        self.accumulated_lines_eliminated += elimated_cnt
        self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
def _SendAttack(self):
"""Send attack to target."""
# This feature has not been implemented yet.
self.line_sent += self.line_tobesent
self.line_tobesent = 0
def PutPiece(self, piece: shape.Shape = None):
""" Puts a piece to color_map if it is a valid placement then execute the post processing.
:param piece: The piece to put, if None, put the self.current_piece
:param color_map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
if self._PrePutPiece(piece):
self._PostPutPiece(piece)
return True
else:
return False
def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
""" Puts a piece to color_map if it is a valid placement.
Post put processing such as self._LineClear will not be executed
:param piece: The piece to put, if None, put the self.current_piece
:param map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
try:
if not piece:
self.mutex_current_piece.acquire()
piece = self.current_piece
if map is None:
map = self.color_map
if not self.CheckValidity(piece):
return False
for (i, j) in piece.GetShape():
self.SetMap((piece.x + i, piece.y + j), piece.id, map)
return True
finally:
if self.mutex_current_piece.locked():
self.mutex_current_piece.release()
def _PostPutPiece(self, piece: shape.Shape = None):
if piece is not None:
self.last_put_piece = piece
else:
self.last_put_piece = self.current_piece
# LineClear should be called prior to SendAttack
self._LineClear()
if piece is None:
self._TakePieceFromList()
self.CheckGameOver()
self._ResetLockTime()
self._SendAttack()
self.can_swap = True
self.piece_dropped += 1
def TextDraw(self):
preview_map = self.color_map.copy()
self._PrePutPiece(self.current_piece, preview_map)
for i in preview_map:
print(i)
print()
def SpawnPiece(self, piece: shape.Shape = None) -> bool:
if not piece:
self._TakePieceFromList()
else:
self.current_piece = piece.copy()
return self.CheckValidity(self.current_piece)
def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
"""Finds a location that fits this piece with n 90rotations.
Ref: https://tetris.fandom.com/wiki/SRS
:param piece: The piece to be put in the color_map. If none, it will be set to the current_piece
:param num_90rotations: How many 90 rotations
:return: piece - shape.Shape: the piece with rotations that fits the color_map.
"""
if not piece:
piece = self.current_piece
def _IsJLSTZ(piece: shape.Shape):
jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
for s in jlstz:
if isinstance(piece, s):
return True
return False
# The 180 rotation wall kick table is copied from
# https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
# which is origined from
# https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
offset_map_jlstz = [
# state 0
([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
# 0>>2, 180 rotation
# [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
[(0, 0)],
[(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
# state 1
([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
# l>>3, 180 rotation
# [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
# state 2
([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
# [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
[(0, 0)],
[(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
# state 3
([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
# 3>>1, 180 rotation
# [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
]
offset_map_i = [
# state 0
[[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
# [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
[(0, 0)],
[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
# state 1
[[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
# [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
[(0, 0)],
[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
# state 2
[[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
# [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation
[(0, 0)],
[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1
# state 3
[[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0
# [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation
[(0, 0)],
[(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2
]
state = piece.state
num_90rotations %= 4
offset_piece = piece.copy()
ori_x = offset_piece.x
ori_y = offset_piece.y
for _ in range(num_90rotations):
offset_piece.Rotate90()
if num_90rotations == 0:
if self.CheckValidity(offset_piece):
return offset_piece
num_90rotations -= 1
if _IsJLSTZ(piece):
for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
else:
for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
return None
    def Rotate(self, n: int) -> bool:
        """Rotates the current piece by n quarter-turns, applying SRS kicks.

        :param n: rotations, taken modulo 4; 0 is a no-op.
        :return: True if a fitting rotation was found and applied.
        """
        n %= 4
        if n == 0:
            return False
        fitted_piece = self._FindFittedPiece(num_90rotations=n)
        if fitted_piece:
            self.current_piece = fitted_piece
            # NOTE(review): other call sites construct Action(down=...) /
            # use action.direction; confirm `dir=` is an accepted keyword
            # of actions.Action.
            self.last_action = actions.Action(dir=0, rotation=n)
            # Rotated onto the floor/stack: refresh the lock delay.
            if not self.CheckValidity(self.current_piece, (1, 0)):
                self._RefreshLockTime()
        return fitted_piece is not None
def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):
"""Checks if the piece with offset can be put in the color_map
:param piece: The piece to be put.
:param offset: The inital offset to the piece
:return: True if the current state can fit into the color_map. False otherwise.
"""
(ox, oy, os) = (piece.x, piece.y, piece.state)
piece.x += offset[0]
piece.y += offset[1]
a = self.bit_map[piece.x: piece.x + 4]
b = self.width - piece.y
c = piece.GetBitMap().astype(self.dtype)
d = c << b
e = a & d
check_rst = e == 0
(piece.x, piece.y, piece.state) = (ox, oy, os)
return np.all(check_rst)
def _GetNextBag(self):
start_y = int((self.width - 3) / 2)
assert start_y >= 0
bag = [shape.I(start_y=start_y),
shape.J(start_y=start_y),
shape.L(start_y=start_y),
shape.O(start_y=start_y),
shape.S(start_y=start_y),
shape.T(start_y=start_y),
shape.Z(start_y=start_y)]
np.random.shuffle(bag)
return bag
def _RefillPieces(self):
"""
When there are less than REFILL_THRESHOLD pieces in the list,
refill it with a new bag.
"""
if len(self.piece_list) <= REFILL_THRESHOLD:
self.piece_list.extend(self._GetNextBag())
def _TakePieceFromList(self):
self._RefillPieces()
self.current_piece = self.piece_list[0].copy()
self.piece_list = self.piece_list[1:]
def CreateGameFromState(state: GameState) -> GameClient:
    """Builds a GameClient that mirrors `state`.

    Uses SetWholeMap rather than assigning color_map directly so the packed
    bit_map used for collision tests stays consistent with the colors
    (direct assignment left bit_map empty, making every placement appear
    valid).
    """
    game = GameClient(height=state.height, width=state.width)
    game.SetWholeMap(np.copy(state.color_map))
    game.current_piece = state.current_piece.copy()
    if state.held_piece is not None:
        game.held_piece = state.held_piece.copy()
    else:
        game.held_piece = None
    game.score = state.score
    game.piece_list = state.piece_list.copy()
    game.can_swap = state.can_swap
    game.is_gameover = state.is_gameover
    game.accumulated_lines_eliminated = state.accumulated_lines_eliminated
    game.piece_dropped = state.piece_dropped
    game.line_sent = state.line_sent
    game.line_received = state.line_received
    return game
| 32.799065 | 123 | 0.63036 |
import copy
import queue
import threading
import time
from threading import Lock
from typing import Tuple, List
import numpy as np
import actions
import shape
DEFAULT_LENGTH = 20
DEFAULT_WIDTH = 10
MAP_PADDING_SIZE = 4
REFILL_THRESHOLD = 5
MAXIMUM_LOCK_TIME = 4
INCREMENTAL_LOCK_TIME = 1
SINGLE = 5
DOUBLE = 10
TSS = 20
TRIPLE = 40
QUAD = 50
TSD = 60
TST = 80
PC = 120
ATTACK_DOUBLE = 1
ATTACK_TSS = 2
ATTACK_TRIPLE = 2
ATTACK_QUAD = 4
ATTACK_TSD = 4
ATTACK_TST = 6
ATTACK_PC = 10
class InternalError(Exception):
class GameState:
    """Snapshot of the externally visible Tetris game state."""

    def __init__(self):
        self.height = 0
        self.width = 0
        self.color_map = np.array([])
        self.current_piece = None
        self.held_piece = None
        self.score = 0
        self.piece_list = []
        self.is_gameover = False
        self.can_swap = True
        self.accumulated_lines_eliminated = 0
        self.piece_dropped = 0
        self.blevel_increase = False
        self.level = 0
        self.line_sent = 0
        self.line_received = 0

    def __deepcopy__(self, memodict=None):
        """Deep copy: the map and pieces are duplicated; scalars are shared
        via the shallow copy."""
        if memodict is None:
            memodict = dict()
        another = copy.copy(self)
        another.color_map = self.color_map.copy()
        if self.current_piece is not None:
            another.current_piece = self.current_piece.copy()
        if self.held_piece is not None:
            another.held_piece = self.held_piece.copy()
        another.piece_list = copy.deepcopy(self.piece_list.copy())
        return another

    def copy(self):
        """Convenience alias for a deep copy."""
        return self.__deepcopy__()

    def __str__(self):
        ret = ""
        ret += f"""height: {self.height}
width: {self.width}
color_map: {self.color_map}
current_piece: {self.current_piece}
held_piece: {self.held_piece}
score: {self.score}
piece_list: {self.piece_list}
is_gameover: {self.is_gameover}
can_swap: {self.can_swap}
piece_dropped: {self.piece_dropped}
level: {self.level}
"""
        # Previously the built string was dropped (no return), so str()
        # raised "__str__ returned non-string (type NoneType)".
        return ret
class GameClient(GameState):
def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,
map_side_padding=MAP_PADDING_SIZE):
super().__init__()
self.height = height
self.width = width
self.map_height_padding = map_height_padding
self.map_side_padding = map_side_padding
self.dtype = np.uint8
self.dtype_length = 8
if self.width + 2 * map_side_padding > 8:
self.dtype = np.uint16
self.dtype_length = 16
if self.width + 2 * map_side_padding > 16:
self.dtype = np.uint32
self.dtype_length = 32
if self.width + 2 * map_side_padding > 32:
self.dtype = np.uint64
self.dtype_length = 64
if self.width + 2 * map_side_padding > 64:
self.dtype = np.uint128
self.dtype_length = 128
if self.width + 2 * map_side_padding > 128:
raise InternalError(
"width too long to support bit map. Consider chaning it to a smaller value.")
self.maximum_lock_time = MAXIMUM_LOCK_TIME
self.current_maximum_lock_time = 0
self.incremental_lock_time = INCREMENTAL_LOCK_TIME
self.accumulate_lock_time = 0
self._enable_lock_time = False
self.color_map = np.array([[]], dtype=self.dtype)
self.bit_map = np.array([], dtype=self.dtype)
self.mutex_current_piece = Lock()
self.last_put_piece = None
self.action_list = queue.Queue()
self._init_spawn_interval = 500
self._current_spawn_interval = 500
self.last_action = None
self.disable_autodrop = False
self.line_tobesent = 0
self.interval_decrease = np.array(
[55., 100.49727968, 150.55179446, 190.28030383,
230.85041422, 260.47244367, 290.38990828, 320.86947489,
345.19115272, 350.63934095, 380.49515164, 400.03022699,
410.5020957, 420.15098155, 430.19789113, 440.8437644,
450.26946046, 455.63636342, 461.08741849, 465.74844074,
469.72957119, 473.12678557, 476.02338748, 478.4914391,
480.59310001, 482.38185737, 483.90364044, 485.19781892,
486.29808909, 487.23325451, 488.02790975, 488.70303602,
489.27651798, 489.76359062, 490.17722443, 490.52845671,
490.82667585, 491.07986489, 491.2948099, 491.47727802])
self._RefillPieces()
self._TakePieceFromList()
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self._InitMap()
def _InitMap(self):
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1
self.bit_map = np.concatenate((
np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),
np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)
self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],
dtype=self.dtype)
def Restart(self):
self._InitMap()
self.piece_list = []
self.held_piece = None
self.current_piece = None
self.mutex_current_piece = Lock()
self.is_gameover = False
self.last_put_piece = None
self.action_list = queue.Queue()
self._init_spawn_interval = 500.0
self._current_spawn_interval = 500.0
self.last_action = []
self.can_swap = True
self.score = 0
self.accumulate_lock_time = 0
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self.line_sent = 0
self.line_received = 0
self.line_tobesent = 0
self._enable_lock_time = False
self._RefillPieces()
self._TakePieceFromList()
def Run(self):
auto_drop_th = threading.Thread(target=self.AutoDrop, name="auto_drop", daemon=True)
process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)
if not self.disable_autodrop:
auto_drop_th.start()
process_input_th.start()
if not self.disable_autodrop:
auto_drop_th.join()
process_input_th.join()
print("game ends")
def GetState(self) -> GameState:
return copy.deepcopy(super())
def GetCell(self, i: int, j: int) -> int:
return self.color_map[i, j]
def GetMap(self):
return self.color_map
def GetMapArea(self, corner: Tuple[int, int],
size: Tuple[int, int]) -> np.array:
size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),
np.min([size[1], self.color_map.shape[1] - corner[1]]))
return self.color_map[corner[0]: corner[0] + size[0],
corner[1]: corner[1] + size[1]]
def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):
(i, j) = pos
bit_map = self.bit_map.copy()
if map is None or map is self.color_map:
map = self.color_map
bit_map = self.bit_map
map[i, j] = v
bit_v = 0 if v == 0 else 1
bit_j_pos = self.width + self.map_side_padding - 1 - j
bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)
def SetWholeMap(self, map: np.array):
if map.shape != self.color_map.shape:
raise InternalError(
f"Map shape {map.shape}"
f" must match the color_map shape: {self.color_map.shape}")
self.color_map = map
bit_color_map = map != 0
bit_color_map = bit_color_map[:, ::-1]
bit_color_map = np.pad(
bit_color_map,
((0, 0), (self.map_side_padding, self.map_side_padding)),
"constant", constant_values=(1,))
padding0_len = self.dtype_length - bit_color_map.shape[1]
bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),
"constant", constant_values=(0,))
int_color_map = np.packbits(bit_color_map, bitorder="little").view(self.dtype)
self.bit_map[0:self.map_height_padding + self.height] = int_color_map
print(int_color_map)
print(self.bit_map)
def copy(self):
another = copy.copy(self)
another.last_action = copy.copy(self.last_action)
if self.last_put_piece is not None:
another.last_put_piece = self.last_put_piece.copy()
another.color_map = np.copy(self.color_map)
another.bit_map = np.copy(self.bit_map)
another.action_list = copy.copy(self.action_list)
another.piece_list = self.piece_list.copy()
another.current_piece = self.current_piece.copy()
if self.held_piece is None:
another.held_piece = None
else:
another.held_piece = self.held_piece.copy()
return another
def AutoDrop(self):
while True:
if self.soft_drop:
self.soft_drop = False
else:
if self.CheckValidity(self.current_piece, offset=(1, 0)):
self.Move(actions.Action(down=True, source_user_or_ai=False))
else:
if (not self._enable_lock_time or
self.accumulate_lock_time >= self.current_maximum_lock_time):
self.PutPiece()
else:
self.accumulate_lock_time += self._current_spawn_interval / 1000
time.sleep(self._current_spawn_interval / 1000)
def InputActions(self, acts: List[actions.Action]):
if self.is_gameover:
return
if len(acts) > 30:
print("len:", len(acts))
acts = acts[-30:]
for act in acts:
if self.action_list.qsize() > 50:
break
self.action_list.put(act)
def ProcessActions(self, actions: List[actions.Action], post_processing=True):
for a in actions:
self.ProcessAction(a, post_processing=post_processing)
def ProcessAction(self, action: actions.Action, post_processing=True):
if self.is_gameover:
return
# print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
# self.test += 1
# print(self.test)
if action.swap:
self.Swap()
self.Rotate(action.rotation)
self.Move(action, post_processing=post_processing)
def _ProcessActionsThread(self):
while True:
while not self.action_list.empty():
act = self.action_list.get()
self.ProcessAction(act)
self.action_list.task_done()
time.sleep(0.001)
def SetLevel(self, level: int = 0):
self.level = level
i = min(len(self.interval_decrease), self.level)
self._current_spawn_interval = max(
10, self._init_spawn_interval - self.interval_decrease[i])
def IncreaseLevel(self, inc: int = 1):
self.level += inc
self.SetLevel(self.level)
def Move(self, action: actions.Action, post_processing=True) -> bool:
if (action.direction == actions.NONE and
not action.down):
return False
moved = False
if action.down:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
self.soft_drop = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.LEFT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, -1)):
self.current_piece.y += -1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.RIGHT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, 1)):
self.current_piece.y += 1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
try:
self.mutex_current_piece.acquire()
while self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
finally:
self.mutex_current_piece.release()
if post_processing and action.direction == actions.HARD_DROP:
self.PutPiece()
if moved:
self.last_action = action
at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
if (at_bottom and action.direction != actions.HARD_DROP and
action.source_user):
self._RefreshLockTime()
return moved
def _RefreshLockTime(self):
self._enable_lock_time = True
if self.accumulate_lock_time >= self.current_maximum_lock_time:
self.current_maximum_lock_time = min(
self.current_maximum_lock_time + self.incremental_lock_time,
self.maximum_lock_time)
def _ResetLockTime(self):
self._enable_lock_time = False
self.accumulate_lock_time = 0
self.current_maximum_lock_time = 0
def Swap(self):
if not self.can_swap:
return
try:
self.mutex_current_piece.acquire()
t = self.held_piece
self.held_piece = self.current_piece
self.current_piece = t
if not self.current_piece:
self._TakePieceFromList()
self.current_piece.Init()
self.held_piece.Init()
self.can_swap = False
finally:
self.mutex_current_piece.release()
def CheckGameOver(self):
self.is_gameover = np.any(
self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
return self.is_gameover
def _AnalyzeElimination(self, n_eliminate: int) -> int:
ret = 0
is_last_put_t = isinstance(self.last_put_piece, shape.T)
if n_eliminate == 1:
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSS")
ret += TSS
self.line_tobesent += ATTACK_TSS
else:
ret += SINGLE
if n_eliminate == 2:
# TSD
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSD")
ret += TSD
self.line_tobesent += ATTACK_TSD
# Normal Double
else:
ret += DOUBLE
self.line_tobesent += ATTACK_DOUBLE
if n_eliminate == 3:
# TST
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TST")
ret += TST
self.line_tobesent += ATTACK_TST
else:
ret += TRIPLE
self.line_tobesent += ATTACK_TRIPLE
if n_eliminate == 4:
ret += QUAD
self.line_tobesent += ATTACK_QUAD
# Checks for PC
if np.all(self.color_map == 0):
print("PC")
ret += PC
self.line_tobesent += ATTACK_PC
return ret * (self.level + 3)
def _LineClear(self):
elimated_lines = []
elimated_cnt = 0
# Checks the 4 lines... This is not adapt to shape with higher than 4 lines
# but that's not a part of this game. I don't have plan to support custom
# shapes.
for row in range(4):
if not (self.last_put_piece.x + row >= 0 and
self.last_put_piece.x + row < self.height + self.map_height_padding):
continue
if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
elimated_lines.append(row + self.last_put_piece.x)
elimated_cnt += 1
self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
dtype=self.dtype),
np.delete(self.color_map, elimated_lines, axis=0)))
# Updates the bit_map
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
self.bit_map = np.concatenate((elimated_cnt * [init_row],
np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
self.accumulated_lines_eliminated += elimated_cnt
self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
def _SendAttack(self):
# This feature has not been implemented yet.
self.line_sent += self.line_tobesent
self.line_tobesent = 0
def PutPiece(self, piece: shape.Shape = None):
if self._PrePutPiece(piece):
self._PostPutPiece(piece)
return True
else:
return False
def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
try:
if not piece:
self.mutex_current_piece.acquire()
piece = self.current_piece
if map is None:
map = self.color_map
if not self.CheckValidity(piece):
return False
for (i, j) in piece.GetShape():
self.SetMap((piece.x + i, piece.y + j), piece.id, map)
return True
finally:
if self.mutex_current_piece.locked():
self.mutex_current_piece.release()
def _PostPutPiece(self, piece: shape.Shape = None):
if piece is not None:
self.last_put_piece = piece
else:
self.last_put_piece = self.current_piece
# LineClear should be called prior to SendAttack
self._LineClear()
if piece is None:
self._TakePieceFromList()
self.CheckGameOver()
self._ResetLockTime()
self._SendAttack()
self.can_swap = True
self.piece_dropped += 1
def TextDraw(self):
preview_map = self.color_map.copy()
self._PrePutPiece(self.current_piece, preview_map)
for i in preview_map:
print(i)
print()
def SpawnPiece(self, piece: shape.Shape = None) -> bool:
if not piece:
self._TakePieceFromList()
else:
self.current_piece = piece.copy()
return self.CheckValidity(self.current_piece)
def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
if not piece:
piece = self.current_piece
def _IsJLSTZ(piece: shape.Shape):
jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
for s in jlstz:
if isinstance(piece, s):
return True
return False
# The 180 rotation wall kick table is copied from
# https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
# which is origined from
# https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
offset_map_jlstz = [
# state 0
([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
# 0>>2, 180 rotation
# [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
[(0, 0)],
[(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
# state 1
([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
# l>>3, 180 rotation
# [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
# state 2
([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
# [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
[(0, 0)],
[(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
# state 3
([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
# 3>>1, 180 rotation
# [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
]
offset_map_i = [
# state 0
[[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
# [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
[(0, 0)],
[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
# state 1
[[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
# [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
[(0, 0)],
[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
# state 2
[[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
# [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation
[(0, 0)],
[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1
# state 3
[[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0
# [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation
[(0, 0)],
[(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2
]
state = piece.state
num_90rotations %= 4
offset_piece = piece.copy()
ori_x = offset_piece.x
ori_y = offset_piece.y
for _ in range(num_90rotations):
offset_piece.Rotate90()
if num_90rotations == 0:
if self.CheckValidity(offset_piece):
return offset_piece
num_90rotations -= 1
if _IsJLSTZ(piece):
for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
else:
for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
return None
def Rotate(self, n: int) -> bool:
n %= 4
if n == 0:
return False
fitted_piece = self._FindFittedPiece(num_90rotations=n)
if fitted_piece:
self.current_piece = fitted_piece
self.last_action = actions.Action(dir=0, rotation=n)
if not self.CheckValidity(self.current_piece, (1, 0)):
self._RefreshLockTime()
return fitted_piece is not None
def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):
(ox, oy, os) = (piece.x, piece.y, piece.state)
piece.x += offset[0]
piece.y += offset[1]
a = self.bit_map[piece.x: piece.x + 4]
b = self.width - piece.y
c = piece.GetBitMap().astype(self.dtype)
d = c << b
e = a & d
check_rst = e == 0
(piece.x, piece.y, piece.state) = (ox, oy, os)
return np.all(check_rst)
def _GetNextBag(self):
start_y = int((self.width - 3) / 2)
assert start_y >= 0
bag = [shape.I(start_y=start_y),
shape.J(start_y=start_y),
shape.L(start_y=start_y),
shape.O(start_y=start_y),
shape.S(start_y=start_y),
shape.T(start_y=start_y),
shape.Z(start_y=start_y)]
np.random.shuffle(bag)
return bag
def _RefillPieces(self):
if len(self.piece_list) <= REFILL_THRESHOLD:
self.piece_list.extend(self._GetNextBag())
def _TakePieceFromList(self):
self._RefillPieces()
self.current_piece = self.piece_list[0].copy()
self.piece_list = self.piece_list[1:]
def CreateGameFromState(state: GameState) -> GameClient:
game = GameClient(height=state.height, width=state.width)
game.color_map = np.copy(state.color_map)
game.current_piece = state.current_piece.copy()
if state.held_piece is not None:
game.held_piece = state.held_piece.copy()
else:
game.held_piece = None
game.score = state.score
game.piece_list = state.piece_list.copy()
game.can_swap = state.can_swap
game.is_gameover = state.is_gameover
game.accumulated_lines_eliminated = state.accumulated_lines_eliminated
game.piece_dropped = state.piece_dropped
game.line_sent = state.line_sent
game.line_received = state.line_received
return game
| true | true |
f70fbcc4d201ff1d88c227e27e1344dec0d9a084 | 5,208 | py | Python | homeassistant/components/upnp/__init__.py | Juggler00/home-assistant | 3f87d413813de84935ea67b5212c55348524447f | [
"Apache-2.0"
] | 1 | 2021-04-08T06:02:25.000Z | 2021-04-08T06:02:25.000Z | homeassistant/components/upnp/__init__.py | ayonix/home-assistant | 8fda70537736db9a73c0a863800d6bb4df67f5fc | [
"Apache-2.0"
] | null | null | null | homeassistant/components/upnp/__init__.py | ayonix/home-assistant | 8fda70537736db9a73c0a863800d6bb4df67f5fc | [
"Apache-2.0"
] | null | null | null | """
Will open a port in your router for Home Assistant and provide statistics.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/upnp/
"""
import asyncio
from ipaddress import ip_address
import aiohttp
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import dispatcher
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.components.discovery import DOMAIN as DISCOVERY_DOMAIN
from .const import (
CONF_ENABLE_PORT_MAPPING, CONF_ENABLE_SENSORS,
CONF_HASS, CONF_LOCAL_IP, CONF_PORTS,
CONF_UDN, CONF_SSDP_DESCRIPTION,
SIGNAL_REMOVE_SENSOR,
)
from .const import DOMAIN
from .const import LOGGER as _LOGGER
from .config_flow import ensure_domain_data
from .device import Device
REQUIREMENTS = ['async-upnp-client==0.12.4']
DEPENDENCIES = ['http']
NOTIFICATION_ID = 'upnp_notification'
NOTIFICATION_TITLE = 'UPnP/IGD Setup'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_ENABLE_PORT_MAPPING, default=False): cv.boolean,
vol.Optional(CONF_ENABLE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_LOCAL_IP): vol.All(ip_address, cv.string),
vol.Optional(CONF_PORTS):
vol.Schema({
vol.Any(CONF_HASS, cv.positive_int):
vol.Any(CONF_HASS, cv.positive_int)
})
}),
}, extra=vol.ALLOW_EXTRA)
def _substitute_hass_ports(ports, hass_port):
"""Substitute 'hass' for the hass_port."""
ports = ports.copy()
# substitute 'hass' for hass_port, both keys and values
if CONF_HASS in ports:
ports[hass_port] = ports[CONF_HASS]
del ports[CONF_HASS]
for port in ports:
if ports[port] == CONF_HASS:
ports[port] = hass_port
return ports
# config
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Register a port mapping for Home Assistant via UPnP."""
ensure_domain_data(hass)
# ensure sane config
if DOMAIN not in config:
return True
if DISCOVERY_DOMAIN not in config:
_LOGGER.warning('UPNP needs discovery, please enable it')
return False
# overridden local ip
upnp_config = config[DOMAIN]
if CONF_LOCAL_IP in upnp_config:
hass.data[DOMAIN]['local_ip'] = upnp_config[CONF_LOCAL_IP]
# determine ports
ports = {CONF_HASS: CONF_HASS} # default, port_mapping disabled by default
if CONF_PORTS in upnp_config:
# copy from config
ports = upnp_config[CONF_PORTS]
hass.data[DOMAIN]['auto_config'] = {
'active': True,
'enable_sensors': upnp_config[CONF_ENABLE_SENSORS],
'enable_port_mapping': upnp_config[CONF_ENABLE_PORT_MAPPING],
'ports': ports,
}
return True
# config flow
async def async_setup_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
"""Set up UPnP/IGD-device from a config entry."""
ensure_domain_data(hass)
data = config_entry.data
# build UPnP/IGD device
ssdp_description = data[CONF_SSDP_DESCRIPTION]
try:
device = await Device.async_create_device(hass, ssdp_description)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error('Unable to create upnp-device')
return False
hass.data[DOMAIN]['devices'][device.udn] = device
# port mapping
if data.get(CONF_ENABLE_PORT_MAPPING):
local_ip = hass.data[DOMAIN].get('local_ip')
ports = hass.data[DOMAIN]['auto_config']['ports']
_LOGGER.debug('Enabling port mappings: %s', ports)
hass_port = hass.http.server_port
ports = _substitute_hass_ports(ports, hass_port)
await device.async_add_port_mappings(ports, local_ip=local_ip)
# sensors
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Enabling sensors')
# register sensor setup handlers
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
config_entry, 'sensor'))
async def unload_entry(event):
"""Unload entry on quit."""
await async_unload_entry(hass, config_entry)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, unload_entry)
return True
async def async_unload_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
"""Unload a config entry."""
data = config_entry.data
udn = data[CONF_UDN]
if udn not in hass.data[DOMAIN]['devices']:
return True
device = hass.data[DOMAIN]['devices'][udn]
# port mapping
if data.get(CONF_ENABLE_PORT_MAPPING):
_LOGGER.debug('Deleting port mappings')
await device.async_delete_port_mappings()
# sensors
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Deleting sensors')
dispatcher.async_dispatcher_send(hass, SIGNAL_REMOVE_SENSOR, device)
# clear stored device
del hass.data[DOMAIN]['devices'][udn]
return True
| 30.635294 | 79 | 0.699117 | import asyncio
from ipaddress import ip_address
import aiohttp
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import dispatcher
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.components.discovery import DOMAIN as DISCOVERY_DOMAIN
from .const import (
CONF_ENABLE_PORT_MAPPING, CONF_ENABLE_SENSORS,
CONF_HASS, CONF_LOCAL_IP, CONF_PORTS,
CONF_UDN, CONF_SSDP_DESCRIPTION,
SIGNAL_REMOVE_SENSOR,
)
from .const import DOMAIN
from .const import LOGGER as _LOGGER
from .config_flow import ensure_domain_data
from .device import Device
REQUIREMENTS = ['async-upnp-client==0.12.4']
DEPENDENCIES = ['http']
NOTIFICATION_ID = 'upnp_notification'
NOTIFICATION_TITLE = 'UPnP/IGD Setup'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_ENABLE_PORT_MAPPING, default=False): cv.boolean,
vol.Optional(CONF_ENABLE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_LOCAL_IP): vol.All(ip_address, cv.string),
vol.Optional(CONF_PORTS):
vol.Schema({
vol.Any(CONF_HASS, cv.positive_int):
vol.Any(CONF_HASS, cv.positive_int)
})
}),
}, extra=vol.ALLOW_EXTRA)
def _substitute_hass_ports(ports, hass_port):
ports = ports.copy()
if CONF_HASS in ports:
ports[hass_port] = ports[CONF_HASS]
del ports[CONF_HASS]
for port in ports:
if ports[port] == CONF_HASS:
ports[port] = hass_port
return ports
async def async_setup(hass: HomeAssistantType, config: ConfigType):
ensure_domain_data(hass)
if DOMAIN not in config:
return True
if DISCOVERY_DOMAIN not in config:
_LOGGER.warning('UPNP needs discovery, please enable it')
return False
upnp_config = config[DOMAIN]
if CONF_LOCAL_IP in upnp_config:
hass.data[DOMAIN]['local_ip'] = upnp_config[CONF_LOCAL_IP]
ports = {CONF_HASS: CONF_HASS}
if CONF_PORTS in upnp_config:
ports = upnp_config[CONF_PORTS]
hass.data[DOMAIN]['auto_config'] = {
'active': True,
'enable_sensors': upnp_config[CONF_ENABLE_SENSORS],
'enable_port_mapping': upnp_config[CONF_ENABLE_PORT_MAPPING],
'ports': ports,
}
return True
async def async_setup_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
ensure_domain_data(hass)
data = config_entry.data
ssdp_description = data[CONF_SSDP_DESCRIPTION]
try:
device = await Device.async_create_device(hass, ssdp_description)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error('Unable to create upnp-device')
return False
hass.data[DOMAIN]['devices'][device.udn] = device
if data.get(CONF_ENABLE_PORT_MAPPING):
local_ip = hass.data[DOMAIN].get('local_ip')
ports = hass.data[DOMAIN]['auto_config']['ports']
_LOGGER.debug('Enabling port mappings: %s', ports)
hass_port = hass.http.server_port
ports = _substitute_hass_ports(ports, hass_port)
await device.async_add_port_mappings(ports, local_ip=local_ip)
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Enabling sensors')
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
config_entry, 'sensor'))
async def unload_entry(event):
await async_unload_entry(hass, config_entry)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, unload_entry)
return True
async def async_unload_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
data = config_entry.data
udn = data[CONF_UDN]
if udn not in hass.data[DOMAIN]['devices']:
return True
device = hass.data[DOMAIN]['devices'][udn]
if data.get(CONF_ENABLE_PORT_MAPPING):
_LOGGER.debug('Deleting port mappings')
await device.async_delete_port_mappings()
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Deleting sensors')
dispatcher.async_dispatcher_send(hass, SIGNAL_REMOVE_SENSOR, device)
del hass.data[DOMAIN]['devices'][udn]
return True
| true | true |
f70fbcdfcc1d45f5ca92376a915b19a073966d04 | 8,295 | py | Python | components/diagnostics/diagnose_me/component.py | areshytko/pipelines | 9e818e9d13569614b7188a7bff47770ae449827c | [
"Apache-2.0"
] | null | null | null | components/diagnostics/diagnose_me/component.py | areshytko/pipelines | 9e818e9d13569614b7188a7bff47770ae449827c | [
"Apache-2.0"
] | null | null | null | components/diagnostics/diagnose_me/component.py | areshytko/pipelines | 9e818e9d13569614b7188a7bff47770ae449827c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: Optional[List[Any]] = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
If set to HALT_ON_ERROR will case any error to raise an exception.
This is intended to stop the data processing of a pipeline. Can set
to False to only report Errors/Warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
quota_check = [] or quota_check
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
'Could not find the bucket %s in project %s' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
| 35.75431 | 98 | 0.642917 |
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: Optional[List[Any]] = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
quota_dict = {}
for region_quota in quota_list:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
quota_check = [] or quota_check
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
'Could not find the bucket %s in project %s' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
print('%s %s' % (item['config']['name'], item['state']))
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
| true | true |
f70fbd594ac7cc796d70fbf753d9788864624349 | 135,700 | py | Python | test/test_binary_ufuncs.py | ZackPashkin/pytorch | 5b1f5c8f17ec4067dc9f9df98bbcc6757ab24444 | [
"Intel"
] | 1 | 2022-01-25T15:48:31.000Z | 2022-01-25T15:48:31.000Z | test/test_binary_ufuncs.py | ZackPashkin/pytorch | 5b1f5c8f17ec4067dc9f9df98bbcc6757ab24444 | [
"Intel"
] | null | null | null | test/test_binary_ufuncs.py | ZackPashkin/pytorch | 5b1f5c8f17ec4067dc9f9df98bbcc6757ab24444 | [
"Intel"
] | null | null | null | import torch
import numpy as np
import itertools
from itertools import product
import math
import random
import unittest
import warnings
import operator
from functools import partial
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
skipCUDAIfRocm, skipIf)
from torch.testing import all_types_and_complex_and
if TEST_SCIPY:
import scipy.special
# TODO: remove this
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: refactor this out
# Converts half/bfloat16 dtype to float when device is cpu
def _convert_t(dtype, device):
if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
# TODO: revise the tests to use make_tensor in common_utils.py instead
# Returns a tensor of the requested shape, dtype, and device
# Requesting a half CPU tensor returns a float CPU tensor with
# values representable by a half.
# Initialization uses randint for non-float types and randn for float types.
def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
# Returns a tensor filled with ones
if fill_ones:
return torch.ones(*shape, dtype=_convert_t(dtype, device), device=device)
# Returns a tensor with random integer values
if not (dtype.is_floating_point or dtype.is_complex):
t = torch.randint(0, 10, shape, device=device)
if dtype != torch.uint8:
t = t - 5 # generate negative values also
return t.to(_convert_t(dtype, device))
# Populates the CPU tensor with floats representable as half/bfloat16
if dtype == torch.half and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).half().float()
if dtype == torch.bfloat16 and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()
# Default: returns a tensor with random float values
return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)
# TODO: update to use opinfos consistently
class TestBinaryUfuncs(TestCase):
def test_add_broadcast_empty(self, device):
# empty + empty
self.assertRaises(RuntimeError, lambda: torch.randn(5, 0, device=device) + torch.randn(0, 5, device=device))
self.assertEqual(torch.randn(5, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, device=device))
self.assertEqual(torch.randn(5, 0, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device))
# scalar + empty
self.assertEqual(torch.randn(5, 0, 6, device=device), torch.randn((), device=device) + torch.randn(5, 0, 6, device=device))
# non-empty, empty
self.assertEqual(torch.randn(0, device=device), torch.randn(0, device=device) + torch.randn(1, device=device))
self.assertEqual(torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),
torch.randn(0, 7, 0, 6, 5, 0, 1, device=device) + torch.randn(1, 1, 5, 1, 7, device=device))
self.assertRaises(RuntimeError, lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device))
def test_addcmul_scalars_as_floats(self, device):
# zero-dim variables that don't require grad should bind to scalar arguments
x = torch.tensor(2.)
y = torch.tensor(3., device=device)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)
x = torch.tensor(2., requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops(self, device):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
eq = x == y
for idx in iter_indices(x):
self.assertEqual(x[idx] == y[idx], eq[idx] == 1)
ne = x != y
for idx in iter_indices(x):
self.assertEqual(x[idx] != y[idx], ne[idx] == 1)
lt = x < y
for idx in iter_indices(x):
self.assertEqual(x[idx] < y[idx], lt[idx] == 1)
le = x <= y
for idx in iter_indices(x):
self.assertEqual(x[idx] <= y[idx], le[idx] == 1)
gt = x > y
for idx in iter_indices(x):
self.assertEqual(x[idx] > y[idx], gt[idx] == 1)
ge = x >= y
for idx in iter_indices(x):
self.assertEqual(x[idx] >= y[idx], ge[idx] == 1)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_must_take_bool_output(self, device):
for op in [torch.lt, torch.le, torch.gt, torch.ge, torch.eq, torch.ne,
torch.logical_and, torch.logical_or, torch.logical_xor]:
self.assertEqual(op(torch.tensor([True]), torch.tensor([False])).dtype, torch.bool)
# TODO: update to work on CUDA, too
@onlyCPU
def test_inplace_comparison_ops_require_inputs_have_same_dtype(self, device):
with self.assertRaisesRegex(RuntimeError, 'Expected object of scalar type'):
for op in ['lt_', 'le_', 'gt_', 'ge_', 'eq_', 'ne_', 'logical_xor_', 'logical_and_', 'logical_or_']:
x = torch.tensor([1], dtype=torch.int)
y = torch.tensor([2], dtype=torch.long)
in_place_method = getattr(x, op)
in_place_method(y)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_scalar_overflow(self, device):
s = 1 << 20
t = torch.tensor([1 << 5], dtype=torch.uint8)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t < s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s < t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t <= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s <= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t > s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s > t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t >= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s >= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t == s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s == t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t != s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s != t)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_zerodim_tensor_overflow(self, device):
t1 = torch.tensor([1 << 5], dtype=torch.uint8)
t2 = torch.tensor([1 << 30], dtype=torch.int32)
ts1 = torch.tensor(1 << 20, dtype=torch.int32)
ts2 = torch.tensor(1 << 40, dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 < ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 < t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 <= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 <= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 > ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 > t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 >= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 >= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 == ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 == t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 != ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 != t2)
# TODO: update to work on CUDA, too
@onlyCPU
def test_bitwise_ops(self, device):
x = torch.randn(5, 5).gt(0)
y = torch.randn(5, 5).gt(0)
and_result = x & y
for idx in iter_indices(x):
if and_result[idx]:
self.assertTrue(x[idx] and y[idx])
else:
self.assertFalse(x[idx] and y[idx])
or_result = x | y
for idx in iter_indices(x):
if or_result[idx]:
self.assertTrue(x[idx] or y[idx])
else:
self.assertFalse(x[idx] or y[idx])
xor_result = x ^ y
for idx in iter_indices(x):
if xor_result[idx]:
self.assertTrue(x[idx] ^ y[idx])
else:
self.assertFalse(x[idx] ^ y[idx])
x_clone = x.clone()
x_clone &= y
self.assertEqual(x_clone, and_result)
x_clone = x.clone()
x_clone |= y
self.assertEqual(x_clone, or_result)
x_clone = x.clone()
x_clone ^= y
self.assertEqual(x_clone, xor_result)
def test_inplace_division(self, device):
t = torch.rand(5, 5, device=device)
id_before = id(t)
t /= 2
id_after = id(t)
self.assertEqual(id_before, id_after)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_complex=False))
    def test_div_rounding_modes(self, device, dtype):
        """Check torch.divide's rounding modes via arithmetic identities:
        true division satisfies (a / b) * b == a, floor mode satisfies
        d_floor * b + remainder(a, b) == a, and trunc mode matches truncating
        the true quotient.
        """
        # Draw operands across (nearly) the full representable range.
        if dtype.is_floating_point:
            low, high = -10.0, 10.0
        else:
            info = torch.iinfo(dtype)
            low, high = info.min, info.max
        a = make_tensor((100,), device, dtype, low=low, high=high)
        b = make_tensor((100,), device, dtype, low=low, high=high)
        # Avoid division by zero so we can test (a / b) * b == a
        if dtype.is_floating_point:
            eps = 0.1
            b[(-eps < b) & (b < eps)] = eps
        else:
            b[b == 0] = 1
        if not dtype.is_floating_point:
            # floor(a / b) * b can be < a, so fixup slightly to avoid underflow
            a = torch.where(a < 0, a + b, a)
        # rounding_mode=None: true division, always floating-point output.
        d_true = torch.divide(a, b, rounding_mode=None)
        self.assertTrue(d_true.is_floating_point())
        self.assertEqual(d_true * b, a.to(d_true.dtype))
        # rounding_mode='floor': quotient * divisor + remainder reconstructs a.
        d_floor = torch.divide(a, b, rounding_mode='floor')
        if dtype not in (torch.bfloat16, torch.half):
            self.assertEqual(d_floor * b + torch.remainder(a, b), a)
        else:
            # NOTE(review): remainder is computed in float32 for half/bfloat16
            # here — presumably to sidestep precision/kernel limits; confirm.
            self.assertEqual(d_floor * b + torch.remainder(a.float(), b.float()), a,
                             exact_dtype=False)
        # rounding_mode='trunc': matches truncating the true quotient.
        d_trunc = torch.divide(a, b, rounding_mode='trunc')
        rounding_unsupported = (
            dtype == torch.half and device != 'cuda' or
            dtype == torch.bfloat16 and device != 'cpu')
        # The reference is computed in float32 where the dtype/device combo
        # lacks direct support for the rounding kernel.
        d_ref = d_true.float() if rounding_unsupported else d_true
        self.assertEqual(d_trunc, d_ref.trunc().to(dtype))
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
    def test_div_rounding_nonfinite(self, device, dtype):
        """Division with nonfinite operands (inf/nan/near-zero) matches NumPy
        for true division and floor division, and the non-contiguous path
        matches the contiguous path for all three rounding modes.
        """
        # Compare division of special floating point values against NumPy
        num = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
                           dtype=dtype)
        # Divide by zero is tested seperately
        denom = num[num != 0]
        # Outer-product layout: every numerator against every denominator.
        a, b = num[None, :].clone(), denom[:, None].clone()
        # Compare bfloat16 against NumPy float
        exact_dtype = dtype != torch.bfloat16
        if exact_dtype:
            an, bn = a.cpu().numpy(), b.cpu().numpy()
        else:
            an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
        for mode, np_ref in ((None, np.true_divide), ("floor", np.floor_divide)):
            # Silence NumPy's invalid/divide warnings for the nonfinite cases.
            with np.errstate(all='ignore'):
                expect = np_ref(an, bn)
            kwargs = dict(rounding_mode=mode) if mode is not None else {}
            with set_default_dtype(torch.double):
                actual = torch.divide(a, b, **kwargs)
            self.assertEqual(actual, torch.from_numpy(expect),
                             exact_device=False, exact_dtype=exact_dtype)
        # Compare contiguous (likely vectorized) against non-contiguous (not vectorized)
        a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[::2, ::2]
        a_noncontig[:] = a
        b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[::2, ::2]
        b_noncontig[:] = b
        for rounding_mode in (None, "trunc", "floor"):
            expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)
            actual = torch.divide(a, b, rounding_mode=rounding_mode)
            self.assertEqual(actual, expect)
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_divide_by_zero_rounding(self, device, dtype):
a = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype)
exact_dtype = (dtype != torch.bfloat16)
if exact_dtype:
an = a.cpu().numpy()
else:
an = a.float().cpu().numpy()
zero = torch.zeros_like(a)
# NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide
expect = np.divide(an, 0)
for rounding_mode in (None, 'floor'):
# CPU scalar
actual = torch.divide(a, 0, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
# Device tensor
actual = torch.divide(a, zero, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
@dtypes(*torch.testing.get_all_dtypes(
include_bool=False, include_complex=False, include_bfloat16=False))
    def test_div_rounding_numpy(self, device, dtype):
        """Fuzz torch.divide's three rounding modes (None / 'floor' / 'trunc')
        against NumPy references on 4096 random elements, on both contiguous
        and strided inputs.
        """
        info = (torch.finfo(dtype) if dtype.is_floating_point
                else torch.iinfo(dtype))
        low, high = info.min, info.max
        # Compare division of random values against NumPy
        a = make_tensor((4096,), device, dtype, low=low, high=high)
        b = make_tensor((4096,), device, dtype, low=low, high=high)
        # Avoid division by zero which raises for integers and, for floats,
        # NumPy 1.20 changed floor_divide to follow IEEE rules for inf/nan
        # after dividing by zero.
        b[b == 0] = 1
        # Compare bfloat16 against NumPy float
        exact_dtype = dtype != torch.bfloat16
        if exact_dtype:
            an, bn = a.cpu().numpy(), b.cpu().numpy()
        else:
            an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
        for mode, np_ref in (
                (None, np.true_divide),
                ("floor", np.floor_divide),
                ("trunc", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype))
        ):
            # Silence NumPy warnings from inf/nan intermediates.
            with np.errstate(all='ignore'):
                expect = torch.from_numpy(np_ref(an, bn))
            kwargs = dict(rounding_mode=mode) if mode is not None else {}
            # Contiguous (likely vectorized)
            with set_default_dtype(torch.double):
                actual = torch.divide(a, b, **kwargs)
            self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
            # Non-contiguous (not vectorized)
            expect = expect[::2]
            with set_default_dtype(torch.double):
                actual = torch.divide(a[::2], b[::2], **kwargs)
            self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
# Tests that trying to add, inplace, a CUDA tensor to a CPU tensor
# throws the correct error message
@onlyCUDA
def test_cross_device_inplace_error_msg(self, device):
a = torch.tensor(2.)
b = torch.tensor(2., device=device)
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
a += b
# TODO: refactor this test into a more generic one, it's parked here currently
@onlyOnCPUAndCUDA
def test_out_resize_warning(self, device):
a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)
b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)
unary_inputs = (a,)
binary_inputs = (a, b)
unary_ops = (torch.ceil, torch.exp)
binary_ops = (torch.add, torch.sub)
for op in (unary_ops + binary_ops):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inputs = unary_inputs if op in unary_ops else binary_inputs
# No warnings
op(*inputs, out=torch.empty(3, device=device))
op(*inputs, out=torch.empty(0, device=device))
self.assertEqual(len(w), 0)
# Cases that throw warnings
op(*inputs, out=torch.empty(2, device=device))
self.assertEqual(len(w), 1)
# Verifies that the inplace dunders (like idiv) actually are in place
@onlyOnCPUAndCUDA
def test_inplace_dunders(self, device):
t = torch.randn((1,), device=device)
expected = t.data_ptr()
t += 1
t -= 1
t *= 1
t /= 1
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
t //= 1
t %= 1
self.assertEqual(expected, t.data_ptr())
def check_internal_mem_overlap(self, inplace_op, num_inputs,
dtype, device,
expected_failure=False):
if isinstance(inplace_op, str):
inplace_op = getattr(torch.Tensor, inplace_op)
input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
inputs = [input] + [torch.randn_like(input)
for i in range(num_inputs - 1)]
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
def unary_check_input_output_mem_overlap(self, data, sz, op,
expected_failure=False):
def _test(op, output, input):
output_exp = torch.empty_like(output)
op(input, out=output_exp)
self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
# output is identical to input:
_test(op, output=data[0:sz], input=data[0:sz])
# output and input are independent:
_test(op, output=data[0:sz], input=data[sz:2 * sz])
# output partially overlaps with input:
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
def binary_check_input_output_mem_overlap(self, op, device,
expected_failure=False):
sz = 3
data = torch.randn(2 * sz, device=device)
other = torch.randn(sz, device=device)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(other, input, out=out),
expected_failure=expected_failure)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(input, other, out=out),
expected_failure=expected_failure)
@dtypes(torch.double)
def test_binary_op_mem_overlap(self, device, dtype):
ops = [
("add", True, True, 'cpu'),
("add", True, True, 'cuda'),
("mul", True, True, 'cpu'),
("mul", True, True, 'cuda'),
("sub", True, True, 'cpu'),
("sub", True, True, 'cuda'),
("div", True, True, 'cpu'),
("div", True, True, 'cuda'),
("pow", True, True, 'cpu'),
("pow", True, True, 'cuda'),
("fmod", True, True, 'cpu'),
("fmod", True, True, 'cuda'),
("atan2", True, True, 'cpu'),
("atan2", True, True, 'cuda'),
("hypot", True, True, 'cpu'),
("hypot", True, True, 'cuda'),
("igamma", True, True, 'cpu'),
("igamma", True, True, 'cuda'),
("igammac", True, True, 'cpu'),
("igammac", True, True, 'cuda'),
("nextafter", True, True, 'cpu'),
("nextafter", True, True, 'cuda'),
("le", True, True, 'cpu'),
("le", True, True, 'cuda'),
("lt", True, True, 'cpu'),
("lt", True, True, 'cuda'),
("ge", True, True, 'cpu'),
("ge", True, True, 'cuda'),
("gt", True, True, 'cpu'),
("gt", True, True, 'cuda'),
("eq", True, True, 'cpu'),
("eq", True, True, 'cuda'),
("ne", True, True, 'cpu'),
("ne", True, True, 'cuda'),
("logical_and", True, True, 'cpu'),
("logical_and", True, True, 'cuda'),
("logical_or", True, True, 'cpu'),
("logical_or", True, True, 'cuda'),
("logical_xor", True, True, 'cpu'),
("logical_xor", True, True, 'cuda'),
]
for (fn, has_input_output_mem_overlap_check,
has_internal_mem_overlap_check, dev) in ops:
if dev != device:
continue
out_op = getattr(torch, fn)
inplace_op = getattr(torch.Tensor, fn + '_')
self.check_internal_mem_overlap(
inplace_op, 2, dtype, device,
expected_failure=not has_internal_mem_overlap_check)
self.binary_check_input_output_mem_overlap(out_op, device,
expected_failure=not has_input_output_mem_overlap_check)
    def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):
        """Compare torch.pow against a scalar reference for each exponent.

        Exercises contiguous (row) and non-contiguous (column) slices of
        `m1`, plus scalar ** tensor dtype promotion via __rpow__.

        Args:
            m1: 2D input tensor supplying bases.
            exponents: iterable of Python scalars to raise bases to.
            pow_fn: scalar reference (math.pow, or builtin pow for complex).
            atol: absolute tolerance for comparisons, or None for defaults.
        """
        for num in exponents:
            if isinstance(num, int) and num < 0 and not m1.is_floating_point() and not m1.is_complex():
                # Integer tensors reject negative integer exponents outright.
                with self.assertRaisesRegex(RuntimeError,
                                            r'Integers to negative integer powers are not allowed\.'):
                    torch.pow(m1[4], num)
            else:
                # base - tensor, exponent - number
                # contiguous
                res1 = torch.pow(m1[4], num)
                res2 = res1.clone().zero_()
                # `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.
                for i in range(res2.size(0)):
                    res2[i] = pow_fn(m1[4][i], num)
                # When a tolerance is given, disable the relative component.
                rtol = 0 if atol is not None else None
                self.assertEqual(res1, res2, atol=atol, rtol=rtol)
                # non-contiguous
                res1 = torch.pow(m1[:, 4], num)
                res2 = res1.clone().zero_()
                for i in range(res2.size(0)):
                    res2[i] = pow_fn(m1[i, 4], num)
                self.assertEqual(res1, res2, atol=atol, rtol=rtol)
                # scalar ** tensor to enforce correct handling of dtypes for __rpow__().
                expected_dtype = torch.result_type(num, m1)
                res1 = num ** m1[4]
                res2 = torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]
                self.assertEqual(res1, res2)
                self.assertEqual(res1.dtype, expected_dtype)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_pow(self, device, dtype):
        """torch.pow vs Python's pow/math.pow: tensor ** scalar (real and
        complex exponents) via _do_pow_for_exponents, plus scalar ** tensor
        on contiguous and non-contiguous slices.
        """
        # Zero-element probe tensor just to query dtype properties below.
        m1 = torch.empty(0, dtype=dtype, device=device)
        if m1.is_floating_point() or m1.is_complex():
            # Shift away from zero so negative/fractional powers stay finite.
            m1 = make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5
        else:
            # math.pow will overflow and throw exceptions for large integers
            range_high = 4 if dtype in (torch.int8, torch.uint8) else 10
            m1 = make_tensor((100, 100), low=1, high=range_high, dtype=dtype, device=device)
        exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3]
        complex_exponents = [-2.5j, -1.0j, 0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]
        if m1.is_complex():
            # Complex bases: use builtin pow as reference with loose atol.
            self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)
        else:
            self._do_pow_for_exponents(m1, exponents, math.pow, None)
            self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
        # base - number, exponent - tensor
        # contiguous
        res1 = torch.pow(3, m1[4])
        res2 = res1.clone().zero_()
        for i in range(res2.size(0)):
            res2[i] = pow(3, m1[4, i])
        self.assertEqual(res1, res2)
        # non-contiguous
        res1 = torch.pow(3, m1[:, 4])
        res2 = res1.clone().zero_()
        for i in range(res2.size(0)):
            res2[i] = pow(3, m1[i][4])
        self.assertEqual(res1, res2)
# TODO: refactor all these tests using opinfos properly
    def _test_pow(self, base, exponent, np_exponent=None):
        """Check torch.pow (plus pow_, and the out= variant) against NumPy.

        NumPy's result — or its ValueError for ints raised to negative int
        powers — is the reference.  Also validates the mixed-device error and
        the dtype-cast error of the in-place variant.

        Args:
            base: tensor or Python scalar base.
            exponent: tensor or Python scalar exponent.
            np_exponent: optional substitute exponent for the NumPy reference.
        """
        if np_exponent is None:
            np_exponent = exponent
        def to_np(value):
            # Convert a tensor argument for NumPy; pass scalars through.
            if isinstance(value, torch.Tensor):
                return value.cpu().numpy()
            return value
        try:
            np_res = np.power(to_np(base), to_np(np_exponent))
            expected = torch.from_numpy(np_res) if isinstance(np_res, np.ndarray) else torch.tensor(np_res, dtype=base.dtype)
        except ValueError as e:
            # NumPy rejects integer ** negative integer; torch must raise the
            # same message from every calling form.
            err_msg = "Integers to negative integer powers are not allowed."
            self.assertEqual(str(e), err_msg)
            out = torch.empty_like(base)
            test_cases = [
                lambda: base.pow(exponent),
                lambda: base.pow_(exponent),
                lambda: torch.pow(base, exponent),
                lambda: torch.pow(base, exponent, out=out)
            ]
            for test_case in test_cases:
                self.assertRaisesRegex(RuntimeError, err_msg, test_case)
        else:
            if isinstance(base, torch.Tensor):
                actual = base.pow(exponent)
                self.assertEqual(actual, expected.to(actual))
                actual = base.clone()
                # When base is a 0-dim cpu tensor and exp is a cuda tensor, we exp `pow` to work but `pow_` to fail, since
                # `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output
                if (isinstance(exponent, torch.Tensor) and base.dim() == 0 and base.device.type == 'cpu' and
                        exponent.device.type == 'cuda'):
                    regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
                    self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
                elif torch.can_cast(torch.result_type(base, exponent), base.dtype):
                    # In-place pow only allowed when the result type fits base.
                    actual2 = actual.pow_(exponent)
                    self.assertEqual(actual, expected)
                    self.assertEqual(actual2, expected)
                else:
                    self.assertRaisesRegex(RuntimeError, "Found dtype \\w+ but expected \\w+", lambda: actual.pow_(exponent))
            # Functional and out= forms work for tensor or scalar base alike.
            actual = torch.pow(base, exponent)
            self.assertEqual(actual, expected.to(actual))
            actual2 = torch.pow(base, exponent, out=actual)
            self.assertEqual(actual, expected.to(actual))
            self.assertEqual(actual2, expected.to(actual))
# Tests pow() for integral, floating-type tensors, with integral, floating-type
# exponents (tensor or scalar), respectively. noncontiguous tensors are also tested.
    def test_int_and_float_pow(self, device):
        """Broadcasted tensor ** scalar and tensor ** tensor across integral
        and floating dtypes, contiguous and non-contiguous, including float
        ranges that produce NaNs for negative bases.
        """
        def _test_int_and_float_pow(dt, low, high, dev):
            # (base shape, scalar exponent, exponent-tensor shape) triples;
            # every exponent shape broadcasts against its base shape.
            test_cases = (
                ((4, 4), 0, (4, 1)),
                ((3, 1), 4, (3, 1)),
                ((2,), 4, (1,)),
                ((1,), 2, ()),
                ((513, 513), 4, (513,)),
                ((5, 5, 5), 5, (5,)),
                ((), 2, ()),
            )
            for base_shape, exp_scalar, exp_shape in test_cases:
                base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high)
                # int tensors don't take negative exponents
                if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
                    exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high)
                else:
                    exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high)
                self._test_pow(base_tensor, exp_scalar)
                self._test_pow(base_tensor, exp_tensor)
                # test non-contiguous tensors as well
                base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high,
                                          noncontiguous=True)
                if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
                    exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high,
                                             noncontiguous=True)
                else:
                    exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high,
                                             noncontiguous=True)
                self._test_pow(base_tensor, exp_scalar)
                self._test_pow(base_tensor, exp_tensor)
        # Value ranges chosen to stay within each dtype's representable span.
        _test_int_and_float_pow(torch.int8, -2, 2, device)
        _test_int_and_float_pow(torch.uint8, 0, 3, device)
        _test_int_and_float_pow(torch.int16, -5, 5, device)
        _test_int_and_float_pow(torch.int64, -10, 10, device)
        _test_int_and_float_pow(torch.int32, -10, 10, device)
        _test_int_and_float_pow(torch.float16, 0., 5., device)
        _test_int_and_float_pow(torch.float32, 0., 10., device)
        _test_int_and_float_pow(torch.float64, 0., 10., device)
        # pow's output would have some NaNs as well
        _test_int_and_float_pow(torch.float32, -10., 10., device)
        _test_int_and_float_pow(torch.float64, -10., 10., device)
# Tests that a Runtime error occurs when a base tensor cannot be resized
# by pow's inplace variant due to PyTorch's broadcasting semantics.
def test_pow_inplace_resizing_exception(self, device):
test_cases = (
((), (3,)),
((2,), (2, 1)),
((2, 1), (2, 2)),
((2, 2), (2, 1, 1)),
)
test_inputs = list((make_tensor(base_size, dtype=torch.float64, device=device,
high=10., low=0.),
make_tensor(exp_size, dtype=torch.float64, device=device,
high=10., low=0.))
for base_size, exp_size in test_cases)
for base, exponent in test_inputs:
regex = "doesn't match the broadcast shape"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
def test_int_tensor_pow_neg_ints(self, device):
ints = [torch.iinfo(torch.int32).min,
-3, -2, -1, 0, 1, 2, 3,
torch.iinfo(torch.int32).max]
neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]
tensor = torch.tensor(ints, dtype=torch.int32, device=device)
for pow in neg_ints:
self._test_pow(tensor, pow)
def test_long_tensor_pow_floats(self, device):
ints = [0, 1, 23, 4567]
floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
tensor = torch.tensor(ints, dtype=torch.int64, device=device)
for pow in floats:
self._test_pow(tensor, pow)
@dtypes(*[torch.float32, torch.float64])
    def test_float_scalar_pow_float_tensor(self, device, dtype):
        """Python float scalar ** float tensor for a grid of scalar bases and
        exponent tensors of several shapes (checks delegated to _test_pow).
        """
        floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0,
                  1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
        exponent_shapes = (
            (1,),
            (2, 2),
            (2, 1),
            (2, 2, 2),
        )
        # NOTE(review): exponent tensors use low=0 (non-negative values), so
        # negative bases with fractional exponents are only exercised via
        # floats_tensor below — presumably deliberate; confirm.
        tensors = list(make_tensor(shape, dtype=dtype, device=device, low=0)
                       for shape in exponent_shapes)
        floats_tensor = torch.tensor(floats, dtype=dtype, device=device)
        for base in floats:
            self._test_pow(base, floats_tensor)
            for tensor in tensors:
                self._test_pow(base, tensor)
@onlyCUDA
def test_cuda_tensor_pow_scalar_tensor(self, device):
cuda_tensors = [torch.randn((3, 3), device=device), torch.tensor(3.0, device=device)]
scalar_tensors = [torch.tensor(5.0, device='cpu'), torch.tensor(-3), torch.tensor(1)]
for base, exp in product(cuda_tensors, scalar_tensors):
self._test_pow(base, exp)
@onlyCUDA
def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):
cuda_tensors = [torch.tensor(5.0, device='cuda'), torch.tensor(-3, device='cuda')]
for exp in cuda_tensors:
base = torch.randn((3, 3), device='cpu')
regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)
for exp in cuda_tensors:
# Binary ops with a cpu + cuda tensor are allowed if the cpu tensor has 0 dimension
base = torch.tensor(3.0, device='cpu')
self._test_pow(base, exp)
@onlyCUDA
@dtypes(torch.complex64, torch.complex128)
def test_pow_cuda_complex_extremal_failing(self, device, dtype):
t = torch.tensor(complex(-1., float('inf')), dtype=dtype, device=device)
with self.assertRaises(AssertionError):
cuda_out = t.pow(2)
cpu_out = t.cpu().pow(2)
self.assertEqual(cpu_out, cuda_out)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_dtypes(include_bool=False, include_bfloat16=False)))
def test_complex_scalar_pow_tensor(self, device, dtype):
complexes = [0.5j, 1. + 1.j, -1.5j, 2.2 - 1.6j, 1 + 0j]
first_exp = make_tensor((100,), device, dtype, low=-2, high=2)
second_exp = make_tensor((100,), device, dtype, low=-2, high=2, noncontiguous=True)
first_exp[0] = first_exp[10] = first_exp[20] = 0
second_exp[0] = second_exp[10] = second_exp[20] = 0
for base in complexes:
self._test_pow(base, first_exp)
self._test_pow(base, second_exp)
@onlyOnCPUAndCUDA
    def test_pow_scalar_type_promotion(self, device):
        """scalar ** tensor computes in the tensor's dtype before casting to
        the out= dtype: 2 ** 17 overflows to 0 in uint8 even though the
        output tensor is int64, while the int64 input does not overflow.
        """
        # Test against a scalar and non-scalar input
        inputs = [17, [17]]
        for input in inputs:
            # We expect the computation to be performed in uint8 (overflowing to 0), and then cast to int64
            input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)
            out_uint8_computation = torch.pow(2, input_tensor_uint8, out=torch.tensor(0, dtype=torch.int64, device=device))
            # Computation should run in int64, and not overflow
            input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)
            out_int64_computation = torch.pow(2, input_tensor_int64, out=torch.tensor(0, dtype=torch.int64, device=device))
            # The two paths differ in full precision but agree modulo 2**8.
            self.assertNotEqual(out_uint8_computation, out_int64_computation)
            self.assertEqual(out_uint8_computation.to(dtype=torch.uint8), out_int64_computation.to(dtype=torch.uint8))
def test_tensor_pow_tensor(self, dev):
def rotate(l, n):
return l[-n:] + l[:-n]
def test_tensor_pow_tensor(values, torch_type, numpy_type):
vals_tensor = torch.tensor(values, dtype=torch_type, device=dev)
for i in range(len(values)):
pows = rotate(values, i)
pows_tensor = torch.tensor(pows, dtype=torch_type, device=dev)
self._test_pow(vals_tensor, pows_tensor)
ints = [0, 1, 2, 3]
test_tensor_pow_tensor(ints, torch.uint8, np.uint8)
test_tensor_pow_tensor(ints, torch.int8, np.int8)
test_tensor_pow_tensor(ints, torch.int16, np.int16)
test_tensor_pow_tensor(ints, torch.int32, np.int32)
test_tensor_pow_tensor(ints, torch.int64, np.int64)
floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3,
0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]
test_tensor_pow_tensor(floats, torch.float16, np.float16)
test_tensor_pow_tensor(floats, torch.float32, np.float32)
test_tensor_pow_tensor(floats, torch.float64, np.float64)
def test_logical_xor_with_nontrivial_alignment(self, device):
# test tensor that is not aligned to multiple of 16 bytes
size = 128
a = (torch.randn(size, device=device) > 0)
b = (torch.randn(size, device=device) > 0)
c = (torch.randn(size, device=device) > 0)
non_trivial_alignment = [1, 2, 4, 8, 15]
for i in non_trivial_alignment:
for j in non_trivial_alignment:
for k in non_trivial_alignment:
a_ = a[i: 100 + i]
b_ = b[j: 100 + j]
c_ = c[k: 100 + k]
torch.logical_xor(a_, b_, out=c_)
for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):
self.assertEqual(x ^ y, z)
@dtypes(torch.float)
def test_add_with_tail(self, device, dtype):
# test tensor where there is a tail which is not a multiple
# of GPU warp size
for tail_size in [1, 63, 67, 130]:
size = 4096 + tail_size
a = torch.randn(size, device=device, dtype=dtype)
b = torch.randn(size, device=device, dtype=dtype)
c = a + b
for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):
self.assertEqual(x + y, z)
# Tests that CUDA tensors on different devices cannot be used in the same
# binary operation, and that CUDA "scalars" cannot be used in the same
# binary operation as non-scalar CPU tensors.
@deviceCountAtLeast(2)
@onlyCUDA
def test_cross_device_binary_ops(self, devices):
vals = (1., (2.,))
cpu_tensor = torch.randn(2, 2)
def do_test(op, a, b):
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, b)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(b, a)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, cpu_tensor)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(cpu_tensor, a)
for op in (operator.add, torch.add,
operator.sub, torch.sub,
operator.mul, torch.mul,
operator.truediv, torch.true_divide,
operator.floordiv, torch.floor_divide):
for a, b in product(vals, vals):
a = torch.tensor(a, device=devices[0])
b = torch.tensor(b, device=devices[1])
do_test(op, a, b)
# This test ensures that a scalar Tensor can be safely used
# in a binary operation in conjunction with a Tensor on all
# available CUDA devices
@deviceCountAtLeast(2)
@onlyCUDA
def test_binary_op_scalar_device_unspecified(self, devices):
scalar_val = torch.tensor(1.)
for default_device in devices:
with torch.cuda.device(default_device):
for device in devices:
device_obj = torch.device(device)
x = torch.rand(3, device=device)
y0 = x * scalar_val
self.assertEqual(y0.device, device_obj)
y1 = scalar_val * x
self.assertEqual(y1.device, device_obj)
self.assertEqual(y0, y1)
def test_div_and_floordiv_vs_python(self, device):
# Tests torch division ops which can handle both arguments being
# scalars.
# NOTE: torch.floor_divide currently truncates instead of flooring.
# the quotient. See https://github.com/pytorch/pytorch/issues/43874.
def _scalar_helper(python_op, torch_op):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected = python_op(a, b)
for op in (operator.truediv, torch.true_divide):
actual_scalar = torch_op(a, b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
actual_tensor = torch_op(a_t, b_t)
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected_div)
self.assertEqual(actual_tensor.item(), expected_div)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
_scalar_helper(lambda a, b: math.trunc(a / b), operator.floordiv)
_scalar_helper(lambda a, b: math.trunc(a / b), torch.floor_divide)
# NOTE: torch.floor_divide currently truncates instead of flooring.
# See https://github.com/pytorch/pytorch/issues/43874.
@onlyOnCPUAndCUDA
    def test_div_and_floordiv_script_vs_python(self, device):
        """TorchScript-compiled / and // agree with eager Python semantics
        (// currently truncates in torch; see gh-43874), for tensor/tensor
        and tensor/scalar forms, including the reflected scalar forms.
        """
        # Creates jitted functions of two tensors
        def _wrapped_div(a, b):
            return a / b
        def _wrapped_floordiv(a, b):
            return a // b
        scripted_div = torch.jit.script(_wrapped_div)
        scripted_floordiv = torch.jit.script(_wrapped_floordiv)
        for a, b in product(range(-10, 10), range(-10, 10)):
            # Exercise both float and int operands.
            for op in (lambda x: x * .5, lambda x: math.floor(x)):
                a = op(a)
                b = op(b)
                # Skips zero divisors
                if b == 0:
                    continue
                expected_div = a / b
                expected_truncdiv = math.trunc(a / b)
                a_t = torch.tensor(a, device=device)
                b_t = torch.tensor(b, device=device)
                self.assertEqual(scripted_div(a_t, b_t), expected_div)
                with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
                    self.assertEqual(scripted_floordiv(a_t, b_t), expected_truncdiv)
        # Creates jitted functions of one tensor
        def _wrapped_div_scalar(a):
            return a / 5
        # NOTE: the JIT implements division as torch.reciprocal(a) * 5
        def _wrapped_rdiv_scalar(a):
            return 5 / a
        def _wrapped_floordiv_scalar(a):
            return a // 5
        # NOTE: this fails if the input is not an integer tensor
        # See https://github.com/pytorch/pytorch/issues/45199
        def _wrapped_rfloordiv_scalar(a):
            return 5 // a
        scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)
        scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)
        scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)
        scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)
        for a in range(-10, 10):
            for op in (lambda x: x * .5, lambda x: math.floor(x)):
                a = op(a)
                a_t = torch.tensor(a, device=device)
                self.assertEqual(a / 5, scripted_div_scalar(a_t))
                with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
                    self.assertEqual(math.trunc(a / 5), scripted_floordiv_scalar(a_t))
                # Skips zero divisors
                if a == 0:
                    continue
                self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))
                # Handles Issue 45199 (see comment above)
                if a_t.is_floating_point():
                    with self.assertRaises(RuntimeError):
                        scripted_rfloordiv_scalar(a_t)
                else:
                    # This should emit a UserWarning, why doesn't it?
                    # See issue gh-52387
                    self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))
# NOTE: torch.floor_divide currently truncates instead of flooring
# the quotient. See https://github.com/pytorch/pytorch/issues/43874.
@onlyOnCPUAndCUDA
def test_idiv_and_ifloordiv_vs_python(self, device):
    """Checks in-place /=, //=, true_divide_ and floor_divide_ (eager and
    scripted) against Python semantics.

    FIX: the int-tensor / float-tensor branch previously called the
    undefined name `scripted_floor_divide_tensor` (single underscore),
    raising NameError instead of the RuntimeError being asserted. It now
    calls `scripted_floor_divide__tensor` as bound below.
    """
    def _wrapped_idiv_tensor(a, b):
        a /= b
        return a

    def _wrapped_idiv_scalar(a):
        a /= 5
        return a

    def _wrapped_true_divide__tensor(a, b):
        a.true_divide_(b)
        return a

    def _wrapped_true_divide__scalar(a):
        a.true_divide_(5)
        return a

    def _wrapped_floor_divide__tensor(a, b):
        a.floor_divide_(b)
        return a

    def _wrapped_floor_divide__scalar(a):
        a.floor_divide_(5)
        return a

    # The following functions are unsupported by the JIT
    def _wrapped_ifloordiv_tensor(a, b):
        a //= b
        return a

    def _wrapped_ifloordiv_scalar(a):
        a //= 5
        return a

    with self.assertRaises(torch.jit.frontend.NotSupportedError):
        scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)

    with self.assertRaises(torch.jit.frontend.NotSupportedError):
        scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)

    scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)
    scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)
    scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)
    scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)
    scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)
    scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)

    # Sweeps integer and half-integer operand pairs in [-10, 10)
    for a, b in product(range(-10, 10), range(-10, 10)):
        for op in (lambda x: x * .5, lambda x: math.floor(x)):
            a = op(a)
            b = op(b)
            # Skips zero divisors
            if b == 0:
                continue
            expected_idiv = a / b
            expected_ifloordiv = a // b
            expected_itruncdiv = math.trunc(a / b)
            a_t = torch.tensor(a, device=device)
            b_t = torch.tensor(b, device=device)
            if a_t.is_floating_point():
                tmp0 = a_t.clone()
                tmp0 /= b
                tmp1 = a_t.clone()
                tmp1 /= b_t
                self.assertEqual(tmp0.item(), expected_idiv)
                self.assertEqual(tmp1.item(), expected_idiv)
                self.assertEqual(scripted_true_divide__tensor(a_t.clone(), b_t).item(), expected_idiv)
                self.assertEqual(scripted_true_divide__scalar(a_t.clone()).item(), a / 5)
            else:
                # In-place true division on an integral tensor must fail:
                # the result dtype (float) can't be written back
                tmp = a_t.clone()
                with self.assertRaises(RuntimeError):
                    tmp /= b
                with self.assertRaises(RuntimeError):
                    tmp /= b_t
                with self.assertRaises(RuntimeError):
                    scripted_true_divide__tensor(tmp, b_t)
                with self.assertRaises(RuntimeError):
                    scripted_true_divide__scalar(tmp)

            if not a_t.is_floating_point() and b_t.is_floating_point():
                # Inplace modification fails because a float tensor is required
                # if the divisor is a float tensor
                with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                    a_t.clone().floor_divide_(b_t)
                # FIX: was the undefined name `scripted_floor_divide_tensor`
                with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                    scripted_floor_divide__tensor(a_t.clone(), b_t)
                tmp = a_t.clone()
                with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                    tmp //= b_t
            else:
                # Inplace modification is OK when both or neither tensor is
                # a float tensor
                with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                    self.assertEqual(a_t.clone().floor_divide_(b_t).item(), expected_itruncdiv)
                    self.assertEqual(scripted_floor_divide__tensor(a_t.clone(), b_t).item(), expected_itruncdiv)
                tmp = a_t.clone()
                with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                    tmp //= b_t
                self.assertEqual(tmp.item(), expected_itruncdiv)

            with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                self.assertEqual(scripted_floor_divide__scalar(a_t), math.trunc(a / 5))
# Tests binary op equivalence with Python builtin ops
# Also tests that reverse operations are equivalent to forward ops
# NOTE: division ops are tested separately above
def test_binary_ops_with_scalars(self, device):
    """Checks +, -, * and / agree with Python builtins for every mix of
    Python scalars, device tensors, and CPU tensors (forward and reverse)."""
    op_pairs = ((operator.add, torch.add),
                (operator.sub, torch.sub),
                (operator.mul, torch.mul),
                (operator.truediv, torch.div))
    for python_op, torch_op in op_pairs:
        for a, b in product(range(-10, 10), range(-10, 10)):
            for transform in (lambda x: x * .5, lambda x: math.floor(x)):
                a = transform(a)
                b = transform(b)
                # Skips zero divisors
                if b == 0 or a == 0:
                    continue
                a_tensor = torch.tensor(a, device=device)
                b_tensor = torch.tensor(b, device=device)
                candidates = (a, b, a_tensor, b_tensor, a_tensor.cpu(), b_tensor.cpu())
                for first, second in product(candidates, candidates):
                    first_scalar = first.item() if isinstance(first, torch.Tensor) else first
                    second_scalar = second.item() if isinstance(second, torch.Tensor) else second
                    expected = python_op(first_scalar, second_scalar)
                    self.assertEqual(expected, python_op(first, second))
                    self.assertEqual(expected, torch_op(first, second))
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False), torch.testing.get_all_dtypes(include_complex=False)))
def test_maximum_minimum_type_promotion(self, device, dtypes):
    """The max/min family must produce the torch.result_type of its inputs."""
    lhs = torch.tensor((0, 1), device=device, dtype=dtypes[0])
    rhs = torch.tensor((1, 0), device=device, dtype=dtypes[1])
    promoted = torch.result_type(lhs, rhs)
    for fn in (torch.maximum, torch.max, torch.fmax, torch.minimum, torch.min, torch.fmin):
        self.assertEqual(fn(lhs, rhs).dtype, promoted)
@dtypes(*(torch.testing.get_all_int_dtypes() + [torch.bool]))
def test_maximum_minimum_int_and_bool(self, device, dtype):
    """Compares maximum/minimum/fmax/fmin (and their max/min aliases)
    against NumPy on random integral / bool data, including out= results."""
    op_triples = ((torch.maximum, torch.max, np.maximum),
                  (torch.minimum, torch.min, np.minimum),
                  (torch.fmax, None, np.fmax),
                  (torch.fmin, None, np.fmin))
    rng = np.random.default_rng()
    np_dtype = torch_to_numpy_dtype_dict[dtype]
    a_np = np.array(rng.integers(-100, 100, size=10), dtype=np_dtype)
    b_np = np.array(rng.integers(-100, 100, size=10), dtype=np_dtype)

    for torch_fn, alias_fn, np_fn in op_triples:
        lhs = torch.from_numpy(a_np).to(device=device, dtype=dtype)
        rhs = torch.from_numpy(b_np).to(device=device, dtype=dtype)
        result = torch_fn(lhs, rhs)
        out_buf = torch.empty_like(lhs)
        torch_fn(lhs, rhs, out=out_buf)
        reference = np_fn(a_np, b_np)
        if alias_fn is not None:
            # the short alias must agree with the canonical op
            self.assertEqual(alias_fn(lhs, rhs), result)
        self.assertEqual(result, reference)
        self.assertEqual(out_buf, reference)
@precisionOverride({torch.bfloat16: 1e-2})
@dtypes(*(torch.testing.get_all_fp_dtypes()))
def test_maximum_minimum_float(self, device, dtype):
    """Compares the max/min family against NumPy on random float data,
    including out= results and the max/min aliases."""
    op_triples = ((torch.maximum, torch.max, np.maximum),
                  (torch.minimum, torch.min, np.minimum),
                  (torch.fmax, None, np.fmax),
                  (torch.fmin, None, np.fmin))
    # NumPy has no bfloat16, so the reference is computed in float64
    np_dtype = np.float64 if dtype == torch.bfloat16 else torch_to_numpy_dtype_dict[dtype]
    a_np = np.random.randn(10).astype(np_dtype)
    b_np = np.random.randn(10).astype(np_dtype)

    for torch_fn, alias_fn, np_fn in op_triples:
        reference = np_fn(a_np, b_np)
        lhs = torch.from_numpy(a_np).to(device=device, dtype=dtype)
        rhs = torch.from_numpy(b_np).to(device=device, dtype=dtype)
        result = torch_fn(lhs, rhs)
        out_buf = torch.empty_like(lhs)
        torch_fn(lhs, rhs, out=out_buf)
        if alias_fn is not None:
            self.assertEqual(alias_fn(lhs, rhs), result, exact_dtype=False)
        self.assertEqual(result, reference, exact_dtype=False)
        self.assertEqual(out_buf, reference, exact_dtype=False)
@dtypes(*(torch.testing.get_all_fp_dtypes()))
def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
    """Checks NaN/inf propagation of the max/min family against NumPy.

    np.maximum and np.minimum compare element-wise and return the NaN
    element whenever one operand is NaN; fmax/fmin prefer the non-NaN one.
    """
    op_triples = ((torch.maximum, torch.max, np.maximum),
                  (torch.minimum, torch.min, np.minimum),
                  (torch.fmax, None, np.fmax),
                  (torch.fmin, None, np.fmin))
    a_vals = (float('inf'), -float('inf'), float('nan'), float('inf'), float('nan'), float('nan'), 1, float('nan'))
    b_vals = (-float('inf'), float('inf'), float('inf'), float('nan'), float('nan'), 0, float('nan'), -5)
    # NumPy has no bfloat16, so the reference is computed in float64
    np_dtype = np.float64 if dtype == torch.bfloat16 else torch_to_numpy_dtype_dict[dtype]
    a_np = np.array(a_vals, dtype=np_dtype)
    b_np = np.array(b_vals, dtype=np_dtype)

    for torch_fn, alias_fn, np_fn in op_triples:
        reference = np_fn(a_np, b_np)
        lhs = torch.from_numpy(a_np).to(device=device, dtype=dtype)
        rhs = torch.from_numpy(b_np).to(device=device, dtype=dtype)
        result = torch_fn(lhs, rhs)
        out_buf = torch.empty_like(lhs)
        torch_fn(lhs, rhs, out=out_buf)
        if alias_fn is not None:
            self.assertEqual(alias_fn(lhs, rhs), result)
        if dtype == torch.bfloat16:
            self.assertEqual(result, reference, exact_dtype=False)
            self.assertEqual(out_buf, reference, exact_dtype=False)
        else:
            self.assertEqual(result, reference)
            self.assertEqual(out_buf, reference)
@dtypes(*product(torch.testing.get_all_complex_dtypes(), torch.testing.get_all_dtypes()))
def test_maximum_minimum_complex(self, device, dtypes):
    """Complex operands must be rejected regardless of argument order."""
    for fn in (torch.maximum, torch.minimum, torch.max, torch.min, torch.fmax, torch.fmin):
        complex_t = torch.ones(1, device=device, dtype=dtypes[0])
        other_t = torch.ones(1, device=device, dtype=dtypes[1])
        for first, second in ((complex_t, other_t), (other_t, complex_t)):
            with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
                fn(first, second)
@onlyCUDA
def test_maximum_minimum_cross_device(self, device):
    """maximum/minimum must reject mixed CPU/CUDA tensors, but a zero-dim
    CPU tensor (scalar-like) paired with a CUDA tensor is allowed."""
    cpu_t = torch.tensor((1, 2, -1))
    dev_t = torch.tensor((3, 0, 4), device=device)
    for fn in (torch.maximum, torch.minimum):
        for first, second in ((cpu_t, dev_t), (dev_t, cpu_t)):
            with self.assertRaisesRegex(RuntimeError,
                                        "Expected all tensors to be on the same device"):
                fn(first, second)

    # test cuda tensor and cpu scalar
    a_np = np.array(1)
    b_np = np.array([3, 0, 4])
    for torch_fn, np_fn in ((torch.maximum, np.maximum), (torch.minimum, np.minimum)):
        scalar_t = torch.from_numpy(a_np)
        vector_t = torch.from_numpy(b_np).to(device=device)
        self.assertEqual(torch_fn(scalar_t, vector_t), np_fn(a_np, b_np))
        self.assertEqual(torch_fn(vector_t, scalar_t), np_fn(b_np, a_np))
# TODO: tests like this should be generic
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_mul_intertype_scalar(self, device, dtype):
    """Mixed float/int multiplication promotes to float; in-place is only
    legal when the result type can be written into the target tensor."""
    float_t = torch.tensor(1.5, dtype=dtype, device=device)
    int_t = torch.tensor(3, dtype=torch.int32, device=device)

    self.assertEqual(float_t * int_t, 4.5)
    self.assertEqual(int_t * float_t, 4.5)

    # float result cannot be stored back into the int tensor
    with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
        int_t *= float_t
    # ...but the float tensor can absorb the product
    float_t *= int_t
    self.assertEqual(float_t, 4.5)
@onlyCPU
@dtypes(*torch.testing.get_all_dtypes())
def test_sub(self, device, dtype):
    """Element-wise subtraction: bool must raise; low-precision floats get
    a looser tolerance; everything else compares exactly."""
    minuend = torch.tensor([2.34, 4.44], dtype=dtype, device=device)
    subtrahend = torch.tensor([1.23, 2.33], dtype=dtype, device=device)

    if dtype == torch.bool:
        self.assertRaises(RuntimeError, lambda: minuend - subtrahend)
        return

    expected = torch.tensor([1.11, 2.11], dtype=dtype)
    if dtype in (torch.bfloat16, torch.half):
        # bfloat16 has a lower precision so we have to have a separate check for it
        self.assertEqual(minuend - subtrahend, expected, atol=0.01, rtol=0)
    else:
        self.assertEqual(minuend - subtrahend, expected)
# TODO: what is this test testing?
@onlyCPU
@dtypes(torch.float)
def test_csub(self, device, dtype):
    """In-place sub_ must match add with alpha=-1 (its negated equivalent),
    for both tensor and scalar subtrahends."""
    # with a tensor
    base = torch.randn(100, 90, dtype=dtype, device=device)
    delta = base.clone().normal_()
    via_add = torch.add(base, delta, alpha=-1)
    via_sub = base.clone()
    via_sub.sub_(delta)
    self.assertEqual(via_add, via_sub)

    # with a scalar
    base = torch.randn(100, 100, dtype=dtype, device=device)
    scalar = 123.5
    via_add = torch.add(base, -scalar)
    via_sub = base.clone()
    via_sub.sub_(scalar)
    self.assertEqual(via_add, via_sub)
# TODO: reconcile with minimum/maximum tests
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_min_max_binary_op_nan(self, device, dtype):
    """Binary max/min propagate NaN: the result is NaN whenever either
    input element is NaN, and non-NaN otherwise."""
    a = torch.rand(1000, dtype=dtype, device=device)
    b = torch.rand(1000, dtype=dtype, device=device)

    # 0:250: a -- nan, b -- not nan
    a[:250] = float('nan')
    # 250:500: a -- not nan, b -- nan
    b[250:500] = float('nan')
    # 500:750: a and b both nan
    a[500:750] = float('nan')
    b[500:750] = float('nan')
    # 750:1000: neither nan

    maxes = torch.max(a, b)
    mins = torch.min(a, b)

    for i in range(750):
        self.assertTrue(torch.isnan(maxes[i]), "max(a, b): {}, a: {}, b: {}".format(maxes[i], a[i], b[i]))
        self.assertTrue(torch.isnan(mins[i]), "min(a, b): {}, a: {}, b: {}".format(mins[i], a[i], b[i]))
    for i in range(750, 1000):
        self.assertFalse(torch.isnan(maxes[i]), "max(a, b): {}, a: {}, b: {}".format(maxes[i], a[i], b[i]))
        self.assertFalse(torch.isnan(mins[i]), "min(a, b): {}, a: {}, b: {}".format(mins[i], a[i], b[i]))
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False),
                 torch.testing.get_all_dtypes(include_complex=False)))
def test_copysign(self, device, dtypes):
    """Compares torch.copysign against np.copysign across dtype pairs,
    broadcasting shapes, and special values (±0, ±inf, NaN)."""
    def _test_copysign_numpy(a, b):
        torch_result = torch.copysign(a, b)

        # NumPy has no bfloat16: round-trip those inputs through float32
        if a.dtype == torch.bfloat16:
            np_a = a.to(torch.float).cpu().numpy()
        else:
            np_a = a.cpu().numpy()

        if b.dtype == torch.bfloat16:
            np_b = b.to(torch.float).cpu().numpy()
        else:
            np_b = b.cpu().numpy()
        expected = torch.from_numpy(np.copysign(np_a, np_b))
        # To handle inconsistencies of type promotion between PyTorch and Numpy
        # Applied for both arguments having integral precision and bfloat16
        types = [torch.bool, torch.bfloat16] + torch.testing.get_all_int_dtypes()
        if a.dtype in types or b.dtype in types:
            promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
            torch_result = torch_result.to(promoted_type)
            expected = expected.to(promoted_type)

        # Verify Value
        self.assertEqual(torch_result, expected)
        # Verify Sign
        # Use double copysign to verify the correctness of 0.0 and -0.0, since
        # it always True for self.assertEqual(0.0 == -0.0). So, we use 1 as the
        # magnitude to verify the sign between torch and numpy results, elementwise.
        # Special case: NaN conversions between FP32 and FP16 is not bitwise
        # equivalent to pass this assertion.
        if a.dtype != torch.float16 and b.dtype != torch.float16:
            self.assertEqual(torch.copysign(torch.tensor(1.0), torch_result),
                             torch.copysign(torch.tensor(1.0), expected))

    # Compare Result with NumPy
    # Type promotion
    a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
    b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
    _test_copysign_numpy(a, b)

    # Broadcast
    a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)
    b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
    _test_copysign_numpy(a, b)

    a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
    b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)
    _test_copysign_numpy(a, b)

    # 0.0/-0.0/inf/-inf/nan
    cases = [0.0, -0.0, float('inf'), float('-inf'), float('nan')]
    # torch.bfloat16 can not hold '-nan'
    # torch.half can not hold '-nan' on CUDA
    types = [torch.float32, torch.float64]
    if device == 'cpu':
        types.append(torch.float16)
    if dtypes[0] in types:
        b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        for case in cases:
            _test_copysign_numpy(torch.tensor([case], device=device, dtype=dtypes[0]), b)
    if dtypes[1] in torch.testing.get_all_fp_dtypes():
        a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        for case in cases:
            _test_copysign_numpy(a, torch.tensor([case], device=device, dtype=dtypes[1]))
@dtypes(torch.bfloat16, torch.float)
def test_div(self, device, dtype):
    """div and true_divide (functional, method, in-place) agree with
    per-element Python division on a tensor column; bfloat16 gets a
    dedicated low-precision check."""
    variants = ((torch.div, torch.Tensor.div, torch.Tensor.div_),
                (torch.true_divide, torch.Tensor.true_divide,
                 torch.Tensor.true_divide_))
    for fn, method, inplace in variants:
        base = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)

        # in-place divide a single column, compare against scalar division
        actual = base.clone()
        inplace(actual[:, 3], 2)
        expected = base.clone()
        for row in range(base.size(0)):
            expected[row, 3] = expected[row, 3] / 2
        self.assertEqual(actual, expected)

        if dtype == torch.bfloat16:
            numer = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
            denom = torch.tensor([2., 2.], dtype=dtype, device=device)
            self.assertEqual(fn(numer, denom),
                             torch.tensor([2.1, 3.1], dtype=dtype, device=device),
                             atol=0.01, rtol=0)
            self.assertEqual(method(numer, denom), fn(numer, denom))
@dtypes(torch.bfloat16, torch.float)
def test_true_divide_out(self, device, dtype):
    """true_divide with an out= tensor produces the expected quotients."""
    numer = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
    denom = torch.tensor([2., 2.], dtype=dtype, device=device)
    out_buf = torch.empty_like(numer)
    self.assertEqual(torch.true_divide(numer, denom, out=out_buf),
                     torch.tensor([2.1, 3.1], dtype=dtype, device=device),
                     atol=0.01, rtol=0)
@onlyCUDA
@dtypes(torch.half)
def test_divmul_scalar(self, device, dtype):
    """Half-precision scalar div/mul should match a float32 reference
    computation exactly after casting back to half."""
    scale = 1e5

    value = torch.tensor(100., device=device, dtype=dtype)
    reference = value.float()
    self.assertEqual(value.div(scale), reference.div(scale).to(dtype), atol=0., rtol=0.)

    value = torch.tensor(1e-5, device=device, dtype=dtype)
    reference = value.float()
    expected = reference.mul(scale)
    self.assertEqual(value.mul(scale), expected.to(dtype), atol=0., rtol=0.)
    # reverse-operand form must match too
    self.assertEqual(scale * value, expected.to(dtype), atol=0., rtol=0.)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_tensor(self, device, dtype):
    """tensor // tensor warns (deprecated floor_divide) and currently
    *truncates* toward zero — compare against trunc of a double division."""
    x = torch.randn(10, device=device).mul(30).to(dtype)
    y = torch.arange(1, 11, dtype=dtype, device=device)  # divisors 1..10, never zero

    with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
        z = x // y
    # reference: truncated (not floored) quotient, computed in double
    z_alt = torch.trunc(x.double() / y.double()).to(dtype)

    self.assertEqual(z.dtype, x.dtype)
    self.assertEqual(z, z_alt)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_scalar(self, device, dtype):
    """tensor // scalar warns (deprecated floor_divide) and currently
    *truncates* toward zero — compare against per-element math.trunc."""
    x = torch.randn(100, device=device).mul(10).to(dtype)

    with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
        z = x // 3
    # reference: truncated quotient per element
    z_alt = torch.tensor([math.trunc(v.item() / 3.) for v in x], dtype=x.dtype, device=device)

    self.assertEqual(z.dtype, x.dtype)
    self.assertEqual(z, z_alt)
# Note: this test fails on XLA
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.long)
def test_floor_divide_out(self, device, dtype):
    """floor_divide with out= matches the // operator, for tensor and
    scalar divisors, including an out tensor of a different (float) dtype."""
    x = torch.randn(10, device=device).mul(10).to(dtype)
    y = torch.arange(1, 11, dtype=dtype, device=device)  # divisors 1..10, never zero
    o = torch.empty(10, dtype=dtype, device=device)

    # single warn-once context covers every deprecated floor_divide call below
    with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
        torch.floor_divide(x, y, out=o)
        self.assertEqual(o, x // y)

        # Tests scalar with out
        torch.floor_divide(x, 2, out=o)
        self.assertEqual(o, x // 2)

        if dtype == torch.int:
            o = torch.empty(10, dtype=torch.float, device=device)
            torch.floor_divide(x, y, out=o)
            self.assertEqual(o, torch.floor_divide(x.float(), y.float()))
@onlyCPU
@dtypes(*torch.testing.get_all_math_dtypes('cpu'))
def test_rdiv(self, device, dtype):
    """Reverse division (scalar / tensor) agrees with per-element Python
    division; float16 is skipped, complex uses a complex-valued base."""
    if dtype is torch.float16:
        return

    if dtype.is_complex:
        base = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)
    else:
        base = torch.rand(100, device=device).add(1).mul(4).to(dtype)

    actual = 30 / base
    reference = torch.tensor([30 / v.item() for v in base], device=device)
    self.assertEqual(actual, reference, exact_dtype=False)
@dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))
def test_fmod_remainder_by_zero_float(self, device, dtype):
    """Floating-point fmod/remainder by zero yields NaN everywhere, on both
    CPU and GPU, for scalar and tensor zero divisors."""
    for fn in (torch.fmod, torch.remainder):
        x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
        zeros = torch.zeros_like(x)
        self.assertTrue(torch.all(fn(x, 0.0).isnan()))
        self.assertTrue(torch.all(fn(x, zeros).isnan()))
@onlyOnCPUAndCUDA  # Check Issue https://github.com/pytorch/pytorch/issues/48130
@skipCUDAIfRocm  # Error happens on both ROCM and XLA
@dtypes(*torch.testing.get_all_int_dtypes())
def test_fmod_remainder_by_zero_integral(self, device, dtype):
    """Integral fmod/remainder by zero: CPU raises; CUDA (undefined
    behavior) returns an all-ones bit pattern, with a special split for
    int64 positive vs. negative dividends.

    CUDA bit patterns observed:
      uint8: 0xff -> 255
      int32: 0xffffffff -> -1
      int64: all ones for negative dividends, lower-half ones (0xffffffff)
             for non-negative dividends.
    """
    for fn in (torch.fmod, torch.remainder):
        x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
        zero = torch.zeros_like(x)
        if self.device_type == 'cpu':
            # RuntimeError on CPU
            with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
                fn(x, zero)
        elif dtype == torch.int64:
            self.assertEqual(fn(x, zero) == 4294967295, x >= 0)
            self.assertEqual(fn(x, zero) == -1, x < 0)
        else:
            ones_pattern = 255 if dtype == torch.uint8 else -1
            self.assertTrue(torch.all(fn(x, zero) == ones_pattern))
@dtypes(*torch.testing.get_all_dtypes(include_bfloat16=False, include_bool=False, include_complex=False))
def test_fmod_remainder(self, device, dtype):
    """Compares fmod/remainder (functional, out=, in-place, and
    scalar-dividend remainder) against NumPy references.

    FIX: when building the float divisor for integral dividends, the zero
    filter was applied to `mod` (already filtered above) instead of the
    new `mod_float` tensor, so a zero could survive in `mod_float`.
    """
    # Use numpy as reference
    def _helper(x, mod, fns_list):
        for fn, inplace_fn, ref_fn in fns_list:
            np_x = x.cpu().numpy() if torch.is_tensor(x) else x
            np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod
            exp = ref_fn(np_x, np_mod)
            exp = torch.from_numpy(exp)
            res = fn(x, mod)
            self.assertEqual(res, exp, exact_dtype=False)

            if torch.is_tensor(x):
                # out
                out = torch.empty(0, device=device, dtype=res.dtype)
                fn(x, mod, out=out)
                self.assertEqual(out, exp, exact_dtype=False)
                self.assertEqual(out.size(), torch.Size([10, 10]))
                # in-place (Type cast runtime error)
                try:
                    inplace_fn(x, mod)
                    self.assertEqual(x, exp, exact_dtype=False)
                except RuntimeError as e:
                    self.assertRegex(str(e), "result type (Half|Float|Double) "
                                             "can't be cast to the desired output "
                                             "type (Byte|Char|Short|Int|Long)")

    x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
    # mod with same dtype as x
    mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
    # Exclude 0
    mod[mod == 0] = 1

    # Mods: Integer, Float, Tensor, Non-contiguous Tensor
    mods = [3, 2.3, mod, mod.t()]
    # mod with floating-point dtype
    if dtype in torch.testing.get_all_int_dtypes():
        mod_float = make_tensor((10, 10), device=device, dtype=torch.float, low=-9, high=9)
        # FIX: filter zeros from mod_float (previously re-filtered mod)
        mod_float[mod_float == 0] = 1
        mods.append(mod_float)

    for dividend, mod in product([x, x.t()], mods):
        _helper(dividend, mod,
                ((torch.fmod, torch.Tensor.fmod_, np.fmod),
                 (torch.remainder, torch.Tensor.remainder_, np.remainder),))

    # Tests for torch.remainder(scalar, tensor)
    for dividend, mod in product([5, 3.14], mods):
        if torch.is_tensor(mod):
            _helper(dividend, mod,
                    ((torch.remainder, torch.Tensor.remainder_, np.remainder),))
@dtypes(torch.float, torch.double)
def test_remainder_fmod_large_dividend(self, device, dtype):
    """Sign and range invariants of remainder vs fmod for a huge dividend:
    remainder follows the divisor's sign, fmod follows the dividend's."""
    alarge = 1e9
    pi = 3.14159265358979
    for avalue, bvalue in product((alarge, -alarge), (pi, -pi)):
        a = torch.tensor([avalue], dtype=dtype, device=device)
        b = torch.tensor([bvalue], dtype=dtype, device=device)
        rem = torch.remainder(a, b)
        fm = torch.fmod(a, b)
        self.assertTrue((b[0] > 0) == (rem[0] > 0))  # remainder has same sign as divisor
        self.assertTrue((a[0] > 0) == (fm[0] > 0))  # fmod has same sign as dividend
        self.assertTrue(abs(rem[0]) < abs(b[0]))  # remainder is within range of divisor
        self.assertTrue(abs(fm[0]) < abs(b[0]))  # fmod is within range of divisor
        if (a[0] > 0) == (b[0] > 0):
            self.assertTrue(rem[0] == fm[0])  # remainder is same as fmod
        else:
            self.assertTrue(abs(rem[0] - fm[0]) == abs(b[0]))  # differ by one divisor
@dtypesIfCPU(torch.bfloat16, torch.float32, torch.float64)
@dtypes(torch.float32, torch.float64)
def test_hypot(self, device, dtype):
    """torch.hypot vs np.hypot on assorted shapes (including broadcast,
    transposed, and mixed-dtype inputs); bfloat16 uses a torch reference
    because NumPy cannot represent it."""
    input_pairs = [
        (torch.randn(10, device=device).to(dtype), torch.randn(10, device=device).to(dtype)),
        (torch.randn((3, 3, 3), device=device).to(dtype), torch.randn((3, 3, 3), device=device).to(dtype)),
        (torch.randn((10, 1), device=device).to(dtype), torch.randn((10, 1), device=device).to(dtype).transpose(0, 1)),
        (torch.randint(100, (10, ), device=device, dtype=torch.long), torch.randn(10, device=device).to(dtype))
    ]
    for lhs, rhs in input_pairs:
        actual = torch.hypot(lhs, rhs)
        if dtype == torch.bfloat16:
            expected = torch.sqrt(lhs * lhs + rhs * rhs)
        else:
            expected = np.hypot(lhs.cpu().numpy(), rhs.cpu().numpy())
        self.assertEqual(actual, expected, exact_dtype=False)
@onlyOnCPUAndCUDA
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_gcd(self, device, dtype):
    """torch.gcd vs np.gcd, including gcd(0, 0) / gcd(0, a) edge cases and
    a uint8 case exercising values >= 128 (potential sign issues)."""
    # Tests gcd(0, 0), gcd(0, a) cases
    first = torch.tensor([0, 10, 0], dtype=dtype, device=device)
    second = torch.tensor([0, 0, 10], dtype=dtype, device=device)
    self.assertEqual(torch.gcd(first, second), np.gcd([0, 10, 0], [0, 0, 10]), exact_dtype=False)

    if dtype == torch.uint8:
        # Test unsigned integers with potential sign issues (i.e., uint8 with value >= 128)
        a = torch.tensor([190, 210], device=device, dtype=dtype)
        b = torch.tensor([190, 220], device=device, dtype=dtype)
        self.assertEqual(torch.gcd(a, b), torch.tensor([190, 10], device=device, dtype=dtype))
    else:
        # Compares with NumPy on random data
        a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
        b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
        self.assertEqual(torch.gcd(a, b), np.gcd(a.cpu().numpy(), b.cpu().numpy()))
@onlyOnCPUAndCUDA
@dtypes(torch.int16, torch.int32, torch.int64)
def test_lcm(self, device, dtype):
    """torch.lcm vs np.lcm, including lcm(0, 0) / lcm(0, a) edge cases."""
    # Tests lcm(0, 0), lcm(0, a) cases
    first = torch.tensor([0, 10, 0], dtype=dtype, device=device)
    second = torch.tensor([0, 0, 10], dtype=dtype, device=device)
    self.assertEqual(torch.lcm(first, second), np.lcm([0, 10, 0], [0, 0, 10]), exact_dtype=False)

    # Compares with NumPy on random data
    a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
    b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
    self.assertEqual(torch.lcm(a, b), np.lcm(a.cpu().numpy(), b.cpu().numpy()), exact_dtype=False)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_nextafter(self, device, dtype):
    """torch.nextafter vs np.nextafter: ±inf targets, NaN propagation,
    and random data, all compared bit-exactly (atol=0, rtol=0)."""
    def check_against_numpy(src, dst):
        actual = torch.nextafter(src, dst)
        expected = np.nextafter(src.cpu().numpy(), dst.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)

    # Test special cases
    finite = torch.tensor([0, 0, 10], device=device, dtype=dtype)
    infinite = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)
    check_against_numpy(finite, infinite)
    check_against_numpy(infinite, finite)

    # NaN in either operand propagates
    t1 = torch.tensor([0, nan], device=device, dtype=dtype)
    t2 = torch.tensor([nan, 0], device=device, dtype=dtype)
    self.assertTrue(torch.nextafter(t1, t2).isnan().all())

    # random data
    a = torch.randn(100, device=device, dtype=dtype)
    b = torch.randn(100, device=device, dtype=dtype)
    check_against_numpy(a, b)
def _test_cop(self, torchfn, mathfn, dtype, device):
    """Helper: checks a binary torch op against a scalar Python reference
    on a contiguous slice and on a stride-manipulated non-contiguous view.

    torchfn: binary torch op (e.g. torch.div); mathfn: scalar equivalent.
    """
    def reference_implementation(res2):
        # sm2 is viewed as 1-d here; idx1d linearizes (i, j).
        # NOTE(review): uses sm1.size(0) as the row stride — correct only
        # because sm1 is square (10x10) in both cases below; confirm if
        # shapes ever change.
        for i, j in iter_indices(sm1):
            idx1d = i * sm1.size(0) + j
            res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])
        return res2

    # contiguous
    m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
    m2 = torch.randn(10, 10 * 10, dtype=dtype, device=device)
    sm1 = m1[4]
    sm2 = m2[4]

    res1 = torchfn(sm1, sm2.view(10, 10))
    res2 = reference_implementation(res1.clone())
    self.assertEqual(res1, res2)

    # non-contiguous
    m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
    m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)
    sm1 = m1[:, 4]
    sm2 = m2[:, 4]
    # view as sm1.size() — reuses sm2's storage with strides that make a
    # 10x10 window out of column 4
    sm2.set_(sm2.storage(), sm2.storage_offset(), sm1.size(), (sm2.stride()[0] * 10, sm2.stride()[0]))
    res1 = torchfn(sm1, sm2)
    # reference_implementation assumes 1-d sm2
    sm2.set_(sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride())
    res2 = reference_implementation(res1.clone())
    self.assertEqual(res1, res2)
@onlyCPU
@dtypes(torch.float)
def test_cdiv(self, device, dtype):
    """torch.div vs scalar true division (contiguous and strided inputs)."""
    self._test_cop(torch.div, operator.truediv, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cremainder(self, device, dtype):
    """torch.remainder vs scalar % (contiguous and strided inputs)."""
    self._test_cop(torch.remainder, operator.mod, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cmul(self, device, dtype):
    """torch.mul vs scalar * (contiguous and strided inputs)."""
    self._test_cop(torch.mul, operator.mul, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cpow(self, device, dtype):
    """torch.pow vs math.pow; negative bases map to NaN in the reference."""
    self._test_cop(torch.pow,
                   lambda base, exp: math.pow(base, exp) if base >= 0 else nan,
                   dtype, device)
@onlyCPU
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_floor_divide_zero(self, device, dtype):
    """Integral // with a zero divisor raises on CPU; the deprecated
    floor_divide warning still fires before the error."""
    a = torch.tensor([0, 1], dtype=dtype, device=device)
    b = torch.tensor([0, 1], dtype=dtype, device=device)
    # the warn-once context must wrap the raising call, inside the raises check
    with self.assertRaisesRegex(RuntimeError, 'ZeroDivisionError'):
        with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
            a // b
@unittest.skipIf(TEST_WITH_ASAN, "Integer overflows are not allowed under ASAN")
@dtypes(*torch.testing.get_all_dtypes())
def test_muldiv_scalar(self, device, dtype):
    """Scalar mul/div (forward and reverse) must equal the same op against
    a tensor filled with that scalar."""
    values = make_tensor((10, 3), device, dtype, low=None, high=None)
    scalar = make_tensor((1,), 'cpu', dtype, low=None, high=None).item()
    filled = torch.full_like(values, scalar)

    self.assertEqual(values * scalar, values * filled)
    self.assertEqual(scalar * values, filled * values)
    self.assertEqual(values / scalar, values / filled)
    self.assertEqual(scalar / values, filled / values)
@dtypes(*tuple(itertools.combinations_with_replacement(torch.testing.get_all_dtypes(), 2)))
def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
    # issue #42660
    # testing all combinations of broadcasting and type promotion
    # with a range of dtypes and input shapes, and with extremal values
    def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):
        # working around the fact that numpy doesn't support bfloat16
        # by letting numpy treat them as float32's
        x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)
        y_np = y.cpu().numpy() if y.dtype != torch.bfloat16 else y.to(torch.float32).cpu().numpy()
        # NOTE: the same `out` tensor is passed to both the torch and numpy
        # calls; when out is None the plain (non-out) form is exercised
        self.compare_with_numpy(lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),
                                lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),
                                x_np)

    complex_op_denylist = [torch.lt, torch.le, torch.gt, torch.ge]  # complex not supported
    input_sizes = [
        (1,),
        (10,),
        (10, 1),
        (1, 10),
        (4, 10),
        (64, 10),
        (12, 3)]
    op_pairs = [(torch.lt, np.less),
                (torch.le, np.less_equal),
                (torch.gt, np.greater),
                (torch.ge, np.greater_equal),
                (torch.eq, np.equal),
                (torch.ne, np.not_equal),
                (torch.logical_and, np.logical_and),
                (torch.logical_or, np.logical_or),
                (torch.logical_xor, np.logical_xor)]

    for size1 in input_sizes:
        size2 = (2,) + size1  # perform broadcasting
        for with_extremal in [False, True]:
            a = _generate_input(size1, dtypes[0], device, with_extremal)
            b = _generate_input(size2, dtypes[1], device, with_extremal)
            for torch_op, numpy_op in op_pairs:
                # ordering comparisons are undefined for complex inputs
                if (dtypes[0].is_complex or dtypes[1].is_complex) and torch_op in complex_op_denylist:
                    continue
                # functional version of op
                compare_with_numpy_bin_op(torch_op, numpy_op, a, b)

                # functional comparison ops always return bool tensors
                self.assertEqual(torch_op(a, b).dtype, torch.bool)

                # out version of op
                out = torch.zeros(1, dtype=torch.complex128)  # all casts to complex128 are safe
                compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)
@onlyOnCPUAndCUDA
@dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
def test_signed_shift(self, device, dtype):
    "Ensure that signed integer bit shifting works as expected."
    vals = torch.tensor([-10, 10], device=device, dtype=dtype)
    # Left shift by 2 multiplies by 4 while preserving the sign.
    shifted_left = torch.tensor([-40, 40], device=device, dtype=dtype)
    self.assertEqual(vals << 2, shifted_left)
    self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), vals)
    # Arithmetic right shift by 1 floor-divides by 2, preserving the sign.
    shifted_right = torch.tensor([-5, 5], device=device, dtype=dtype)
    self.assertEqual(vals >> 1, shifted_right)
    self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), vals)
def test_bitwise_and(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([0, 0, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([0, 2, 2], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_and(a, b), expected_res)
self.assertEqual(torch.bitwise_and(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_and(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_and(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_and_(b)
self.assertEqual(a1, expected_res)
a.bitwise_and_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([False, True, False], device=device),
torch.bitwise_and(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_or(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -2, 3], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_or(a, b), expected_res)
self.assertEqual(torch.bitwise_or(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_or(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_or(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_or_(b)
self.assertEqual(a1, expected_res)
a.bitwise_or_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, True, False], device=device),
torch.bitwise_or(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_xor(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 0], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -4, 1], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_xor(a, b), expected_res)
self.assertEqual(torch.bitwise_xor(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_xor(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_xor(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_xor_(b)
self.assertEqual(a1, expected_res)
a.bitwise_xor_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, False, False], device=device),
torch.bitwise_xor(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_bitwise_shift(self, device, dtype):
    """Integer shifts — both the named functions and the `<<`/`>>` operators —
    match NumPy for tensor and scalar shift amounts."""
    for shift_fn, np_shift_fn in (
            (torch.bitwise_left_shift, np.left_shift),
            (operator.lshift, operator.lshift),
            (torch.bitwise_right_shift, np.right_shift),
            (operator.rshift, operator.rshift)):
        lhs = torch.tensor([19, -20, -21, 22], dtype=dtype, device=device)
        rhs = torch.tensor([2, 1, 3, 1], dtype=dtype, device=device)
        lhs_np = lhs.cpu().numpy()
        rhs_np = rhs.cpu().numpy()
        # Tensor x Tensor
        self.assertEqual(shift_fn(lhs, rhs), torch.tensor(np_shift_fn(lhs_np, rhs_np), device=device))
        # Tensor x int scalar
        self.assertEqual(shift_fn(lhs, 2), torch.tensor(np_shift_fn(lhs_np, 2), device=device))
def test_bitwise_shift_float(self, device):
    """Shifts involving a float operand scale by powers of two:
    x << y == x * 2**y and x >> y == x / 2**y (int results keep the int dtype).
    """
    ops = [
        (torch.bitwise_left_shift, lambda x, y: x * 2. ** y),
        (operator.lshift, lambda x, y: x * 2. ** y),
        (torch.bitwise_right_shift, lambda x, y: x / 2. ** y),
        (operator.rshift, lambda x, y: x / 2. ** y),
    ]

    for torch_op, expected_op in ops:
        # int tensor x float scalar: the reference uses an integer shift of 1
        # and floors — NOTE(review): this implies the float shift amount 1.8
        # is truncated before shifting; confirm against the op's docs.
        a = torch.tensor([19, -20, -21, 22], dtype=torch.int64, device=device)
        self.assertEqual(torch_op(a, 1.8), torch.floor(expected_op(a, 1)).to(a.dtype))
        # float tensor x int scalar
        a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
        self.assertEqual(torch_op(a, 2), expected_op(a, 2))
        # float tensor x float scalar
        a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
        self.assertEqual(torch_op(a, 2.2), expected_op(a, 2.2))
@onlyOnCPUAndCUDA
@dtypes(*list(product(torch.testing.get_all_dtypes(include_complex=False),
                      torch.testing.get_all_dtypes(include_complex=False))))
def test_heaviside(self, device, dtypes):
    """torch.heaviside matches np.heaviside when input/values dtypes agree;
    mismatched dtypes raise from every variant (functional, method, out=,
    in-place)."""
    input_dtype = dtypes[0]
    values_dtype = dtypes[1]

    rng = np.random.default_rng()
    # bfloat16 has no NumPy counterpart, so the NumPy reference is built in
    # float64 for that dtype (torch_to_numpy_dtype_dict is a module helper).
    input = np.array(rng.integers(-10, 10, size=10),
                     dtype=torch_to_numpy_dtype_dict[input_dtype if (input_dtype != torch.bfloat16) else torch.float64])
    input[0] = input[3] = input[7] = 0  # force the x == 0 branch, where the result is `values`
    values = np.array(rng.integers(-10, 10, size=10),
                      dtype=torch_to_numpy_dtype_dict[values_dtype if (values_dtype != torch.bfloat16) else torch.float64])
    np_result = torch.from_numpy(np.heaviside(input, values)).to(device=device, dtype=input_dtype)

    input = torch.from_numpy(input).to(device=device, dtype=input_dtype)
    values = torch.from_numpy(values).to(device=device, dtype=values_dtype)
    out = torch.empty_like(input)

    if input_dtype == values_dtype:
        # all four call variants must agree with the NumPy reference
        torch_result = torch.heaviside(input, values)
        self.assertEqual(np_result, torch_result)

        torch_result = input.heaviside(values)
        self.assertEqual(np_result, torch_result)

        torch.heaviside(input, values, out=out)
        self.assertEqual(np_result, out)

        input.heaviside_(values)
        self.assertEqual(np_result, input)
    else:
        # mixed input/values dtypes are rejected by every variant
        with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
            torch.heaviside(input, values)
        with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
            input.heaviside(values)
        with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
            torch.heaviside(input, values, out=out)
        with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
            input.heaviside_(values)
@onlyCUDA
def test_heaviside_cross_device(self, device):
    """A 0-dim CPU tensor may mix with a CUDA tensor in either argument slot,
    but a 1-dim CPU tensor mixed with a CUDA tensor must raise."""
    dev_vals = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
    cpu_scalar = torch.tensor(0)  # 0-dim CPU tensor

    self.assertEqual(torch.heaviside(dev_vals, cpu_scalar),
                     torch.tensor([0, 1, 0, 1, 0, 1], device=device))
    self.assertEqual(torch.heaviside(cpu_scalar, dev_vals),
                     torch.tensor([-9, 5, 0, 6, -2, 2], device=device))

    # non-0-dim CPU tensor + CUDA tensor: rejected in both argument orders
    cpu_vals = torch.tensor([-9, 5, 0, 6, -2, 2])
    dev_scalar = torch.tensor(0, device=device)
    with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
        torch.heaviside(cpu_vals, dev_scalar)
    with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
        torch.heaviside(dev_scalar, cpu_vals)
@dtypes(*list(product(torch.testing.get_all_complex_dtypes(),
                      torch.testing.get_all_complex_dtypes())))
def test_heaviside_complex(self, device, dtypes):
    """heaviside rejects complex tensors in any position (input, values,
    in-place receiver, or out=)."""
    input_dtype, values_dtype = dtypes
    data = (complex(0, -6), complex(-1, 3), complex(1, 1))
    cplx_input = torch.tensor(data, device=device, dtype=input_dtype)
    cplx_values = torch.tensor(data, device=device, dtype=values_dtype)
    out = torch.empty_like(cplx_input)
    real = cplx_input.real

    with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
        torch.heaviside(cplx_input, real)          # complex input
    with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
        real.heaviside(cplx_values)                # complex values
    with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
        cplx_input.heaviside_(cplx_values)         # complex in-place receiver
    with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
        torch.heaviside(real, real, out=out)       # complex out=
def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):
expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)
a = torch.tensor(a_, dtype=dtypes[0], device=device)
b = torch.tensor(b_, dtype=dtypes[1], device=device)
# new tensor
self.assertEqual(expected_res.bool(), getattr(a, op)(b))
# out
c = torch.empty(0, dtype=torch.bool, device=device)
getattr(torch, op)(a, b, out=c)
self.assertEqual(expected_res.bool(), c)
# in-place
# TODO: remove when different dtypes as operands are supported
if dtypes[0] != dtypes[1]:
with self.assertRaises(RuntimeError):
getattr(a, op + '_')(b)
return
getattr(a, op + '_')(b)
self.assertEqual(expected_res, a)
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_xor(self, device, dtypes):
    # true exactly where one operand is nonzero and the other is zero
    self._test_logical(device, dtypes, 'logical_xor',
                       [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1])
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_and(self, device, dtypes):
    # true only where both operands are nonzero
    self._test_logical(device, dtypes, 'logical_and',
                       [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0])
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_or(self, device, dtypes):
    # true where at least one operand is nonzero
    self._test_logical(device, dtypes, 'logical_or',
                       [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])
def test_remainder_overflow(self, device):
# Check Integer Overflows
x = torch.tensor(23500, dtype=torch.int64, device=device)
q = 392486996410368
self.assertEqual(x % q, x)
self.assertEqual(-x % q, q - x)
self.assertEqual(x % -q, x - q)
self.assertEqual(-x % -q, -x)
def test_rpow(self, device):
m = torch.randn(10, 10, device=device)
self.assertEqual(torch.pow(2, m), 2**m)
# test with scalar
m = torch.randn(1, device=device).squeeze()
assert m.dim() == 0, "m is intentionally a scalar"
self.assertEqual(torch.pow(2, m), 2**m)
@onlyCPU
def test_ldexp(self, device):
    """torch.ldexp (mantissa * 2**exponent) matches np.ldexp, including the
    method, in-place and extremal-mantissa (inf/nan) cases."""
    # random values
    mantissas = torch.randn(64, device=device)
    exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)

    # functional, method and in-place variants against the NumPy reference
    reference = np.ldexp(mantissas.numpy(), exponents.numpy())
    self.assertEqual(reference, torch.ldexp(mantissas, exponents))
    self.assertEqual(reference, mantissas.ldexp(exponents))
    mantissas.ldexp_(exponents)
    self.assertEqual(reference, mantissas)

    # inf / -inf / nan mantissas propagate like NumPy's
    mantissas = torch.tensor([float('inf'), float('-inf'), float('inf'), float('nan')], device=device)
    exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
    reference = np.ldexp(mantissas.numpy(), exponents.numpy())
    self.assertEqual(reference, torch.ldexp(mantissas, exponents))
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_lerp(self, device, dtype):
    """lerp(start, end, w) == start + w * (end - start), across broadcastable
    shapes and tensor / python-scalar / complex-scalar weights."""
    shapes = [(), (5,), (5, 5)]
    for start_shape, end_shape, weight_shape in product(shapes, shapes, shapes):
        start = torch.randn(start_shape, device=device, dtype=dtype)
        end = torch.randn(end_shape, device=device, dtype=dtype)

        # tensor weight plus a python float; complex scalars for complex dtypes
        weights = [torch.randn(weight_shape, device=device, dtype=dtype), random.random()]
        if dtype.is_complex:
            weights += [complex(0, 1), complex(0.4, 1.2)]

        for weight in weights:
            via_function = torch.lerp(start, end, weight)
            via_method = start.lerp(end, weight)
            self.assertEqual(via_function, via_method)

            # out= (a 0-dim out is resized to the broadcast shape)
            out = torch.tensor(1., dtype=dtype, device=device)
            torch.lerp(start, end, weight, out=out)
            self.assertEqual(via_function, out)

            self.assertEqual(start + weight * (end - start), via_function)
def _test_logaddexp(self, device, dtype, base2):
if base2:
ref_func = np.logaddexp2
our_func = torch.logaddexp2
else:
ref_func = np.logaddexp
our_func = torch.logaddexp
def _test_helper(a, b):
ref = ref_func(a.cpu().numpy(), b.cpu().numpy())
v = our_func(a, b)
self.assertEqual(ref, v)
# simple test
a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
_test_helper(a, b)
_test_helper(a[:3], b[:3])
# large value test for numerical stability
a *= 10000
b *= 10000
_test_helper(a, b)
_test_helper(a[:3], b[:3])
a = torch.tensor([float('inf'), float('-inf'), float('inf'), float("nan")], dtype=dtype, device=device)
b = torch.tensor([float('inf'), float('-inf'), float('-inf'), float("nan")], dtype=dtype, device=device)
_test_helper(a, b)
@dtypes(torch.float32, torch.float64)
def test_logaddexp(self, device, dtype):
    # natural-base variant: log(exp(a) + exp(b))
    self._test_logaddexp(device, dtype, base2=False)
@dtypes(torch.float32, torch.float64)
def test_logaddexp2(self, device, dtype):
    # base-2 variant: log2(2**a + 2**b)
    self._test_logaddexp(device, dtype, base2=True)
def test_add(self, device):
    """torch.add / `+`: tensor+tensor and tensor+scalar on contiguous and
    non-contiguous views, broadcasting with empty tensors, bool semantics,
    bfloat16, complex alpha handling, and the alpha validation errors."""
    dtypes = [torch.float, torch.double] + torch.testing.get_all_complex_dtypes()
    for dtype in dtypes:
        # [res] torch.add([res,] tensor1, tensor2)
        m1 = torch.randn(100, 100, dtype=dtype, device=device)
        v1 = torch.randn(100, dtype=dtype, device=device)

        # contiguous: row of m1 plus v1, verified elementwise
        res1 = torch.add(m1[4], v1)
        res2 = res1.clone().zero_()
        for i in range(m1.size(1)):
            res2[i] = m1[4, i] + v1[i]
        self.assertEqual(res1, res2)

        m1 = torch.randn(100, 100, device=device)
        v1 = torch.randn(100, device=device)

        # non-contiguous: column view of m1 plus v1
        res1 = torch.add(m1[:, 4], v1)
        res2 = res1.clone().zero_()
        for i in range(m1.size(0)):
            res2[i] = m1[i, 4] + v1[i]
        self.assertEqual(res1, res2)

        # [res] torch.add([res,] tensor, value)
        m1 = torch.randn(10, 10, device=device)

        # contiguous: in-place scalar add through a row view
        res1 = m1.clone()
        res1[3].add_(2)
        res2 = m1.clone()
        for i in range(m1.size(1)):
            res2[3, i] = res2[3, i] + 2
        self.assertEqual(res1, res2)

        # non-contiguous: in-place scalar add through a column view
        m1 = torch.randn(10, 10, device=device)
        res1 = m1.clone()
        res1[:, 3].add_(2)
        res2 = m1.clone()
        for i in range(m1.size(0)):
            res2[i, 3] = res2[i, 3] + 2
        self.assertEqual(res1, res2)

        # inter-type: a python int behaves like a 0-dim tensor on either side
        m1 = torch.randn(10, 10, dtype=dtype, device=device)
        self.assertEqual(m1 + 3, m1 + torch.tensor(3))
        self.assertEqual(3 + m1, torch.tensor(3) + m1)

        # contiguous + non-contiguous operands yield a contiguous result
        m1 = torch.randn(10, 10, dtype=dtype, device=device)
        m2 = torch.randn(10, 10, dtype=dtype, device=device).t()
        res = m1 + m2
        self.assertTrue(res.is_contiguous())
        self.assertEqual(res, m1 + m2.contiguous())

        # 1d + empty broadcasts to empty
        m1 = torch.tensor([1.0], dtype=dtype, device=device)
        m2 = torch.tensor([], dtype=dtype, device=device)
        self.assertEqual(m1 + m2, [])

    # inter-type uint8: scalar add keeps the uint8 dtype
    one = torch.tensor(1, dtype=torch.uint8, device=device)
    self.assertEqual(torch.add(one, 1), 2)
    self.assertEqual(torch.add(one, 1).dtype, torch.uint8)

    # bool: `+` behaves like logical OR
    m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
    m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
    expected = torch.tensor([True, True, False, True, False, True], dtype=torch.bool, device=device)
    self.assertEqual(m1 + m2, expected)

    # fused multiply add: alpha=0 leaves the bool input unchanged
    a = torch.zeros(2, 3, dtype=torch.bool, device=device)
    res = torch.add(a, a, alpha=0)
    expected = torch.zeros(2, 3, device=device).bool()
    self.assertEqual(res, expected)

    # bfloat16
    m1 = torch.tensor([1., 2.], dtype=torch.bfloat16)
    m2 = torch.tensor([3., 4.], dtype=torch.bfloat16)
    self.assertEqual(m1 + m2, torch.tensor([4., 6.], dtype=torch.bfloat16))

    # different alpha types
    m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)
    m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)
    # add complex numbers with float alpha
    res = torch.add(m1, m2, alpha=0.1)
    expected = torch.tensor([2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device)
    self.assertEqual(res, expected)

    # add complex numbers with complex alpha
    res = torch.add(m1, m2, alpha=complex(0.1, 0.2))
    expected = torch.tensor([1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device)
    self.assertEqual(res, expected)

    # add complex numbers with integer alpha
    res = torch.add(m1, m2, alpha=2)
    expected = torch.tensor([10. + 13.j, 8. + 11.j], dtype=torch.complex64, device=device)
    self.assertEqual(res, expected)

    # mismatched alpha: bool or float alpha with integral tensors must raise
    m1 = torch.tensor([1], dtype=torch.int8, device=device)
    m2 = torch.tensor([2], dtype=torch.int8, device=device)
    self.assertRaisesRegex(RuntimeError,
                           r"Boolean alpha only supported for Boolean results\.",
                           lambda: torch.add(m1, m2, alpha=True))
    self.assertRaisesRegex(RuntimeError,
                           r"For integral input tensors, argument alpha must not be a floating point number\.",
                           lambda: torch.add(m1, m2, alpha=1.0))

    # mismatched alpha, float / double tensor and complex alpha
    msg = r"For non-complex input tensors, argument alpha must not be a complex number\."
    m1 = torch.tensor([3., 4.], device=device)
    m2 = torch.tensor([4., 3.], device=device)
    self.assertRaisesRegex(RuntimeError, msg,
                           lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))

    m1 = torch.tensor([3., 4.], dtype=torch.double, device=device)
    m2 = torch.tensor([4., 3.], dtype=torch.double, device=device)
    self.assertRaisesRegex(RuntimeError, msg,
                           lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))

    # complex: out= may not downcast a complex result to a real dtype
    m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)
    m2 = torch.tensor(4., dtype=torch.float64)
    self.assertRaisesRegex(RuntimeError, r"result type ComplexFloat can't be cast to the desired output type Double",
                           lambda: torch.add(m1, m1, out=m2))
@onlyCUDA
def test_addsub_half_tensor(self, device):
    """fp16 add/sub with alpha on large-magnitude values must not overflow
    to inf or produce nan (intermediate math must not stay in fp16)."""
    x = torch.tensor([60000.0], dtype=torch.half, device=device)
    cases = (
        (torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),
        (torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),
        (torch.add, -70000.0, 1),
        (torch.sub, 70000.0, 1),
    )
    for op, other, alpha in cases:
        result = op(x, other, alpha=alpha)
        self.assertTrue(not (result.isnan() or result.isinf()))
def test_sub_typing(self, device):
    """Subtraction with bool tensors is rejected with a message pointing at
    the logical operators; sub's alpha argument is validated like add's."""
    m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
    m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
    # bool tensor - bool tensor
    self.assertRaisesRegex(RuntimeError,
                           r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
                           r"Use the `\^` or `logical_xor\(\)` operator instead.",
                           lambda: m1 - m2)
    # scalar - bool tensor
    self.assertRaisesRegex(RuntimeError,
                           r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                           r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
                           lambda: 1 - m1)
    # bool tensor - scalar
    self.assertRaisesRegex(RuntimeError,
                           r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                           r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
                           lambda: m2 - 1)
    # mismatched alpha: bool/float alpha with integral tensors must raise
    m1 = torch.tensor([1], dtype=torch.int8, device=device)
    m2 = torch.tensor([2], dtype=torch.int8, device=device)
    self.assertRaisesRegex(RuntimeError,
                           r"Boolean alpha only supported for Boolean results\.",
                           lambda: torch.sub(m1, m2, alpha=True))
    self.assertRaisesRegex(RuntimeError,
                           r"For integral input tensors, argument alpha must not be a floating point number\.",
                           lambda: torch.sub(m1, m2, alpha=1.0))
def test_mul(self, device):
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].mul_(2)
res2 = m1.clone()
for i in range(res1.size(0)):
res2[i, 3] = res2[i, 3] * 2
self.assertEqual(res1, res2)
a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)
a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
self.assertEqual(a1 * a2, torch.tensor([True, False, False, False], dtype=torch.bool, device=device))
if device == 'cpu':
a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)
a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)
self.assertEqual(a1 * a2, torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device), atol=0.01, rtol=0)
self.assertEqual(a1.mul(a2), a1 * a2)
def test_bool_tensor_comparison_ops(self, device):
a = torch.tensor([True, False, True, False, True, False], dtype=torch.bool, device=device)
b = torch.tensor([True, False, True, True, True, True], dtype=torch.bool, device=device)
self.assertEqual(a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device))
self.assertEqual(a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device))
self.assertEqual(a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(True, dtype=torch.bool, device=device),
torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(0, dtype=torch.bool, device=device),
torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertFalse(a.equal(b))
@dtypes(*torch.testing.get_all_dtypes(include_complex=False))
def test_logical(self, device, dtype):
    """lt/le/ge/gt/eq/ne methods against python scalars and 1-element tensors
    for numeric dtypes, and against a python bool for the bool dtype."""
    if dtype == torch.bool:
        x = torch.tensor([True, False, True, False], device=device)
        self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))
        self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))
        self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))
        self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))
        self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))
        self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))
    else:
        x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
        b = torch.tensor([2], device=device, dtype=dtype)
        # python scalar and 1-element tensor right-hand sides behave identically
        for rhs in (2, b):
            self.assertEqual(x.lt(rhs), torch.tensor([True, False, False, False]))
            self.assertEqual(x.le(rhs), torch.tensor([True, True, False, False]))
            self.assertEqual(x.ge(rhs), torch.tensor([False, True, True, True]))
            self.assertEqual(x.gt(rhs), torch.tensor([False, False, True, True]))
            self.assertEqual(x.eq(rhs), torch.tensor([False, True, False, False]))
            self.assertEqual(x.ne(rhs), torch.tensor([True, False, True, True]))
def test_atan2(self, device):
def _test_atan2_with_size(size, device):
a = torch.rand(size=size, device=device, dtype=torch.double)
b = torch.rand(size=size, device=device, dtype=torch.double)
actual = a.atan2(b)
x = a.view(-1)
y = b.view(-1)
expected = torch.tensor([math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],
device=device, dtype=torch.double)
self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)
_test_atan2_with_size((2, 2), device)
_test_atan2_with_size((3, 3), device)
_test_atan2_with_size((5, 5), device)
def test_atan2_edgecases(self, device):
def _test_atan2(x, y, expected, device, dtype):
expected_tensor = torch.tensor([expected], dtype=dtype, device=device)
x_tensor = torch.tensor([x], dtype=dtype, device=device)
y_tensor = torch.tensor([y], dtype=dtype, device=device)
actual = torch.atan2(y_tensor, x_tensor)
self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)
for dtype in [torch.float, torch.double]:
_test_atan2(0, 0, 0, device, dtype)
_test_atan2(0, 1, math.pi / 2, device, dtype)
_test_atan2(0, -1, math.pi / -2, device, dtype)
_test_atan2(-1, 0, math.pi, device, dtype)
_test_atan2(1, 0, 0, device, dtype)
_test_atan2(-1, -1, math.pi * -3 / 4 , device, dtype)
_test_atan2(1, 1, math.pi / 4 , device, dtype)
_test_atan2(1, -1, math.pi / -4 , device, dtype)
_test_atan2(-1, 1, math.pi * 3 / 4 , device, dtype)
def test_trapz(self, device):
    """Trapezoidal-rule integration (torch.trapz) against the np.trapz
    reference, for uniform spacing (dx=) and explicit sample points (x=),
    plus the dimension/length error cases."""
    def test_dx(sizes, dim, dx, device):
        # uniform sample spacing along `dim`
        t = torch.randn(sizes, device=device)
        actual = torch.trapz(t, dx=dx, dim=dim)
        expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual, exact_dtype=False)

    def test_x(sizes, dim, x, device):
        # explicit (possibly non-uniform) sample coordinates
        t = torch.randn(sizes, device=device)
        actual = torch.trapz(t, x=torch.tensor(x, device=device), dim=dim)
        expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual.cpu(), exact_dtype=False)

    test_dx((2, 3, 4), 1, 1, device)
    test_dx((10, 2), 0, 0.1, device)
    test_dx((1, 10), 0, 2.3, device)
    test_dx((0, 2), 0, 1.0, device)  # zero-length integration dims are fine
    test_dx((0, 2), 1, 1.0, device)
    test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
    test_x((10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device)
    test_x((1, 10), 0, [1.0], device)
    test_x((0, 2), 0, [], device)
    test_x((0, 2), 1, [1.0, 2.0], device)

    # Error cases: one raising call per assertRaises block. Grouping two calls
    # in a single `with` block would leave the second call unexecuted (the
    # first raise exits the block), silently skipping that check.
    with self.assertRaisesRegex(
            IndexError,
            'Dimension out of range'):
        test_x((2, 3), 2, [], device)
    with self.assertRaisesRegex(
            IndexError,
            'Dimension out of range'):
        test_dx((2, 3), 2, 1.0, device)
    with self.assertRaisesRegex(
            RuntimeError,
            'There must be one `x` value for each sample point'):
        test_x((2, 3), 1, [1.0, 2.0], device)
    with self.assertRaisesRegex(
            RuntimeError,
            'There must be one `x` value for each sample point'):
        test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
@dtypes(torch.double)
def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
    """Both scalar overloads of pow (tensor**42 and 42**tensor) handle
    overlapping input/output memory, as does in-place pow_."""
    sz = 3
    buf = torch.randn(2 * sz, dtype=dtype, device=device)
    # in-place op where input and output alias completely
    self.check_internal_mem_overlap(
        lambda t: t.pow_(42), 1, dtype, device)
    # out= aliasing the input, for each scalar overload
    self.unary_check_input_output_mem_overlap(
        buf, sz, lambda input, out: torch.pow(input, 42, out=out))
    self.unary_check_input_output_mem_overlap(
        buf, sz, lambda input, out: torch.pow(42, input, out=out))
@dtypes(*list(product(torch.testing.get_all_dtypes(include_bool=False),
                      torch.testing.get_all_dtypes(include_bool=False))))
def test_float_power(self, device, dtypes):
    """float_power always promotes to double (or complex double); compare the
    functional, method, in-place and out= variants against np.float_power for
    tensor/tensor, tensor/scalar and scalar/tensor operands."""
    def to_np(value):
        # np.float_power has no bfloat16, so widen to float32 first
        if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
            return value.to(torch.float).cpu().numpy()
        return value.cpu().numpy() if isinstance(value, torch.Tensor) else value

    base_dtype = dtypes[0]
    exp_dtype = dtypes[1]
    # dtype float_power promotes every input combination to
    out_dtype = torch.complex128 if base_dtype.is_complex or exp_dtype.is_complex else torch.float64

    base = make_tensor((30,), device, base_dtype, low=1, high=100)
    # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0
    # Related: https://github.com/pytorch/pytorch/issues/48000
    # base[0] = base[3] = base[7] = 0
    exp = make_tensor((30,), device, exp_dtype, low=-2, high=2)
    exp[0] = exp[4] = exp[6] = 0

    expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))

    exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]
    complex_exponents = exponents + [-2.5j, -1.0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]

    for op in (torch.float_power, torch.Tensor.float_power, torch.Tensor.float_power_):
        # Case of Tensor x Tensor
        if op is torch.Tensor.float_power_ and base_dtype != out_dtype:
            # in-place cannot widen the receiver's dtype, so it must raise
            with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                op(base.clone(), exp)
        else:
            result = op(base.clone(), exp)
            self.assertEqual(expected, result)

        if op is torch.float_power:
            # out= must be pre-cast to the promoted dtype
            out = torch.empty_like(base).to(device=device, dtype=out_dtype)
            op(base, exp, out=out)
            self.assertEqual(expected, out)

        # Case of Tensor x Scalar
        for i in complex_exponents if exp_dtype.is_complex else exponents:
            out_dtype_scalar_exp = torch.complex128 if base_dtype.is_complex or type(i) == complex else torch.float64
            expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))

            if op is torch.Tensor.float_power_ and base_dtype != out_dtype_scalar_exp:
                with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                    op(base.clone(), i)
            else:
                result = op(base.clone(), i)
                self.assertEqual(expected_scalar_exp, result)

            if op is torch.float_power:
                out = torch.empty_like(base).to(device=device, dtype=out_dtype_scalar_exp)
                op(base, i, out=out)
                self.assertEqual(expected_scalar_exp, out)

    # Case of Scalar x Tensor
    for i in complex_exponents if base_dtype.is_complex else exponents:
        out_dtype_scalar_base = torch.complex128 if exp_dtype.is_complex or type(i) == complex else torch.float64
        expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))

        result = torch.float_power(i, exp)
        self.assertEqual(expected_scalar_base, result)

        out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)
        torch.float_power(i, exp, out=out)
        self.assertEqual(expected_scalar_base, out)
def test_float_power_exceptions(self, device):
    """float_power rejects out= tensors and in-place receivers whose dtype
    differs from the promoted result dtype (double / complex double)."""
    def _promo_helper(x, y):
        # the dtype float_power will promote to: complex128 if either operand
        # is complex, double otherwise
        for i in (x, y):
            if type(i) == complex:
                return torch.complex128
            elif type(i) == torch.Tensor and i.is_complex():
                return torch.complex128
        return torch.double

    test_cases = ((torch.tensor([-2, -1, 0, 1, 2], device=device), -.25),
                  (torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device), 2.))
    for base, exp in test_cases:
        for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):
            out = torch.empty(1, device=device, dtype=out_dtype)
            required_dtype = _promo_helper(base, exp)

            # out= must exactly match the promoted dtype
            if out.dtype == required_dtype:
                torch.float_power(base, exp, out=out)
            else:
                with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                    torch.float_power(base, exp, out=out)

            # in-place requires the base to already carry the promoted dtype
            if base.dtype == required_dtype:
                torch.Tensor.float_power_(base.clone(), exp)
            else:
                with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                    torch.Tensor.float_power_(base.clone(), exp)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False),
                 torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False)))
def test_xlogy_xlog1py(self, device, dtypes):
    """torch.xlogy and torch.special.xlog1py against the scipy references:
    same-shape and broadcasting operands, out= and in-place variants, scalar
    operands, and the x == 0 special values against inf/nan."""
    x_dtype, y_dtype = dtypes

    def out_variant_helper(torch_fn, x, y):
        # out= result must equal the functional result
        expected = torch_fn(x, y)
        out = torch.empty_like(expected)
        torch_fn(x, y, out=out)
        self.assertEqual(expected, out)

    def xlogy_inplace_variant_helper(x, y):
        # in-place is only valid when the (float) result fits x's dtype;
        # integral/bool receivers must raise
        if x.dtype in torch.testing.get_all_int_dtypes() + [torch.bool]:
            with self.assertRaisesRegex(RuntimeError,
                                        "can't be cast to the desired output type"):
                x.clone().xlogy_(y)
        else:
            expected = torch.empty_like(x)
            torch.xlogy(x, y, out=expected)
            inplace_out = x.clone().xlogy_(y)
            self.assertEqual(expected, inplace_out)

    def test_helper(torch_fn, reference_fn, inputs, scalar=None):
        # compares torch_fn(x, ·) to reference_fn over same-shape, mixed-dtype
        # and broadcasting right operands, then checks the out= variant with
        # either x or the given python scalar as the left operand
        x, y, z = inputs
        torch_fn_partial = partial(torch_fn, x)
        reference_fn_partial = partial(reference_fn, x.cpu().numpy())
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, x, exact_dtype=False)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, y, exact_dtype=False)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, z, exact_dtype=False)

        val = scalar if scalar is not None else x
        out_variant_helper(torch_fn, val, x)
        out_variant_helper(torch_fn, val, y)
        out_variant_helper(torch_fn, val, z)

    # Tensor-Tensor Test (tensor of same and different shape)
    x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
    y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
    z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)

    # xlog1py evaluates log(1 + y), so values slightly below zero are valid
    x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.5, high=1000)
    y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.5, high=1000)
    z_1p = make_tensor((4, 5), device, y_dtype, low=-0.5, high=1000)

    xlogy_fns = torch.xlogy, scipy.special.xlogy
    xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py

    test_helper(*xlogy_fns, (x, y, z))
    xlogy_inplace_variant_helper(x, x)
    xlogy_inplace_variant_helper(x, y)
    xlogy_inplace_variant_helper(x, z)
    test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))

    # Scalar-Tensor Test
    test_helper(*xlogy_fns, (x, y, z), 3.14)
    test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)

    # Special Values Tensor-Tensor: x == 0 against finite, infinite and nan y
    # (expected behavior is whatever the scipy reference produces)
    t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
    zeros = torch.zeros(7, dtype=y_dtype, device=device)

    def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):
        # zero left operand as either a tensor or the python scalar 0
        zeros_t = 0 if scalar else zeros
        zeros_np = 0 if scalar else zeros.cpu().numpy()
        torch_fn_partial = partial(torch_fn, zeros_t)
        reference_fn_partial = partial(reference_fn, zeros_np)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, t, exact_dtype=False)
        out_variant_helper(torch_fn, zeros_t, t)

    test_zeros_special_helper(*xlogy_fns)
    xlogy_inplace_variant_helper(zeros, t)
    test_zeros_special_helper(*xlog1py_fns)

    # Special Values Scalar-Tensor
    test_zeros_special_helper(*xlogy_fns, scalar=True)
    test_zeros_special_helper(*xlog1py_fns, scalar=True)
def test_xlogy_xlog1py_scalar_type_promotion(self, device):
    """Python-number operands must not take part in type promotion at the
    same priority as 0-dim tensors: the tensor's dtype must win for both
    torch.xlogy and torch.special.xlog1py, with the scalar on either side.
    """
    t = torch.randn((), dtype=torch.float32, device=device)
    fns = (torch.xlogy, torch.special.xlog1py)
    # tensor first, int/float scalar second
    for fn in fns:
        for scalar in (5, 5.):
            self.assertEqual(t.dtype, fn(t, scalar).dtype)
    # scalar first, tensor second
    for fn in fns:
        for scalar in (5, 5.):
            self.assertEqual(t.dtype, fn(scalar, t).dtype)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_xlogy_xlog1py_bfloat16(self, device):
    """Compare torch.xlogy / torch.special.xlog1py against SciPy for
    bfloat16 inputs.

    NumPy has no bfloat16 dtype, so tensor operands are upcast to float32
    for the reference computation and compared with exact_dtype=False.
    """
    def _compare_helper(x, y, torch_fn, reference_fn):
        # Python floats pass through unchanged; bfloat16 tensors go through
        # float32 on the NumPy side.
        x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()
        y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()
        expected = torch.from_numpy(reference_fn(x_np, y_np))
        actual = torch_fn(x, y)
        self.assertEqual(expected, actual, exact_dtype=False)

    x_dtype, y_dtype = torch.bfloat16, torch.bfloat16

    # Tensor-Tensor Test (tensor of same and different shape)
    x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
    y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
    z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)

    # low=-0.8 keeps 1 + y strictly positive for the xlog1py inputs.
    x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.8, high=1000)
    y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.8, high=1000)
    z_1p = make_tensor((4, 5), device, y_dtype, low=-0.8, high=1000)

    xlogy_fns = torch.xlogy, scipy.special.xlogy
    xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py

    _compare_helper(x, x, *xlogy_fns)
    _compare_helper(x, y, *xlogy_fns)
    _compare_helper(x, z, *xlogy_fns)
    _compare_helper(x, 3.14, *xlogy_fns)
    _compare_helper(y, 3.14, *xlogy_fns)
    _compare_helper(z, 3.14, *xlogy_fns)

    _compare_helper(x_1p, x_1p, *xlog1py_fns)
    _compare_helper(x_1p, y_1p, *xlog1py_fns)
    _compare_helper(x_1p, z_1p, *xlog1py_fns)
    _compare_helper(x_1p, 3.14, *xlog1py_fns)
    _compare_helper(y_1p, 3.14, *xlog1py_fns)
    _compare_helper(z_1p, 3.14, *xlog1py_fns)

    # Special Values Tensor-Tensor
    t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
    # FIX: this was `torch.tensor(7, ...)`, which builds a 0-dim tensor whose
    # *value* is 7 and so never exercised the y == 0 special case this section
    # is meant to cover. `torch.zeros(7, ...)` matches the float-dtype variant
    # of this test (see `zeros` in the non-bfloat16 xlogy test above).
    zeros = torch.zeros(7, dtype=y_dtype, device=device)
    _compare_helper(t, zeros, *xlogy_fns)
    _compare_helper(t, 0., *xlogy_fns)
    _compare_helper(t, zeros, *xlog1py_fns)
    _compare_helper(t, 0., *xlog1py_fns)
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False,
                                              include_half=False, include_bfloat16=False),
                 torch.testing.get_all_dtypes(include_complex=False,
                                              include_half=False, include_bfloat16=False)))
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_zeta(self, device, dtypes):
    """Compare torch.special.zeta against scipy.special.zeta for
    tensor/tensor (same-shape and broadcasting) and scalar/tensor inputs.
    """
    x_dtype, q_dtype = dtypes

    def _check(x, q):
        # Python-float operands pass straight to SciPy; tensors become ndarrays.
        x_np = x.cpu().numpy() if isinstance(x, torch.Tensor) else x
        q_np = q.cpu().numpy() if isinstance(q, torch.Tensor) else q
        expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))
        actual = torch.special.zeta(x, q)
        # CPU comparisons use a slightly looser tolerance.
        if self.device_type == 'cpu':
            rtol, atol = 1e-6, 1e-6
        else:
            rtol, atol = None, None
        self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)

    # tensor/tensor: same shape, broadcast lhs, broadcast rhs, broadcast both
    shape_pairs = (
        ((2, 3, 4), (2, 3, 4)),
        ((2, 1, 4), (2, 3, 4)),
        ((2, 3, 4), (2, 1, 4)),
        ((2, 3, 1), (2, 1, 4)),
    )
    for x_shape, q_shape in shape_pairs:
        _check(make_tensor(x_shape, device, x_dtype),
               make_tensor(q_shape, device, q_dtype))

    # scalar x against tensor q; integral q falls back to the default float dtype
    q_tensor_dtype = q_dtype if q_dtype.is_floating_point else torch.get_default_dtype()
    for x_val in np.linspace(-5, 5, num=10).tolist():
        _check(x_val, make_tensor((2, 3, 4), device, q_tensor_dtype))

    # tensor x against scalar q; integral x falls back to the default float dtype
    x_tensor_dtype = x_dtype if x_dtype.is_floating_point else torch.get_default_dtype()
    for q_val in np.linspace(-5, 5, num=10).tolist():
        _check(make_tensor((2, 3, 4), device, x_tensor_dtype), q_val)
# Binary dunder methods of torch.Tensor, consumed by
# generate_not_implemented_tests below, which creates one test per entry
# checking that the dunder returns NotImplemented for an unknown operand type.
# Covers comparisons, arithmetic (forward / reflected / in-place), matmul,
# shifts, and bitwise operators.
tensor_binary_ops = [
    '__lt__', '__le__',
    '__gt__', '__ge__',
    '__eq__', '__ne__',
    '__add__', '__radd__', '__iadd__',
    '__sub__', '__rsub__', '__isub__',
    '__mul__', '__rmul__', '__imul__',
    '__matmul__', '__rmatmul__',
    '__truediv__', '__rtruediv__', '__itruediv__',
    '__floordiv__', '__rfloordiv__', '__ifloordiv__',
    '__mod__', '__rmod__', '__imod__',
    '__pow__', '__rpow__', '__ipow__',
    '__lshift__', '__rlshift__', '__ilshift__',
    '__rshift__', '__rrshift__', '__irshift__',
    '__and__', '__iand__',
    '__xor__', '__ixor__',
    '__or__', '__ior__',
    # Unsupported operators
    # '__imatmul__',
    # '__divmod__', '__rdivmod__', '__idivmod__',
    # '__rand__', '__ror__', '__rxor__',
]
# Test that binary math operations return NotImplemented for unknown types.
def generate_not_implemented_tests(cls):
    """Attach one generated test per entry of tensor_binary_ops to *cls*,
    each asserting that the dunder returns NotImplemented (rather than
    raising) when the right-hand operand is an unrecognized type."""
    class UnknownType:
        pass

    # TODO: refactor to inline these
    _types = [
        torch.half, torch.float, torch.double,
        torch.int8, torch.short, torch.int, torch.long,
        torch.uint8
    ]

    # TODO: refactor to use make_tensor
    def _small_2d(dtype, device, has_zeros=True, fill_ones=False, oneish=False):
        tensor = _make_tensor((5, 5), dtype, device, fill_ones=fill_ones)
        if oneish:
            return tensor.clamp(min=_number(.99, 1, dtype), max=1.01)
        if not has_zeros:
            return tensor.clamp(min=_number(_div_min, 1, dtype))
        return tensor

    def _build_test(op):
        @dtypes(*_types)
        def test(self, device, dtype):
            lhs = _small_2d(dtype, device)
            # Run the dunder against an operand no tensor op can recognize.
            self.assertEqual(getattr(lhs, op)(UnknownType()), NotImplemented)
        return test

    for op in tensor_binary_ops:
        name = f"test_{op}_not_implemented"
        assert not hasattr(cls, name), f"{name} already in {cls.__name__}"
        setattr(cls, name, _build_test(op))
# Add the auto-generated NotImplemented tests to the device-generic test
# class before instantiating its per-device variants.
generate_not_implemented_tests(TestBinaryUfuncs)
instantiate_device_type_tests(TestBinaryUfuncs, globals())
if __name__ == '__main__':
    run_tests()
| 46.203609 | 131 | 0.584753 | import torch
import numpy as np
import itertools
from itertools import product
import math
import random
import unittest
import warnings
import operator
from functools import partial
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
skipCUDAIfRocm, skipIf)
from torch.testing import all_types_and_complex_and
if TEST_SCIPY:
import scipy.special
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
def _convert_t(dtype, device):
if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
if fill_ones:
return torch.ones(*shape, dtype=_convert_t(dtype, device), device=device)
if not (dtype.is_floating_point or dtype.is_complex):
t = torch.randint(0, 10, shape, device=device)
if dtype != torch.uint8:
t = t - 5
return t.to(_convert_t(dtype, device))
if dtype == torch.half and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).half().float()
if dtype == torch.bfloat16 and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()
return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)
class TestBinaryUfuncs(TestCase):
def test_add_broadcast_empty(self, device):
self.assertRaises(RuntimeError, lambda: torch.randn(5, 0, device=device) + torch.randn(0, 5, device=device))
self.assertEqual(torch.randn(5, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, device=device))
self.assertEqual(torch.randn(5, 0, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device))
self.assertEqual(torch.randn(5, 0, 6, device=device), torch.randn((), device=device) + torch.randn(5, 0, 6, device=device))
self.assertEqual(torch.randn(0, device=device), torch.randn(0, device=device) + torch.randn(1, device=device))
self.assertEqual(torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),
torch.randn(0, 7, 0, 6, 5, 0, 1, device=device) + torch.randn(1, 1, 5, 1, 7, device=device))
self.assertRaises(RuntimeError, lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device))
def test_addcmul_scalars_as_floats(self, device):
x = torch.tensor(2.)
y = torch.tensor(3., device=device)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)
x = torch.tensor(2., requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops(self, device):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
eq = x == y
for idx in iter_indices(x):
self.assertEqual(x[idx] == y[idx], eq[idx] == 1)
ne = x != y
for idx in iter_indices(x):
self.assertEqual(x[idx] != y[idx], ne[idx] == 1)
lt = x < y
for idx in iter_indices(x):
self.assertEqual(x[idx] < y[idx], lt[idx] == 1)
le = x <= y
for idx in iter_indices(x):
self.assertEqual(x[idx] <= y[idx], le[idx] == 1)
gt = x > y
for idx in iter_indices(x):
self.assertEqual(x[idx] > y[idx], gt[idx] == 1)
ge = x >= y
for idx in iter_indices(x):
self.assertEqual(x[idx] >= y[idx], ge[idx] == 1)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_must_take_bool_output(self, device):
for op in [torch.lt, torch.le, torch.gt, torch.ge, torch.eq, torch.ne,
torch.logical_and, torch.logical_or, torch.logical_xor]:
self.assertEqual(op(torch.tensor([True]), torch.tensor([False])).dtype, torch.bool)
# TODO: update to work on CUDA, too
@onlyCPU
def test_inplace_comparison_ops_require_inputs_have_same_dtype(self, device):
with self.assertRaisesRegex(RuntimeError, 'Expected object of scalar type'):
for op in ['lt_', 'le_', 'gt_', 'ge_', 'eq_', 'ne_', 'logical_xor_', 'logical_and_', 'logical_or_']:
x = torch.tensor([1], dtype=torch.int)
y = torch.tensor([2], dtype=torch.long)
in_place_method = getattr(x, op)
in_place_method(y)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_scalar_overflow(self, device):
s = 1 << 20
t = torch.tensor([1 << 5], dtype=torch.uint8)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t < s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s < t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t <= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s <= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t > s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s > t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t >= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s >= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t == s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s == t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t != s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s != t)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_zerodim_tensor_overflow(self, device):
t1 = torch.tensor([1 << 5], dtype=torch.uint8)
t2 = torch.tensor([1 << 30], dtype=torch.int32)
ts1 = torch.tensor(1 << 20, dtype=torch.int32)
ts2 = torch.tensor(1 << 40, dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 < ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 < t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 <= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 <= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 > ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 > t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 >= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 >= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 == ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 == t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 != ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 != t2)
# TODO: update to work on CUDA, too
@onlyCPU
def test_bitwise_ops(self, device):
x = torch.randn(5, 5).gt(0)
y = torch.randn(5, 5).gt(0)
and_result = x & y
for idx in iter_indices(x):
if and_result[idx]:
self.assertTrue(x[idx] and y[idx])
else:
self.assertFalse(x[idx] and y[idx])
or_result = x | y
for idx in iter_indices(x):
if or_result[idx]:
self.assertTrue(x[idx] or y[idx])
else:
self.assertFalse(x[idx] or y[idx])
xor_result = x ^ y
for idx in iter_indices(x):
if xor_result[idx]:
self.assertTrue(x[idx] ^ y[idx])
else:
self.assertFalse(x[idx] ^ y[idx])
x_clone = x.clone()
x_clone &= y
self.assertEqual(x_clone, and_result)
x_clone = x.clone()
x_clone |= y
self.assertEqual(x_clone, or_result)
x_clone = x.clone()
x_clone ^= y
self.assertEqual(x_clone, xor_result)
def test_inplace_division(self, device):
t = torch.rand(5, 5, device=device)
id_before = id(t)
t /= 2
id_after = id(t)
self.assertEqual(id_before, id_after)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_complex=False))
def test_div_rounding_modes(self, device, dtype):
if dtype.is_floating_point:
low, high = -10.0, 10.0
else:
info = torch.iinfo(dtype)
low, high = info.min, info.max
a = make_tensor((100,), device, dtype, low=low, high=high)
b = make_tensor((100,), device, dtype, low=low, high=high)
# Avoid division by zero so we can test (a / b) * b == a
if dtype.is_floating_point:
eps = 0.1
b[(-eps < b) & (b < eps)] = eps
else:
b[b == 0] = 1
if not dtype.is_floating_point:
# floor(a / b) * b can be < a, so fixup slightly to avoid underflow
a = torch.where(a < 0, a + b, a)
d_true = torch.divide(a, b, rounding_mode=None)
self.assertTrue(d_true.is_floating_point())
self.assertEqual(d_true * b, a.to(d_true.dtype))
d_floor = torch.divide(a, b, rounding_mode='floor')
if dtype not in (torch.bfloat16, torch.half):
self.assertEqual(d_floor * b + torch.remainder(a, b), a)
else:
self.assertEqual(d_floor * b + torch.remainder(a.float(), b.float()), a,
exact_dtype=False)
d_trunc = torch.divide(a, b, rounding_mode='trunc')
rounding_unsupported = (
dtype == torch.half and device != 'cuda' or
dtype == torch.bfloat16 and device != 'cpu')
d_ref = d_true.float() if rounding_unsupported else d_true
self.assertEqual(d_trunc, d_ref.trunc().to(dtype))
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_div_rounding_nonfinite(self, device, dtype):
# Compare division of special floating point values against NumPy
num = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype)
# Divide by zero is tested seperately
denom = num[num != 0]
a, b = num[None, :].clone(), denom[:, None].clone()
# Compare bfloat16 against NumPy float
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in ((None, np.true_divide), ("floor", np.floor_divide)):
with np.errstate(all='ignore'):
expect = np_ref(an, bn)
kwargs = dict(rounding_mode=mode) if mode is not None else {}
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(actual, torch.from_numpy(expect),
exact_device=False, exact_dtype=exact_dtype)
# Compare contiguous (likely vectorized) against non-contiguous (not vectorized)
a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[::2, ::2]
a_noncontig[:] = a
b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[::2, ::2]
b_noncontig[:] = b
for rounding_mode in (None, "trunc", "floor"):
expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)
actual = torch.divide(a, b, rounding_mode=rounding_mode)
self.assertEqual(actual, expect)
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_divide_by_zero_rounding(self, device, dtype):
a = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype)
exact_dtype = (dtype != torch.bfloat16)
if exact_dtype:
an = a.cpu().numpy()
else:
an = a.float().cpu().numpy()
zero = torch.zeros_like(a)
# NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide
expect = np.divide(an, 0)
for rounding_mode in (None, 'floor'):
actual = torch.divide(a, 0, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
actual = torch.divide(a, zero, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
@dtypes(*torch.testing.get_all_dtypes(
include_bool=False, include_complex=False, include_bfloat16=False))
def test_div_rounding_numpy(self, device, dtype):
info = (torch.finfo(dtype) if dtype.is_floating_point
else torch.iinfo(dtype))
low, high = info.min, info.max
a = make_tensor((4096,), device, dtype, low=low, high=high)
b = make_tensor((4096,), device, dtype, low=low, high=high)
b[b == 0] = 1
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in (
(None, np.true_divide),
("floor", np.floor_divide),
("trunc", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype))
):
with np.errstate(all='ignore'):
expect = torch.from_numpy(np_ref(an, bn))
kwargs = dict(rounding_mode=mode) if mode is not None else {}
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
expect = expect[::2]
with set_default_dtype(torch.double):
actual = torch.divide(a[::2], b[::2], **kwargs)
self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
@onlyCUDA
def test_cross_device_inplace_error_msg(self, device):
a = torch.tensor(2.)
b = torch.tensor(2., device=device)
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
a += b
@onlyOnCPUAndCUDA
def test_out_resize_warning(self, device):
a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)
b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)
unary_inputs = (a,)
binary_inputs = (a, b)
unary_ops = (torch.ceil, torch.exp)
binary_ops = (torch.add, torch.sub)
for op in (unary_ops + binary_ops):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inputs = unary_inputs if op in unary_ops else binary_inputs
# No warnings
op(*inputs, out=torch.empty(3, device=device))
op(*inputs, out=torch.empty(0, device=device))
self.assertEqual(len(w), 0)
# Cases that throw warnings
op(*inputs, out=torch.empty(2, device=device))
self.assertEqual(len(w), 1)
# Verifies that the inplace dunders (like idiv) actually are in place
@onlyOnCPUAndCUDA
def test_inplace_dunders(self, device):
t = torch.randn((1,), device=device)
expected = t.data_ptr()
t += 1
t -= 1
t *= 1
t /= 1
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
t //= 1
t %= 1
self.assertEqual(expected, t.data_ptr())
def check_internal_mem_overlap(self, inplace_op, num_inputs,
dtype, device,
expected_failure=False):
if isinstance(inplace_op, str):
inplace_op = getattr(torch.Tensor, inplace_op)
input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
inputs = [input] + [torch.randn_like(input)
for i in range(num_inputs - 1)]
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
def unary_check_input_output_mem_overlap(self, data, sz, op,
expected_failure=False):
def _test(op, output, input):
output_exp = torch.empty_like(output)
op(input, out=output_exp)
self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
# output is identical to input:
_test(op, output=data[0:sz], input=data[0:sz])
# output and input are independent:
_test(op, output=data[0:sz], input=data[sz:2 * sz])
# output partially overlaps with input:
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
def binary_check_input_output_mem_overlap(self, op, device,
expected_failure=False):
sz = 3
data = torch.randn(2 * sz, device=device)
other = torch.randn(sz, device=device)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(other, input, out=out),
expected_failure=expected_failure)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(input, other, out=out),
expected_failure=expected_failure)
@dtypes(torch.double)
def test_binary_op_mem_overlap(self, device, dtype):
ops = [
("add", True, True, 'cpu'),
("add", True, True, 'cuda'),
("mul", True, True, 'cpu'),
("mul", True, True, 'cuda'),
("sub", True, True, 'cpu'),
("sub", True, True, 'cuda'),
("div", True, True, 'cpu'),
("div", True, True, 'cuda'),
("pow", True, True, 'cpu'),
("pow", True, True, 'cuda'),
("fmod", True, True, 'cpu'),
("fmod", True, True, 'cuda'),
("atan2", True, True, 'cpu'),
("atan2", True, True, 'cuda'),
("hypot", True, True, 'cpu'),
("hypot", True, True, 'cuda'),
("igamma", True, True, 'cpu'),
("igamma", True, True, 'cuda'),
("igammac", True, True, 'cpu'),
("igammac", True, True, 'cuda'),
("nextafter", True, True, 'cpu'),
("nextafter", True, True, 'cuda'),
("le", True, True, 'cpu'),
("le", True, True, 'cuda'),
("lt", True, True, 'cpu'),
("lt", True, True, 'cuda'),
("ge", True, True, 'cpu'),
("ge", True, True, 'cuda'),
("gt", True, True, 'cpu'),
("gt", True, True, 'cuda'),
("eq", True, True, 'cpu'),
("eq", True, True, 'cuda'),
("ne", True, True, 'cpu'),
("ne", True, True, 'cuda'),
("logical_and", True, True, 'cpu'),
("logical_and", True, True, 'cuda'),
("logical_or", True, True, 'cpu'),
("logical_or", True, True, 'cuda'),
("logical_xor", True, True, 'cpu'),
("logical_xor", True, True, 'cuda'),
]
for (fn, has_input_output_mem_overlap_check,
has_internal_mem_overlap_check, dev) in ops:
if dev != device:
continue
out_op = getattr(torch, fn)
inplace_op = getattr(torch.Tensor, fn + '_')
self.check_internal_mem_overlap(
inplace_op, 2, dtype, device,
expected_failure=not has_internal_mem_overlap_check)
self.binary_check_input_output_mem_overlap(out_op, device,
expected_failure=not has_input_output_mem_overlap_check)
def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):
for num in exponents:
if isinstance(num, int) and num < 0 and not m1.is_floating_point() and not m1.is_complex():
with self.assertRaisesRegex(RuntimeError,
r'Integers to negative integer powers are not allowed\.'):
torch.pow(m1[4], num)
else:
# base - tensor, exponent - number
# contiguous
res1 = torch.pow(m1[4], num)
res2 = res1.clone().zero_()
# `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[4][i], num)
rtol = 0 if atol is not None else None
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# non-contiguous
res1 = torch.pow(m1[:, 4], num)
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[i, 4], num)
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# scalar ** tensor to enforce correct handling of dtypes for __rpow__().
expected_dtype = torch.result_type(num, m1)
res1 = num ** m1[4]
res2 = torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]
self.assertEqual(res1, res2)
self.assertEqual(res1.dtype, expected_dtype)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_pow(self, device, dtype):
m1 = torch.empty(0, dtype=dtype, device=device)
if m1.is_floating_point() or m1.is_complex():
m1 = make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5
else:
# math.pow will overflow and throw exceptions for large integers
range_high = 4 if dtype in (torch.int8, torch.uint8) else 10
m1 = make_tensor((100, 100), low=1, high=range_high, dtype=dtype, device=device)
exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3]
complex_exponents = [-2.5j, -1.0j, 0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]
if m1.is_complex():
self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)
else:
self._do_pow_for_exponents(m1, exponents, math.pow, None)
self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
# base - number, exponent - tensor
# contiguous
res1 = torch.pow(3, m1[4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[4, i])
self.assertEqual(res1, res2)
# non-contiguous
res1 = torch.pow(3, m1[:, 4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[i][4])
self.assertEqual(res1, res2)
# TODO: refactor all these tests using opinfos properly
def _test_pow(self, base, exponent, np_exponent=None):
if np_exponent is None:
np_exponent = exponent
def to_np(value):
if isinstance(value, torch.Tensor):
return value.cpu().numpy()
return value
try:
np_res = np.power(to_np(base), to_np(np_exponent))
expected = torch.from_numpy(np_res) if isinstance(np_res, np.ndarray) else torch.tensor(np_res, dtype=base.dtype)
except ValueError as e:
err_msg = "Integers to negative integer powers are not allowed."
self.assertEqual(str(e), err_msg)
out = torch.empty_like(base)
test_cases = [
lambda: base.pow(exponent),
lambda: base.pow_(exponent),
lambda: torch.pow(base, exponent),
lambda: torch.pow(base, exponent, out=out)
]
for test_case in test_cases:
self.assertRaisesRegex(RuntimeError, err_msg, test_case)
else:
if isinstance(base, torch.Tensor):
actual = base.pow(exponent)
self.assertEqual(actual, expected.to(actual))
actual = base.clone()
# When base is a 0-dim cpu tensor and exp is a cuda tensor, we exp `pow` to work but `pow_` to fail, since
# `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output
if (isinstance(exponent, torch.Tensor) and base.dim() == 0 and base.device.type == 'cpu' and
exponent.device.type == 'cuda'):
regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
elif torch.can_cast(torch.result_type(base, exponent), base.dtype):
actual2 = actual.pow_(exponent)
self.assertEqual(actual, expected)
self.assertEqual(actual2, expected)
else:
self.assertRaisesRegex(RuntimeError, "Found dtype \\w+ but expected \\w+", lambda: actual.pow_(exponent))
actual = torch.pow(base, exponent)
self.assertEqual(actual, expected.to(actual))
actual2 = torch.pow(base, exponent, out=actual)
self.assertEqual(actual, expected.to(actual))
self.assertEqual(actual2, expected.to(actual))
# Tests pow() for integral, floating-type tensors, with integral, floating-type
# exponents (tensor or scalar), respectively. noncontiguous tensors are also tested.
def test_int_and_float_pow(self, device):
def _test_int_and_float_pow(dt, low, high, dev):
test_cases = (
((4, 4), 0, (4, 1)),
((3, 1), 4, (3, 1)),
((2,), 4, (1,)),
((1,), 2, ()),
((513, 513), 4, (513,)),
((5, 5, 5), 5, (5,)),
((), 2, ()),
)
for base_shape, exp_scalar, exp_shape in test_cases:
base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high)
# int tensors don't take negative exponents
if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high)
else:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high,
noncontiguous=True)
if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high,
noncontiguous=True)
else:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high,
noncontiguous=True)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
_test_int_and_float_pow(torch.int8, -2, 2, device)
_test_int_and_float_pow(torch.uint8, 0, 3, device)
_test_int_and_float_pow(torch.int16, -5, 5, device)
_test_int_and_float_pow(torch.int64, -10, 10, device)
_test_int_and_float_pow(torch.int32, -10, 10, device)
_test_int_and_float_pow(torch.float16, 0., 5., device)
_test_int_and_float_pow(torch.float32, 0., 10., device)
_test_int_and_float_pow(torch.float64, 0., 10., device)
_test_int_and_float_pow(torch.float32, -10., 10., device)
_test_int_and_float_pow(torch.float64, -10., 10., device)
# Tests that a Runtime error occurs when a base tensor cannot be resized
# by pow's inplace variant due to PyTorch's broadcasting semantics.
def test_pow_inplace_resizing_exception(self, device):
test_cases = (
((), (3,)),
((2,), (2, 1)),
((2, 1), (2, 2)),
((2, 2), (2, 1, 1)),
)
test_inputs = list((make_tensor(base_size, dtype=torch.float64, device=device,
high=10., low=0.),
make_tensor(exp_size, dtype=torch.float64, device=device,
high=10., low=0.))
for base_size, exp_size in test_cases)
for base, exponent in test_inputs:
regex = "doesn't match the broadcast shape"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
def test_int_tensor_pow_neg_ints(self, device):
ints = [torch.iinfo(torch.int32).min,
-3, -2, -1, 0, 1, 2, 3,
torch.iinfo(torch.int32).max]
neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]
tensor = torch.tensor(ints, dtype=torch.int32, device=device)
for pow in neg_ints:
self._test_pow(tensor, pow)
def test_long_tensor_pow_floats(self, device):
ints = [0, 1, 23, 4567]
floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
tensor = torch.tensor(ints, dtype=torch.int64, device=device)
for pow in floats:
self._test_pow(tensor, pow)
@dtypes(*[torch.float32, torch.float64])
def test_float_scalar_pow_float_tensor(self, device, dtype):
floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0,
1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
exponent_shapes = (
(1,),
(2, 2),
(2, 1),
(2, 2, 2),
)
tensors = list(make_tensor(shape, dtype=dtype, device=device, low=0)
for shape in exponent_shapes)
floats_tensor = torch.tensor(floats, dtype=dtype, device=device)
for base in floats:
self._test_pow(base, floats_tensor)
for tensor in tensors:
self._test_pow(base, tensor)
@onlyCUDA
def test_cuda_tensor_pow_scalar_tensor(self, device):
cuda_tensors = [torch.randn((3, 3), device=device), torch.tensor(3.0, device=device)]
scalar_tensors = [torch.tensor(5.0, device='cpu'), torch.tensor(-3), torch.tensor(1)]
for base, exp in product(cuda_tensors, scalar_tensors):
self._test_pow(base, exp)
@onlyCUDA
def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):
cuda_tensors = [torch.tensor(5.0, device='cuda'), torch.tensor(-3, device='cuda')]
for exp in cuda_tensors:
base = torch.randn((3, 3), device='cpu')
regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)
for exp in cuda_tensors:
base = torch.tensor(3.0, device='cpu')
self._test_pow(base, exp)
    @onlyCUDA
    @dtypes(torch.complex64, torch.complex128)
    def test_pow_cuda_complex_extremal_failing(self, device, dtype):
        # Documents a known CPU/CUDA disagreement: squaring a complex value
        # with an infinite imaginary part gives different results on the two
        # devices, so the equality check below is *expected* to fail — the
        # outer assertRaises(AssertionError) pins that expectation.
        t = torch.tensor(complex(-1., float('inf')), dtype=dtype, device=device)
        with self.assertRaises(AssertionError):
            cuda_out = t.pow(2)
            cpu_out = t.cpu().pow(2)
            self.assertEqual(cpu_out, cuda_out)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_dtypes(include_bool=False, include_bfloat16=False)))
def test_complex_scalar_pow_tensor(self, device, dtype):
complexes = [0.5j, 1. + 1.j, -1.5j, 2.2 - 1.6j, 1 + 0j]
first_exp = make_tensor((100,), device, dtype, low=-2, high=2)
second_exp = make_tensor((100,), device, dtype, low=-2, high=2, noncontiguous=True)
first_exp[0] = first_exp[10] = first_exp[20] = 0
second_exp[0] = second_exp[10] = second_exp[20] = 0
for base in complexes:
self._test_pow(base, first_exp)
self._test_pow(base, second_exp)
@onlyOnCPUAndCUDA
def test_pow_scalar_type_promotion(self, device):
inputs = [17, [17]]
for input in inputs:
input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)
out_uint8_computation = torch.pow(2, input_tensor_uint8, out=torch.tensor(0, dtype=torch.int64, device=device))
input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)
out_int64_computation = torch.pow(2, input_tensor_int64, out=torch.tensor(0, dtype=torch.int64, device=device))
self.assertNotEqual(out_uint8_computation, out_int64_computation)
self.assertEqual(out_uint8_computation.to(dtype=torch.uint8), out_int64_computation.to(dtype=torch.uint8))
def test_tensor_pow_tensor(self, dev):
def rotate(l, n):
return l[-n:] + l[:-n]
def test_tensor_pow_tensor(values, torch_type, numpy_type):
vals_tensor = torch.tensor(values, dtype=torch_type, device=dev)
for i in range(len(values)):
pows = rotate(values, i)
pows_tensor = torch.tensor(pows, dtype=torch_type, device=dev)
self._test_pow(vals_tensor, pows_tensor)
ints = [0, 1, 2, 3]
test_tensor_pow_tensor(ints, torch.uint8, np.uint8)
test_tensor_pow_tensor(ints, torch.int8, np.int8)
test_tensor_pow_tensor(ints, torch.int16, np.int16)
test_tensor_pow_tensor(ints, torch.int32, np.int32)
test_tensor_pow_tensor(ints, torch.int64, np.int64)
floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3,
0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]
test_tensor_pow_tensor(floats, torch.float16, np.float16)
test_tensor_pow_tensor(floats, torch.float32, np.float32)
test_tensor_pow_tensor(floats, torch.float64, np.float64)
def test_logical_xor_with_nontrivial_alignment(self, device):
size = 128
a = (torch.randn(size, device=device) > 0)
b = (torch.randn(size, device=device) > 0)
c = (torch.randn(size, device=device) > 0)
non_trivial_alignment = [1, 2, 4, 8, 15]
for i in non_trivial_alignment:
for j in non_trivial_alignment:
for k in non_trivial_alignment:
a_ = a[i: 100 + i]
b_ = b[j: 100 + j]
c_ = c[k: 100 + k]
torch.logical_xor(a_, b_, out=c_)
for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):
self.assertEqual(x ^ y, z)
@dtypes(torch.float)
def test_add_with_tail(self, device, dtype):
for tail_size in [1, 63, 67, 130]:
size = 4096 + tail_size
a = torch.randn(size, device=device, dtype=dtype)
b = torch.randn(size, device=device, dtype=dtype)
c = a + b
for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):
self.assertEqual(x + y, z)
@deviceCountAtLeast(2)
@onlyCUDA
def test_cross_device_binary_ops(self, devices):
vals = (1., (2.,))
cpu_tensor = torch.randn(2, 2)
def do_test(op, a, b):
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, b)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(b, a)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, cpu_tensor)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(cpu_tensor, a)
for op in (operator.add, torch.add,
operator.sub, torch.sub,
operator.mul, torch.mul,
operator.truediv, torch.true_divide,
operator.floordiv, torch.floor_divide):
for a, b in product(vals, vals):
a = torch.tensor(a, device=devices[0])
b = torch.tensor(b, device=devices[1])
do_test(op, a, b)
@deviceCountAtLeast(2)
@onlyCUDA
def test_binary_op_scalar_device_unspecified(self, devices):
scalar_val = torch.tensor(1.)
for default_device in devices:
with torch.cuda.device(default_device):
for device in devices:
device_obj = torch.device(device)
x = torch.rand(3, device=device)
y0 = x * scalar_val
self.assertEqual(y0.device, device_obj)
y1 = scalar_val * x
self.assertEqual(y1.device, device_obj)
self.assertEqual(y0, y1)
def test_div_and_floordiv_vs_python(self, device):
def _scalar_helper(python_op, torch_op):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
if b == 0:
continue
expected = python_op(a, b)
for op in (operator.truediv, torch.true_divide):
actual_scalar = torch_op(a, b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
actual_tensor = torch_op(a_t, b_t)
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected_div)
self.assertEqual(actual_tensor.item(), expected_div)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
_scalar_helper(lambda a, b: math.trunc(a / b), operator.floordiv)
_scalar_helper(lambda a, b: math.trunc(a / b), torch.floor_divide)
    @onlyOnCPUAndCUDA
    def test_div_and_floordiv_script_vs_python(self, device):
        # Verifies TorchScript-compiled / and // match eager Python semantics
        # for 0-dim tensor operands and for scalar (and reflected-scalar) forms.
        def _wrapped_div(a, b):
            return a / b
        def _wrapped_floordiv(a, b):
            return a // b
        scripted_div = torch.jit.script(_wrapped_div)
        scripted_floordiv = torch.jit.script(_wrapped_floordiv)
        for a, b in product(range(-10, 10), range(-10, 10)):
            for op in (lambda x: x * .5, lambda x: math.floor(x)):
                a = op(a)
                b = op(b)
                # Skip zero divisors.
                if b == 0:
                    continue
                expected_div = a / b
                # Floor division is expected to *truncate* here, matching
                # torch.floor_divide's historical (deprecated) behavior.
                expected_truncdiv = math.trunc(a / b)
                a_t = torch.tensor(a, device=device)
                b_t = torch.tensor(b, device=device)
                self.assertEqual(scripted_div(a_t, b_t), expected_div)
                with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
                    self.assertEqual(scripted_floordiv(a_t, b_t), expected_truncdiv)
        # Scalar variants: tensor op 5 and the reflected 5 op tensor.
        def _wrapped_div_scalar(a):
            return a / 5
        def _wrapped_rdiv_scalar(a):
            return 5 / a
        def _wrapped_floordiv_scalar(a):
            return a // 5
        def _wrapped_rfloordiv_scalar(a):
            return 5 // a
        scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)
        scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)
        scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)
        scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)
        for a in range(-10, 10):
            for op in (lambda x: x * .5, lambda x: math.floor(x)):
                a = op(a)
                a_t = torch.tensor(a, device=device)
                self.assertEqual(a / 5, scripted_div_scalar(a_t))
                with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
                    self.assertEqual(math.trunc(a / 5), scripted_floordiv_scalar(a_t))
                # Reflected ops divide *by* a, so skip a == 0.
                if a == 0:
                    continue
                self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))
                if a_t.is_floating_point():
                    # Reflected floor division by a float tensor raises under
                    # TorchScript.
                    with self.assertRaises(RuntimeError):
                        scripted_rfloordiv_scalar(a_t)
                else:
                    # See issue gh-52387
                    self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))
# NOTE: torch.floor_divide currently truncates instead of flooring
# the quotient. See https://github.com/pytorch/pytorch/issues/43874.
@onlyOnCPUAndCUDA
def test_idiv_and_ifloordiv_vs_python(self, device):
def _wrapped_idiv_tensor(a, b):
a /= b
return a
def _wrapped_idiv_scalar(a):
a /= 5
return a
def _wrapped_true_divide__tensor(a, b):
a.true_divide_(b)
return a
def _wrapped_true_divide__scalar(a):
a.true_divide_(5)
return a
def _wrapped_floor_divide__tensor(a, b):
a.floor_divide_(b)
return a
def _wrapped_floor_divide__scalar(a):
a.floor_divide_(5)
return a
# The following functions are unsupported by the JIT
def _wrapped_ifloordiv_tensor(a, b):
a //= b
return a
def _wrapped_ifloordiv_scalar(a):
a //= 5
return a
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)
scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)
scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)
scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)
scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)
scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)
scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_idiv = a / b
expected_ifloordiv = a // b
expected_itruncdiv = math.trunc(a / b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
if a_t.is_floating_point():
tmp0 = a_t.clone()
tmp0 /= b
tmp1 = a_t.clone()
tmp1 /= b_t
self.assertEqual(tmp0.item(), expected_idiv)
self.assertEqual(tmp1.item(), expected_idiv)
self.assertEqual(scripted_true_divide__tensor(a_t.clone(), b_t).item(), expected_idiv)
self.assertEqual(scripted_true_divide__scalar(a_t.clone()).item(), a / 5)
else:
tmp = a_t.clone()
with self.assertRaises(RuntimeError):
tmp /= b
with self.assertRaises(RuntimeError):
tmp /= b_t
with self.assertRaises(RuntimeError):
scripted_true_divide__tensor(tmp, b_t)
with self.assertRaises(RuntimeError):
scripted_true_divide__scalar(tmp)
if not a_t.is_floating_point() and b_t.is_floating_point():
# Inplace modification fails because a float tensor is required
# if the divisor is a float tensor
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a_t.clone().floor_divide_(b_t)
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
scripted_floor_divide_tensor(a_t.clone(), b_t)
tmp = a_t.clone()
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
tmp //= b_t
else:
# Inplace modification is OK when both or neither tensor is
# a float tensor
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
self.assertEqual(a_t.clone().floor_divide_(b_t).item(), expected_itruncdiv)
self.assertEqual(scripted_floor_divide__tensor(a_t.clone(), b_t).item(), expected_itruncdiv)
tmp = a_t.clone()
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
tmp //= b_t
self.assertEqual(tmp.item(), expected_itruncdiv)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
self.assertEqual(scripted_floor_divide__scalar(a_t), math.trunc(a / 5))
# Tests binary op equivalence with Python builtin ops
# Also tests that reverse operations are equivalent to forward ops
# NOTE: division ops are tested separately above
def test_binary_ops_with_scalars(self, device):
for ops in ((operator.add, torch.add),
(operator.sub, torch.sub),
(operator.mul, torch.mul),
(operator.truediv, torch.div)):
python_op, torch_op = ops
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0 or a == 0:
continue
a_tensor = torch.tensor(a, device=device)
b_tensor = torch.tensor(b, device=device)
a_tensor_cpu = a_tensor.cpu()
b_tensor_cpu = b_tensor.cpu()
vals = (a, b, a_tensor, b_tensor, a_tensor_cpu, b_tensor_cpu)
for args in product(vals, vals):
first, second = args
first_scalar = first if not isinstance(first, torch.Tensor) else first.item()
second_scalar = second if not isinstance(second, torch.Tensor) else second.item()
expected = python_op(first_scalar, second_scalar)
self.assertEqual(expected, python_op(first, second))
self.assertEqual(expected, torch_op(first, second))
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False), torch.testing.get_all_dtypes(include_complex=False)))
def test_maximum_minimum_type_promotion(self, device, dtypes):
a = torch.tensor((0, 1), device=device, dtype=dtypes[0])
b = torch.tensor((1, 0), device=device, dtype=dtypes[1])
for op in (torch.maximum, torch.max, torch.fmax, torch.minimum, torch.min, torch.fmin):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
@dtypes(*(torch.testing.get_all_int_dtypes() + [torch.bool]))
def test_maximum_minimum_int_and_bool(self, device, dtype):
ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
rng = np.random.default_rng()
a_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])
b_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
numpy_result = numpy_op(a_np, b_np)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result)
self.assertEqual(tensor_result, numpy_result)
self.assertEqual(out, numpy_result)
    @precisionOverride({torch.bfloat16: 1e-2})
    @dtypes(*(torch.testing.get_all_fp_dtypes()))
    def test_maximum_minimum_float(self, device, dtype):
        # Compares the min/max family against NumPy on random float inputs,
        # including the out= variant and the torch.max/min aliases.
        ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
               (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
        if dtype == torch.bfloat16:
            # NumPy has no bfloat16; build float64 data and compare with
            # exact_dtype=False below.
            a_np = np.random.randn(10).astype(np.float64)
            b_np = np.random.randn(10).astype(np.float64)
        else:
            a_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
            b_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
        for torch_op, alias, numpy_op in ops:
            numpy_result = numpy_op(a_np, b_np)
            a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
            b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
            tensor_result = torch_op(a_tensor, b_tensor)
            out = torch.empty_like(a_tensor)
            torch_op(a_tensor, b_tensor, out=out)
            if alias is not None:
                # fmax/fmin carry no alias (None in the table above).
                alias_result = alias(a_tensor, b_tensor)
                self.assertEqual(alias_result, tensor_result, exact_dtype=False)
            self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
            self.assertEqual(out, numpy_result, exact_dtype=False)
    @dtypes(*(torch.testing.get_all_fp_dtypes()))
    def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
        # np.maximum and np.minimum functions compare input arrays element-wisely.
        # if one of the elements being compared is a NaN, then that element is returned.
        # (np.fmax/np.fmin, per NumPy docs, instead prefer the non-NaN operand;
        # the value tuples below cover NaN/inf in one side, both sides, neither.)
        ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
               (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
        a_vals = (float('inf'), -float('inf'), float('nan'), float('inf'), float('nan'), float('nan'), 1, float('nan'))
        b_vals = (-float('inf'), float('inf'), float('inf'), float('nan'), float('nan'), 0, float('nan'), -5)
        if dtype == torch.bfloat16:
            # NumPy has no bfloat16; build float64 references and compare with
            # exact_dtype=False below.
            a_np = np.array(a_vals, dtype=np.float64)
            b_np = np.array(b_vals, dtype=np.float64)
        else:
            a_np = np.array(a_vals, dtype=torch_to_numpy_dtype_dict[dtype])
            b_np = np.array(b_vals, dtype=torch_to_numpy_dtype_dict[dtype])
        for torch_op, alias, numpy_op in ops:
            numpy_result = numpy_op(a_np, b_np)
            a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
            b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
            tensor_result = torch_op(a_tensor, b_tensor)
            out = torch.empty_like(a_tensor)
            torch_op(a_tensor, b_tensor, out=out)
            if alias is not None:
                alias_result = alias(a_tensor, b_tensor)
                self.assertEqual(alias_result, tensor_result)
            if dtype == torch.bfloat16:
                self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
                self.assertEqual(out, numpy_result, exact_dtype=False)
            else:
                self.assertEqual(tensor_result, numpy_result)
                self.assertEqual(out, numpy_result)
@dtypes(*product(torch.testing.get_all_complex_dtypes(), torch.testing.get_all_dtypes()))
def test_maximum_minimum_complex(self, device, dtypes):
for torch_op in (torch.maximum, torch.minimum, torch.max, torch.min, torch.fmax, torch.fmin):
with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
torch_op(torch.ones(1, device=device, dtype=dtypes[0]),
torch.ones(1, device=device, dtype=dtypes[1]))
with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
torch_op(torch.ones(1, device=device, dtype=dtypes[1]),
torch.ones(1, device=device, dtype=dtypes[0]))
@onlyCUDA
def test_maximum_minimum_cross_device(self, device):
a = torch.tensor((1, 2, -1))
b = torch.tensor((3, 0, 4), device=device)
ops = (torch.maximum, torch.minimum)
for torch_op in ops:
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch_op(a, b)
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch_op(b, a)
# test cuda tensor and cpu scalar
ops = ((torch.maximum, np.maximum), (torch.minimum, np.minimum))
a_np = np.array(1)
b_np = np.array([3, 0, 4])
for torch_op, numpy_op in ops:
a_tensor = torch.from_numpy(a_np)
b_tensor = torch.from_numpy(b_np).to(device=device)
tensor_result_1 = torch_op(a_tensor, b_tensor)
numpy_result_1 = numpy_op(a_np, b_np)
tensor_result_2 = torch_op(b_tensor, a_tensor)
numpy_result_2 = numpy_op(b_np, a_np)
self.assertEqual(tensor_result_1, numpy_result_1)
self.assertEqual(tensor_result_2, numpy_result_2)
    # TODO: tests like this should be generic
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_mul_intertype_scalar(self, device, dtype):
        # Mixed-dtype multiplication promotes to the float dtype out-of-place,
        # but in-place ops cannot cast the promoted result down into an
        # integer operand.
        x = torch.tensor(1.5, dtype=dtype, device=device)
        y = torch.tensor(3, dtype=torch.int32, device=device)
        self.assertEqual(x * y, 4.5)
        self.assertEqual(y * x, 4.5)
        # y is int32: the float result cannot be written back in-place.
        with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
            y *= x
        # x is floating point, so the in-place multiply succeeds; the failed
        # `y *= x` above left y unmodified (1.5 * 3 == 4.5 below relies on it).
        x *= y
        self.assertEqual(x, 4.5)
@onlyCPU
@dtypes(*torch.testing.get_all_dtypes())
def test_sub(self, device, dtype):
m1 = torch.tensor([2.34, 4.44], dtype=dtype, device=device)
m2 = torch.tensor([1.23, 2.33], dtype=dtype, device=device)
if dtype == torch.bool:
self.assertRaises(RuntimeError, lambda: m1 - m2)
elif (dtype == torch.bfloat16 or dtype == torch.half):
self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype), atol=0.01, rtol=0)
else:
self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype))
@onlyCPU
@dtypes(torch.float)
def test_csub(self, device, dtype):
a = torch.randn(100, 90, dtype=dtype, device=device)
b = a.clone().normal_()
res_add = torch.add(a, b, alpha=-1)
res_csub = a.clone()
res_csub.sub_(b)
self.assertEqual(res_add, res_csub)
a = torch.randn(100, 100, dtype=dtype, device=device)
scalar = 123.5
res_add = torch.add(a, -scalar)
res_csub = a.clone()
res_csub.sub_(scalar)
self.assertEqual(res_add, res_csub)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_min_max_binary_op_nan(self, device, dtype):
a = torch.rand(1000, dtype=dtype, device=device)
b = torch.rand(1000, dtype=dtype, device=device)
a[:250] = float('nan')
b[250:500] = float('nan')
a[500:750] = float('nan')
b[500:750] = float('nan')
ma = torch.max(a, b)
mi = torch.min(a, b)
for i in range(750):
self.assertTrue(torch.isnan(ma[i]), "max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]))
self.assertTrue(torch.isnan(mi[i]), "min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]))
for i in range(750, 1000):
self.assertFalse(torch.isnan(ma[i]), "max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]))
self.assertFalse(torch.isnan(mi[i]), "min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]))
    @dtypes(*product(torch.testing.get_all_dtypes(include_complex=False),
                     torch.testing.get_all_dtypes(include_complex=False)))
    def test_copysign(self, device, dtypes):
        # Compares torch.copysign against np.copysign for every non-complex
        # dtype pair, including broadcasting shapes and special float values.
        def _test_copysign_numpy(a, b):
            torch_result = torch.copysign(a, b)
            # NumPy has no bfloat16; route bfloat16 inputs through float32.
            if a.dtype == torch.bfloat16:
                np_a = a.to(torch.float).cpu().numpy()
            else:
                np_a = a.cpu().numpy()
            if b.dtype == torch.bfloat16:
                np_b = b.to(torch.float).cpu().numpy()
            else:
                np_b = b.cpu().numpy()
            expected = torch.from_numpy(np.copysign(np_a, np_b))
            # For bool/bfloat16/integral inputs the torch and numpy result
            # dtypes can differ; compare in the common promoted dtype.
            types = [torch.bool, torch.bfloat16] + torch.testing.get_all_int_dtypes()
            if a.dtype in types or b.dtype in types:
                promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
                torch_result = torch_result.to(promoted_type)
                expected = expected.to(promoted_type)
            self.assertEqual(torch_result, expected)
            # Verify the sign bit itself (catches 0.0 vs -0.0, which compare
            # equal) by re-applying copysign onto a fixed magnitude of 1.0.
            if a.dtype != torch.float16 and b.dtype != torch.float16:
                self.assertEqual(torch.copysign(torch.tensor(1.0), torch_result),
                                 torch.copysign(torch.tensor(1.0), expected))
        # Same-shape operands.
        a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        # Broadcasting: extra leading/middle dimension on either side.
        a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        # Special float values as magnitude (first arg) and as sign (second).
        cases = [0.0, -0.0, float('inf'), float('-inf'), float('nan')]
        types = [torch.float32, torch.float64]
        if device == 'cpu':
            types.append(torch.float16)
        if dtypes[0] in types:
            b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
            for case in cases:
                _test_copysign_numpy(torch.tensor([case], device=device, dtype=dtypes[0]), b)
        if dtypes[1] in torch.testing.get_all_fp_dtypes():
            a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
            for case in cases:
                _test_copysign_numpy(a, torch.tensor([case], device=device, dtype=dtypes[1]))
    @dtypes(torch.bfloat16, torch.float)
    def test_div(self, device, dtype):
        # torch.div and torch.true_divide (function, method, and in-place
        # variants) against a per-element reference on a column slice.
        for op, method, inplace in ((torch.div, torch.Tensor.div, torch.Tensor.div_),
                                    (torch.true_divide, torch.Tensor.true_divide,
                                     torch.Tensor.true_divide_)):
            m1 = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)
            res1 = m1.clone()
            # In-place divide only column 3 by 2 ...
            inplace(res1[:, 3], 2)
            res2 = m1.clone()
            # ... and compare with an explicit row-by-row division.
            for i in range(m1.size(0)):
                res2[i, 3] = res2[i, 3] / 2
            self.assertEqual(res1, res2)
            if dtype == torch.bfloat16:
                # bfloat16 needs a loose absolute tolerance for the check.
                a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
                a2 = torch.tensor([2., 2.], dtype=dtype, device=device)
                self.assertEqual(op(a1, a2),
                                 torch.tensor([2.1, 3.1], dtype=dtype, device=device),
                                 atol=0.01, rtol=0)
                self.assertEqual(method(a1, a2), op(a1, a2))
@dtypes(torch.bfloat16, torch.float)
def test_true_divide_out(self, device, dtype):
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2., 2.], dtype=dtype, device=device)
res = torch.empty_like(a1)
self.assertEqual(torch.true_divide(a1, a2, out=res),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01, rtol=0)
@onlyCUDA
@dtypes(torch.half)
def test_divmul_scalar(self, device, dtype):
x = torch.tensor(100., device=device, dtype=dtype)
x_ref = x.float()
scale = 1e5
res = x.div(scale)
expected = x_ref.div(scale)
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
x = torch.tensor(1e-5, device=device, dtype=dtype)
x_ref = x.float()
res = x.mul(scale)
expected = x_ref.mul(scale)
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
res = scale * x
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_tensor(self, device, dtype):
x = torch.randn(10, device=device).mul(30).to(dtype)
y = torch.arange(1, 11, dtype=dtype, device=device)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
z = x // y
z_alt = torch.trunc(x.double() / y.double()).to(dtype)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_scalar(self, device, dtype):
x = torch.randn(100, device=device).mul(10).to(dtype)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
z = x // 3
z_alt = torch.tensor([math.trunc(v.item() / 3.) for v in x], dtype=x.dtype, device=device)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
    @onlyOnCPUAndCUDA
    @dtypes(torch.float, torch.long)
    def test_floor_divide_out(self, device, dtype):
        # floor_divide into an out= tensor, with tensor and scalar divisors.
        x = torch.randn(10, device=device).mul(10).to(dtype)
        y = torch.arange(1, 11, dtype=dtype, device=device)
        o = torch.empty(10, dtype=dtype, device=device)
        with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
            torch.floor_divide(x, y, out=o)
            self.assertEqual(o, x // y)
            # Scalar divisor overload.
            torch.floor_divide(x, 2, out=o)
            self.assertEqual(o, x // 2)
            # NOTE(review): dead branch — @dtypes above only supplies
            # torch.float and torch.long, so dtype is never torch.int here;
            # the mixed-out-dtype path below never runs. TODO confirm intent.
            if dtype == torch.int:
                o = torch.empty(10, dtype=torch.float, device=device)
                torch.floor_divide(x, y, out=o)
                self.assertEqual(o, torch.floor_divide(x.float(), y.float()))
@onlyCPU
@dtypes(*torch.testing.get_all_math_dtypes('cpu'))
def test_rdiv(self, device, dtype):
if dtype is torch.float16:
return
elif dtype.is_complex:
x = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)
else:
x = torch.rand(100, device=device).add(1).mul(4).to(dtype)
y = 30 / x
z = torch.tensor([30 / v.item() for v in x], device=device)
self.assertEqual(y, z, exact_dtype=False)
@dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))
def test_fmod_remainder_by_zero_float(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
self.assertTrue(torch.all(fn(x, 0.0).isnan()))
self.assertTrue(torch.all(fn(x, zero).isnan()))
    @onlyOnCPUAndCUDA
    @skipCUDAIfRocm
    @dtypes(*torch.testing.get_all_int_dtypes())
    def test_fmod_remainder_by_zero_integral(self, device, dtype):
        # Integral fmod/remainder by zero: CPU raises, CUDA does not trap and
        # produces dtype-dependent bit patterns that this test pins down.
        fn_list = (torch.fmod, torch.remainder)
        for fn in fn_list:
            x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
            zero = torch.zeros_like(x)
            if self.device_type == 'cpu':
                with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
                    fn(x, zero)
            else:
                # Observed CUDA results (no trap on device):
                # for integral dividend (other than int64) divided by zero. For int64,
                # CUDA returns all 1s for negative dividend, half 1s for positive dividend.
                # uint8: 0xff -> 255
                # int32: 0xffffffff -> -1
                if dtype == torch.int64:
                    self.assertEqual(fn(x, zero) == 4294967295, x >= 0)
                    self.assertEqual(fn(x, zero) == -1, x < 0)
                else:
                    value = 255 if dtype == torch.uint8 else -1
                    self.assertTrue(torch.all(fn(x, zero) == value))
@dtypes(*torch.testing.get_all_dtypes(include_bfloat16=False, include_bool=False, include_complex=False))
def test_fmod_remainder(self, device, dtype):
# Use numpy as reference
def _helper(x, mod, fns_list):
for fn, inplace_fn, ref_fn in fns_list:
np_x = x.cpu().numpy() if torch.is_tensor(x) else x
np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod
exp = ref_fn(np_x, np_mod)
exp = torch.from_numpy(exp)
res = fn(x, mod)
self.assertEqual(res, exp, exact_dtype=False)
if torch.is_tensor(x):
# out
out = torch.empty(0, device=device, dtype=res.dtype)
fn(x, mod, out=out)
self.assertEqual(out, exp, exact_dtype=False)
self.assertEqual(out.size(), torch.Size([10, 10]))
# in-place (Type cast runtime error)
try:
inplace_fn(x, mod)
self.assertEqual(x, exp, exact_dtype=False)
except RuntimeError as e:
self.assertRegex(str(e), "result type (Half|Float|Double) "
"can't be cast to the desired output "
"type (Byte|Char|Short|Int|Long)")
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
mod[mod == 0] = 1
mods = [3, 2.3, mod, mod.t()]
if dtype in torch.testing.get_all_int_dtypes():
mod_float = make_tensor((10, 10), device=device, dtype=torch.float, low=-9, high=9)
mod[mod == 0] = 1
mods.append(mod_float)
for dividend, mod in product([x, x.t()], mods):
_helper(dividend, mod,
((torch.fmod, torch.Tensor.fmod_, np.fmod),
(torch.remainder, torch.Tensor.remainder_, np.remainder),))
for dividend, mod in product([5, 3.14], mods):
if torch.is_tensor(mod):
_helper(dividend, mod,
((torch.remainder, torch.Tensor.remainder_, np.remainder),))
@dtypes(torch.float, torch.double)
def test_remainder_fmod_large_dividend(self, device, dtype):
alarge = 1e9
pi = 3.14159265358979
for avalue in [alarge, -alarge]:
for bvalue in [pi, -pi]:
a = torch.tensor([avalue], dtype=dtype, device=device)
b = torch.tensor([bvalue], dtype=dtype, device=device)
c = torch.remainder(a, b)
d = torch.fmod(a, b)
self.assertTrue((b[0] > 0) == (c[0] > 0))
self.assertTrue((a[0] > 0) == (d[0] > 0))
self.assertTrue(abs(c[0]) < abs(b[0]))
self.assertTrue(abs(d[0]) < abs(b[0]))
if ((a[0] > 0) == (b[0] > 0)):
self.assertTrue(c[0] == d[0])
else:
self.assertTrue(abs(c[0] - d[0]) == abs(b[0]))
@dtypesIfCPU(torch.bfloat16, torch.float32, torch.float64)
@dtypes(torch.float32, torch.float64)
def test_hypot(self, device, dtype):
inputs = [
(torch.randn(10, device=device).to(dtype), torch.randn(10, device=device).to(dtype)),
(torch.randn((3, 3, 3), device=device).to(dtype), torch.randn((3, 3, 3), device=device).to(dtype)),
(torch.randn((10, 1), device=device).to(dtype), torch.randn((10, 1), device=device).to(dtype).transpose(0, 1)),
(torch.randint(100, (10, ), device=device, dtype=torch.long), torch.randn(10, device=device).to(dtype))
]
for input in inputs:
actual = torch.hypot(input[0], input[1])
if dtype == torch.bfloat16:
expected = torch.sqrt(input[0] * input[0] + input[1] * input[1])
else:
expected = np.hypot(input[0].cpu().numpy(), input[1].cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
    @onlyOnCPUAndCUDA
    @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_gcd(self, device, dtype):
        """torch.gcd against np.gcd, including zeros and (for signed dtypes)
        negative operands."""
        # gcd with a zero operand returns the other operand.
        t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
        t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
        actual = torch.gcd(t1, t2)
        expected = np.gcd([0, 10, 0], [0, 0, 10])
        self.assertEqual(actual, expected, exact_dtype=False)
        if dtype == torch.uint8:
            # uint8 values >= 128 would be misinterpreted by a signed kernel.
            a = torch.tensor([190, 210], device=device, dtype=dtype)
            b = torch.tensor([190, 220], device=device, dtype=dtype)
            actual = torch.gcd(a, b)
            expected = torch.tensor([190, 10], device=device, dtype=dtype)
            self.assertEqual(actual, expected)
        else:
            # Random values, including negatives, compared against NumPy.
            a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
            b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
            actual = torch.gcd(a, b)
            expected = np.gcd(a.cpu().numpy(), b.cpu().numpy())
            self.assertEqual(actual, expected)
    @onlyOnCPUAndCUDA
    @dtypes(torch.int16, torch.int32, torch.int64)
    def test_lcm(self, device, dtype):
        """torch.lcm against np.lcm, including zeros and negative operands."""
        # lcm with a zero operand is zero.
        t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
        t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
        actual = torch.lcm(t1, t2)
        expected = np.lcm([0, 10, 0], [0, 0, 10])
        self.assertEqual(actual, expected, exact_dtype=False)
        # Random values, including negatives, compared against NumPy.
        a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
        b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
        actual = torch.lcm(a, b)
        expected = np.lcm(a.cpu().numpy(), b.cpu().numpy())
        self.assertEqual(actual, expected, exact_dtype=False)
    @onlyOnCPUAndCUDA
    @dtypes(torch.float32, torch.float64)
    def test_nextafter(self, device, dtype):
        """torch.nextafter against np.nextafter: +/-inf targets, NaN
        propagation and random pairs, all compared bit-exactly."""
        t1 = torch.tensor([0, 0, 10], device=device, dtype=dtype)
        t2 = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)
        actual = torch.nextafter(t1, t2)
        expected = np.nextafter(t1.cpu().numpy(), t2.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
        actual = torch.nextafter(t2, t1)
        expected = np.nextafter(t2.cpu().numpy(), t1.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
        # A NaN in either operand yields NaN.
        t1 = torch.tensor([0, nan], device=device, dtype=dtype)
        t2 = torch.tensor([nan, 0], device=device, dtype=dtype)
        self.assertTrue(torch.nextafter(t1, t2).isnan().all())
        a = torch.randn(100, device=device, dtype=dtype)
        b = torch.randn(100, device=device, dtype=dtype)
        actual = torch.nextafter(a, b)
        expected = np.nextafter(a.cpu().numpy(), b.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
    def _test_cop(self, torchfn, mathfn, dtype, device):
        """Compare a component-wise tensor op against a scalar reference on
        non-contiguous slices.

        `torchfn` is the tensor-level op; `mathfn` is the equivalent scalar
        function applied element by element in `reference_implementation`.
        """
        def reference_implementation(res2):
            # sm2 is indexed as a flat buffer; idx1d maps sm1's (i, j) into it.
            for i, j in iter_indices(sm1):
                idx1d = i * sm1.size(0) + j
                res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])
            return res2
        # Case 1: contiguous row slices.
        m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
        m2 = torch.randn(10, 10 * 10, dtype=dtype, device=device)
        sm1 = m1[4]
        sm2 = m2[4]
        res1 = torchfn(sm1, sm2.view(10, 10))
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)
        # Case 2: non-contiguous column slices. sm2 is re-strided so the op
        # sees a strided 2-D view, then restored before the reference pass.
        m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
        m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)
        sm1 = m1[:, 4]
        sm2 = m2[:, 4]
        sm2.set_(sm2.storage(), sm2.storage_offset(), sm1.size(), (sm2.stride()[0] * 10, sm2.stride()[0]))
        res1 = torchfn(sm1, sm2)
        sm2.set_(sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride())
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)
    @onlyCPU
    @dtypes(torch.float)
    def test_cdiv(self, device, dtype):
        """Component-wise division on non-contiguous slices (see _test_cop)."""
        self._test_cop(torch.div, lambda x, y: x / y, dtype, device)
    @onlyCPU
    @dtypes(torch.float)
    def test_cremainder(self, device, dtype):
        """Component-wise remainder on non-contiguous slices (see _test_cop)."""
        self._test_cop(torch.remainder, lambda x, y: x % y, dtype, device)
    @onlyCPU
    @dtypes(torch.float)
    def test_cmul(self, device, dtype):
        """Component-wise multiply on non-contiguous slices (see _test_cop)."""
        self._test_cop(torch.mul, lambda x, y: x * y, dtype, device)
    @onlyCPU
    @dtypes(torch.float)
    def test_cpow(self, device, dtype):
        """Component-wise pow on non-contiguous slices; the scalar reference
        maps negative bases to NaN (see _test_cop)."""
        self._test_cop(torch.pow, lambda x, y: nan if x < 0 else math.pow(x, y), dtype, device)
    @onlyCPU
    @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_floor_divide_zero(self, device, dtype):
        """Integer `//` with a zero divisor raises a RuntimeError whose
        message mentions ZeroDivisionError; the deprecated floor_divide
        behavior also emits a UserWarning first."""
        a = torch.tensor([0, 1], dtype=dtype, device=device)
        b = torch.tensor([0, 1], dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, 'ZeroDivisionError'):
            with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                a // b
    @unittest.skipIf(TEST_WITH_ASAN, "Integer overflows are not allowed under ASAN")
    @dtypes(*torch.testing.get_all_dtypes())
    def test_muldiv_scalar(self, device, dtype):
        """Scalar mul/div agrees with the equivalent full-tensor operand, on
        both operand orders."""
        x = make_tensor((10, 3), device, dtype, low=None, high=None)
        # Draw the scalar from a CPU tensor so it is a plain Python number.
        s = make_tensor((1,), 'cpu', dtype, low=None, high=None).item()
        y = torch.full_like(x, s)
        self.assertEqual(x * s, x * y)
        self.assertEqual(s * x, y * x)
        self.assertEqual(x / s, x / y)
        self.assertEqual(s / x, y / x)
    @dtypes(*tuple(itertools.combinations_with_replacement(torch.testing.get_all_dtypes(), 2)))
    def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
        """Comparison/logical ops across all dtype pairings with broadcasting:
        results match NumPy and the functional forms always return bool
        tensors. Ordered comparisons are skipped for complex inputs."""
        def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):
            # NumPy has no bfloat16 dtype; compare bfloat16 inputs
            # by letting numpy treat them as float32's
            x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)
            y_np = y.cpu().numpy() if y.dtype != torch.bfloat16 else y.to(torch.float32).cpu().numpy()
            self.compare_with_numpy(lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),
                                    lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),
                                    x_np)
        # Complex numbers have no total order, so skip lt/le/gt/ge for them.
        complex_op_denylist = [torch.lt, torch.le, torch.gt, torch.ge]
        input_sizes = [
            (1,),
            (10,),
            (10, 1),
            (1, 10),
            (4, 10),
            (64, 10),
            (12, 3)]
        op_pairs = [(torch.lt, np.less),
                    (torch.le, np.less_equal),
                    (torch.gt, np.greater),
                    (torch.ge, np.greater_equal),
                    (torch.eq, np.equal),
                    (torch.ne, np.not_equal),
                    (torch.logical_and, np.logical_and),
                    (torch.logical_or, np.logical_or),
                    (torch.logical_xor, np.logical_xor)]
        for size1 in input_sizes:
            size2 = (2,) + size1  # b broadcasts against a's shape
            for with_extremal in [False, True]:
                a = _generate_input(size1, dtypes[0], device, with_extremal)
                b = _generate_input(size2, dtypes[1], device, with_extremal)
                for torch_op, numpy_op in op_pairs:
                    if (dtypes[0].is_complex or dtypes[1].is_complex) and torch_op in complex_op_denylist:
                        continue
                    # Functional form; comparison ops always produce bool.
                    compare_with_numpy_bin_op(torch_op, numpy_op, a, b)
                    self.assertEqual(torch_op(a, b).dtype, torch.bool)
                    # out= form: complex128 can hold any cast of the result.
                    # NOTE(review): out is created without device= — presumably
                    # relies on out-handling in compare_with_numpy; confirm.
                    out = torch.zeros(1, dtype=torch.complex128)
                    compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)
    @onlyOnCPUAndCUDA
    @dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
    def test_signed_shift(self, device, dtype):
        """`<<` and `>>` on signed integers are arithmetic shifts (the sign
        is preserved), matching NumPy."""
        a = torch.tensor([-10, 10], device=device, dtype=dtype)
        expected_l = torch.tensor([-40, 40], device=device, dtype=dtype)
        self.assertEqual(a << 2, expected_l)
        self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), a)
        expected_r = torch.tensor([-5, 5], device=device, dtype=dtype)
        self.assertEqual(a >> 1, expected_r)
        self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), a)
def test_bitwise_and(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([0, 0, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([0, 2, 2], dtype=dtype, device=device)
self.assertEqual(torch.bitwise_and(a, b), expected_res)
self.assertEqual(torch.bitwise_and(a, b_scalar), expected_res_scalar)
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_and(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_and(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
a1 = a.clone()
a1.bitwise_and_(b)
self.assertEqual(a1, expected_res)
a.bitwise_and_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([False, True, False], device=device),
torch.bitwise_and(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_or(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -2, 3], dtype=dtype, device=device)
self.assertEqual(torch.bitwise_or(a, b), expected_res)
self.assertEqual(torch.bitwise_or(a, b_scalar), expected_res_scalar)
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_or(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_or(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
a1 = a.clone()
a1.bitwise_or_(b)
self.assertEqual(a1, expected_res)
a.bitwise_or_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, True, False], device=device),
torch.bitwise_or(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_xor(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 0], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -4, 1], dtype=dtype, device=device)
self.assertEqual(torch.bitwise_xor(a, b), expected_res)
self.assertEqual(torch.bitwise_xor(a, b_scalar), expected_res_scalar)
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_xor(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_xor(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
a1 = a.clone()
a1.bitwise_xor_(b)
self.assertEqual(a1, expected_res)
a.bitwise_xor_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, False, False], device=device),
torch.bitwise_xor(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
    @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_bitwise_shift(self, device, dtype):
        """bitwise_left_shift/right_shift (functional and operator forms)
        against NumPy, with tensor and scalar shift amounts."""
        ops = [
            (torch.bitwise_left_shift, np.left_shift),
            (operator.lshift, operator.lshift),
            (torch.bitwise_right_shift, np.right_shift),
            (operator.rshift, operator.rshift),
        ]
        for torch_op, numpy_op in ops:
            a = torch.tensor([19, -20, -21, 22], dtype=dtype, device=device)
            b = torch.tensor([2, 1, 3, 1], dtype=dtype, device=device)
            a_np = a.cpu().numpy()
            b_np = b.cpu().numpy()
            # Tensor x Tensor
            self.assertEqual(torch_op(a, b), torch.tensor(numpy_op(a_np, b_np), device=device))
            # Tensor x int scalar
            self.assertEqual(torch_op(a, 2), torch.tensor(numpy_op(a_np, 2), device=device))
    def test_bitwise_shift_float(self, device):
        """Shifts involving floats: shifting by y multiplies/divides by
        2**y; a fractional shift amount applied to an int tensor is
        truncated first (1.8 behaves like 1)."""
        ops = [
            (torch.bitwise_left_shift, lambda x, y: x * 2. ** y),
            (operator.lshift, lambda x, y: x * 2. ** y),
            (torch.bitwise_right_shift, lambda x, y: x / 2. ** y),
            (operator.rshift, lambda x, y: x / 2. ** y),
        ]
        for torch_op, expected_op in ops:
            # int tensor x float scalar: shift amount 1.8 truncates to 1.
            a = torch.tensor([19, -20, -21, 22], dtype=torch.int64, device=device)
            self.assertEqual(torch_op(a, 1.8), torch.floor(expected_op(a, 1)).to(a.dtype))
            # float tensor x int scalar
            a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
            self.assertEqual(torch_op(a, 2), expected_op(a, 2))
            # float tensor x float scalar
            a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
            self.assertEqual(torch_op(a, 2.2), expected_op(a, 2.2))
@onlyOnCPUAndCUDA
@dtypes(*list(product(torch.testing.get_all_dtypes(include_complex=False),
torch.testing.get_all_dtypes(include_complex=False))))
def test_heaviside(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
rng = np.random.default_rng()
input = np.array(rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[input_dtype if (input_dtype != torch.bfloat16) else torch.float64])
input[0] = input[3] = input[7] = 0
values = np.array(rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[values_dtype if (values_dtype != torch.bfloat16) else torch.float64])
np_result = torch.from_numpy(np.heaviside(input, values)).to(device=device, dtype=input_dtype)
input = torch.from_numpy(input).to(device=device, dtype=input_dtype)
values = torch.from_numpy(values).to(device=device, dtype=values_dtype)
out = torch.empty_like(input)
if input_dtype == values_dtype:
torch_result = torch.heaviside(input, values)
self.assertEqual(np_result, torch_result)
torch_result = input.heaviside(values)
self.assertEqual(np_result, torch_result)
torch.heaviside(input, values, out=out)
self.assertEqual(np_result, out)
input.heaviside_(values)
self.assertEqual(np_result, input)
else:
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
torch.heaviside(input, values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
input.heaviside(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
torch.heaviside(input, values, out=out)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
input.heaviside_(values)
    @onlyCUDA
    def test_heaviside_cross_device(self, device):
        """0-dim CPU tensors may mix with CUDA tensors in heaviside, but
        non-scalar cross-device operands must raise."""
        x = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
        y = torch.tensor(0)
        result = torch.heaviside(x, y)
        expect = torch.tensor([0, 1, 0, 1, 0, 1], device=device)
        self.assertEqual(result, expect)
        result = torch.heaviside(y, x)
        expect = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
        self.assertEqual(result, expect)
        # Non-scalar CPU tensor with a CUDA tensor: must raise either way.
        x = torch.tensor([-9, 5, 0, 6, -2, 2])
        y = torch.tensor(0, device=device)
        with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
            torch.heaviside(x, y)
        with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
            torch.heaviside(y, x)
@dtypes(*list(product(torch.testing.get_all_complex_dtypes(),
torch.testing.get_all_complex_dtypes())))
def test_heaviside_complex(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
data = (complex(0, -6), complex(-1, 3), complex(1, 1))
input = torch.tensor(data, device=device, dtype=input_dtype)
values = torch.tensor(data, device=device, dtype=values_dtype)
out = torch.empty_like(input)
real = input.real
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
torch.heaviside(input, real)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
real.heaviside(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
input.heaviside_(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
torch.heaviside(real, real, out=out)
    def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):
        """Drive a logical_* op (`op` is the method/function name) on inputs
        of dtypes[0] and dtypes[1]: functional, out= and in-place forms.
        In-place must raise when the operand dtypes differ."""
        expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)
        a = torch.tensor(a_, dtype=dtypes[0], device=device)
        b = torch.tensor(b_, dtype=dtypes[1], device=device)
        # Functional form returns a bool tensor.
        self.assertEqual(expected_res.bool(), getattr(a, op)(b))
        # out= form (bool out tensor is resized as needed).
        c = torch.empty(0, dtype=torch.bool, device=device)
        getattr(torch, op)(a, b, out=c)
        self.assertEqual(expected_res.bool(), c)
        # In-place form is only defined when both operands share a dtype.
        if dtypes[0] != dtypes[1]:
            with self.assertRaises(RuntimeError):
                getattr(a, op + '_')(b)
            return
        getattr(a, op + '_')(b)
        self.assertEqual(expected_res, a)
    @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
    def test_logical_xor(self, device, dtypes):
        """Elementwise logical_xor across every dtype pairing."""
        self._test_logical(device, dtypes, 'logical_xor', [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1])
    @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
    def test_logical_and(self, device, dtypes):
        """Elementwise logical_and across every dtype pairing."""
        self._test_logical(device, dtypes, 'logical_and', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0])
    @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
    def test_logical_or(self, device, dtypes):
        """Elementwise logical_or across every dtype pairing."""
        self._test_logical(device, dtypes, 'logical_or', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])
def test_remainder_overflow(self, device):
x = torch.tensor(23500, dtype=torch.int64, device=device)
q = 392486996410368
self.assertEqual(x % q, x)
self.assertEqual(-x % q, q - x)
self.assertEqual(x % -q, x - q)
self.assertEqual(-x % -q, -x)
def test_rpow(self, device):
m = torch.randn(10, 10, device=device)
self.assertEqual(torch.pow(2, m), 2**m)
m = torch.randn(1, device=device).squeeze()
assert m.dim() == 0, "m is intentionally a scalar"
self.assertEqual(torch.pow(2, m), 2**m)
    @onlyCPU
    def test_ldexp(self, device):
        """torch.ldexp (functional, method and in-place forms) against
        np.ldexp, including inf/nan mantissas."""
        # random values
        mantissas = torch.randn(64, device=device)
        exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)
        np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
        pt_outcome_1 = torch.ldexp(mantissas, exponents)
        pt_outcome_2 = mantissas.ldexp(exponents)
        self.assertEqual(np_outcome, pt_outcome_1)
        self.assertEqual(np_outcome, pt_outcome_2)
        mantissas.ldexp_(exponents)
        self.assertEqual(np_outcome, mantissas)
        # special values: infinities and NaN must propagate like NumPy
        mantissas = torch.tensor([float('inf'), float('-inf'), float('inf'), float('nan')], device=device)
        exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
        np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
        pt_outcome = torch.ldexp(mantissas, exponents)
        self.assertEqual(np_outcome, pt_outcome)
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_lerp(self, device, dtype):
        """torch.lerp across broadcastable start/end/weight shape
        combinations, with tensor and scalar (incl. complex) weights, in
        functional, method and out= forms."""
        start_end_weight_shapes = [(), (5,), (5, 5)]
        for shapes in product(start_end_weight_shapes, start_end_weight_shapes, start_end_weight_shapes):
            start = torch.randn(shapes[0], device=device, dtype=dtype)
            end = torch.randn(shapes[1], device=device, dtype=dtype)
            # Both tensor weights and Python scalar weights are supported.
            weights = [torch.randn(shapes[2], device=device, dtype=dtype), random.random()]
            if dtype.is_complex:
                weights += [complex(0, 1), complex(0.4, 1.2)]
            for weight in weights:
                actual = torch.lerp(start, end, weight)
                actual_method = start.lerp(end, weight)
                self.assertEqual(actual, actual_method)
                actual_out = torch.tensor(1., dtype=dtype, device=device)
                torch.lerp(start, end, weight, out=actual_out)
                self.assertEqual(actual, actual_out)
                # Reference definition: lerp(a, b, w) == a + w * (b - a).
                expected = start + weight * (end - start)
                self.assertEqual(expected, actual)
def _test_logaddexp(self, device, dtype, base2):
if base2:
ref_func = np.logaddexp2
our_func = torch.logaddexp2
else:
ref_func = np.logaddexp
our_func = torch.logaddexp
def _test_helper(a, b):
ref = ref_func(a.cpu().numpy(), b.cpu().numpy())
v = our_func(a, b)
self.assertEqual(ref, v)
a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
_test_helper(a, b)
_test_helper(a[:3], b[:3])
a *= 10000
b *= 10000
_test_helper(a, b)
_test_helper(a[:3], b[:3])
a = torch.tensor([float('inf'), float('-inf'), float('inf'), float("nan")], dtype=dtype, device=device)
b = torch.tensor([float('inf'), float('-inf'), float('-inf'), float("nan")], dtype=dtype, device=device)
_test_helper(a, b)
    @dtypes(torch.float32, torch.float64)
    def test_logaddexp(self, device, dtype):
        """torch.logaddexp against np.logaddexp (see _test_logaddexp)."""
        self._test_logaddexp(device, dtype, base2=False)
    @dtypes(torch.float32, torch.float64)
    def test_logaddexp2(self, device, dtype):
        """torch.logaddexp2 against np.logaddexp2 (see _test_logaddexp)."""
        self._test_logaddexp(device, dtype, base2=True)
    def test_add(self, device):
        """Broad coverage of torch.add / `+`: tensor+tensor (contiguous and
        non-contiguous), in-place scalar add, scalar promotion, empty
        operands, uint8/bool/bfloat16/complex inputs, `alpha=` scaling and
        the error cases for invalid alpha or out dtypes."""
        dtypes = [torch.float, torch.double] + torch.testing.get_all_complex_dtypes()
        for dtype in dtypes:
            # [res] torch.add([res,] tensor1, tensor2) -- contiguous row
            m1 = torch.randn(100, 100, dtype=dtype, device=device)
            v1 = torch.randn(100, dtype=dtype, device=device)
            res1 = torch.add(m1[4], v1)
            res2 = res1.clone().zero_()
            for i in range(m1.size(1)):
                res2[i] = m1[4, i] + v1[i]
            self.assertEqual(res1, res2)
            # non-contiguous column slice
            m1 = torch.randn(100, 100, device=device)
            v1 = torch.randn(100, device=device)
            res1 = torch.add(m1[:, 4], v1)
            res2 = res1.clone().zero_()
            for i in range(m1.size(0)):
                res2[i] = m1[i, 4] + v1[i]
            self.assertEqual(res1, res2)
            # in-place scalar add on a contiguous row
            m1 = torch.randn(10, 10, device=device)
            res1 = m1.clone()
            res1[3].add_(2)
            res2 = m1.clone()
            for i in range(m1.size(1)):
                res2[3, i] = res2[3, i] + 2
            self.assertEqual(res1, res2)
            # in-place scalar add on a non-contiguous column
            m1 = torch.randn(10, 10, device=device)
            res1 = m1.clone()
            res1[:, 3].add_(2)
            res2 = m1.clone()
            for i in range(m1.size(0)):
                res2[i, 3] = res2[i, 3] + 2
            self.assertEqual(res1, res2)
            # a Python scalar behaves like a 0-dim tensor operand
            m1 = torch.randn(10, 10, dtype=dtype, device=device)
            self.assertEqual(m1 + 3, m1 + torch.tensor(3))
            self.assertEqual(3 + m1, torch.tensor(3) + m1)
            # contiguous + non-contiguous yields a contiguous result
            m1 = torch.randn(10, 10, dtype=dtype, device=device)
            m2 = torch.randn(10, 10, dtype=dtype, device=device).t()
            res = m1 + m2
            self.assertTrue(res.is_contiguous())
            self.assertEqual(res, m1 + m2.contiguous())
            # 1d + empty broadcasts to empty
            m1 = torch.tensor([1.0], dtype=dtype, device=device)
            m2 = torch.tensor([], dtype=dtype, device=device)
            self.assertEqual(m1 + m2, [])
        # uint8 + Python int stays uint8
        one = torch.tensor(1, dtype=torch.uint8, device=device)
        self.assertEqual(torch.add(one, 1), 2)
        self.assertEqual(torch.add(one, 1).dtype, torch.uint8)
        # bool `+` acts as logical OR
        m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
        m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
        expected = torch.tensor([True, True, False, True, False, True], dtype=torch.bool, device=device)
        self.assertEqual(m1 + m2, expected)
        # bool with alpha=0 keeps the result all-False
        a = torch.zeros(2, 3, dtype=torch.bool, device=device)
        res = torch.add(a, a, alpha=0)
        expected = torch.zeros(2, 3, device=device).bool()
        self.assertEqual(res, expected)
        # bfloat16 (CPU tensors, independent of `device`)
        m1 = torch.tensor([1., 2.], dtype=torch.bfloat16)
        m2 = torch.tensor([3., 4.], dtype=torch.bfloat16)
        self.assertEqual(m1 + m2, torch.tensor([4., 6.], dtype=torch.bfloat16))
        # complex inputs with real, complex and integral alpha
        m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)
        m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)
        res = torch.add(m1, m2, alpha=0.1)
        expected = torch.tensor([2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device)
        self.assertEqual(res, expected)
        res = torch.add(m1, m2, alpha=complex(0.1, 0.2))
        expected = torch.tensor([1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device)
        self.assertEqual(res, expected)
        res = torch.add(m1, m2, alpha=2)
        expected = torch.tensor([10. + 13.j, 8. + 11.j], dtype=torch.complex64, device=device)
        self.assertEqual(res, expected)
        # invalid alpha types must raise
        m1 = torch.tensor([1], dtype=torch.int8, device=device)
        m2 = torch.tensor([2], dtype=torch.int8, device=device)
        self.assertRaisesRegex(RuntimeError,
                               r"Boolean alpha only supported for Boolean results\.",
                               lambda: torch.add(m1, m2, alpha=True))
        self.assertRaisesRegex(RuntimeError,
                               r"For integral input tensors, argument alpha must not be a floating point number\.",
                               lambda: torch.add(m1, m2, alpha=1.0))
        # complex alpha requires complex inputs
        msg = r"For non-complex input tensors, argument alpha must not be a complex number\."
        m1 = torch.tensor([3., 4.], device=device)
        m2 = torch.tensor([4., 3.], device=device)
        self.assertRaisesRegex(RuntimeError, msg,
                               lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))
        m1 = torch.tensor([3., 4.], dtype=torch.double, device=device)
        m2 = torch.tensor([4., 3.], dtype=torch.double, device=device)
        self.assertRaisesRegex(RuntimeError, msg,
                               lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))
        # out= must not downcast a complex result
        m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)
        m2 = torch.tensor(4., dtype=torch.float64)
        self.assertRaisesRegex(RuntimeError, r"result type ComplexFloat can't be cast to the desired output type Double",
                               lambda: torch.add(m1, m1, out=m2))
    @onlyCUDA
    def test_addsub_half_tensor(self, device):
        """fp16 add/sub with `alpha=` must not overflow to inf/nan for
        values near the half-precision limit (intermediates should be
        computed at higher precision)."""
        x = torch.tensor([60000.0], dtype=torch.half, device=device)
        for op, y, alpha in (
            (torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),
            (torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),
            (torch.add, -70000.0, 1),
            (torch.sub, 70000.0, 1),
        ):
            actual = op(x, y, alpha=alpha)
            self.assertTrue(not (actual.isnan() or actual.isinf()))
    def test_sub_typing(self, device):
        """Subtraction involving bool tensors is rejected with a message
        pointing at the logical operators; invalid alpha types also raise."""
        m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
        m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
        self.assertRaisesRegex(RuntimeError,
                               r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
                               r"Use the `\^` or `logical_xor\(\)` operator instead.",
                               lambda: m1 - m2)
        self.assertRaisesRegex(RuntimeError,
                               r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                               r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
                               lambda: 1 - m1)
        self.assertRaisesRegex(RuntimeError,
                               r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                               r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
                               lambda: m2 - 1)
        # mismatched alpha
        m1 = torch.tensor([1], dtype=torch.int8, device=device)
        m2 = torch.tensor([2], dtype=torch.int8, device=device)
        self.assertRaisesRegex(RuntimeError,
                               r"Boolean alpha only supported for Boolean results\.",
                               lambda: torch.sub(m1, m2, alpha=True))
        self.assertRaisesRegex(RuntimeError,
                               r"For integral input tensors, argument alpha must not be a floating point number\.",
                               lambda: torch.sub(m1, m2, alpha=1.0))
def test_mul(self, device):
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].mul_(2)
res2 = m1.clone()
for i in range(res1.size(0)):
res2[i, 3] = res2[i, 3] * 2
self.assertEqual(res1, res2)
a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)
a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
self.assertEqual(a1 * a2, torch.tensor([True, False, False, False], dtype=torch.bool, device=device))
if device == 'cpu':
a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)
a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)
self.assertEqual(a1 * a2, torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device), atol=0.01, rtol=0)
self.assertEqual(a1.mul(a2), a1 * a2)
def test_bool_tensor_comparison_ops(self, device):
a = torch.tensor([True, False, True, False, True, False], dtype=torch.bool, device=device)
b = torch.tensor([True, False, True, True, True, True], dtype=torch.bool, device=device)
self.assertEqual(a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device))
self.assertEqual(a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device))
self.assertEqual(a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(True, dtype=torch.bool, device=device),
torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(0, dtype=torch.bool, device=device),
torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertFalse(a.equal(b))
    @dtypes(*torch.testing.get_all_dtypes(include_complex=False))
    def test_logical(self, device, dtype):
        """Comparison methods (lt/le/ge/gt/eq/ne) against a Python scalar
        and a 1-element tensor; for bool dtype the ordering False < True
        is checked instead."""
        if dtype != torch.bool:
            x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
            b = torch.tensor([2], device=device, dtype=dtype)
            # Scalar right-hand side.
            self.assertEqual(x.lt(2), torch.tensor([True, False, False, False]))
            self.assertEqual(x.le(2), torch.tensor([True, True, False, False]))
            self.assertEqual(x.ge(2), torch.tensor([False, True, True, True]))
            self.assertEqual(x.gt(2), torch.tensor([False, False, True, True]))
            self.assertEqual(x.eq(2), torch.tensor([False, True, False, False]))
            self.assertEqual(x.ne(2), torch.tensor([True, False, True, True]))
            # Broadcast 1-element tensor right-hand side.
            self.assertEqual(x.lt(b), torch.tensor([True, False, False, False]))
            self.assertEqual(x.le(b), torch.tensor([True, True, False, False]))
            self.assertEqual(x.ge(b), torch.tensor([False, True, True, True]))
            self.assertEqual(x.gt(b), torch.tensor([False, False, True, True]))
            self.assertEqual(x.eq(b), torch.tensor([False, True, False, False]))
            self.assertEqual(x.ne(b), torch.tensor([True, False, True, True]))
        else:
            x = torch.tensor([True, False, True, False], device=device)
            self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))
            self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))
            self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))
            self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))
            self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))
            self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))
def test_atan2(self, device):
def _test_atan2_with_size(size, device):
a = torch.rand(size=size, device=device, dtype=torch.double)
b = torch.rand(size=size, device=device, dtype=torch.double)
actual = a.atan2(b)
x = a.view(-1)
y = b.view(-1)
expected = torch.tensor([math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],
device=device, dtype=torch.double)
self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)
_test_atan2_with_size((2, 2), device)
_test_atan2_with_size((3, 3), device)
_test_atan2_with_size((5, 5), device)
    def test_atan2_edgecases(self, device):
        """Known-angle checks for torch.atan2 on the axes and all four
        quadrants (arguments are (x, y); torch.atan2 takes (y, x))."""
        def _test_atan2(x, y, expected, device, dtype):
            expected_tensor = torch.tensor([expected], dtype=dtype, device=device)
            x_tensor = torch.tensor([x], dtype=dtype, device=device)
            y_tensor = torch.tensor([y], dtype=dtype, device=device)
            actual = torch.atan2(y_tensor, x_tensor)
            self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)
        for dtype in [torch.float, torch.double]:
            # Axes.
            _test_atan2(0, 0, 0, device, dtype)
            _test_atan2(0, 1, math.pi / 2, device, dtype)
            _test_atan2(0, -1, math.pi / -2, device, dtype)
            _test_atan2(-1, 0, math.pi, device, dtype)
            _test_atan2(1, 0, 0, device, dtype)
            # Quadrants.
            _test_atan2(-1, -1, math.pi * -3 / 4 , device, dtype)
            _test_atan2(1, 1, math.pi / 4 , device, dtype)
            _test_atan2(1, -1, math.pi / -4 , device, dtype)
            _test_atan2(-1, 1, math.pi * 3 / 4 , device, dtype)
    def test_trapz(self, device):
        """torch.trapz against np.trapz for both the dx= and x= forms,
        including empty inputs and the error paths for out-of-range dims
        and mismatched x lengths."""
        def test_dx(sizes, dim, dx, device):
            # Uniform sample spacing form.
            t = torch.randn(sizes, device=device)
            actual = torch.trapz(t, dx=dx, dim=dim)
            expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)
            self.assertEqual(expected.shape, actual.shape)
            self.assertEqual(expected, actual, exact_dtype=False)
        def test_x(sizes, dim, x, device):
            # Explicit sample point form.
            t = torch.randn(sizes, device=device)
            actual = torch.trapz(t, x=torch.tensor(x, device=device), dim=dim)
            expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)
            self.assertEqual(expected.shape, actual.shape)
            self.assertEqual(expected, actual.cpu(), exact_dtype=False)
        test_dx((2, 3, 4), 1, 1, device)
        test_dx((10, 2), 0, 0.1, device)
        test_dx((1, 10), 0, 2.3, device)
        test_dx((0, 2), 0, 1.0, device)
        test_dx((0, 2), 1, 1.0, device)
        test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
        test_x((10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device)
        test_x((1, 10), 0, [1.0], device)
        test_x((0, 2), 0, [], device)
        test_x((0, 2), 1, [1.0, 2.0], device)
        # dim out of range raises for both forms.
        with self.assertRaisesRegex(
                IndexError,
                'Dimension out of range'):
            test_x((2, 3), 2, [], device)
            test_dx((2, 3), 2, 1.0, device)
        # x length must match the number of sample points along dim.
        with self.assertRaisesRegex(
                RuntimeError,
                'There must be one `x` value for each sample point'):
            test_x((2, 3), 1, [1.0, 2.0], device)
            test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
    @dtypes(torch.double)
    def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
        """Memory-overlap checks for the scalar overloads of torch.pow:
        in-place tensor**scalar, and both out= scalar overloads
        (tensor**scalar and scalar**tensor)."""
        sz = 3
        doubles = torch.randn(2 * sz, dtype=dtype, device=device)
        self.check_internal_mem_overlap(
            lambda t: t.pow_(42), 1, dtype, device)
        self.unary_check_input_output_mem_overlap(
            doubles, sz, lambda input, out: torch.pow(input, 42, out=out))
        self.unary_check_input_output_mem_overlap(
            doubles, sz, lambda input, out: torch.pow(42, input, out=out))
@dtypes(*list(product(torch.testing.get_all_dtypes(include_bool=False),
                      torch.testing.get_all_dtypes(include_bool=False))))
def test_float_power(self, device, dtypes):
    """Compare torch.float_power (functional, method, and in-place
    variants) against np.float_power for every base/exponent dtype pair,
    covering tensor-tensor, tensor-scalar, and scalar-tensor cases.
    """
    def to_np(value):
        # NumPy has no bfloat16, so upcast to float before converting.
        if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
            return value.to(torch.float).cpu().numpy()
        return value.cpu().numpy() if isinstance(value, torch.Tensor) else value

    base_dtype = dtypes[0]
    exp_dtype = dtypes[1]
    # float_power always computes in double precision; the result is
    # complex128 when either operand is complex, otherwise float64.
    out_dtype = torch.complex128 if base_dtype.is_complex or exp_dtype.is_complex else torch.float64

    base = make_tensor((30,), device, base_dtype, low=1, high=100)
    # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0
    # Related: https://github.com/pytorch/pytorch/issues/48000
    # base[0] = base[3] = base[7] = 0
    exp = make_tensor((30,), device, exp_dtype, low=-2, high=2)
    exp[0] = exp[4] = exp[6] = 0

    expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))

    exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]
    complex_exponents = exponents + [-2.5j, -1.0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]

    for op in (torch.float_power, torch.Tensor.float_power, torch.Tensor.float_power_):
        # Case of Tensor x Tensor
        if op is torch.Tensor.float_power_ and base_dtype != out_dtype:
            # In-place variant cannot hold the promoted result dtype.
            with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                op(base.clone(), exp)
        else:
            result = op(base.clone(), exp)
            self.assertEqual(expected, result)

        if op is torch.float_power:
            # out= variant with an explicitly promoted destination.
            out = torch.empty_like(base).to(device=device, dtype=out_dtype)
            op(base, exp, out=out)
            self.assertEqual(expected, out)

        # Case of Tensor x Scalar (complex scalars only when exp is complex).
        for i in complex_exponents if exp_dtype.is_complex else exponents:
            out_dtype_scalar_exp = torch.complex128 if base_dtype.is_complex or type(i) == complex else torch.float64
            expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))

            if op is torch.Tensor.float_power_ and base_dtype != out_dtype_scalar_exp:
                with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
                    op(base.clone(), i)
            else:
                result = op(base.clone(), i)
                self.assertEqual(expected_scalar_exp, result)

            if op is torch.float_power:
                out = torch.empty_like(base).to(device=device, dtype=out_dtype_scalar_exp)
                op(base, i, out=out)
                self.assertEqual(expected_scalar_exp, out)

    # Case of Scalar x Tensor
    for i in complex_exponents if base_dtype.is_complex else exponents:
        out_dtype_scalar_base = torch.complex128 if exp_dtype.is_complex or type(i) == complex else torch.float64
        expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))

        result = torch.float_power(i, exp)
        self.assertEqual(expected_scalar_base, result)

        out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)
        torch.float_power(i, exp, out=out)
        self.assertEqual(expected_scalar_base, out)
def test_float_power_exceptions(self, device):
def _promo_helper(x, y):
for i in (x, y):
if type(i) == complex:
return torch.complex128
elif type(i) == torch.Tensor and i.is_complex():
return torch.complex128
return torch.double
test_cases = ((torch.tensor([-2, -1, 0, 1, 2], device=device), -.25),
(torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device), 2.))
for base, exp in test_cases:
for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):
out = torch.empty(1, device=device, dtype=out_dtype)
required_dtype = _promo_helper(base, exp)
if out.dtype == required_dtype:
torch.float_power(base, exp, out=out)
else:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
torch.float_power(base, exp, out=out)
if base.dtype == required_dtype:
torch.Tensor.float_power_(base.clone(), exp)
else:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
torch.Tensor.float_power_(base.clone(), exp)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False),
                 torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False)))
def test_xlogy_xlog1py(self, device, dtypes):
    """Compare torch.xlogy / torch.special.xlog1py (functional, out=, and
    in-place variants) against their SciPy references over all
    non-complex dtype pairs, including broadcasting, scalar first
    arguments, and special values (0, +/-inf, nan).
    """
    x_dtype, y_dtype = dtypes

    def out_variant_helper(torch_fn, x, y):
        # out= result must match the functional result.
        expected = torch_fn(x, y)
        out = torch.empty_like(expected)
        torch_fn(x, y, out=out)
        self.assertEqual(expected, out)

    def xlogy_inplace_variant_helper(x, y):
        # In-place on integral/bool tensors must fail: the result is
        # floating point and cannot be cast back to x's dtype.
        if x.dtype in torch.testing.get_all_int_dtypes() + [torch.bool]:
            with self.assertRaisesRegex(RuntimeError,
                                        "can't be cast to the desired output type"):
                x.clone().xlogy_(y)
        else:
            expected = torch.empty_like(x)
            torch.xlogy(x, y, out=expected)
            inplace_out = x.clone().xlogy_(y)
            self.assertEqual(expected, inplace_out)

    def test_helper(torch_fn, reference_fn, inputs, scalar=None):
        # Compares torch_fn(x, .) to reference_fn over same-shaped and
        # broadcastable second arguments; `scalar` replaces x as the
        # first argument for the out= checks when given.
        x, y, z = inputs
        torch_fn_partial = partial(torch_fn, x)
        reference_fn_partial = partial(reference_fn, x.cpu().numpy())
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, x, exact_dtype=False)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, y, exact_dtype=False)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, z, exact_dtype=False)

        val = scalar if scalar is not None else x
        out_variant_helper(torch_fn, val, x)
        out_variant_helper(torch_fn, val, y)
        out_variant_helper(torch_fn, val, z)

    # Tensor-tensor inputs of the same and of broadcastable shapes.
    x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
    y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
    z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)

    # xlog1py arguments may dip below zero (its log argument is 1 + y).
    x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.5, high=1000)
    y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.5, high=1000)
    z_1p = make_tensor((4, 5), device, y_dtype, low=-0.5, high=1000)

    xlogy_fns = torch.xlogy, scipy.special.xlogy
    xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py

    test_helper(*xlogy_fns, (x, y, z))
    xlogy_inplace_variant_helper(x, x)
    xlogy_inplace_variant_helper(x, y)
    xlogy_inplace_variant_helper(x, z)
    test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))

    # Scalar first-argument variants.
    test_helper(*xlogy_fns, (x, y, z), 3.14)
    test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)

    # Special values in the second argument, with a zero first argument.
    t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
    zeros = torch.zeros(7, dtype=y_dtype, device=device)

    def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):
        # Checks the x == 0 special cases against SciPy, as tensor or
        # Python scalar zero.
        zeros_t = 0 if scalar else zeros
        zeros_np = 0 if scalar else zeros.cpu().numpy()
        torch_fn_partial = partial(torch_fn, zeros_t)
        reference_fn_partial = partial(reference_fn, zeros_np)
        self.compare_with_numpy(torch_fn_partial, reference_fn_partial, t, exact_dtype=False)

        out_variant_helper(torch_fn, zeros_t, t)

    test_zeros_special_helper(*xlogy_fns)
    xlogy_inplace_variant_helper(zeros, t)
    test_zeros_special_helper(*xlog1py_fns)

    test_zeros_special_helper(*xlogy_fns, scalar=True)
    test_zeros_special_helper(*xlog1py_fns, scalar=True)
def test_xlogy_xlog1py_scalar_type_promotion(self, device):
# priority level as 0-dim tensors
t = torch.randn((), dtype=torch.float32, device=device)
self.assertEqual(t.dtype, torch.xlogy(t, 5).dtype)
self.assertEqual(t.dtype, torch.xlogy(t, 5.).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5.).dtype)
self.assertEqual(t.dtype, torch.xlogy(5, t).dtype)
self.assertEqual(t.dtype, torch.xlogy(5., t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5., t).dtype)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_xlogy_xlog1py_bfloat16(self, device):
    """Compare the bfloat16 variants of torch.xlogy and
    torch.special.xlog1py against their SciPy references (computed in
    float32, since SciPy has no bfloat16 type).
    """
    def _compare_helper(x, y, torch_fn, reference_fn):
        x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()
        y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()
        expected = torch.from_numpy(reference_fn(x_np, y_np))
        actual = torch_fn(x, y)
        self.assertEqual(expected, actual, exact_dtype=False)

    x_dtype, y_dtype = torch.bfloat16, torch.bfloat16

    # Tensor-Tensor Test (tensor of same and different shape)
    x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
    y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
    z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)

    # xlog1py arguments may dip below zero (its log argument is 1 + y).
    x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.8, high=1000)
    y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.8, high=1000)
    z_1p = make_tensor((4, 5), device, y_dtype, low=-0.8, high=1000)

    xlogy_fns = torch.xlogy, scipy.special.xlogy
    xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py

    _compare_helper(x, x, *xlogy_fns)
    _compare_helper(x, y, *xlogy_fns)
    _compare_helper(x, z, *xlogy_fns)
    _compare_helper(x, 3.14, *xlogy_fns)
    _compare_helper(y, 3.14, *xlogy_fns)
    _compare_helper(z, 3.14, *xlogy_fns)

    _compare_helper(x_1p, x_1p, *xlog1py_fns)
    _compare_helper(x_1p, y_1p, *xlog1py_fns)
    _compare_helper(x_1p, z_1p, *xlog1py_fns)
    _compare_helper(x_1p, 3.14, *xlog1py_fns)
    _compare_helper(y_1p, 3.14, *xlog1py_fns)
    _compare_helper(z_1p, 3.14, *xlog1py_fns)

    # Special Values Tensor-Tensor
    t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
    # Fix: this should be a vector of zeros matching t's length (as in
    # test_xlogy_xlog1py); torch.tensor(7, ...) built the scalar tensor 7,
    # so the x == 0 special cases were never actually exercised here.
    zeros = torch.zeros(7, dtype=y_dtype, device=device)
    _compare_helper(t, zeros, *xlogy_fns)
    _compare_helper(t, 0., *xlogy_fns)

    _compare_helper(t, zeros, *xlog1py_fns)
    _compare_helper(t, 0., *xlog1py_fns)
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False,
                                              include_half=False, include_bfloat16=False),
                 torch.testing.get_all_dtypes(include_complex=False,
                                              include_half=False, include_bfloat16=False)))
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_zeta(self, device, dtypes):
    """Compare torch.special.zeta against scipy.special.zeta for
    tensor-tensor (same shape and all broadcasting combinations) and
    scalar-tensor / tensor-scalar inputs.
    """
    x_dtype, q_dtype = dtypes

    def test_helper(x, q):
        x_np = x if isinstance(x, float) else x.cpu().numpy()
        q_np = q if isinstance(q, float) else q.cpu().numpy()
        expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))

        actual = torch.special.zeta(x, q)

        # Looser tolerances on CPU; elsewhere use assertEqual's defaults.
        rtol, atol = None, None
        if self.device_type == 'cpu':
            rtol, atol = 1e-6, 1e-6
        self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)

    # x tensor - q tensor same size
    x = make_tensor((2, 3, 4), device, x_dtype)
    q = make_tensor((2, 3, 4), device, q_dtype)
    test_helper(x, q)

    # x tensor - q tensor broadcast lhs
    x = make_tensor((2, 1, 4), device, x_dtype)
    q = make_tensor((2, 3, 4), device, q_dtype)
    test_helper(x, q)

    # x tensor - q tensor broadcast rhs
    x = make_tensor((2, 3, 4), device, x_dtype)
    q = make_tensor((2, 1, 4), device, q_dtype)
    test_helper(x, q)

    # x tensor - q tensor broadcast all
    x = make_tensor((2, 3, 1), device, x_dtype)
    q = make_tensor((2, 1, 4), device, q_dtype)
    test_helper(x, q)

    # x scalar - q tensor
    # A float scalar forces a floating tensor on the other side, so
    # integral q dtypes fall back to the default (floating) dtype.
    for x in np.linspace(-5, 5, num=10).tolist():
        if not q_dtype.is_floating_point:
            q_dtype = torch.get_default_dtype()
        q = make_tensor((2, 3, 4), device, q_dtype)
        test_helper(x, q)

    # x tensor - q scalar
    for q in np.linspace(-5, 5, num=10).tolist():
        if not x_dtype.is_floating_point:
            x_dtype = torch.get_default_dtype()
        x = make_tensor((2, 3, 4), device, x_dtype)
        test_helper(x, q)
# Binary dunder methods on torch.Tensor; generate_not_implemented_tests
# (defined below) creates one test per entry checking the NotImplemented
# fallback for unknown operand types.
tensor_binary_ops = [
    '__lt__', '__le__',
    '__gt__', '__ge__',
    '__eq__', '__ne__',

    '__add__', '__radd__', '__iadd__',
    '__sub__', '__rsub__', '__isub__',
    '__mul__', '__rmul__', '__imul__',
    '__matmul__', '__rmatmul__',
    '__truediv__', '__rtruediv__', '__itruediv__',
    '__floordiv__', '__rfloordiv__', '__ifloordiv__',
    '__mod__', '__rmod__', '__imod__',
    '__pow__', '__rpow__', '__ipow__',
    '__lshift__', '__rlshift__', '__ilshift__',
    '__rshift__', '__rrshift__', '__irshift__',
    '__and__', '__iand__',
    '__xor__', '__ixor__',
    '__or__', '__ior__',

    # Unsupported operators
    # '__imatmul__',
    # '__divmod__', '__rdivmod__', '__idivmod__',
    # '__rand__', '__ror__', '__rxor__',
]
# Test that binary math operations return NotImplemented for unknown types.
def generate_not_implemented_tests(cls):
    """Dynamically attach a test_<op>_not_implemented method to *cls* for
    every dunder in tensor_binary_ops, each verifying the operation
    yields NotImplemented when handed an object of an unrelated type.
    """
    class UnknownType:
        pass

    # TODO: refactor to inline these
    _types = [
        torch.half, torch.float, torch.double,
        torch.int8, torch.short, torch.int, torch.long,
        torch.uint8
    ]

    # TODO: refactor to use make_tensor
    def _small_2d(dtype, device, has_zeros=True, fill_ones=False, oneish=False):
        result = _make_tensor((5, 5), dtype, device, fill_ones=fill_ones)
        if oneish:
            result = result.clamp(min=_number(.99, 1, dtype), max=1.01)
        elif not has_zeros:
            result = result.clamp(min=(_number(_div_min, 1, dtype)))
        return result

    def make_test(dunder):
        @dtypes(*_types)
        def test(self, device, dtype):
            lhs = _small_2d(dtype, device)
            # The dunder must return NotImplemented (rather than raise) so
            # Python can fall back to the reflected operation.
            outcome = getattr(lhs, dunder)(UnknownType())
            self.assertEqual(outcome, NotImplemented)
        return test

    for dunder in tensor_binary_ops:
        name = "test_{}_not_implemented".format(dunder)
        assert not hasattr(cls, name), "{0} already in {1}".format(
            name, cls.__name__)
        setattr(cls, name, make_test(dunder))
# Attach the auto-generated NotImplemented-fallback tests to the suite.
generate_not_implemented_tests(TestBinaryUfuncs)
# Create per-device (CPU/CUDA/...) variants of every TestBinaryUfuncs test.
instantiate_device_type_tests(TestBinaryUfuncs, globals())

if __name__ == '__main__':
    run_tests()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.