index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
75,895 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/migrations/0003_auto_20190719_1437.py | # Generated by Django 2.2.3 on 2019-07-19 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('siteEM', '0002_auto_20190718_2242'),
]
operations = [
migrations.AddField(
model_name='retorno',
name='nome',
field=models.CharField(default='', max_length=500),
),
migrations.AlterField(
model_name='session',
name='password',
field=models.CharField(default='', max_length=20),
),
migrations.AlterField(
model_name='session',
name='user',
field=models.CharField(default='', max_length=20),
),
]
| {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,896 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/urls.py | from django.urls import path
from . import views
urlpatterns = [
path('', views.upload, name='inicial'),
path('analisar/', views.analisar, name='analisar'),
] | {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,897 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/migrations/0005_dados_nome.py | # Generated by Django 2.2.3 on 2019-07-20 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('siteEM', '0004_auto_20190719_1551'),
]
operations = [
migrations.AddField(
model_name='dados',
name='nome',
field=models.CharField(default='', max_length=200),
),
]
| {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,898 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/views.py | import io
import pandas as pd
from django_pandas.io import read_frame
from .models import Session, Dados, Retorno
from django.conf import settings
from reportlab.pdfgen import canvas
from django.http import FileResponse
from django.shortcuts import render_to_response
from django.shortcuts import render, get_object_or_404
from django.core.files.storage import FileSystemStorage
posicao = [-1]
nome = ''
def analisar(request):
pos = posicao
tabDifAbs, tabDifPer, lisRepAbs, lisRepPerc = gera_tabelas(pos[0].data)
return render(request, 'siteEM/analise.html', {'pos' : pos[0].data.name, 'tda' : tabDifAbs, 'tdp' : tabDifPer, 'lra' : lisRepAbs, 'lrp' : lisRepPerc})
def upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
nome = myfile.name
dt = Dados.objects.create(data = myfile);
posicao[0] = Dados.objects.all()[len(Dados.objects.all()) - 1]
return render(request, 'siteEM/inicial.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'siteEM/inicial.html')
def gera_tabelas(uri):
dados = pd.read_csv(uri) #lê a tabela (a primeira linha do arquivo csv JÁ VAI SER o cabeçalho)
dados.dropna(inplace=True) #apaga as linhas com NaN
dados.reset_index(drop=True, inplace=True) #ajeitando os índices
qtdEmp = dados.shape[1] - 4 #número de colunas excluindo as 4 primeiras
dados.iloc[:,4] = dados.iloc[:,4].astype(float) #convertemos pra float, pois essa coluna não estava sendo identificada (type = object) (é o preço de referência)
lista_series_empresas = list(dados.columns.values[4:]) #A partir da quinta coluna ou quarto índice (começa em 0) temos as empresas e o preço de referência (primeiro termo)
copia_lista_series_empresas = lista_series_empresas.copy() #copia para poder utilizar futuramente o método list.remove()
#criando as tabelas de diferenças
tabela_diff_abs_empresas = pd.DataFrame()
tabela_diff_perc_empresas = pd.DataFrame()
for nome_empresa in lista_series_empresas: #interando em cima dos nomes das empresas
#remove o primeiro elemento da LISTA
copia_lista_series_empresas.remove(nome_empresa)
for nome_empresa2 in copia_lista_series_empresas:
#gerando as colunas de diferenças absolutas
coluna_abs = dados[nome_empresa] - dados[nome_empresa2]
titulo_coluna_abs = f'{nome_empresa}_{nome_empresa2}'
#gerando as colunas de diferenças relativas
coluna_perc = 100*abs((dados[nome_empresa] - dados[nome_empresa2])/dados[nome_empresa])
titulo_coluna_perc = f'{nome_empresa}_{nome_empresa2}'
tabela_diff_abs_empresas[titulo_coluna_abs] = coluna_abs
tabela_diff_perc_empresas[titulo_coluna_perc] = coluna_perc
#gerando duas listas que recebem as séries oriundas das contagens de repetições (lista de série)
lista_repeticoes_abs = list()
lista_repeticoes_perc = list()
#precisamos dos nomes das colunas do NOVO dataframe pra poder iterar sobre os dataframes tabela_diff_abs_empresas
#e tabela_diff_perc_empresas
colunas_dataframe_abs = list(tabela_diff_abs_empresas.columns.values) #nomes das colunas da tabela_diff_abs_empresas
colunas_dataframe_perc = list(tabela_diff_perc_empresas.columns.values) #nomes das colunas da tabela_diff_perc_empresas
#construindo as listas de contagens de repetições
#ambos dataframes têm o mesmmo nome para as colunas, logo, iterando sobre qualquer dataframe dá no mesmo
for titulo in colunas_dataframe_abs:
# aqui geraremos as contagens das repetições que existem na coluna nomeada com 'titulo'
# pro dataframe tabela_diff_perc_empresas e tabela_diff_abs_empresas.
contagem_abs = tabela_diff_abs_empresas.groupby(tabela_diff_abs_empresas[titulo].map(lambda x: "%.2f" % x)).size()
contagem_perc = tabela_diff_perc_empresas.groupby(tabela_diff_perc_empresas[titulo].map(lambda x: "%.2f" % x)).size()
#adicionando as contagens nas suas respectivas listas de contagem
lista_repeticoes_abs.append(contagem_abs)
lista_repeticoes_perc.append(contagem_perc)
return tabela_diff_abs_empresas, tabela_diff_perc_empresas, lista_repeticoes_abs, lista_repeticoes_perc | {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,899 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/admin.py | from django.contrib import admin
from .models import Session
from .models import Dados
from .models import Retorno
admin.site.register(Session)
admin.site.register(Dados)
admin.site.register(Retorno) | {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,900 | Cogitus/Est-dios-Mapinguari | refs/heads/master | /estudiosMapinguari/siteEM/migrations/0004_auto_20190719_1551.py | # Generated by Django 2.2.3 on 2019-07-19 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('siteEM', '0003_auto_20190719_1437'),
]
operations = [
migrations.AddField(
model_name='dados',
name='data',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='retorno',
name='mapaFreq',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='retorno',
name='mapaRisco',
field=models.FileField(default='', upload_to=''),
),
]
| {"/estudiosMapinguari/siteEM/forms.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/views.py": ["/estudiosMapinguari/siteEM/models.py"], "/estudiosMapinguari/siteEM/admin.py": ["/estudiosMapinguari/siteEM/models.py"]} |
75,951 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/weak.py | """Mix-in providing weakref-based storage for properties"""
import weakref
from basicproperty import basic
class WeakStore( object ):
"""Mix-in providing weak-references to held objects"""
def _getValue( self, client ):
"""Perform a low-level retrieval of the "raw" value for the client
"""
base = super( WeakStore, self )._getValue( client )
if isinstance( base, weakref.ReferenceType):
base = base()
if base is None:
self._delValue( client )
raise AttributeError( """Property %r on object %r references an object which has been garbage collected"""%(
self.name,
client,
))
return base
def _setValue( self, client, value ):
"""Perform a low-level set of the "raw" value for the client
"""
try:
value = weakref.ref( value )
except TypeError:
raise TypeError( """Attempted to set non weak-referencable object %r for property %r of object %r"""%(
value, self.name, client,
))
return super( WeakStore, self)._setValue( client, value )
class WeakProperty( WeakStore, basic.BasicProperty ):
"""Weak-referencing version of BasicProperty"""
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,952 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_linearise.py | from basicproperty import linearise
if __name__ == "__main__":
from pytable import dbschema
#Lineariser.registerHelper( dbschema.TableSchemas, SequenceLin() )
class _x:
def __getinitargs__( self ):
return (1,2,3)
class _y:
def __getstate__( self ):
return {'a':'b',1:3}
class _z:
pass
x = _x()
y = _y()
z = _z()
z.this = 'that'
q = dbschema.DatabaseSchema(
name="this", tables=[
dbschema.TableSchema( name = "blah" ),
],
)
print linearise.linearise( [
2,3,4,
"this",
(4,5,6),
{'a':4,2:[],'8':{}},
('a',23L,23.4),
4j,
linearise.Lineariser,
linearise.saxutils.XMLGenerator,
linearise.Lineariser,
float,
linearise.linearise,
x,y,z,
q,
None,
])
from cinemon import tableschema
result = linearise.linearise( tableschema.schema )
open('v:\\cinemon\\table_schema.xml','w').write( result )
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,953 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/__init__.py | """Testing package for basicproperty
"""
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,954 | eshikvtumane/basicproperty | refs/heads/master | /basictypes/vfs/test_filepath.py | import unittest, os
from basictypes.vfs import path
class NTFilePathTests(unittest.TestCase):
"""Note: these tests are Windows-specific"""
def testRepr( self ):
"""Test representation is as expected"""
value = path( "test" )
result = repr( value )
expected = "%s(%s)"%( value.__class__.__name__, repr(str(value)))
self.failUnlessEqual( result, expected, """Got:\n%s\t\nExpected:\n\t%s"""% (result, expected))
def testSimpleRelative( self ):
"""Test a simple relative path for basic operations"""
fileName = "test.tmp"
testFile = path(fileName)
assert testFile == fileName,"""Path did not match an equal string"""
assert str(testFile) == fileName,"""str(Path) did not match an equal string"""
assert not testFile.isAbsolute(),"""relative path declared itself absolute"""
assert not testFile.isRoot(),"""non-root path declared itself root"""
assert testFile.baseOnly(),"""base file system path declared itself non-base"""
open( fileName,'w' ).write( 'ha' )
assert testFile.exists(),"""could not create file in current working directory, or exists method failure"""
assert testFile.parent() == os.getcwd(),"""file created in current working directory does not report current working directory as parent"""
assert testFile.size() == 2,"""file with two bytes written reports size other than 2, possible bug in size (or data writing)"""
open( testFile, 'w').write( 'ham' )
assert testFile.exists(),"""could not create file in current working directory, or exists method failure"""
assert testFile.parent() == os.getcwd(),"""file created in current working directory does not report current working directory as parent"""
assert testFile.size() == 3,"""file with 3 bytes written reports size other than 3, possible bug in size (or data writing)"""
def testFullySpecified (self):
"""Test a fully specified file path"""
# now a fully-specified path
fileName = "c:\\test.tmp"
testFile = path(fileName)
assert testFile == fileName,"""Path did not match an equal string"""
assert str(testFile) == fileName,"""str(Path) did not match an equal string"""
assert testFile.isAbsolute(),"""absolute path declared itself relative"""
assert not testFile.isRoot(),"""root path declared itself non-root"""
assert testFile.baseOnly(),"""base file system path declared itself non-base"""
result = testFile.parent()
assert result == "c:\\","""parent reported as %s"""% (result)
assert result == path( "c:\\" ),"""parent reported as %s"""% (result)
assert testFile.isChild( "c:\\" )
assert testFile.drive() == 'c:\\', "got %s"%( repr(testFile.drive()))
assert testFile.root() == 'c:\\'
assert path( "c:\\" ).isParent( testFile )
def testRoot(self):
"""Test a root for the file system"""
# test a real root
roots = [path("c:\\"), path( r"\\Raistlin\c")]
for root in roots:
assert root.isRoot()
assert root.parent() == None
assert root.root() == root
assert path("c:\\").unc() == None
assert path("c:\\").drive() == "c:\\"
assert path("c:\\temp").drive() == "c:\\"
assert path("c:\\temp").root() == "c:\\"
assert path("c:\\temp").drive()[-1] == os.sep
assert path(r"\\Raistlin\c").drive() == None
assert path(r"\\Raistlin\c").unc() == r"\\raistlin\c"
assert path(r"\\Raistlin\c").parent() == None
assert path(r"\\Raistlin\c").root() == r"\\raistlin\c"
def testFragments (self):
"""Test ability to break paths into fragments"""
assert path(
"p:\\temp\\this\\that\\thos\\test.tmp"
).fragments() == [ 'p:\\', 'temp','this','that','thos','test.tmp']
assert path(
"c:"
).fragments() == [ 'c:\\']
assert path(
"p:\\temp\\this\\that\\thos\\test.tmp"
).parents() == [
'p:\\',
'p:\\temp',
'p:\\temp\\this',
'p:\\temp\\this\\that',
'p:\\temp\\this\\that\\thos',
], "Got: %s"%( path(
"p:\\temp\\this\\that\\thos\\test.tmp"
).parents() )
def testWalk (self):
"""Test three-method walking functions"""
dir = path(
"p:\\temp"
)
assert dir.join('test') == "p:\\temp\\test"
result = []
# need a testing directory for this to be automated...
dir.walk( result.append )
def testFileDirectoryOperations (self):
"""Test file and/or directory-specific operations"""
dir = path(
"p:\\temp\\this\\that"
)
try:
dir.walk( file=os.remove, post= os.rmdir)
except OSError, err:
print 'failed to remove', err
dir.createDirectory( mode = 0777 )
f = dir+'test.txt'
f.open('w').write( 'testing' )
assert f.exists()
assert f.open('r').read() == 'testing'
def getSuite():
return unittest.makeSuite(NTFilePathTests,'test')
if __name__ == "__main__":
unittest.main(defaultTest="getSuite")
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,955 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/standard/fileproperty.py | """XXX Lots left to do here
* Create a more efficient way of specifying the various boundaries
* Support for saving/restoring open file connections
* Support for defining UI-specific values in the property
* Should overwrite silently?
* Should append extension if doesn't match a glob
* Extensions/types for use in selection dialogs
* Allow creation of intermediate directories (require parent to exist or not)
* Should have basic support in form of a "Path" class for
Interacting with the paths in an object-oriented way
* Support for standard os and os.path queries
* listdir
* isfile/dir/symlink
* create sub-directory
* join-with-string to get sub-path
* absolute-path -> Path object
* support .. and . directory resolution (and elimination)
* Mime-types
* ftype and assoc values
* comparisons
* parent, child -> boolean
* shared path -> fragments
* shared root -> boolean
* open( *, ** )
* for directories, create/open file based on standard file() call
* for zipfile-embedded paths, use zipfile transparently to create/open sub-file
* for directories, create the directory (possibly recursively)
* for files, error normally, possibly useful for things
like zipfiles and db-interface files which want to provide
directory-like interface
* file( name ) -> Path
* create a sub-file path
* sub( name ) -> Path
* create a sub-directory path
* newfile( name, *, **)
* newsub( name, *, ** )
* create a new sub-directory Path object
* for real directories, also
* Eventually support zipfiles as Directory paths
"""
from basicproperty import basic
from basictypes.vfs import filepath
import os
class MustExistBoundary( boundary.Boundary ):
"""Require that a path exist"""
def __call__(self, value, property, client):
"""Check value against boundary conditions"""
testValue = normalise(value)
if not os.path.exists( testValue ):
raise boundary.BoundaryValueError(
property, self, client, value,
"""This property requires an already-existing path.
The path specified: %s
Resolves to: %s
Which does not appear to exist."""%( repr(value), repr(testValue))
)
class MustNotExistBoundary( boundary.Boundary ):
"""Require that a path not exist"""
def __call__(self, value, property, client):
"""Check value against boundary conditions"""
testValue = normalise(value)
if os.path.exists( testValue ):
raise boundary.BoundaryValueError(
property, self, client, value,
"""This property requires a path which doesn't currently exist
The path specified: %s
Resolves to: %s
Which does not appear to exist."""%( repr(value), repr(testValue))
)
class IsFileBoundary( boundary.Boundary ):
"""Require that the path, if it points to anything, points to a file"""
def __call__(self, value, property, client):
"""Check value against boundary conditions"""
testValue = normalise(value)
if os.path.exists( testValue ) and not os.path.isfile(testValue):
raise boundary.BoundaryValueError(
property, self, client, value,
"""This property can only point to a file, not diretories or symbolic links
The path specified: %s
Resolves to: %s
Which, though it exists, does not appear to be a file."""%( repr(value), repr(testValue))
)
class IsDirBoundary( boundary.Boundary ):
"""Require that the path, if it points to anything, points to a directory"""
def __call__(self, value, property, client):
"""Check value against boundary conditions"""
testValue = normalise(value)
if os.path.exists( testValue ) and not os.path.isdir(testValue):
raise boundary.BoundaryValueError(
property, self, client, value,
"""This property can only point to a directory, not files or symbolic links
The path specified: %s
Resolves to: %s
Which, though it exists, does not appear to be a directory."""%( repr(value), repr(testValue))
)
class GlobBoundary( boundary.Boundary ):
"""Require that the path match a glob specification (fnmatch)"""
def __init__(self, specifier="*.*"):
"""Initialize the boundary with the glob/fnmatch specifier
"""
self.specifier= specifier
def __call__(self, value, property, client):
"""Check value against boundary conditions"""
import fnmatch
if not fnmatch.fnmatch( value, self.specifier ):
raise boundary.BoundaryValueError(
property, self, client, value,
"""This property must match the shell-pattern %s
The path specified: %s
does not match this pattern."""%( repr(value), )
)
class PathProperty( basic.BasicProperty ):
"""Representation of a path as a property"""
baseType = filepath.FilePath
class FilePathProperty( PathProperty ):
"""Representation of a file path as a property"""
boundaries = [ IsFileBoundary() ]
class DirPathProperty( PathProperty ):
"""Representation of a directory path as a property"""
boundaries = [ IsFileBoundary() ]
def normalise( path ):
"""Normalise path (and make it absolute"""
try:
return os.path.normcase(
os.path.normpath(
os.path.expanduser(
os.path.expandvars(
path
)
)
)
)
except:
return path
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,956 | eshikvtumane/basicproperty | refs/heads/master | /setup.py | #!/usr/bin/env python
"""Installs properties using distutils
Run:
python setup.py install
to install the packages from the source archive.
"""
import os, sys
from setuptools import setup, find_packages, Extension
if __name__ == "__main__":
from sys import hexversion
if hexversion >= 0x2030000:
# work around distutils complaints under Python 2.2.x
extraArguments = {
'classifiers': [
"""License :: OSI Approved :: BSD License""",
"""Programming Language :: Python""",
"""Topic :: Software Development :: Libraries :: Python Modules""",
"""Intended Audience :: Developers""",
],
'download_url': "https://sourceforge.net/project/showfiles.php?group_id=87034",
'keywords': 'descriptor,property,basicproperty,coerce,propertied,enumeration',
'long_description' : """Core data-types and property classes
BasicProperty and BasicTypes provide the core datatypes for
both wxoo and the PyTable RDBMS Wrapper project.
""",
'platforms': ['Any'],
}
else:
extraArguments = {
}
setup (
name = "basicproperty",
version = "0.6.12a",
description = "Core data-types and property classes",
author = "Mike C. Fletcher",
author_email = "mcfletch@users.sourceforge.net",
url = "http://basicproperty.sourceforge.net/",
license = "BSD-style, see license.txt for details",
packages = find_packages(),
include_package_data = True,
zip_safe = False,
ext_modules=[
Extension("basicproperty.propertyaccel", [
os.path.join(
'basicproperty', 'accellerate', "propertyaccel.c"
)
]),
],
**extraArguments
)
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,957 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_date.py | import unittest, time
from basicproperty import common
from basictypes import date_types
class TestClass( object ):
dt = common.DateTimeProperty(
"dt", "documentation",
defaultFunction = date_types.now
)
dtd = common.DateTimeDeltaProperty(
"dtd", "documentation",
defaultFunction = date_types.DateTimeDelta
)
tod = common.TimeOfDayProperty(
"tod", "documentation",
defaultFunction = date_types.TimeOfDay
)
from mx.DateTime import RelativeDateTime
t1 = time.mktime((2003, 2, 23, 0, 0, 0, 0, 0, 0))
DATATABLE = [ # attribute, testTable, badValues
('tod', [
(
RelativeDateTime( hour = 2, minute = 3, second = 0.5),
[
RelativeDateTime( hour = 2, minute = 3, second = 0.5),
RelativeDateTime( months = 3, days = 30, hour = 2, minute = 3, second = 0.5),
u"2:3:.5",
u"02:03:0.5",
u"2:3:.5am",
(2,3,0,500),
[2,3,0,500],
],
),
(
RelativeDateTime( hour = 14, minute = 0, second = 0),
[
RelativeDateTime( hour = 14, minute = 0, second = 0),
RelativeDateTime( months = 3, days = 30, hour = 14, minute = 0, second = 0),
u"14",
u"14:0:0",
u"14:00",
u"14:00pm",
u"2pm",
u"2 PM",
"14",
(14,0,0),
(14,),
(14,0,0,0),
14,
14L,
14.0,
],
),
(
RelativeDateTime( hour = 14, minute = 30, second=0),
[
14.5,
],
),
(
RelativeDateTime( hour = 0, minute = 30, second=0),
[
'24:30',
'48:30',
],
),
(
RelativeDateTime( hour = 0, minute = 0, second=0),
[
'24:00',
'12am',
'0am',
],
),
(
RelativeDateTime( hour = 12, minute = 0, second=0),
[
'0pm',
'12pm',
],
),
], [ # (bad value, error expected)
("a", ValueError),
(None, TypeError),
("am", ValueError),
]),
('dt', [
(
date_types.DateTime( "2003-02-23" ),
[
date_types.DateTime( "2003-02-23" ),
'2003-02-23',
u'2003-02-23',
time.localtime(t1),
t1,
]
),
(
date_types.DateTime( "02/23" ),
[
'Feb 23rd',
'February 23',
'02/23',
'feb 23',
]
),
(
date_types.DateTime( "2003-02-23" ),
[
'Feb 23, 2003',
'February 23, 2003',
'Sunday February 23, 2003',
'2003-02-23',
'feb 23, 2003',
'2003-02-23',
]
),
], [ # (bad value, error expected)
## ("2", ValueError), # ambiguous, improper formatting
]),
('dtd', [
(
date_types.DateTimeDelta( 1, 2, 3, 4.0 ),
[
date_types.DateTimeDelta( 1, 2, 3, 4.0 ),
"1 day, 2 hours, 3 minutes and 4 seconds",
u"1 day, 2 hours, 3 minutes and 4 seconds",
"1d2h3m4s",
]
),
(
- date_types.DateTimeDelta( 0, 2, 3, 4.0 ),
[
"-2:3:4.0",
u"-2:3:4.0",
"-2:3:4",
(0, -2,-3,-4.0),
]
),
(
date_types.DateTimeDelta( 0, 2, 3, 4.0 ),
[
"2:3:4.0",
u"2:3:4.0",
"2:3:4",
(0, 2,3,4.0),
]
),
(
date_types.DateTimeDelta( 0, 2, 3, 4.34 ),
[
"2:3:4.34",
u"2:3:4.34",
"2:3:4.34",
(0, 2,3,4.34),
]
),
], [ # (bad value, error expected)
]),
]
if hasattr( common, 'DateTimeDTProperty' ):
from basictypes import datedatetime_types as ddt
TestClass.dt2 = common.DateTimeDTProperty(
'dt2', """Tests the datetime implementation""",
)
DATATABLE.append(
('dt2', [
(
ddt.DateTime( "2003-02-23" ),
[
date_types.DateTime( "2003-02-23" ),
'2003-02-23',
u'2003-02-23',
time.localtime(t1),
t1,
]
),
(
date_types.DateTime( "02/23" ),
[
'Feb 23rd',
'February 23',
'02/23',
'feb 23',
]
),
(
date_types.DateTime( "2003-02-23" ),
[
'Feb 23, 2003',
'February 23, 2003',
'Sunday February 23, 2003',
'2003-02-23',
'feb 23, 2003',
'2003-02-23',
]
),
], [ # (bad value, error expected)
## ("2", ValueError), # ambiguous, improper formatting
])
)
class DTPropertyTest( unittest.TestCase ):
def testCoerce( self ):
"""Test the whole DATATABLE set for coercion"""
object = TestClass()
for attribute, table, bad in DATATABLE:
for value, set in table:
for v in set:
setattr( object, attribute, v)
got = getattr( object, attribute )
assert got == value, """COERCE(%s): %r -> %r failed, got %r"""%( attribute, v, value, got)
for value, err in bad:
try:
setattr(object, attribute, value)
except err:
continue
except Exception, e:
raise TypeError( """Wrong exc: set %(attribute)r to %(value)r\nExpected: %(err)s\nGot: %(e)s"""%(locals()))
else:
result = getattr( object, attribute)
import pdb
pdb.set_trace()
setattr(object, attribute, value)
raise TypeError( """No exc: set %(attribute)r to %(value)r\nExpected: %(err)r %(err)s\nValue:%(result)s"""%(locals()))
if __name__ == "__main__":
unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,958 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/common.py | """Commonly used property types
Most of the property types here are simply using the
basictypes.basic_types type stand-ins to provide the
class interfaces which user-defined classes would
normally define directly.
"""
from basicproperty import basic, defaults
from basictypes import basic_types, list_types, typeunion, decimaldt
BasicProperty = basic.BasicProperty
class BooleanProperty(basic.BasicProperty):
"""Boolean property"""
baseType = basic_types.Boolean_DT
class IntegerProperty(basic.BasicProperty):
"""Integer property"""
baseType = basic_types.Int_DT
class LongProperty(basic.BasicProperty):
"""Long/large integer property"""
baseType = basic_types.Long_DT
class FloatProperty(basic.BasicProperty):
"""Floating-point property"""
baseType = basic_types.Float_DT
if decimaldt.decimal:
class DecimalProperty(basic.BasicProperty):
"""Decimal property"""
baseType = decimaldt.DecimalDT
class StringProperty(basic.BasicProperty):
"""Human-friendly (unicode) text property"""
baseType = basic_types.String_DT
class StringLocaleProperty(basic.BasicProperty):
"""Byte-string (locale-specific) property
Normally used for storing low-level types such
as module or file names which are not unicode-
aware.
"""
baseType = basic_types.StringLocale_DT
class ClassNameProperty(basic.BasicProperty):
"""Class-name property"""
baseType = basic_types.ClassName_DT
class ClassProperty(basic.BasicProperty):
"""Class property"""
baseType = basic_types.Class_DT
ClassByNameProperty = ClassProperty
def _defaultList( property, client ):
"""Get a default list value for list-type properties"""
base = property.getBaseType()
if issubclass( base, list ):
return base()
return []
# List property types: like the scalar properties these bind a
# basictypes list data-type; they additionally share _defaultList as a
# default factory so every instance receives a fresh (unshared) list.
class ListProperty(basic.BasicProperty):
    """Generic list property"""
    baseType = basic_types.List_DT
    default = defaults.DefaultCallable( _defaultList )
class BooleansProperty(basic.BasicProperty):
    """Booleans list property"""
    baseType = list_types.listof_bools
    default = defaults.DefaultCallable( _defaultList )
class IntegersProperty(basic.BasicProperty):
    """Ints list property"""
    baseType = list_types.listof_ints
    default = defaults.DefaultCallable( _defaultList )
class LongsProperty(basic.BasicProperty):
    """Longs list property"""
    baseType = list_types.listof_longs
    default = defaults.DefaultCallable( _defaultList )
class FloatsProperty(basic.BasicProperty):
    """Floats list property"""
    baseType = list_types.listof_floats
    default = defaults.DefaultCallable( _defaultList )
class StringsProperty(basic.BasicProperty):
    """Strings list property"""
    baseType = list_types.listof_strings
    default = defaults.DefaultCallable( _defaultList )
class ClassNamesProperty(basic.BasicProperty):
    """ClassNames list property"""
    baseType = list_types.listof_classnames
    default = defaults.DefaultCallable( _defaultList )
class ClassesProperty(basic.BasicProperty):
    """Classes list property"""
    baseType = list_types.listof_classes
    default = defaults.DefaultCallable( _defaultList )
# backwards-compatible alias
ClassByNamesProperty = ClassesProperty
##class TypedListProperty(ListProperty):
## """List property with dynamically-specified baseType
##
## Under the covers, if there is no baseType specified
## but there are acceptableTypes specified, the
## TypedListProperty will create a new baseType class
## to use as it's baseType. The mechanism to support
## this is provided by the typeunion and list_types
## modules in the basictypes package.
## """
## def __init__( self, *arguments, **named ):
## """Initialise the TypedListProperty
##
## baseType -- if specified, used as the baseType for
## the list
## acceptableTypes -- if false, the default list baseType
## is used. Otherwise should be a single-value tuple
## with baseType (XXX eventually we should allow
## multiple types), a new listof datatype will be
## created to act as baseType.
## """
## baseType = named.get( 'baseType', None )
## if baseType is None:
## # possibly we have an "acceptable types" value
## acceptable = named.get( 'acceptableTypes', None )
## if not acceptable:
## baseType = basic_types.List_DT
## else:
## # construct a new list_of type for given types
## # XXX should be a Union type for the types
## # this is just using the first type...
## if len(acceptable) > 1:
## acceptable = typeunion.TypeUnion( acceptable )
## else:
## acceptable = acceptable[0]
## baseType = list_types.listof(
## baseType = acceptable,
## )
## named["baseType"] = baseType
## super( TypedListProperty, self).__init__( *arguments, **named )
from basictypes import date_types
# Date/time property types are only defined when basictypes reports a
# working date/time implementation (date_types.haveImplementation flag)
if date_types.haveImplementation:
    class DateTimeProperty(basic.BasicProperty):
        """DateTime property"""
        baseType = date_types.DateTime_DT
        #default = defaults.DefaultCallable( lambda x,y: date_types.today() )
    class DateTimeDeltaProperty(basic.BasicProperty):
        """DateTimeDelta property"""
        baseType = date_types.DateTimeDelta_DT
        #default = defaults.DefaultCallable( lambda property,client: date_types.DateTimeDelta(0) )
    class TimeOfDayProperty(basic.BasicProperty):
        """TimeOfDay property"""
        baseType = date_types.TimeOfDay
        #default = defaults.DefaultCallable( lambda property,client: date_types.TimeOfDay() )
    # here's where we would do the Python 2.3 datetime module version
    # and then possibly a wxDateTime module version
    # and maybe even a standard time module version if we really wanted
    # then add in any implementation-specific stuff
    # such as RelativeDateTime, JulianDateTime or
    # whatever is provided by one module and not the others
#else:
    # should we print a warning? would rather not in case the
    # developer just isn't using it, so didn't include it.
# Python datetime-based property, only when basictypes provides the
# datedatetime_types module.  The original ``except ImportError, err``
# form is a SyntaxError on Python 3 and bound a name it never used;
# the bare form below is valid on both Python 2.6+ and Python 3.
try:
    from basictypes import datedatetime_types as ddt
except ImportError:
    # datetime support unavailable; DateTimeDTProperty is simply not defined
    pass
else:
    class DateTimeDTProperty( basic.BasicProperty ):
        """DateTime property implemented using Python 2.3 datetime objects"""
        baseType = ddt.DateTime
from basicproperty.standard.dictionaryproperty import DictionaryProperty, WeakKeyDictionaryProperty, WeakValueDictionaryProperty
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,959 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/defaults.py | """Holders for property default values/functions
The default object simply provides an interface for
different approaches to defining default arguments.
The current implementation provides static and
dynamic default values.
"""
class Default( object ):
    """Abstract holder for a default-value or function

    Interface definition for property default holders.  Concrete
    subclasses override get() to decide how the stored value is
    turned into the default handed to a property's client.
    """
    def __init__( self, value ):
        """Store the raw *value*; subclasses define how it is interpreted"""
        self.value = value
    def __call__( self, property, client ):
        """Convenience: calling the holder delegates to get()

        property -- the BasicProperty requesting a default
        client -- the instance the default is being produced for
        """
        return self.get( property, client )
    def get( self, property, client ):
        """Produce a default value -- the abstract base always fails

        Raises AttributeError to signal that no default is available
        for the given property/client pair.
        """
        raise AttributeError( """Object %s does not currently have a value for property %s"""%( repr(client), property.name ))
class DefaultValue( Default ):
    """Holder for a static default value

    The constructor's value is handed back unchanged on every get()
    call, so a mutable default (list, dict) is the *same* instance for
    every client that receives it.
    """
    def get( self, property, client ):
        """Return the stored value as-is, ignoring property and client"""
        return self.value
class DefaultCallable( Default ):
    """A holder for a callable object producing default values
    The callable object is called with the signature:
    callable( property, client )
    where client is the instance object and property
    is the BasicProperty itself.
    """
    def get( self, property, client ):
        """Return the result of our callable value"""
        # Normally shadowed by the instance attribute bound in __init__
        # (self.get = value); kept for clarity and for subclasses.
        return self.value( property, client )
    def __init__( self, value ):
        """Initialize the Default holder
        value -- a callable taking (property, client); stored both as
            self.value and bound directly as self.get (see below).
        """
        self.value = value
        # Bind the callable itself as the *instance's* get: it takes the
        # same (property, client) arguments the method would forward, so
        # this skips one level of indirection on every lookup.
        self.get = value
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,960 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_xmlencode.py | import unittest
from basicproperty import boundary, basic, xmlencoder
class TestClass( object ):
    """Fixture exercising the various BasicProperty configuration options"""
    simple = basic.BasicProperty(
        "simple", "documentation"
    )
    withBound = basic.BasicProperty(
        "withBound", "documentation",
        bounds=( boundary.TypeBoundary(str),)
    )
    withDefaultValue = basic.BasicProperty(
        "withDefaultValue", "documentation",
        defaultValue = 'this',
    )
    withDefaultFunction = basic.BasicProperty(
        "withDefaultFunction", "documentation",
        defaultFunction = lambda x,y: [],
    )
    withDefaultValueNoSet = basic.BasicProperty(
        "withDefaultValueNoSet", "documentation",
        defaultValue = 'this',
        setDefaultOnGet = 0,
    )
    withDefaultFunctionNoSet = basic.BasicProperty(
        "withDefaultFunctionNoSet", "documentation",
        defaultFunction = lambda x,y: [],
        setDefaultOnGet = 0,
    )
class TestClass2( object ):
    """Fixture for MethodStoreProperty: a property backed by named methods"""
    simple = basic.MethodStoreProperty(
        "simple", "documentation",
        getMethod = "doGet",
        setMethod = "doSet",
        delMethod = "doDel",
    )
    def doGet( self, ):
        # backing getter: reads the plain instance attribute
        return self.value
    def doSet( self, value ):
        # backing setter
        self.value = value
    def doDel( self):
        # backing deleter
        del self.value
# shared Storage instance used by the test methods below
storage = xmlencoder.Storage()
class XMLTest( unittest.TestCase ):
    """Smoke-tests for the xmlencoder string encoding and Tag repr"""
    def testString( self ):
        """Test simple encoding of string values"""
        encoder = xmlencoder.StringEncoder()
        result = encoder("this", storage)
        expected = xmlencoder.Tag( name = "str", attributes = {"enc":"utf-8"}, content = ["this"])
        assert result == expected,"""String encoding:\n\tWanted %r\n\tGot %r"""%(expected, result)
    def testRepresentation (self):
        """Test representation of a tag object"""
        result = repr(xmlencoder.Tag( name = "str", attributes = {"enc":"utf-8"}, content = ["this"]))
        # single-argument print() behaves identically on Python 2 and 3;
        # the original "print result" statement form is a SyntaxError on 3.x
        print(result)
if __name__ == "__main__":
    unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,961 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/propertied.py | """Generic services for propertied objects"""
class Propertied( object ):
    """Base class providing support for property-using objects

    The Propertied class provides a number of basicproperty
    support features which are helpful in many situations.
    These features are not required by the properties, instead
    they are common services which can be provided because of
    the properties, such as automatic cloning, fairly intelligent
    repr representations, and automatic initialization of properties
    from named initialization arguments. The class also provides
    some basic introspection features.
    """
    def __init__( self, *arguments, **namedarguments ):
        """Initialise the object, setting named arguments as attributes

        namedarguments -- name:value pairs applied via setattr, which
            lets class properties coerce/validate the initial values
        arguments -- passed through to the next __init__ in the MRO
        """
        for name, value in namedarguments.items():
            setattr(self, name, value )
        super( Propertied, self).__init__(*arguments)
    def __str__( self ):
        """Get a friendly representation of the object"""
        return "%s( @%s )"%( self.__class__.__name__, id(self) )
    def getProperties( cls ):
        """Get the BasicProperty properties for a particular object's class

        Returns the values of all class attributes (anywhere in the
        inheritance tree) which are BasicProperty instances; uses the
        module-level getmembers so inaccessible names are skipped.
        """
        from basicproperty.basic import BasicProperty
        def isABasicProperty( object ):
            """Predicate which checks to see if an object is a property"""
            return isinstance( object, BasicProperty )
        return dict(getmembers( cls, isABasicProperty)).values()
    getProperties = classmethod( getProperties )
    def clone( self, ** newValues ):
        """Clone this object, with optional new property values

        This method calls the __init__ method of your class with
        the current property values of your class. Providing newValues
        (a dictionary) overrides property settings with new values.
        """
        values = self.getCloneProperties()
        values.update( newValues )
        return self.__class__( **values )
    def getCloneProperties( self ):
        """Get properties dictionary (key:value) for use in cloning of the instance

        By default you get getProperties()' values, with an
        attempt made to use the property's name, then the property's
        direct "__get__" method.  Properties without a retrievable
        current value are silently skipped.
        """
        values = {}
        for descriptor in self.getProperties():
            try:
                if descriptor.trueProperty:
                    values[ descriptor.name ] = getattr( self, descriptor.name )
                else:
                    values[ descriptor.name ] = descriptor.__get__( self, type(self) )
            except (AttributeError, ValueError, TypeError):
                # property has no current value on this instance
                pass
        return values
    def toString(self, indentation= "", alreadyDone = None, indentString=' '):
        """Get a nicely formatted representation of this object

        This version assumes that getProperties returns
        the list of properties which should be presented,
        it recursively calls it's children with greater
        indents to get their representations.
        indentation -- current string indentation level
        alreadyDone -- set of object ids which are already finished
        XXX Needs a far better API, likely a stand-alone class
        without the automatic inheritance problems here :(
        """
        properties = list(self.getProperties())
        if alreadyDone is None:
            alreadyDone = {}
        # "in" instead of the Python-2-only dict.has_key
        if id(self) in alreadyDone:
            if hasattr( self, 'name' ):
                return """%s( name = %r)"""%( self.__class__.__name__, self.name)
            return """<Already Done %s@%s>"""%(self.__class__.__name__, id(self))
        alreadyDone[id(self)] = 1
        def reprChild( x, indentation= "", alreadyDone=None ):
            """Get representation of child at indentation level if possible"""
            if hasattr( x, 'toString'):
                try:
                    return x.toString(indentation=indentation, alreadyDone=alreadyDone)
                except TypeError:
                    # for instance if the object is a class
                    pass
            return repr(x)
        # sort with the property named 'name' first, then alphabetically;
        # the original cmp-style sorter returned -1 when *either* side
        # was named 'name', which made the comparison inconsistent
        properties.sort( key = lambda prop: (prop.name != 'name', prop.name) )
        fragments = ['%s('%(self.__class__.__name__)]
        indentation = indentation + indentString
        dictionary = self.__dict__
        for property in properties:
            if property.name in dictionary:
                value = dictionary.get( property.name )
                if (
                    hasattr( property, 'default' ) and
                    hasattr( property.default, 'value' ) and
                    property.default.value == value
                ):
                    # value equals the declared default; omit it
                    pass
                elif isinstance( value, list ):
                    fragments.append(
                        '%s%s = ['%(
                            indentation,
                            property.name,
                        ))
                    indentation = indentation + indentString
                    for item in value:
                        fragments.append( '%s%s,'%(indentation,reprChild( item, indentation, alreadyDone)))
                    indentation = indentation[:-(len(indentString))]
                    fragments.append( '%s],'%(indentation))
                else:
                    fragments.append(
                        '%s%s = %s,'%(
                            indentation,
                            property.name,
                            reprChild( value, indentation, alreadyDone )
                        ))
        indentation = indentation[:-(len(indentString))]
        fragments.append( '%s)'%(indentation))
        return "\n".join(fragments)
def getmembers(object, predicate=None):
    """Return sorted (name, value) pairs for *object*'s attributes

    Variant of inspect.getmembers which tolerates names reported by
    dir() that raise AttributeError on access (for instance properties
    whose getter fails); such names are silently skipped.

    object -- any object to introspect
    predicate -- optional callable taking the attribute value and
        returning true when the pair should be included

    Note: the original ``except AttributeError, err`` form is a
    SyntaxError on Python 3 and never used the bound name; the bare
    form below is valid on both Python 2.6+ and Python 3.
    """
    results = []
    for key in dir(object):
        try:
            value = getattr(object, key)
        except AttributeError:
            # name advertised by dir() but not actually retrievable
            continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,962 | eshikvtumane/basicproperty | refs/heads/master | /website/pydoc/builddocs.py | """Script to automatically generate OpenGLContext documentation"""
import pydoc2
if __name__ == "__main__":
    # Optionally initialise wxPython before generating docs (presumably
    # so wx-dependent modules can be imported for documentation); if wx
    # is not installed the docs are generated without it.
    try:
        import wx
    except ImportError:
        pass
    else:
        class MyApp(wx.App):
            def OnInit(self):
                wx.InitAllImageHandlers()
                return True
        app = MyApp(0)
        # NOTE(review): MainLoop() is entered *before* the documentation
        # generation below; with no top-level windows it presumably
        # returns immediately -- confirm this is intended.
        app.MainLoop()
    # modules never descended into
    excludes = [
        "math",
        "string",
    ]
    # modules documented but not recursed below
    stops = [
    ]
    # root packages to document
    modules = [
        "basictypes",
        "basicproperty",
    ]
    # generate pydoc HTML for the listed packages into the current directory
    pydoc2.PackageDocumentationGenerator(
        baseModules = modules,
        destinationDirectory = ".",
        exclusions = excludes,
        recursionStops = stops,
    ).process ()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,963 | eshikvtumane/basicproperty | refs/heads/master | /website/examples/storage.py | class Original( object ):
currentState = 3
def __getattr__( self, key ):
if key != 'currentState':
if self.currentState == 3:
try:
return self.__dict__[key]
except KeyError:
pass
elif self.currentState == 4:
try:
return self.__dict__['someSource'][key]
except KeyError:
pass
raise AttributeError( """%s instance has no %r attribute"""%(
self.__class__.__name__, key,
))
from basicproperty import common, basic
class StateBased( object ):
    """Mix in: use client.someSource as source if currentState==4"""
    def _getValue( self, client ):
        """Retrieve this property's value from the client's active source

        Called by __get__ of basic.BasicProperty.
        """
        assert self.name not in ('currentState','someSource'), """StateBasedProperty created with %r name!"""%(self.name, )
        return self.getClientSource( client )[ self.name ]
    def _setValue( self, client, value ):
        """Store *value* in whichever source the client's state selects"""
        assert self.name not in ('currentState','someSource'), """StateBasedProperty created with %r name!"""%(self.name, )
        storage = self.getClientSource( client )
        storage[ self.name ] = value
        return value
    def getClientSource( self, client ):
        """Map client.currentState to the mapping holding property values

        state 3 -> the client's own __dict__
        state 4 -> the client's someSource mapping
        any other state raises TypeError
        """
        state = client.currentState
        if state == 3:
            return client.__dict__
        if state == 4:
            return client.someSource
        raise TypeError( """%s instance found with inconsistent currentState %r"""%(
            client.__class__.__name__, client.currentState,
        ))
# StateBased comes first in the bases so its _getValue/_setValue
# overrides precede the property base-class versions in the MRO.
class StateBasedProperty( StateBased, basic.BasicProperty ):
    """Simple state-based property"""
class DictStateBasedProperty( StateBased, common.DictionaryProperty ):
    """A dictionary-typed state-based property"""
class WithProps( object ):
    """Demonstration class combining plain and state-aware properties"""
    currentState = common.IntegerProperty(
        "currentState", """The current state of this instance""",
        defaultValue = 3,
    )
    someSource = common.DictionaryProperty(
        "someSource", """Source for properties in state 4""",
    )
    # setDefaultOnGet = 0: reading the default does not write it back,
    # so the value only lands in a source once explicitly assigned
    someProp = StateBasedProperty(
        "someProp", """A state-aware generic property""",
        defaultFunction = lambda prop, client: client.__class__.__name__,
        setDefaultOnGet = 0,
    )
    someOtherProp = DictStateBasedProperty(
        "someOtherProp", """A state-aware dictionary property (with automatic default""",
    )
if __name__ == "__main__":
    # Demonstration: exercise the state-switching properties.  Single
    # argument print() calls behave identically on Python 2 and 3; the
    # original 2.x-only print statements are SyntaxErrors on Python 3.
    w = WithProps()
    print('currentState %r someSource %r'%( w.currentState, w.someSource ))
    print('someProp %r someOtherProp %r'%( w.someProp, w.someOtherProp ))
    print('')
    print('changing the properties with same state')
    w.someProp = 32
    w.someOtherProp[ 'this' ] = 'them'
    print('currentState %r someSource %r'%( w.currentState, w.someSource ))
    print('someProp %r someOtherProp %r'%( w.someProp, w.someOtherProp ))
    print('')
    print('changing state')
    w.currentState = 4
    print('currentState %r someSource %r'%( w.currentState, w.someSource ))
    print('someProp %r someOtherProp %r'%( w.someProp, w.someOtherProp ))
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,964 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/ext/contactinfo.py | from basicproperty import propertied, basic, common, boundary
# Choose the base class once: the original duplicated the entire
# ContactObject definition in both branches of the try/except, which
# invited the two copies drifting apart.
try:
    import Persistence
except ImportError:
    class _ContactBase( propertied.Propertied ):
        """Fallback base when Zope Persistence is unavailable"""
else:
    class _ContactBase( propertied.Propertied, Persistence.Persistent ):
        """Persistence-aware base for contact objects"""

class ContactObject( _ContactBase ):
    """A basic object within the contact database

    Each object within the database supports some number of note fields
    """
    notes = basic.BasicProperty(
        "notes", "Notes about the given value",
        boundaries = [
            boundary.Type( list ),
            boundary.ForEach( boundary.Type( unicode )),
        ],
        defaultFunction = lambda x,y: [],
    )
# Declarative data model: each class is a bag of typed properties with
# their documentation carried in the property declarations themselves.
class Name(ContactObject):
    """A name, which may be an alias for an official legal name
    I'm not sure about the internationalization issues
    here, given that "family names" in certain countries are
    mutable depending on individual gender and the like.
    No attempt is made to automatically support display names
    which are reverse order (for instance Chinese).
    Note: multiple names can be defined for an individual
    so you would record a nickname as a separate "given" name,
    rather than encoding it as part of the person's primary name.
    """
    given = common.StringProperty(
        "given", """Given name(s), personal name(s) or only name, for instance "Cher", "Peter" or "Michael Colin".""",
        defaultValue = "",
    )
    family = common.StringProperty(
        "family", """Family name, clan name, or group name(s). Note that is value may be unspecified.""",
        defaultValue = "",
    )
    display = common.StringProperty(
        "display", """Display name, should be defined if this name should be displayed in the manner other than the application's default display mechanism (for instance "Family, Personal"). Note that is value may be unspecified.""",
        defaultValue = "",
    )
    formOfAddress = common.StringProperty(
        "formOfAddress", """Form of address (title), for example "Dr.", "His Eminence", "Her Majesty", "Ms.". Note that is value may be unspecified, applications may use it if present during default display calculation.""",
        defaultValue = "",
    )
    preferred = common.BooleanProperty(
        "preferred", """Indicate that this name/nickname is the preferred form of address for the individual""",
        defaultValue = 0,
    )
class CommunicationsObject( ContactObject ):
    """Base type for communication-oriented contact object properties
    """
    type = common.StringProperty(
        "type", """Indicates the role of this particular communications address, for example "home", "work", "mobile".""",
        defaultValue = ""
    )
    preferred = common.BooleanProperty(
        "preferred", """Indicate that this name/nickname is the preferred address of its type for the individual""",
        defaultValue = 0,
    )
class PhoneNumber( CommunicationsObject ):
    """A phone number, includes satellite and videophones"""
    number = common.StringProperty(
        "number", "The telephone number",
        defaultValue = "",
    )
    messaging = common.BooleanProperty(
        "messaging", "Whether this connection has messaging (voicemail) support",
        defaultValue = 0,
    )
class EmailAddress( CommunicationsObject ):
    """An e-mail address, generally an Internet e-mail address"""
    address = common.StringProperty(
        "address", "The email address",
        defaultValue = "",
    )
class DeliveryAddress( CommunicationsObject):
    """A physical delivery address (mailing address)
    From the vCard spec:
    Post Office Address (first field)
    Extended Address (second field),
    Street (third field),
    Locality (fourth field),
    Region (fifth field),
    Postal Code (six field),
    Country (seventh field)
    """
    address = common.StringProperty(
        "address", """The post-office address e.g. "Widgets Inc." or "Great Guy".""",
        defaultValue = "",
    )
    extendedAddress = common.StringProperty(
        "extendedAddress", "The extended address e.g. Purchasing Department",
        defaultValue = "",
    )
    street = common.StringProperty(
        "street", "The street address e.g. 1426 Someroad Place.",
        defaultValue = "",
    )
    unit = common.StringProperty(
        "unit", "The unit/apartment address e.g. #32b or Apt. 32 or Suite 32b",
        defaultValue = "",
    )
    locality = common.StringProperty(
        "locality", "The town, post-office district or city e.g. Toronto",
        defaultValue = "",
    )
    region = common.StringProperty(
        "region", "The state, province, district, or territory e.g. Ontario",
        defaultValue = "",
    )
    postalCode = common.StringProperty(
        "postalCode", "The post-office designation (zip-code, postal-code) e.g. M5P 3K8 or 90210",
        defaultValue = "",
    )
    country = common.StringProperty(
        "country", "The country e.g. Canada or United States of America",
        defaultValue = "",
    )
    domestic = common.BooleanProperty(
        "domestic", "Whether this address is domestic or international",
        defaultValue = 0,
    )
    postal = common.BooleanProperty(
        "postal", "Whether this address is served by the post office",
        defaultValue = 1,
    )
    parcel = common.BooleanProperty(
        "parcel", "Whether this address accepts parcel deliveries",
        defaultValue = 1,
    )
class Contact( ContactObject ):
    """A single contact: collections of names, phones, emails and addresses"""
    names = basic.BasicProperty(
        "names", "List of names for the contact",
        boundaries = [
            boundary.ForEach( boundary.Type( Name ))
        ],
        defaultFunction = lambda x,y: [],
    )
    phones = basic.BasicProperty(
        "phones", "List of phone numbers for the contact",
        boundaries = [
            boundary.ForEach( boundary.Type( PhoneNumber ))
        ],
        defaultFunction = lambda x,y: [],
    )
    emails = basic.BasicProperty(
        "emails", "List of email addresses for the contact",
        boundaries = [
            boundary.ForEach( boundary.Type( EmailAddress ))
        ],
        defaultFunction = lambda x,y: [],
    )
    deliveries = basic.BasicProperty(
        "deliveries", "List of delivery addresses for the contact",
        boundaries = [
            boundary.ForEach( boundary.Type( DeliveryAddress ))
        ],
        defaultFunction = lambda x,y: [],
    )
)
class OrganisationMember( ContactObject ):
    """A titled position within an organisation, holding member contacts"""
    title = common.StringProperty(
        "title", "The title of this position",
        defaultValue = "",
    )

# The shared members property must be created *after* OrganisationMember
# exists: the original built it inside the class body while referencing
# OrganisationMember itself (a NameError at class-creation time), and
# then referenced the class-local _members at module scope (a second
# NameError).  Building it here fixes both while keeping the same
# OrganisationMember.members / Organisation.members interface.
_members = basic.BasicProperty(
    "members", "The members/contacts in the position",
    boundaries = [
        boundary.ForEach( boundary.Type( (Contact, OrganisationMember) ))
    ],
    defaultFunction = lambda x,y: [],
)
OrganisationMember.members = _members
class Organisation( Contact ):
    """An organisation: a contact whose members are positions/contacts"""
    members = _members
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,965 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/ext/__init__.py | """extension-specific property classes
"""
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,966 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/xmlencoder.py | """Completely unfinished XML encoder"""
from basicproperty import basic, common, boundary, propertied
class Tag (propertied.Propertied):
    """Represents a particular tag within a document"""
    name = common.StringProperty(
        "name", "The name of the tag",
        defaultValue = "",
    )
    attributes = common.DictionaryProperty (
        "attributes", """The in-tag attributes of the tag""",
        defaultFunction = lambda x,y: {},
    )
    # setDefaultOnGet = 1: presumably appending to a fresh Tag's content
    # stores the new list on the instance -- confirm against ListProperty
    content = common.ListProperty (
        "content", """The content (children) of the tag""",
        setDefaultOnGet = 1,
        defaultFunction = lambda x,y: [],
    )
    def __cmp__( self, other ):
        """Compare this tag to another

        Compares by name, then attributes, then content; any non-Tag
        operand compares as -1.  NOTE: __cmp__/cmp are Python 2 only;
        under Python 3 this class would need __eq__/__lt__ instead.
        """
        if not isinstance(other, Tag):
            return -1
        if other.name != self.name:
            return cmp( self.name, other.name)
        if other.attributes != self.attributes:
            return cmp( self.attributes, other.attributes)
        if other.content != self.content:
            return cmp( self.content, other.content)
        return 0
    def __repr__( self ):
        """Create a decent representation of this tag"""
        fragments = []
        # NOTE(review): decode().encode('utf-8') assumes Python 2
        # byte-strings; str has no decode() on Python 3 -- confirm the
        # intended interpreter before reuse
        name = self.name.decode().encode('utf-8')
        fragments.append( "<"+name )
        for key, value in self.attributes.items ():
            fragments.append ("""%s=%r"""%(key, value))
        fragments = [ " ".join(fragments)]
        if self.content:
            fragments.append(">")
            for item in self.content:
                # string children are emitted verbatim; others via repr
                if isinstance(item, str):
                    fragments.append(item)
                else:
                    fragments.append(repr(item))
            fragments.append ("</%s>"%(name))
        else:
            # no children: self-closing tag
            fragments.append ("/>")
        return "".join(fragments)
class Storage (object):
    """Factory for Tag instances used during encoding"""
    def tag(self, name, content = None, **attributes):
        """Build a Tag called *name* with the given keyword attributes

        content -- optional child list; only passed through to the Tag
            constructor when it was actually supplied
        """
        named = { 'name': name, 'attributes': attributes }
        if content is not None:
            named['content'] = content
        return Tag( **named )
class Encoder (object):
    """Base class for all encoder objects"""
    def __call__( self, value, storage):
        """Encode the value for the storage

        Abstract: this base implementation does nothing (implicitly
        returns None); subclasses override to produce a tag for value.
        """
class PrimitiveEncoder(Encoder):
    """Encoder for a primitive object-type
    This will include objects such as:
    integer, long, float
    string, Unicode
    boolean
    and lists/sequences of objects.
    """
class StringEncoder (PrimitiveEncoder):
    """Encoder for string values"""
    def __call__( self, value, storage):
        """Encode the value for the storage

        value -- a str or unicode instance; anything else raises ValueError
        storage -- object whose tag( name, **attributes ) builds the node

        Returns a "str" tag whose single content item is the utf-8
        encoded text.  NOTE(review): the str branch first decodes with
        the default codec (Python 2 semantics) -- this presumably fails
        for non-ASCII byte-strings; confirm intended behaviour.
        """
        tag = storage.tag( "str", enc = "utf-8")
        if isinstance (value, str):
            tag.content.append( value.decode().encode( 'utf-8' ) )
        elif isinstance( value, unicode):
            tag.content.append( value.encode( 'utf-8' ) )
        else:
            raise ValueError ("""Attempt to encode non-string, non-Unicode object with StringEncoder: %r"""%(value,))
        return tag
# The following encoders are stubs: each inherits the no-op __call__
# from Encoder and has not yet been implemented.
class NumberEncoder (PrimitiveEncoder):
    """Encoder for numeric values (stub)"""
class DateEncoder (PrimitiveEncoder):
    """Encoder for mxDateTime values (stub)"""
class PropertiedEncoder (Encoder):
    """Encoder for a Propertied-object sub-class (stub)"""
75,967 | eshikvtumane/basicproperty | refs/heads/master | /website/examples/simpletypes.py | from basicproperty import common, propertied, basic
class Simple( propertied.Propertied ):
    """Small demonstration class using the common property types"""
    count = common.IntegerProperty(
        "count", """Count some value for us""",
        defaultValue = 0,
    )
    names = common.StringsProperty(
        "names", """Some names as a list of strings""",
    )
    mapping = common.DictionaryProperty(
        "mapping", """Mapping from name to number""",
        defaultValue = [
            ('tim',3),
            ('tom',4),
            ('bryan',5),
        ],
    )
    def __repr__( self ):
        """Return a one-line representation listing all set properties

        Only properties which currently have a value on the instance
        (hasattr succeeds) are included, each shown with its full repr.
        (An unused truncating helper was removed from this method; it
        was never called.)
        """
        className = self.__class__.__name__
        props = ", ".join([
            '%s=%s'%(prop.name, repr(prop.__get__(self)))
            for prop in self.getProperties()
            if hasattr(self,prop.name)
        ])
        return '<%(className)s %(props)s>'%locals()
    __str__ = __repr__
# Demonstration script: exercise coercion, cloning and mutation.
# Single-argument print() behaves identically on Python 2 and 3; the
# original 2.x-only print statements are SyntaxErrors on Python 3.
s = Simple()
s.names.append( 'this' )
s2 = s.clone( count=500 )
s.names.append( 23 )
s.names.append( u'\xf0' )
s.mapping[ 'kim' ] = 32
s.count += 1
print(s)
print(s2)
75,968 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_boundary.py | import unittest
from basicproperty import basic, propertied, common
from basictypes import boundary
class BoundaryErrorCase( unittest.TestCase ):
def testBaseRepr(self):
"""Test representation code"""
repr( boundary.BoundaryError(None, None, None, None, "somemessage" ) )
def testRepr( self ):
"""Test representation code with real objects"""
property = basic.BasicProperty( "this" )
bound = boundary.Type( str )
client = self
value = "some value"
object = boundary.BoundaryError(property, bound, client, value, "somemessage" )
repr( object )
def testStr( self ):
"""Test string-conversion code with real objects"""
property = basic.BasicProperty( "this" )
bound = boundary.Type( str )
client = self
value = "some value"
object = boundary.BoundaryError(property, bound, client, value, "somemessage" )
str(object)
class ErrorTest:
def _testError( self, bound, badValue ):
"""Test that the error object is properly configured"""
property = basic.BasicProperty( "this" )
client = self
try:
bound( badValue, property, client )
except boundary.BoundaryError, error:
assert error.property is property, """Improper error attribute %s"""% (property,)
assert error.boundary is bound, """Improper error attribute %s"""% (bound,)
assert error.client is client, """Improper error attribute %s"""% (client,)
assert error.value is badValue, """Improper error attribute %s"""% (badValue,)
class BoundaryTestCase(unittest.TestCase):
def setUp(self):
self.boundary = boundary.Boundary()
def testCallParams(self):
self.boundary( None, None, None )
class TypeBoundaryTestCase(unittest.TestCase, ErrorTest):
def testGood(self):
"""Test a string value"""
boundary.Type( str )( "some value" )
def testGood2(self):
"""Test a unicode value"""
bound = boundary.Type( str )
self.failUnlessRaises( boundary.BoundaryTypeError, bound, None, None, u"some value")
def testBad1(self):
"""Test a non-string value"""
bound = boundary.Type( str )
self.failUnlessRaises( boundary.BoundaryTypeError, bound, None, None, 1)
def testImported(self):
"""Test a unicode value"""
bound = boundary.Type( "basictypes.boundary.Boundary" )
self.failUnlessRaises( boundary.BoundaryTypeError, bound, None, None, None)
bound( boundary.Boundary(), None, None)
def testError( self ):
"""Test that the reported error is properly configured"""
bound = boundary.Type( "basictypes.boundary.Boundary" )
self._testError( bound, 'object' )
class RangeBoundaryTestCase(unittest.TestCase, ErrorTest):
def testGood(self):
"""Test values within range"""
bound = boundary.Range( minimum=0,maximum=10 )
bound( 0 )
bound( 10 )
bound( 5 )
bound( 5.0 )
def testBad(self):
"""Test values outside range"""
bound = boundary.Range( minimum=0,maximum=10 )
self.failUnlessRaises( boundary.BoundaryValueError, bound, -1)
self.failUnlessRaises( boundary.BoundaryValueError, bound, 'a')
self.failUnlessRaises( boundary.BoundaryValueError, bound, [])
self.failUnlessRaises( boundary.BoundaryValueError, bound, None)
self.failUnlessRaises( boundary.BoundaryValueError, bound, 11)
self.failUnlessRaises( boundary.BoundaryValueError, bound, 10.00001)
self.failUnlessRaises( boundary.BoundaryValueError, bound, -.00001)
def testError( self ):
"""Test that the reported error is properly configured"""
bound = boundary.Range( minimum=0,maximum=10 )
self._testError( bound, 20 )
class LengthBoundaryTestCase(unittest.TestCase, ErrorTest):
def testGood(self):
"""Test values within range"""
bound = boundary.Length( minimum=0,maximum=3 )
bound( () )
bound( (1,2,3) )
bound( '123' )
bound( [] )
def testBad(self):
"""Test values outside range"""
bound = boundary.Length( minimum=1,maximum=3 )
self.failUnlessRaises( boundary.BoundaryValueError, bound, [])
self.failUnlessRaises( boundary.BoundaryValueError, bound, '')
self.failUnlessRaises( boundary.BoundaryValueError, bound, '1234')
def testError( self ):
"""Test that the reported error is properly configured"""
bound = boundary.Length( minimum=0,maximum=3 )
self._testError( bound, range(5) )
class NonNullBoundaryTestCase(unittest.TestCase, ErrorTest):
def testGood(self):
"""Test values within range"""
bound = boundary.NotNull()
bound( (1,), None, None, )
bound( 1, None, None, )
bound( '1', None, None, )
bound( [1], None, None, )
def testBad(self):
"""Test values outside range"""
bound = boundary.NotNull()
self.failUnlessRaises( boundary.BoundaryValueError, bound, None, None, [])
self.failUnlessRaises( boundary.BoundaryValueError, bound, None, None, '')
self.failUnlessRaises( boundary.BoundaryValueError, bound, None, None, 0)
self.failUnlessRaises( boundary.BoundaryValueError, bound, None, None, None)
self.failUnlessRaises( boundary.BoundaryValueError, bound, None, None, u'')
def testError( self ):
"""Test that the reported error is properly configured"""
bound = boundary.NotNull()
self._testError( bound, None )
class ForEachBoundaryTestCase(unittest.TestCase, ErrorTest):
def testGood(self):
"""Test values within range"""
bound = boundary.ForEach( boundary.NotNull())
bound( (), None, None, )
bound( [1,2,3], None, None, )
bound( '', None, None, )
bound( ' ', None, None, )
def testBad(self):
"""Test values outside range"""
bound = boundary.ForEach( boundary.NotNull())
self.failUnlessRaises( boundary.BoundaryValueError, bound, [0])
self.failUnlessRaises( boundary.BoundaryValueError, bound, [None])
self.failUnlessRaises( boundary.BoundaryValueError, bound, [1,2,3,0])
self.failUnlessRaises( boundary.BoundaryValueError, bound, (0,1,2,3))
def testError( self ):
"""Test that the reported error is properly configured"""
bound = boundary.ForEach( boundary.NotNull())
self._testError( bound, [None] )
class FunctionTestCase(unittest.TestCase, ErrorTest):
def testTrue(self):
"""Test function test with True"""
def x( value ):
return value
bound = boundary.Function( x, boundary.Function.TRUE_VALUES)
bound( 1 )
bound( [1,2] )
bound( "this" )
bound( True )
def testFalse(self):
"""Test function test with False"""
def x( value ):
return value
bound = boundary.Function( x, boundary.Function.FALSE_VALUES)
self.failUnlessRaises( boundary.BoundaryValueError, bound, [0])
self.failUnlessRaises( boundary.BoundaryValueError, bound, [None])
self.failUnlessRaises( boundary.BoundaryValueError, bound, [1,2,3,0])
self.failUnlessRaises( boundary.BoundaryValueError, bound, (0,1,2,3))
def testOther(self):
"""Test function test with other"""
def x( value ):
return value + 1
bound = boundary.Function( x, 5)
bound( 4 )
self.failUnlessRaises( boundary.BoundaryValueError, bound, 3)
class TestData( propertied.Propertied ):
str1 = common.StringProperty(
"str1", """Test string property""",
boundaries = (
boundary.Range( minimum= "", maximum="z" ),
boundary.Length( maximum = 10 ),
)
)
str2 = common.StringProperty(
"str2", """Test string property""",
boundaries = (
boundary.Range( minimum= "a", maximum="z" ),
boundary.Length( maximum = 10 ),
)
)
class PropertyTestCases(unittest.TestCase):
def testGood(self):
"""Test values that should not trigger a property meltdown..."""
TestData().str1 = 'abc'
TestData().str1 = 'z'
TestData().str1 = 's'
TestData().str1 = ''
def testBad(self):
"""Test values that should raise errors"""
self.failUnlessRaises( ValueError, setattr, TestData(), 'str2', '12345678901' )
self.failUnlessRaises( ValueError, setattr, TestData(), 'str2','' )
self.failUnlessRaises( ValueError, setattr, TestData(), 'str2','A' )
self.failUnlessRaises( ValueError, setattr, TestData(), 'str2','Z' )
if __name__ == "__main__":
unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,969 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/linearise.py | """Object for linearizing a node-graph to XML
"""
import types, cStringIO, operator, traceback, sys, struct
from xml.sax import saxutils
from copy_reg import dispatch_table
from pickle import whichmodule, PicklingError
class RootCellarError(PicklingError):
"""Root error raised by the rootcellar lineariser"""
def append( self, element ):
"""Append an element to the stack"""
if not hasattr( self, 'stack' ):
self.stack = []
self.stack.append( element )
def __str__( self ):
base = PicklingError.__str__( self )
if hasattr( self, 'stack' ):
stack = self.stack[:3]
stack.reverse()
base = "%s\nLinearisation Stack:\n\t%s"%(
base,
"\n\t".join(map(repr,self.stack[-3:]))
)
return base
__metatype__ = type
defaults = {\
'subelspacer':', ',\
'courtesyspace':' ',\
'curindent':'',\
'indent':'\t',\
'numsep':',',\
'full_element_separator':'\n',\
'mffieldsep':'\n',
# EndComments say: if more than this many lines are between node start
# and end, put a comment at the closing bracket saying what node/proto is
# being closed.
'EndComments': 10 \
}
def linearise( value, linvalues=defaults, **namedargs ):
"""Linearise the given (node) value to a string"""
l = Lineariser( linvalues, **namedargs)
return l.linear( value )
def _findUnusedKey( dictionary, baseName ):
"""Find an unused name similar to baseName in dict"""
count = 1
name = baseName
while dictionary.has_key( name ):
name = '%s%s'%(baseName, count )
count += 1
return name
class Generator( saxutils.XMLGenerator ):
"""Friendly generator for XML code"""
def startElement( self, name, attributes=None, **named ):
"""Start a new element with given attributes"""
if attributes is None:
attributes = {}
attributes.update( named )
saxutils.XMLGenerator.startElement(
self,
name,
self._fixAttributes(attributes)
)
def _fixAttributes( self, attributes=None ):
"""Fix an attribute-set to be all unicode strings"""
for key,value in attributes.items():
if not isinstance( value, (str,unicode)):
attributes[key] = unicode( value )
elif isinstance( value, str ):
attributes[key] = value.decode( 'latin-1' )
return attributes
class Lineariser:
'''
A data structure & methods for linearising
sceneGraphs, nodes, scripts and prototypes.
Should be used only once as member vars are not
cleared after the initial linearisation.
'''
volatile = 0
protoalreadydone = {
# already occupied by core-functionality tags...
# block below populates us with our helpers...
'property': 1,
'globals':1,
'global':1,
'globalid':1,
"globalversion":1,
'tagname':1,
'reference':1,
'picklestate':1,
'pickleargs':1,
'picklereg':1,
'rootcellar':1,
}
lowLevelHelpers = {}
def registerHelper( cls, clientClass, helper ):
"""Register a helper object with the linearise class"""
name = clientClass.__name__.split('.')[-1]
name = _findUnusedKey( cls.protoalreadydone, name )
cls.lowLevelHelpers[clientClass] = helper
cls.protoalreadydone[clientClass] = name
cls.protoalreadydone[name] = clientClass
# now integrate helper's sub-tags into our own...
newSet = {}
for key,name in getattr( helper, 'tagNames', {}).items():
if cls.protoalreadydone.get(name) is not type(helper):
newName = _findUnusedKey( cls.protoalreadydone, name )
cls.protoalreadydone[ newName ] = type(helper)
else:
newName = name
newSet[key] = newName
helper.tagNames = newSet
registerHelper = classmethod( registerHelper )
def __init__(self, linvalues=None, alreadydone=None, encoding='utf-8', *args, **namedargs):
self.encoding = encoding
if linvalues is None:
linvalues = defaults
if namedargs:
linvalues = linvalues.copy()
linvalues.update( namedargs )
self.linvalues = linvalues
if alreadydone is None:
self.alreadydone = {}
else:
self.alreadydone = alreadydone
def linear(
self, clientNode,
buffer=None,
skipProtos = None,
*args, **namedargs
):
'''Linearise a node/object
'''
# prototypes in this dictionary will not be linearised
self.skipProtos = {}
# protobuffer is a seperate buffer into which the class definitions are stored
self.protobuffer = cStringIO.StringIO()
self.protogenerator = Generator( self.protobuffer, self.encoding )
self.protogenerator.startDocument()
self.protogenerator.startElement('rootcellar')
self.protogenerator.startElement('globals')
self.fullsep(1)
# protoalreadydone is used in place of the scenegraph-specific
# node alreadydone. This allows us to push all protos up to the
# top level of the hierarchy (thus making the process of linearisation much simpler)
self.protoalreadydone = self.protoalreadydone.copy()
# main working algo...
self.typecache = {
}
self.buffer = buffer or cStringIO.StringIO()
self.generator = Generator( self.buffer, self.encoding)
self.alreadydone.clear()
self.indentationlevel = 0
self._linear( clientNode )
del( self.typecache ) # to clear references to this node...
self.alreadydone.clear()
# side effect has filled up protobuffer for us
self.protogenerator.endElement('globals')
self.fullsep(1)
self.generator.endElement('rootcellar')
self.generator.endDocument()
rval = self.protobuffer.getvalue() + self.buffer.getvalue()
self.buffer.close()
self.protobuffer.close()
return rval
### High-level constructs...
def _class( self, clientNode):
"""Linearise a class, or more precisely, a class-reference"""
# check that we haven't yet done this prototype
if self.protoalreadydone.has_key( clientNode ):
return self.protoalreadydone.get( clientNode )
# else, want to export a class-definition line at the top of the file...
module,name = self.globalname( clientNode )
tagName = _findUnusedKey(
self.protoalreadydone,
name,
)
self.protoalreadydone[tagName] = clientNode
self.protoalreadydone[clientNode] = tagName
position = self.protobuffer.tell()
start = self.protogenerator.startElement
end = self.protogenerator.endElement
chars = self.protogenerator.characters
start( "global" )
start( "globalname" )
chars( tagName )
end( "globalname" )
start( "globalid" )
chars( "%s.%s"%(module, name) )
end( "globalid" )
if hasattr(clientNode,'__version__'):
start( 'globalversion' )
chars( clientNode.__version__ )
end( "globalversion" )
end( "global" )
self.protobuffer.write( self.linvalues['full_element_separator'] )
self.alreadydone[ clientNode ] = position, self.protobuffer.tell()
return tagName
def _Node( self, clientNode, *args,**namedargs):
'''Linearise an individual node
'''
tagName = self._class( clientNode.__class__ )
buffer = self.buffer
# now calculate the representation of this node...
position = buffer.tell()
self.alreadydone[ id(clientNode) ] = (position,position)
start = self.generator.startElement
end = self.generator.endElement
start( tagName, id=self.refName(clientNode) )
self._indent()
self.nodeContent( clientNode )
self._dedent()
self.fullsep()
self.indent()
end( tagName )
self.alreadydone[ id(clientNode) ] = position, buffer.tell()
return None
def nodeContent( self, clientNode ):
"""Export the contents of a given node"""
start = self.generator.startElement
end = self.generator.endElement
if hasattr( clientNode, 'getProperties' ):
## self.fullsep()
## self.indent()
self._properties( clientNode )
elif hasattr( clientNode, '__getstate__' ):
self.fullsep()
self.indent()
start( 'picklestate' )
self._linear( clientNode.__getstate__(), volatile=1 )
end( 'picklestate' )
elif hasattr( clientNode, '__getinitargs__' ):
self.fullsep()
self.indent()
start( 'pickleargs' )
self._linear( clientNode.__getinitargs__(), volatile=1)
end( 'pickleargs')
elif dispatch_table.has_key( clientNode.__class__ ):
self.fullsep()
self.indent()
start( 'picklereg' )
reducer = dispatch_table.get( clientNode.__class__ )
self._linear( reducer( clientNode ), volatile=1)
end( 'picklereg')
elif hasattr( clientNode, '__reduce__') and callable(clientNode.__reduce__):
tup = clientNode.__reduce__()
if len(tup) == 3:
target,args,state = tup
else:
target,args = tup
state = None
self.fullsep()
self.indent()
start( 'picklereduce' )
self._linear( target )
self._linear( args, volatile=1 )
self._linear( state, volatile=1 )
end( 'picklereduce' )
elif hasattr( clientNode, '__dict__'):
self.fullsep()
self.indent()
start( 'picklestate' )
self._linear( clientNode.__dict__, volatile=1 )
end( 'picklestate' )
else:
raise RootCellarError( """Don't know how to store %r object"""%(
clientNode,
))
def reference( self, target ):
"""Reference an object"""
if hasattr( target, '__class__'):
refType = self._class( target.__class__ )
else:
refType = "class"
self.generator.startElement( 'reference', id = self.refName(target), type=refType)
self.generator.endElement( 'reference')
def refName( self, target ):
"""Get a reference id/name for a given object"""
return hex(id(target))[2:].lower()
def _properties(self, object ):
"""Write out the attribute dictionary for an object"""
properties = object.getProperties()
propertyTags = []
for property in properties:
if not getattr( property, 'volatile', None):
getState = getattr( property, 'getState', None )
try:
if getState:
value = getState( object )
else:
value = property.__get__( object )
except (AttributeError,KeyError):
# no non-default value set, so ignore...
pass
#print 'skipping', property.name
# XXX strip values where value is default? option maybe?
else:
propertyTags.append( (property, value ) )
start = self.generator.startElement
end = self.generator.endElement
for property, value in propertyTags:
self.fullsep()
self.indent()
start( 'property', {"name":property.name} )
self.fullsep()
self._indent()
self.indent()
self._linear( value )
self._dedent()
self.fullsep()
self.indent()
end( 'property' )
### Utility functions...
def _dedent( self ):
self.indentationlevel = self.indentationlevel-1
self.linvalues['curindent'] = self.linvalues['indent']*self.indentationlevel
def _indent( self, exact = None ):
if exact is not None:
self.indentationlevel = exact
else:
self.indentationlevel = self.indentationlevel+1
self.linvalues['curindent'] = self.linvalues['indent']*self.indentationlevel
def fullsep( self, proto=0 ):
"""Add full seperator to the indicated buffer"""
if proto:
buffer = self.protobuffer
else:
buffer = self.buffer
buffer.write( self.linvalues['full_element_separator'] )
def indent( self, proto=0 ):
"""Add current indent to the indicated buffer"""
if proto:
buffer = self.protobuffer
else:
buffer = self.buffer
buffer.write( self.linvalues['curindent'])
def _linear( self, clientNode, volatile=0):
'''Linearise a particular client node of whatever type by dispatching to
appropriate method...'''
self.volatile = volatile
try:
if isinstance( clientNode, (type, types.ClassType)):
helper = self.lowLevelHelpers.get( type )
else:
helper = self.lowLevelHelpers.get( type(clientNode))
if helper:
return helper( clientNode, self )
elif self.alreadydone.has_key( id(clientNode)) and not volatile:
return self.reference( clientNode )
## elif isinstance( type(clientNode), (type, types.ClassType)):
## return self.lowLevelHelpers.get( type )( clientNode, self )
else:
return self._Node( clientNode )
except RootCellarError, err:
err.append( clientNode )
raise
except Exception:
f = cStringIO.StringIO()
traceback.print_exc( file = f )
error = f.getvalue()
raise RootCellarError(
"""Failure attempting to linearise %r: %s"""%(
clientNode,
error,
)
)
def globalname( self, client):
"""Try to find the name for the client"""
name = getattr( client, '__name__', None )
if not name:
raise ValueError( """Attempt to put %r in the root cellar, has no __name__ attribute, so can't create a global reference to it"""%( client,))
module = getattr( client, '__module__', None )
if not module:
module = whichmodule( client, name )
# sanity check...
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError), error:
raise RootCellarError(
"""Can't store %r: can't import %s.%s: %r"""%(
client, module, name, error
)
)
else:
if klass is not client:
raise RootCellarError(
"""Can't store %r (id=%s): importing %s.%s does not give the same object, gives %r (id=%s)"""%(
client, id(client), module, name, klass, id(klass),
)
)
return module,name
class LineariserHelper(object):
clientClasses = ()
tagNames = {}
def __call__( self, value, lineariser ):
"""Linearise the value using given lineariser"""
if not self.doRef( value, lineariser ):
position = lineariser.buffer.tell()
self.do( value, lineariser )
lineariser.alreadydone[ id(value) ] = position,lineariser.buffer.tell()
def doRef( self, value, lineariser ):
"""Check if we can just reference this value"""
if lineariser.alreadydone.has_key( id(value)) and not lineariser.volatile:
lineariser.reference(value)
return 1
return 0
def do( self, value, lineariser ):
"""Do the primary work of linearising"""
class NoneLin(LineariserHelper):
clientClasses = [types.NoneType]
tagNames = {'none':'none'}
def doRef( self, value, lineariser ):
"""Check if we can just reference this value"""
return 0
def do( self, value, lineariser ):
lineariser.generator.startElement( self.tagNames.get('none') )
lineariser.generator.endElement( self.tagNames.get('none') )
class IntLin(LineariserHelper):
clientClasses = [int,long,float]
tagNames = {}
maxRefSize = 18446744073709551616L
minRefSize = -18446744073709551616L
def doRef( self, value, lineariser ):
"""Check if we can just reference this value"""
if isinstance( value, long):
if value > self.maxRefSize or value < self.minRefSize:
return super( IntLin, self).doRef(value, lineariser)
return 0
def do( self, value, lineariser ):
tagName = lineariser._class( type(value) )
if isinstance( value, long) and value > self.maxRefSize or value < self.minRefSize:
lineariser.generator.startElement( tagName, id=lineariser.refName(value) )
else:
lineariser.generator.startElement( tagName )
lineariser.generator.characters( repr(value))
lineariser.generator.endElement( tagName )
class StringLin(LineariserHelper):
clientClasses = [str, unicode]
tagNames = {}
minRefLength = 64
def doRef( self, value, lineariser ):
"""Check if we can just reference this value"""
if len(value) > self.minRefLength:
return super( StringLin, self).doRef(value, lineariser)
return 0
def do( self, value, lineariser ):
hexID = lineariser.refName( value)
tagName = lineariser._class( type(value) )
if len(value) > self.minRefLength:
lineariser.generator.startElement( tagName, id=hexID )
else:
lineariser.generator.startElement( tagName )
if isinstance( value, str ):
value = value.decode( 'latin-1')
lineariser.generator.characters( value)
lineariser.generator.endElement( tagName )
class SequenceLin(LineariserHelper):
clientClasses = [list, tuple]
tagNames = {'li':'li'}
def do( self, value, lineariser ):
tagName = lineariser._class( type(value) )
start = lineariser.generator.startElement
end = lineariser.generator.endElement
start( tagName, id=lineariser.refName(value))
if value:
lineariser._indent()
lineariser.fullsep()
for item in value:
lineariser.indent()
lineariser._linear( item )
lineariser.fullsep()
lineariser._dedent()
lineariser.indent()
end( tagName )
class MappingLin(LineariserHelper):
clientClasses = [dict]
tagNames = {'key':'key','val':'val'}
def do( self, value, lineariser ):
tagName = lineariser._class( type(value) )
start = lineariser.generator.startElement
end = lineariser.generator.endElement
start( tagName, id=lineariser.refName(value))
if value:
lineariser._indent()
lineariser.fullsep()
for key,item in value.items():
lineariser.indent()
start(self.tagNames.get('key'))
lineariser._linear( key )
end( self.tagNames.get('key'))
lineariser.fullsep()
lineariser._indent()
lineariser.indent()
start(self.tagNames.get('val'))
lineariser._indent()
lineariser._linear( item )
lineariser._dedent()
end( self.tagNames.get('val'))
lineariser._dedent()
lineariser.fullsep()
lineariser._dedent()
lineariser.indent()
end( tagName )
class GlobalLin( LineariserHelper):
clientClasses = [type, types.ClassType,type, types.FunctionType,types.BuiltinFunctionType]
tagNames = {'globalref':"globalref"}
def do( self, client, lineariser ):
start = lineariser.generator.startElement
end = lineariser.generator.endElement
lineariser._class( client )
start(
self.tagNames.get('globalref'),
tagname= lineariser._class( client ),
id=lineariser.refName(client),
)
end( self.tagNames.get('globalref') )
for cls in NoneLin, IntLin,StringLin,SequenceLin,MappingLin, GlobalLin:
for clientClass in cls.clientClasses:
Lineariser.registerHelper( clientClass, cls() )
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,970 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_basic.py | import unittest
from basicproperty import basic
from basictypes import boundary
class TestClass( object ):
simple = basic.BasicProperty(
"simple", "documentation"
)
withBound = basic.BasicProperty(
"withBound", "documentation",
boundaries=( boundary.Type(str),)
)
withDefaultValue = basic.BasicProperty(
"withDefaultValue", "documentation",
defaultValue = 'this',
)
withDefaultFunction = basic.BasicProperty(
"withDefaultFunction", "documentation",
defaultFunction = lambda x,y: [],
)
withDefaultValueNoSet = basic.BasicProperty(
"withDefaultValueNoSet", "documentation",
defaultValue = 'this',
setDefaultOnGet = 0,
)
withDefaultFunctionNoSet = basic.BasicProperty(
"withDefaultFunctionNoSet", "documentation",
defaultFunction = lambda x,y: [],
setDefaultOnGet = 0,
)
class BasicPropertyTest( unittest.TestCase ):
def testInit( self ):
"""Test initialisation of the property objects"""
basic.BasicProperty( "name" )
basic.BasicProperty( "name", "documentation" )
basic.BasicProperty( "name", "documentation", )
basic.BasicProperty( "name", "documentation", defaultValue=[1,2,3] )
basic.BasicProperty( "name", "documentation", defaultFunction = lambda x,y: [] )
basic.BasicProperty( "name", "documentation", baseType = str )
def testBadInit( self ):
"""Test improper initialisation of the property objects"""
self.failUnlessRaises( TypeError, basic.BasicProperty, "name", "documentation", boundary.Type(str), )
self.failUnlessRaises( TypeError, basic.BasicProperty )
def testStandard( self ):
"""Test standard get/set/del semantics"""
object = TestClass()
self.failUnless( not hasattr(object, 'simple' ), 'property is defined before setting')
value = 'this'
object.simple = value
self.failUnless( object.simple is value , 'property value not stored as same object')
del object.simple
self.failUnless( not hasattr(object, 'simple' ), 'property deletion failed')
def testDefaultValue( self ):
"""Test default value semantics"""
object = TestClass()
self.failUnless( object.withDefaultValue == 'this' )
self.failUnless( object.__dict__.has_key("withDefaultValue"), 'Default value not set as current on access' )
del object.withDefaultValue
object.withDefaultValue = 2
self.failUnless( object.withDefaultValue == 2, "explicitly setting value didn't override default" )
def testDefaultFunction( self ):
"""Test default function semantics"""
object = TestClass()
self.failUnless( object.withDefaultFunction == [] )
self.failUnless( object.__dict__.has_key("withDefaultFunction"), 'Default function value not set as current on access' )
del object.withDefaultFunction
object.withDefaultFunction = 2
self.failUnless( object.withDefaultFunction == 2, "explicitly setting value didn't override default" )
class TestClass2( object ):
simple = basic.MethodStoreProperty(
"simple", "documentation",
getMethod = "doGet",
setMethod = "doSet",
delMethod = "doDel",
)
def doGet( self, ):
return self.value
def doSet( self, value ):
self.value = value
def doDel( self):
del self.value
class MethodStoreTest( unittest.TestCase ):
"""Tests for the method-store mix-in"""
def testSimple( self ):
"""Test simple method store operation"""
object = TestClass2()
object.value = None
self.failUnless( object.value == None, """Initial value somehow not None""")
object.simple = 3
self.failUnless( object.value == 3, """Set failed to update value""")
self.failUnless( object.simple == 3, """Get failed to access updated value""")
del object.simple
self.failUnless( not hasattr( object, "value"), """Del failed to delete value""")
self.failUnless( not hasattr( object, "simple"), """Get returned value after deletion""")
if __name__ == "__main__":
unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,971 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/ext/wxtypes.py | """wxPython-specific property classes"""
from basicproperty import basic, common
from basictypes.wxtypes import colour, pen, font
from basictypes import enumeration
## DATA-model properties
class ColourProperty(basic.BasicProperty):
"""wxColour property"""
baseType = colour.wxColour_DT
friendlyName = "Colour"
class PenProperty( basic.BasicProperty ):
"""wxPen property"""
baseType = pen.wxPen
friendlyName = "Pen"
class FontProperty( basic.BasicProperty ):
"""wxFont property"""
baseType = font.wxFont_DT
friendlyName = "Font"
## LIVE properties
class wxPenStyleProperty(
enumeration.EnumerationProperty,
basic.MethodStore,
basic.BasicProperty
):
"""wxPen Style property (live)"""
baseType = pen.PenStyle
getMethod = "GetStyle"
setMethod = "SetStyle"
friendlyName = "Line Style"
class wxPenCapProperty(
enumeration.EnumerationProperty,
basic.MethodStore,
basic.BasicProperty
):
"""wxPen Cap property (live)"""
baseType = pen.PenCap
getMethod = "GetCap"
setMethod = "SetCap"
friendlyName = "Cap Style"
class wxPenJoinProperty(
enumeration.EnumerationProperty,
basic.MethodStore,
basic.BasicProperty
):
"""wxPen Join property (live)"""
baseType = pen.PenJoin
getMethod = "GetJoin"
setMethod = "SetJoin"
friendlyName = "Corner Style"
class wxWidthProperty( basic.MethodStore, common.IntegerProperty ):
"""wxObject Width property (live)"""
getMethod = "GetWidth"
setMethod = "SetWidth"
friendlyName = "Width"
class wxColourProperty( basic.MethodStore, ColourProperty ):
"""wxObject Colour property (live)"""
getMethod = "GetColour"
setMethod = "SetColour"
friendlyName = "Colour"
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,972 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/basic.py | """Core property classes"""
# unique sentinel object (distinct from None, which can be a real property
# value) -- NOTE(review): not referenced in this module's visible code
NULL = []
from basicproperty import propertied, defaults
from basictypes import registry
import new
# optional C accelerator for the hot descriptor paths; pure-Python
# implementations below are used when it is not built/installed
try:
    from basicproperty import propertyaccel
except ImportError, err:
    propertyaccel = None
class BasicProperty (propertied.Propertied):
    """The base class for all of our enhanced property classes

    A BasicProperty has two distinct levels of accessor-methods:

    The first level, comprised of the following methods (these
    were renamed in version 0.5 to match the standard descriptor
    API):
        __get__(self, client, klass)
        __set__(self, client, value)
        __delete__(self, client)
    allows you to override the entire get/set/delete process
    in your subclass.  Overriding these methods allows you to
    short-circuit around bounds checking and coercion.  You
    become responsible for implementing everything.

    The second level, comprised of the following methods:
        _getValue(self, client)
        _setValue(self, client, value)
        _delValue(self, client)
    allows you to override only the storage/retrieval process
    in your subclass without affecting the bounds checking or
    coercion functions of the property.  See: the MethodStore
    mix-in for an example of overriding just these methods.
    """
    # when true, retrieving a default value also stores it on the client
    setDefaultOnGet = 1
    # when true, this is a real descriptor; defaults are stored via setattr
    # so any client __setattr__ hook still fires (see getDefault)
    trueProperty = 1
    baseType = None
    boundaries = ()
    ## Undefined by default, but possibly available attributes...
    # defaultValue
    # defaultFunction
    # friendlyName
    def __init__(
        self, name, documentation = "",
        **namedarguments
    ):
        """Create a new basic property object

        name -- name of the property, used for storage and reporting
        documentation -- appears in automated documentation systems
        baseType -- an object representing the base-type of the
            property's values.  May include the following values:
                coerce( value ) -- coerce value to acceptable type
                check( value ) -- check that given value is acceptable,
                    return false if it is not.
                factories( ) -- return a list of factory functions/classes
                    for creating new instances of the class
                dataType -- string specifying a data-type key for the
                    values.  This specifier is entirely local to the
                    properties system, with no neccessary relation to
                    class or type names.  With that said, generally the
                    values are the dotted-name of the class/type allowed
                    for the property.
                    Note: This can be a dotted name with the trailing
                    items specifying more specific data types, so, for
                    instance, str.long will use the "long string" editor if
                    it's registered, or the "string" editor if not.
            if coerce is not present, the class should have an initialiser
            that allows passing a single value as the value to coerce.
            if check is not present, check will be done as
            isinstance( value, baseType).
            if factories is not present, factories will be assumed to be
            the baseType itself.
        defaultValue -- static value to be used as default, if not
            specified, not provided
        defaultFunction -- function with signature function( property,
            client ) which returns a dynamic default value
        setDefaultOnGet -- if true (default), then retrieving a
            default value causes the value to be explicitly set as the
            current value
        boundaries -- series of callable Boundary objects which are
            (if present) called for each set/getDefault in order to
            confirm that the given value abides by the boundaries
            defined.  Should generally only raise ValueError, TypeError,
            KeyError or AttributeError (or sub-classes of those) as
            appropriate to the boundary violation.
            Called as:
                boundary( value, property, client )
            note that the basictypes.boundary module defines a number
            of Boundary objects which are designed to be used in this
            field of the property.
        friendlyName -- user-friendly name for use in UIs and the like,
            defaults to the current value of name
        trueProperty -- if true, this property really does describe a
            property, that is, a descriptor for an attribute which is
            accessed using object.x notation.
            if false, this property is used to interact with the
            property system, but is not actually a property of an
            object (for instance when the object is an old-style class
            which cannot support properties, you can define virtual
            properties for use with the class)  The property system
            can examine the value of trueProperty to determine whether
            to use setattr(object,name,value) or call
            property.__set__(object, value) to use the property.

        Notes:
            You can specify _any_ name=value set to store a value, so,
            for instance, you could specify __get__ to override the
            __get__ method, or similarly _getValue or getDefault.
            Sub-classes may (and do) define extra name=value pairs to
            support extended functionality.  You will need to look at
            the sub-class's documentation for details on other
            significant attribute names.
        """
        assert isinstance( name, (str,unicode)), """Property name is not a string or unicode value, was a %s"""%(type(name))
        self.name = name
        assert isinstance( documentation, (str,unicode)), """Property documentation is not a string or unicode value, was a %s"""%(type(documentation))
        self.__doc__ = documentation
        # normalise the three default-specification spellings
        # (defaultValue / defaultFunction / default) to a single
        # "default" attribute holding a defaults.Default instance
        if namedarguments.has_key('defaultValue'):
            namedarguments[ 'default' ] = defaults.DefaultValue(
                namedarguments[ 'defaultValue' ]
            )
            del namedarguments[ 'defaultValue' ]
        elif namedarguments.has_key('defaultFunction'):
            namedarguments[ 'default' ] = defaults.DefaultCallable(
                namedarguments[ 'defaultFunction' ]
            )
            del namedarguments[ 'defaultFunction' ]
        elif namedarguments.has_key('default'):
            current = namedarguments.get('default')
            if not isinstance( current, defaults.Default ):
                namedarguments['default' ] = defaults.DefaultValue(current)
        propertied.Propertied.__init__( self, **namedarguments )
    ### Storage customisation points
    def _getValue( self, client ):
        """Perform a low-level retrieval of the "raw" value for the client

        The default implementation uses the property's name as a key
        into the client's __dict__ attribute.  Note that this will
        fail with objects using __slots__.
        """
        return client.__dict__[ self.name ]
    def _setValue( self, client, value ):
        """Perform a low-level set of the "raw" value for the client

        The default implementation sets the value using the property's
        name as a key in the client's __dict__ attribute.  Note that this
        will fail with objects using __slots__.
        """
        # if the client has a __setattr__, it isn't getting called here
        # should we defer to it by default?  I don't know...
        client.__dict__[ self.name ] = value
    def _delValue( self, client ):
        """Perform a low-level delete of the value for the client

        The default implementation deletes the property's name
        from the client's __dict__ attribute.  Note that this will
        fail with objects using __slots__.
        """
        name = self.name
        try:
            del client.__dict__[ name ]
        except KeyError:
            # translate to the error attribute deletion normally raises
            raise AttributeError( '%r instance has no %r attribute'%(client.__class__.__name__, name) )
    ### Type and bounds-checking customisation points
    def getFactories( self ):
        """Attempt to determine the factory callables for this property

        Prefers the base type's "factories" attribute (called if it is
        itself callable); otherwise a callable base type is its own
        factory; returns [] when no base type is set.
        """
        baseType = self.getBaseType()
        if baseType:
            if hasattr( baseType, 'factories'):
                if callable(baseType.factories):
                    return baseType.factories()
                else:
                    return baseType.factories
            elif callable( baseType ):
                return [ baseType ]
        return []
    def getBaseType( self ):
        """Get our base-type object or None if not set

        A string baseType is late-bound (imported by dotted name) on
        first access and cached back onto self.baseType.
        """
        if isinstance(self.baseType, (str,unicode)):
            from basictypes import latebind
            self.baseType = latebind.bind( self.baseType )
        return registry.getDT( self.baseType )
    def getDataType( self ):
        """Get our data-type string

        Explicit self.dataType wins; otherwise copied (and cached) from
        the base type; "" when neither is available.
        """
        if hasattr( self, 'dataType' ):
            return self.dataType
        baseType = self.getBaseType()
        if baseType:
            if hasattr( baseType, 'dataType'):
                # cache on the property so the lookup is done only once
                self.dataType = value = baseType.dataType
                return value
        return ""
    def coerce(self, client, value ):
        """Attempt to convert the given value to an appropriate data type

        Tries to use the baseType's coerce function,
        failing that, calls the base type with the
        value as the first positional parameter.
        Values already of the base type pass through unchanged;
        with no base type set, any value passes through.
        """
        base = self.getBaseType()
        if base:
            if isinstance( value, base ):
                return value
            elif hasattr(base, 'coerce' ):
                return base.coerce( value )
            raise ValueError( """Couldn't automatically coerce value %r for property %s of object %s"""%(value, self, client))
        return value
    def check (self, client, value):
        """Use our basetype to check the coerced value's type

        Returns a true value when acceptable; with no base type set
        every value is accepted.
        """
        base = self.getBaseType()
        if base:
            if hasattr(base, 'check' ):
                return base.check( value )
            elif not isinstance( value, base ):
                return 0
        return 1
    ### Type and bounds-checking machinery-enabling functions...
    def __get__( self, client, klass=None ):
        """Retrieve the current value of the property for the client

        This function provides the machinery for default value and
        default function support.  If the _getValue method raises
        a KeyError or AttributeError, this method will attempt to
        find a default value for the property using self.getDefault

        Class-level access (client is None) returns the property
        object itself; with neither client nor klass a TypeError is
        raised.
        """
        try:
            if client is None:
                # fall through to the try's else-clause below, which
                # handles class-level access
                pass
            else:
                return self._getValue( client )
        except (KeyError, AttributeError):
            # no stored value for this client: use (and possibly store)
            # the default
            return self.getDefault( client )
        else:
            # client was None
            if klass:
                return self
            else:
                raise TypeError( """__get__ called with None as client and class arguments, cannot retrieve""" )
    def getDefault( self, client ):
        """Get the default value of this property for the given client

        This simply calls the Default object registered as self.default,
        which, depending on whether defaultValue or defaultFunction was
        specified during initialisation, will return a value or the
        result of a function call.  If neither was specified, an
        AttributeError will be raised.

        When setDefaultOnGet is true the coerced/checked default is
        also stored on the client (via setattr for true properties so
        __setattr__ hooks run, via __set__ otherwise).
        """
        if not hasattr( self, 'default' ):
            raise AttributeError( """Property %r doesn't define a default value for <%s object @ %s>"""%(self.name, type(client),id(client)))
        value = self.default( self, client )
        if self.setDefaultOnGet:
            if self.trueProperty:
                # allows __setattr__ hook to function,
                # but it's seriously ugly :(
                value = self.coerce( client, value )
                assert self.check ( client, value ), """Value %r is not of the correct type even after coercian for property %s"""%( value, self )
                if self.boundaries:
                    self.checkBoundaries( value, client )
                setattr( client, self.name, value )
                return value
            else:
                return self.__set__( client, value )
        else:
            # not stored: still coerce and type-check before returning
            value = self.coerce( client, value )
            assert self.check ( client, value ), """Value %r is not of the correct type even after coercian for property %s"""%( value, self )
            return value
    def __set__( self, client, value ):
        """Set the current value of the property for the client

        This function provides the machinery for coercion and
        bounds checking.  Before calling the _setValue method,
        __set__ calls self.coerce( client, value ), with the return
        value from the coercion becoming the value to be set.
        Coercion may raise TypeError or ValueError exceptions,
        and the application should be ready to catch these errors.

        Once coercion is finished, __set__ calls
        self.check( client, value ) to allow each boundary
        condition to check the current value.  The boundary
        conditions may raise BoundaryErrors (with the particular
        error classes generally being sub-classes of ValueError
        or TypeError).
        """
        value = self.coerce( client, value )
        assert self.check ( client, value ), """Value %r is not of the correct type even after coercian for property %s"""%( value, self )
        if self.boundaries:
            self.checkBoundaries( value, client )
        self._setValue( client, value )
        return value
    def __delete__( self, client ):
        """Delete the current value of the property for the client

        At the moment, this method does nothing beyond calling
        self._delValue( client ), as there does not appear to be
        any common feature required from __delete__.  The method is
        here primarily to maintain the consistency of the interface
        and allow for applications to override _delValue without
        worrying about losing future features added to __delete__.
        """
        return self._delValue( client )
    def __repr__( self ):
        """Get a representation of this property object"""
        return """<%s %s>"""%( self.__class__.__name__, repr(self.name) )
    __str__ = __repr__
    def getState( self, client ):
        """Helper for client.__getstate__, gets storable value for this property"""
        return self._getValue(client)
    def setState( self, client, value ):
        """Helper for client.__setstate__, sets storable value

        Bypasses coercion/bounds checking: the value is assumed to have
        been validated when it was originally stored.
        """
        return self._setValue( client, value )
    def checkBoundaries( self, value, client=None ):
        """Check given value against boundaries

        Each boundary is called as boundary( value, property, client )
        and is expected to raise on violation.
        """
        for boundary in self.boundaries:
            boundary( value, self, client )
# When the C accelerator module is available, replace the hot-path
# methods on BasicProperty with the C implementations, bound as
# unbound instance methods so they receive the property as "self".
if propertyaccel:
    for name in ('_getValue','__get__'):
        setattr(
            BasicProperty,
            name,
            new.instancemethod(
                getattr(propertyaccel,name),
                None,
                BasicProperty,
            )
        )
class MethodStore( object ):
    """Mix-in class calling object methods for low-level storage/retrieval

    Overrides the three low-level storage hooks (_getValue, _setValue,
    _delValue) so that the value lives behind named accessor methods on
    the client object rather than in its __dict__.  Configure it by
    setting three string attributes -- either as class attributes on a
    subclass or as getMethod/setMethod/delMethod named arguments to the
    property constructor -- naming client methods with these signatures:

        getMethod( )
        setMethod( value )
        delMethod( )

    These attributes are always method *names* (strings), never function
    or bound-method objects.  Using a hook whose method name was left
    empty ("") triggers an assertion failure.
    """
    setMethod = ""
    getMethod = ""
    delMethod = ""
    trueProperty = 0
    def _getValue( self, client ):
        """Retrieve the raw value by calling the client's getMethod"""
        methodName = self.getMethod
        assert methodName, """MethodStore property %s hasn't specified a getMethod"""%(self)
        accessor = getattr( client, methodName )
        return accessor()
    def _setValue( self, client, value ):
        """Store the raw value by calling the client's setMethod with it"""
        methodName = self.setMethod
        assert methodName, """MethodStore property %s hasn't specified a setMethod"""%(self)
        mutator = getattr( client, methodName )
        return mutator( value )
    def _delValue( self, client ):
        """Delete the stored value by calling the client's delMethod"""
        methodName = self.delMethod
        assert methodName, """MethodStore property %s hasn't specified a delMethod"""%(self)
        remover = getattr( client, methodName )
        return remover()
class MethodStoreProperty( MethodStore, BasicProperty ):
    """Convenience class: BasicProperty whose storage goes through client get/set/del methods (see MethodStore)"""
class Delegated( BasicProperty ):
    """BasicProperty which delegates its operation to another object

    target -- name of the client attribute holding the delegate object
    targetName -- attribute name on the delegate to read/write/delete;
        when not set, the property's own name is used
    """
    setDefaultOnGet = 0
    target = BasicProperty(
        "target", """Target property name to which we delegate our operations""",
    )
    targetName = BasicProperty(
        "targetName", """Name on the target to which we delegate""",
    )
    def _delegate( self, client ):
        # the object we forward to, looked up on the client by name
        return getattr( client, self.target )
    def _delegateName( self ):
        # attribute name on the delegate; falls back to our own name
        return getattr( self, 'targetName', self.name )
    def _getValue( self, client ):
        """Read the raw value from the delegate object's attribute"""
        return getattr( self._delegate( client ), self._delegateName() )
    def _setValue( self, client, value ):
        """Write the raw value onto the delegate object's attribute"""
        return setattr( self._delegate( client ), self._delegateName(), value )
    def _delValue( self, client ):
        """Remove the attribute from the delegate object"""
        return delattr( self._delegate( client ), self._delegateName() )
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,973 | eshikvtumane/basicproperty | refs/heads/master | /website/examples/boundaries.py | """Simple demo showing creation and use of boundary for string property"""
from basicproperty import propertied, common
def checkEmailAddress( address, property=None, client=None ):
    """Boundary callable: require exactly one '@' with non-empty halves

    Raises ValueError (the error type the boundary protocol expects)
    when the address does not split into a non-empty username and a
    non-empty domain around a single '@'.
    """
    parts = address.split( '@' )
    if len( parts ) != 2:
        raise ValueError( """Require exactly one @ symbol in email addresses, got: %r"""%(address,))
    prefix, suffix = parts
    if not prefix or not suffix:
        raise ValueError( """Require non-null username and domain in email addresses, got: %r"""%(address,))
class User( propertied.Propertied ):
    """Propertied object whose email address is boundary-checked on set"""
    email = common.StringProperty(
        "email",
        """The user's email address""",
        boundaries = ( checkEmailAddress, ),
    )
if __name__ == "__main__":
    # smoke-test: every address here must be accepted by the boundary...
    good = [
        'this@that.com',
        'that.those@some.com',
    ]
    # ...and every address here must be rejected with a ValueError
    bad = [
        'this at that dot com',
        'that.those.them',
        '@somewhere.net',
    ]
    for name in good:
        User().email = name
    for name in bad:
        try:
            User().email = name
        except ValueError, err:
            pass
        else:
            raise RuntimeError( """Should have rejected %r, didn't"""%(name,))
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,974 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/observable.py | """Demonstration of observable properties
This module provides Observable sub-classes of all of the
property types in the "common" module. It uses PyDispatcher
to send messages synchronously from the property whenever a
property value is set on a client.
"""
from dispatch import dispatcher
from basicproperty import basic, common
class Observable( basic.BasicProperty ):
"""Observable mix-in for sending PyDispatcher messages
We override __set__ and __delete__ to send messages, SET_SIGNAL and
DELETE_SIGNAL respectively. The messages are sent *after* setting,
from the property instance, with the parameter client, and "value"
for SET_SIGNAL. Note that you can customise the message sent
by passing the desired value as named arguments SET_SIGNAL and/or
DELETE_SIGNAL to the property constructor.
You can watch all events with code like this:
dispatcher.connect(
onSet,
signal = Model.propertyName.SET_SIGNAL,
)
dispatcher.connect(
onDel,
signal = Model.propertyName.DELETE_SIGNAL,
)
or only handle messages from a particular property like this:
dispatcher.connect(
onSet,
sender = Model.propertyName,
signal = Model.propertyName.SET_SIGNAL,
)
"""
SET_SIGNAL = "basicproperty.observable.set"
DELETE_SIGNAL = "basicproperty.observable.set"
def __set__( self, client, value ):
"""Override to send dispatcher messages on setting"""
value = super( Observable, self ).__set__( client, value )
try:
dispatcher.send(
signal = self.SET_SIGNAL,
sender = self,
value = value,
client = client,
)
except Exception, err:
# you'd actually want to log here...
pass
return value
def __delete__( self, client ):
"""Override to send dispatcher messages on deletion"""
value = super( Observable, self ).__delete__( client )
try:
dispatcher.send(
signal = self.DELETE_SIGNAL,
sender = self,
client = client,
)
except Exception, err:
# you'd actually want to log here...
pass
return value
# Now mirror all of the "common" types with observable versions...
# Mirror every property class from "common" with an Observable subclass
# bound under the same name in this module's namespace, so e.g.
# observable.StringProperty is a drop-in, message-sending replacement
# for common.StringProperty.
for name in dir(common):
    cls = getattr(common, name)
    if isinstance( cls, type ) and issubclass( cls, basic.BasicProperty ):
        newcls = type( name, (Observable,cls), {
            '__doc__': 'Observable version of basicproperty.common.%s'%(name,),
        })
        # bind directly into module globals; same effect as the previous
        # exec'd assignment statement, without exec'ing generated code
        globals()[ name ] = newcls
if __name__ == "__main__":
    # demo: a propertied model built from the Observable mirror classes,
    # with handlers printing every set/delete notification
    from basicproperty import propertied
    import random
    class Model( propertied.Propertied ):
        # StringProperty/IntegerProperty/IntegersProperty here are the
        # Observable mirrors created above, not the plain common ones
        name = StringProperty(
            "name", """The name of the object, for some purpose""",
            defaultValue = "Uncle Tim",
        )
        age = IntegerProperty(
            "age", """How old the object is, often just a guess""",
            defaultFunction = lambda prop, client: random.randint( 1, 100),
        )
        values = IntegersProperty(
            "values", """Set of observable integers""",
        )
    def onDel( sender, client ):
        """Process a change to a Model Object"""
        print 'DEL', sender.name, client
    def onSet( sender, client, value ):
        """Process value from *any* property"""
        print 'SET', sender.name, client, value
    dispatcher.connect(
        onSet,
        # we assume the default SET_SIGNAL here
        signal = Observable.SET_SIGNAL,
    )
    dispatcher.connect(
        onDel,
        # we assume the default DELETE_SIGNAL here
        signal = Observable.DELETE_SIGNAL,
    )
    m = Model()
    m.name = 'Peters'
    m.age # note we send a message, as the default is __set__ on retrieval
    m.age = 18
    m.values = [2,3,4,'8']
    del m.values
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,975 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/standard/__init__.py | """Old pure-Python property types
Eventually the property types here will be
moved to the parent package as simple modules,
when support for their features are integrated.
"""
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,976 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/__init__.py | """Python 2.2 property sub-classes
This package provides a hierarchy of Python 2.2 property classes
for use in a wide range of applications.
Rationale:
[About Python 2.2 Properties]
The built in property type assumes that the user will define two or
three functions for every property to store, retrieve, and/or delete
the particular property. For example:
class X(object):
def seta( self, value ):
self.__dict__['an'] = int(value)
def geta( self):
return self.__dict__['an']
def dela( self ):
del self.__dict__['an']
a = property( geta,seta,dela,"A simple property a" )
In this approach, a property object is considered to be a way
of organizing "accessor methods" so that the accessor methods
will be triggered by standard python attribute access mechanisms.
This is obviously useful in many situations where particular
property values require custom code for each of their accessor
methods. Unfortunately, the approach tends to require considerable
extra code for every property so defined. Where properties
are highly customized and different from each other this is
acceptable, as the code would need to be written somewhere.
[About basicproperty Properties]
In many real world applications, however, most of the properties
defined are not radically different from other properties
within the application. Instead, the properties tend to share
most of their semantics and needs. Furthermore, there are common
patterns in property usage which tend to be shared across
applications.
The basicproperty package defines a framework for creating families
of properties for use in real world applications. These properties
provide basic implementations of most features needed for simple
data modeling applications, for instance:
bounds checking -- automatic checking of data values against
defined bounds objects, with appropriately raised errors
optional coercion -- allows you to define property classes
which can modify given data types to match the property's
required data types (particularly useful in GUI
applications)
optional method-name-based accessors -- to allow for functionality
similar to the built-in property type, while maintaining
the features of the basicproperty properties. This
functionality, however, allows for redefining client
accessor methods by simple subclassing and overriding.
optional default values -- either static or calculated for
use when a particular property is not currently defined
propertied object class -- an object sub-class explicitly
designed for use with basicproperty property objects
providing a default __init__ function to automatically
assign values to defined properties from named
arguments.
The end goal of the package is to reduce the amount of code needed
to write applications which want to model complex data types using
consistent property classes. In practice, this goal seems to be met,
though the early-alpha status of the project means that there are
still considerable areas for improvement.
"""
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,977 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_wx.py | import unittest
from wxPython.wx import *
from basicproperty import common, basic
from basicproperty.ext import wxtypes
from basictypes.wxtypes import pen as pen_module
class TestClass( object ):
    """Fixture exposing one property per wx data type under test"""
    # wxColour-valued property
    col = wxtypes.ColourProperty(
        "col", "Colour property",
    )
    # wxPen-valued property
    pen = wxtypes.PenProperty(
        "pen", "Pen property",
    )
    # enumerated pen style/cap/join values from basictypes.wxtypes.pen
    penStyle = basic.BasicProperty(
        "penStyle", "Pen style property",
        baseType = pen_module.PenStyle,
    )
    penCap = basic.BasicProperty(
        "penCap", "Pen cap property",
        baseType = pen_module.PenCap,
    )
    penJoin = basic.BasicProperty(
        "penJoin", "Pen Join property",
        baseType = pen_module.PenJoin,
    )
# Table driving PropertyTest.testCoerce:
#   (attribute name,
#    [ (expected value, [ source values that must coerce to it ]) ],
#    [ (bad source value, exception class its assignment must raise) ])
DATATABLE = [ # attribute, testTable, badValues
    ('col', [
        (
            wxColour( 0,0,0),
            [
                '#000000',
                '#0',
                'black',
                (0,0,0),
            ],
        ),
    ], [ # (bad value, error expected)
        ("a", ValueError),
        ((0,0,0,0), TypeError),
    ]),
    ("pen", [
        (
            wxtypes.PenProperty.baseType( "BLACK", 1, wxSOLID),
            [
                wxPen( "BLACK", 1, wxSOLID),
                ( "BLACK", 1, wxSOLID),
                ( "BLACK", 1, wxtypes.wxPenStyleProperty.baseType("wxSOLID")),
                ( "BLACK", 1, wxtypes.wxPenStyleProperty.baseType(wxSOLID)),
                ( ),
                ( "BLACK", 1),
                ( "BLACK",),
                [ ],
                { 'colour':'#000000', 'style':wxSOLID},
            ],
        ),
    ], [ # (bad value, error expected)
    ]),
    ("penStyle", [
        (
            wxSOLID ,
            [
                wxSOLID,
                'wxSOLID',
                ' wxSOLID ',
            ],
        ),
    ], [ # (bad value, error expected)
        ("a", ValueError),
        (None, ValueError),
        ("wxJOIN_BEVEL", ValueError),
        (wxJOIN_BEVEL, ValueError),
    ]),
    ("penCap", [
        (
            wxCAP_BUTT,
            [
                wxCAP_BUTT,
                'wxCAP_BUTT',
                ' wxCAP_BUTT ',
            ],
        ),
    ], [ # (bad value, error expected)
        ("wxSOLID", ValueError),
    ]),
    ("penJoin", [
        (
            wxJOIN_BEVEL,
            [
                wxJOIN_BEVEL,
                'wxJOIN_BEVEL',
                ' wxJOIN_BEVEL ',
            ],
        ),
    ], [ # (bad value, error expected)
        ("wxSOLID", ValueError),
    ]),
]
class PropertyTest( unittest.TestCase ):
def testCoerce( self ):
"""Test the whole DATATABLE set for coercion"""
object = TestClass()
for attribute, table, bad in DATATABLE:
for value, set in table:
for v in set:
setattr( object, attribute, v)
got = getattr( object, attribute )
assert got == value, """COERCE(%s):\n source:%r\n expected: %r\n got: %r"""%( attribute, v, value, got)
for value, err in bad:
try:
setattr(object, attribute, value)
except err:
continue
except Exception, e:
raise TypeError( """Wrong exc: set %(attribute)r to %(value)r\nExpected: %(err)s\nGot: %(e)s"""%(locals()))
else:
result = getattr( object, attribute)
import pdb
pdb.set_trace()
setattr(object, attribute, value)
raise TypeError( """No exc: set %(attribute)r to %(value)r\nExpected: %(err)r %(err)s\nValue:%(result)s"""%(locals()))
if __name__ == "__main__":
    # the wx property types need a live wx application/frame, so the
    # unittest run is embedded inside the app's OnInit
    class TestApp( wxPySimpleApp ):
        def OnInit( self ):
            # NOTE(review): unittest.main() normally exits the process,
            # so frame.Close()/MainLoop() likely never run -- confirm
            frame = wxFrame( None, -1, "Some frame")
            self.SetTopWindow( frame )
            unittest.main()
            frame.Close()
    app = TestApp()
    app.MainLoop()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,978 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_classbyname.py | import unittest
from basicproperty import basic
from basictypes import boundary
from basicproperty.common import ClassByNameProperty
class TestClass( object ):
    """Fixture with ClassByNameProperty in three configurations"""
    # no default: the attribute is unset until assigned
    simple = ClassByNameProperty(
        "simple", "Class stored as string and restored on access"
    )
    # default given as the class object itself
    default = ClassByNameProperty(
        "default", "Access with a default value",
        defaultValue = basic.BasicProperty
    )
    # default given as a dotted-name string -- presumably resolved to
    # the class on access; NOTE(review): not exercised by the test below
    default2 = ClassByNameProperty(
        "default2", "Access with a default value",
        defaultValue = "basicproperty.basic.BasicProperty",
    )
class ClassPropertyTest( unittest.TestCase ):
    """Tests for ClassByNameProperty storage and default behaviour"""
    def testStandard( self ):
        """Test standard get/set/del semantics"""
        fixture = TestClass()
        self.failUnless( not hasattr(fixture, 'simple' ), 'property is defined before setting')
        fixture.simple = basic.BasicProperty
        self.failUnless( hasattr(fixture, 'simple' ), 'class not stored on setattr')
        self.failUnless( fixture.simple is basic.BasicProperty, 'class returned is not the same as given')
    def testDefault( self ):
        """Test default value semantics"""
        fixture = TestClass()
        self.failUnless( hasattr(fixture, 'default' ), """default isn't available""")
        self.failUnless( fixture.default is basic.BasicProperty, 'class returned is not the same as default, was %r'%(fixture.default))
        fixture.default = boundary.Type
        self.failUnless( fixture.default is boundary.Type, 'class returned is not the same as set, was %r'%(fixture.default))
if __name__ == "__main__":
unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,979 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_factories.py | import unittest, time
from basicproperty import common, basic
from basictypes import factory, callable
def test( a = None ):
    # No-op module-level function; wrapped below (class t) to build the
    # callable fixtures introspected by the DATATABLE 'call' cases.
    pass
class t( object ):
    # Minimal class providing every flavour of callable attribute for the
    # DATATABLE 'call' fixtures below.
    def test( self, a = None ):
        # Plain instance method ('self' appears as an argument when unbound).
        pass
    # classmethod: the first parameter slot is curried with the class.
    n = classmethod (test)
    # staticmethod: 'self' remains an explicit, uncurried argument.
    m = staticmethod (test)
    def __call__(self):
        # Instances of t are themselves zero-argument callables.
        pass
u = t()  # shared instance referenced by the DATATABLE 'call' cases below
class TestClass( object ):
    # Fixture client whose properties coerce assigned values to the
    # basictypes.callable data types; driven by DTPropertyTest below.
    arg = basic.BasicProperty(
        "arg", "Single Argument",
        baseType = callable.Argument,
    )
    args = basic.BasicProperty(
        "args", "Multiple Arguments",
        baseType = callable.listof_Arguments,
    )
    call = basic.BasicProperty(
        'call', 'single callable',
        baseType = callable.Callable,
    )
# Coercion fixture table consumed by DTPropertyTest.testCoerce.
# Each row is (attribute-name, testTable, badValues) where:
#   * testTable is a list of (expected, [inputs...]) pairs -- every input
#     must coerce to `expected` when assigned to the named property;
#   * badValues is a list of (value, expected-error) pairs; all entries
#     are currently commented out.
DATATABLE = [ # attribute, testTable, badValues
    ('arg', [
        (
            callable.Argument( name="test" ),
            [
                "test",
                ("test",),
            ],
        ),
        (
            callable.Argument( name="test", default=2 ),
            [
                ("test",2),
                {'name':"test","default":2},
            ],
        ),
        (
            callable.Argument( name="test", default=2, baseType=str ),
            [
                ("test",2,str),
                {'name':"test","default":2,'baseType':str},
            ],
        ),
    ], [ # (bad value, error expected)
##        ("a", ValueError),
##        (None, TypeError),
##        ("am", ValueError),
    ]),
    ('args', [
        (
            [callable.Argument( name="test" )],
            [
                ["test"],
                [("test",)],
            ],
        ),
        (
            [callable.Argument( name="test", default=2 )],
            [
                [("test",2)],
                [{'name':"test","default":2}],
            ],
        ),
        (
            [callable.Argument( name="test", default=2, baseType=str )],
            [
                [("test",2,str)],
                [{'name':"test","default":2,'baseType':str}],
            ],
        ),
    ], [ # (bad value, error expected)
##        ("a", ValueError),
##        (None, TypeError),
##        ("am", ValueError),
    ]),
    ('call', [
        (#regular function
            callable.Callable(
                name='test',
                implementation=test,
                arguments =[
                    callable.Argument(name='a',default=None)
                ]
            ),
            [
                test,
            ],
        ),
        (# unbound instance method, self is an argument
            callable.Callable(
                name='test',
                implementation=t.test,
                arguments =[
                    callable.Argument(name='self'),
                    callable.Argument(name='a',default=None),
                ]
            ),
            [
                t.test,
            ],
        ),
        (#classmethod, self is already curried
            callable.Callable(
                name='test',
                implementation=t.n,
                arguments =[
                    callable.Argument(name='a',default=None),
                ]
            ),
            [
                t.n,
            ],
        ),
        (#staticmethod, self is an argument
            callable.Callable(
                name='test',
                implementation=t.m,
                arguments =[
                    callable.Argument(name='self'),
                    callable.Argument(name='a',default=None),
                ]
            ),
            [
                t.m,
            ],
        ),
        (#instance method, self is not an argument
            callable.Callable(
                name='test',
                implementation=u.n,
                arguments =[
                    callable.Argument(name='a',default=None),
                ]
            ),
            [
                u.n,
            ],
        ),
        (#callable instance, with zero arguments
            callable.Callable(
                name="<unknown>",
                implementation=u,
                arguments =[
                ]
            ),
            [
                u,
            ],
        ),
    ], [ # (bad value, error expected)
##        ("a", ValueError),
##        (None, TypeError),
##        ("am", ValueError),
    ]),
]
class DTPropertyTest( unittest.TestCase ):
def testCoerce( self ):
"""Test the whole DATATABLE set for coercion"""
object = TestClass()
for attribute, table, bad in DATATABLE:
for value, set in table:
for v in set:
setattr( object, attribute, v)
got = getattr( object, attribute )
assert got == value, """COERCE(%s): %r -> %r failed, got %r"""%( attribute, v, value, got)
for value, err in bad:
try:
setattr(object, attribute, value)
except err:
continue
except Exception, e:
raise TypeError( """Wrong exc: set %(attribute)r to %(value)r\nExpected: %(err)s\nGot: %(e)s"""%(locals()))
else:
result = getattr( object, attribute)
import pdb
pdb.set_trace()
setattr(object, attribute, value)
raise TypeError( """No exc: set %(attribute)r to %(value)r\nExpected: %(err)r %(err)s\nValue:%(result)s"""%(locals()))
if __name__ == "__main__":
    # Execute this module's tests when run as a script.
    unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,980 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/standard/dictionaryproperty.py | from basicproperty import basic
from basictypes import boundary, latebind
import weakref
class UncoercedDictionaryProperty (basic.BasicProperty):
    """Dictionary property object, with type checking"""
    dataType = "dict"
    baseType = dict
    def default( self, property, client ):
        # Default value: a fresh, empty instance of the configured base type.
        factory = self.getBaseType()
        return factory()
class DictionaryProperty (UncoercedDictionaryProperty):
    """Dictionary property object, with type checking and basic coercion"""
    def coerce (self, client, value):
        """Coerce the value to/from dictionary value"""
        base = self.getBaseType()
        if not base:
            # No base type registered: pass the value through unchanged.
            return value
        if isinstance( value, base ):
            return value
        if hasattr( base, 'coerce' ):
            # Delegate to the base type's own coercion hook when present.
            return base.coerce( value )
        return base( value )
class WeakKeyDictionaryProperty(DictionaryProperty):
    """Weak-Key dictionary property object"""
    dataType = "dict.weakkey"
    boundaries = [ boundary.Type( weakref.WeakKeyDictionary )]
    baseType = weakref.WeakKeyDictionary
    def coerce (self, client, value):
        """Coerce *value* to a weakref.WeakKeyDictionary.

        Anything that is not already a dict or WeakKeyDictionary is first
        run through DictionaryProperty's coercion; a plain dict result is
        then rewrapped as a WeakKeyDictionary.
        """
        already_mapping = isinstance( value, (dict, weakref.WeakKeyDictionary))
        if not already_mapping:
            value = super (WeakKeyDictionaryProperty, self).coerce (client, value)
        if isinstance( value, dict):
            return weakref.WeakKeyDictionary(value)
        return value
class WeakValueDictionaryProperty(DictionaryProperty):
    """Weak-Value dictionary property object"""
    dataType = "dict.weakvalue"
    boundaries = [ boundary.Type( weakref.WeakValueDictionary )]
    baseType = weakref.WeakValueDictionary
    def coerce (self, client, value):
        """Ensure that value is a weak-value dictionary

        If the value is not already a dictionary or
        WeakValueDictionary, it will go through DictionaryProperty's
        coercion mechanism first; a plain dict result is then wrapped
        as a WeakValueDictionary.
        """
        if not isinstance( value, (dict, weakref.WeakValueDictionary)):
            value = super (WeakValueDictionaryProperty, self).coerce (
                client, value
            )
        if isinstance( value, dict):
            value = weakref.WeakValueDictionary(value)
        return value
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,981 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test.py | #! /usr/bin/env python2.2
"""Test everything in one go"""
import unittest
def moduleSuite( module ):
    """Load every unittest TestCase found in *module* into a TestSuite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromModule( module )
# Per-topic test modules aggregated into the combined suite below.
import test_basic
import test_boundary
import test_classbyname
import test_lists
import test_date
import test_factories
# NOTE(review): 'set' shadows the builtin of the same name; kept because
# buildSuite() and the __main__ block both reference it by this name.
set = [
    test_basic,
    test_boundary,
    test_classbyname,
    test_lists,
    test_date,
    test_factories,
]
def buildSuite( ):
    """Combine the per-module suites into one top-level TestSuite."""
    sub_suites = []
    for module in set:
        sub_suites.append( moduleSuite( module ) )
    return unittest.TestSuite( sub_suites )
if __name__ == "__main__":
    # Prefer running inside a wx application when wxPython is available so
    # the GUI-dependent tests (test_wx) can execute; otherwise fall back
    # to a plain text test run.
    try:
        import wx
    except ImportError:
        unittest.TextTestRunner(verbosity=2).run( buildSuite() )
    else:
        class TestApp( wx.PySimpleApp ):
            def OnInit( self ):
                wx.InitAllImageHandlers()
                # Only import/append the wx tests once a wx app exists.
                import test_wx
                set.append( test_wx )
                frame = wx.Frame( None, -1, "Some frame")
                self.SetTopWindow( frame )
                unittest.TextTestRunner(verbosity=2).run( buildSuite() )
                frame.Close()
                # Returning False stops the app after the tests finish.
                return False
        app = TestApp()
        app.MainLoop()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,982 | eshikvtumane/basicproperty | refs/heads/master | /basicproperty/tests/test_lists.py | import unittest
from basicproperty import common
class TestClass( object ):
    # Fixture: each defaultValue deliberately mixes types; the list
    # properties coerce every element to the declared element type
    # (checked by BasicPropertyTest below).
    strings = common.StringsProperty(
        "strings", "documentation",
        defaultValue = ('this','that','those',u'32',32,None),
    )
    ints = common.IntegersProperty(
        "ints", "documentation",
        defaultValue = ('32',42.0,u'32',32),
    )
    floats = common.FloatsProperty(
        "floats", "documentation",
        defaultValue = ('32',42.4,u'32',32),
    )
    booleans = common.BooleansProperty(
        "booleans", "documentation",
        defaultValue = ('null',0.0,1.0,'2','0',()),
    )
class BasicPropertyTest( unittest.TestCase ):
    """Check list-property coercion of the TestClass default values."""
    def testStandard( self ):
        """Test standard get/set/del semantics"""
        obj = TestClass()
        for attr,expected in [
            ("strings",[u'this',u'that',u'those',u'32',u'32',u'']),
            ('ints',[32,42,32,32]),
            ('floats',[32.0,42.4,32.0,32.0]),
            ('booleans',[0,0,1,1,0,0]),
        ]:
            value = getattr(obj, attr)
            assert value == expected, """Didn't get expected for %s:\ngot: %r\nexp: %r"""%(attr, value, expected)
    def testBad(self):
        """Assigning nested lists must raise TypeError for every property.

        Fixed: the extra arguments to failUnlessRaises must be passed
        individually -- the old form handed setattr one tuple argument,
        which raised TypeError for the wrong reason (arity) and never
        exercised the coercion path.
        NOTE(review): verify strings/booleans coercion genuinely rejects
        nested lists now that the assertion is actually exercised.
        """
        obj = TestClass()
        self.failUnlessRaises( TypeError, setattr, obj, 'ints', [ [32],[45]] )
        self.failUnlessRaises( TypeError, setattr, obj, 'strings', [ [32],[45]] )
        self.failUnlessRaises( TypeError, setattr, obj, 'floats', [ [32],[45]] )
        self.failUnlessRaises( TypeError, setattr, obj, 'booleans', [ [32],[45]] )
if __name__ == "__main__":
    # Run the list-property tests when executed directly.
    unittest.main()
| {"/basicproperty/weak.py": ["/basicproperty/__init__.py"], "/basicproperty/standard/fileproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/contactinfo.py": ["/basicproperty/__init__.py"], "/basicproperty/xmlencoder.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_basic.py": ["/basicproperty/__init__.py"], "/basicproperty/ext/wxtypes.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_classbyname.py": ["/basicproperty/__init__.py", "/basicproperty/common.py"], "/basicproperty/standard/dictionaryproperty.py": ["/basicproperty/__init__.py"], "/basicproperty/tests/test_lists.py": ["/basicproperty/__init__.py"]} |
75,987 | esKemp/Cloud_QAOA | refs/heads/main | /ansatz/qaoa_plus.py | from qiskit import QuantumCircuit, Aer, execute
from utils.helper_funcs import *
from utils.graph_funcs import *
import scipy
import numpy as np
def construct_qaoa_plus(P, G, params, barriers=False, measure=False):
    """Build a depth-P QAOA+ circuit for graph G.

    params interleaves angles as [gamma_0, beta_0, gamma_1, beta_1, ...];
    barriers inserts a barrier between the phase separator and mixer of
    each layer, and measure appends measurement of all qubits.
    """
    assert (len(params) == 2*P), "Number of parameters should be 2P"

    num_qubits = len(G.nodes())
    circ = QuantumCircuit(num_qubits, name='q')

    # Uniform superposition initial state
    circ.h(range(num_qubits))

    gammas = params[0::2]
    betas = params[1::2]
    for gamma, beta in zip(gammas, betas):
        # Phase separator: ZZ interaction per edge via RZ + CX conjugation
        for q_i, q_j in G.edges():
            circ.rz(gamma / 2, [q_i, q_j])
            circ.cx(q_i, q_j)
            circ.rz(-1 * gamma / 2, q_j)
            circ.cx(q_i, q_j)

        if barriers:
            circ.barrier()

        # Mixer: single-qubit RX rotation on every qubit
        for qubit in range(num_qubits):
            circ.rx(-2 * beta, qubit)

    if measure:
        circ.measure_all()

    return circ
def expectation_value(counts, G, Lambda):
    """Estimate <H> = sum_b p(b) * [ |b| - Lambda * (#violated edges of b) ].

    counts maps little-endian bitstrings to shot counts; Lambda is the
    penalty weight applied per edge whose endpoints are both selected.
    """
    total_shots = sum(counts.values())
    energy = 0
    for bitstr, count in counts.items():
        bits = list(reversed(bitstr))
        contribution = hamming_weight(bitstr)
        # Penalize every edge with both endpoints set to '1'
        for q_i, q_j in G.edges():
            if bits[q_i] == '1' and bits[q_j] == '1':
                contribution += -1 * Lambda
        energy += count * contribution / total_shots
    return energy
def solve_mis(P, G, Lambda):
    """Optimize the depth-P QAOA+ angles for MIS on graph G.

    Minimizes the negated penalized objective (penalty weight Lambda)
    with COBYLA from one random starting point; returns the
    scipy.optimize result object.
    """
    backend = Aer.get_backend('qasm_simulator')

    def f(params):
        # Objective: negated expectation from 8192 shots, so that the
        # minimizer maximizes the true expectation value.
        circ = construct_qaoa_plus(P, G, params, measure=True)
        result = execute(circ, backend=backend, shots=8192).result()
        counts = result.get_counts(circ)
        return -1 * expectation_value(counts, G, Lambda)

    # Random initial angles in [0, 2*pi) for all 2P parameters
    init_params = np.random.uniform(low=0.0, high=2*np.pi, size=2*P)
    out = scipy.optimize.minimize(f, x0=init_params, method='COBYLA')
    return out
def get_ranked_probs(P, G, params, shots=8192):
    """Return (bitstring, probability, is-independent-set) triples,
    sorted by probability in descending order."""
    circ = construct_qaoa_plus(P, G, params=params, measure=True)
    backend = Aer.get_backend('qasm_simulator')
    counts = execute(circ, backend=backend, shots=shots).result().get_counts(circ)

    probs = []
    for bitstr, count in counts.items():
        probs.append((bitstr, count / shots, is_indset(bitstr, G)))
    probs.sort(key=lambda entry: entry[1], reverse=True)

    return probs
def get_approximation_ratio(out, P, G, shots=8192):
    """Measure the optimized circuit and return the MIS approximation ratio.

    out is an optimizer result whose 'x' entry holds the angles. The
    ratio is the shot-weighted average independent-set size divided by
    the optimal MIS size found by brute force.
    Fixed: removed the dead commented-out duplicate of the numerator sum.
    """
    opt_mis = brute_force_search(G)[1]
    circ = construct_qaoa_plus(P, G, params=out['x'], measure=True)
    result = execute(circ, backend=Aer.get_backend('qasm_simulator'), shots=shots).result()
    counts = result.get_counts(circ)

    # Approximation ratio is computed using ONLY valid independent sets
    # E(gamma, beta) = SUM_bitstrs { (bitstr_counts / total_shots) * hamming_weight(bitstr) } / opt_mis
    numerator = 0
    for bitstr, count in counts.items():
        if is_indset(bitstr, G):
            numerator += count * hamming_weight(bitstr) / shots

    ratio = numerator / opt_mis
    return ratio
def top_strs(counts, G, top=5):
    """Print the `top` most probable bitstrings with their MIS statistics.

    Fixed: iterate over at most len(probs) entries, so result sets with
    fewer than `top` distinct bitstrings no longer raise IndexError.
    """
    total_shots = sum(counts.values())
    probs = [(bitstr, counts[bitstr] / total_shots) for bitstr in counts.keys()]
    probs = sorted(probs, key=lambda p: p[1], reverse=True)
    opt_mis = brute_force_search(G)[1]
    for bitstr, prob in probs[:top]:
        ratio = hamming_weight(bitstr) * prob / opt_mis
        print('{} ({}) -> {:.4f}%, Ratio = {:.4f}, Is MIS? {}'.format(bitstr, hamming_weight(bitstr),
              prob * 100, ratio, is_indset(bitstr, G)))
| {"/ansatz/qaoa_plus.py": ["/utils/helper_funcs.py", "/utils/graph_funcs.py"], "/ansatz/dqv_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/utils/helper_funcs.py": ["/utils/graph_funcs.py"], "/ansatz/qaoa.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/ansatz/qls_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"]} |
75,988 | esKemp/Cloud_QAOA | refs/heads/main | /ansatz/dqv_ansatz.py | """
03/02/2021 - Teague Tomesh
The functions in the file are used to generate the Dynamic
Quantum Variational Ansatz (DQVA)
"""
from qiskit import QuantumCircuit, AncillaRegister
from qiskit.circuit import ControlledGate
from qiskit.circuit.library.standard_gates import XGate
from qiskit.transpiler.passes import Unroller
from qiskit.transpiler import PassManager
from utils.graph_funcs import *
from utils.helper_funcs import *
def apply_mixer(circ, alpha, init_state, G, barriers,
                decompose_toffoli, mixer_order, verbose=0):
    """
    Apply the mixer unitary U_M(alpha) to circ

    Fixed: compare against None with ``is None`` rather than ``== None``.

    Input
    -----
    circ : QuantumCircuit
        The current ansatz
    alpha : list[float]
        The angle values of the parametrized gates
    init_state : str
        The current initial state for the ansatz, bits which are "1" are hit
        with an X-gate at the beginning of the circuit and their partial mixers
        are turned off. Bitstring is little-endian ordered.
    G : NetworkX Graph
        The graph we want to solve MIS on
    barriers : int
        An integer from 0 to 2, with 0 having no barriers and 2 having the most
    decompose_toffoli : int
        An integer from 0 to 2, selecting 0 with apply custom open-controlled
        toffoli gates to the ansatz. 1 will apply equivalent gates but using
        instead X-gates and regular-controlled toffolis. 2 unrolls these gates
        to basis gates (but not relevant to this function).
        WARNING Qiskit cannot simulate circuits with decompose_toffoli=0
    mixer_order : list[int]
        The order that the partial mixers should be applied in. For a list
        such as [1,2,0,3] qubit 1's mixer is applied, then qubit 2's, and so on
    verbose : int
        0 is least verbose, 2 is most
    """
    # Apply partial mixers V_i(alpha_i)
    if mixer_order is None:
        mixer_order = list(G.nodes)
    if verbose > 0:
        print('Mixer order:', mixer_order)

    # Pad the given alpha parameters to account for the zeroed angles
    pad_alpha = [None]*len(init_state)
    next_alpha = 0
    for qubit in mixer_order:
        bit = list(reversed(init_state))[qubit]
        if bit == '1' or next_alpha >= len(alpha):
            continue
        else:
            pad_alpha[qubit] = alpha[next_alpha]
            next_alpha += 1

    if verbose > 0:
        print('init_state: {}, alpha: {}, pad_alpha: {}'.format(init_state,
                                                    alpha, pad_alpha))

    for qubit in mixer_order:
        if pad_alpha[qubit] is None or not G.has_node(qubit):
            # Turn off mixers for qubits which are already 1
            continue

        neighbors = list(G.neighbors(qubit))
        anc_idx = 0
        if verbose > 0:
            print('qubit:', qubit, 'num_qubits =', len(circ.qubits),
                  'neighbors:', neighbors)

        # construct a multi-controlled Toffoli gate, with open-controls on q's neighbors
        # Qiskit has bugs when attempting to simulate custom controlled gates.
        # Instead, wrap a regular toffoli with X-gates
        ctrl_qubits = [circ.qubits[i] for i in neighbors]
        if decompose_toffoli > 0:
            # apply the multi-controlled Toffoli, targetting the ancilla qubit
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            mc_toffoli = ControlledGate('mc_toffoli', len(neighbors)+1, [],
                                        num_ctrl_qubits=len(neighbors),
                                        ctrl_state='0'*len(neighbors),
                                        base_gate=XGate())
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])

        # apply an X rotation controlled by the state of the ancilla qubit
        circ.crx(2*pad_alpha[qubit], circ.ancillas[anc_idx], circ.qubits[qubit])

        # apply the same multi-controlled Toffoli to uncompute the ancilla
        if decompose_toffoli > 0:
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])

        if barriers > 1:
            circ.barrier()
def apply_phase_separator(circ, gamma, G):
    """Apply the phase-separator unitary: an RZ(2*gamma) on each graph node."""
    angle = 2 * gamma
    for node in G.nodes:
        circ.rz(angle, node)
def gen_dqva(G, P=1, params=None, init_state=None, barriers=1, decompose_toffoli=1,
             mixer_order=None, verbose=0):
    """Generate the Dynamic Quantum Variational Ansatz (DQVA) circuit for G.

    params holds P chunks of (num_nonzero alphas + 1 gamma); leftover
    parameters become an extra trailing mixer layer. init_state is a
    little-endian bitstring used to warm-start the ansatz.
    Fixed: replaced the mutable default argument ``params=[]`` with None.
    """
    # Avoid the shared-mutable-default pitfall; behavior is unchanged.
    if params is None:
        params = []

    nq = len(G.nodes)

    # Step 1: Jump Start
    # Run an efficient classical approximation algorithm to warm-start the optimization
    if init_state is None:
        init_state = '0'*nq

    # Step 2: Mixer Initialization
    # Select any one of the initial strings and apply two mixing unitaries separated by the phase separator unitary
    dqva_circ = QuantumCircuit(nq, name='q')

    # Add an ancilla qubit for implementing the mixer unitaries
    anc_reg = AncillaRegister(1, 'anc')
    dqva_circ.add_register(anc_reg)

    #print('Init state:', init_state)
    for qb, bit in enumerate(reversed(init_state)):
        if bit == '1':
            dqva_circ.x(qb)
    if barriers > 0:
        dqva_circ.barrier()

    # parse the variational parameters
    # The dqva ansatz dynamically turns off partial mixers for qubits in |1>
    # and adds extra mixers to the end of the circuit
    num_nonzero = nq - hamming_weight(init_state)
    assert (len(params) == (nq + 1) * P), "Incorrect number of parameters!"
    alpha_list = []
    gamma_list = []
    last_idx = 0
    for p in range(P):
        chunk = num_nonzero + 1
        cur_section = params[p*chunk:(p+1)*chunk]
        alpha_list.append(cur_section[:-1])
        gamma_list.append(cur_section[-1])
        last_idx = (p+1)*chunk

    # Add the leftover parameters as extra mixers
    alpha_list.append(params[last_idx:])
    if verbose > 0:
        for i in range(len(alpha_list)):
            print('alpha_{}: {}'.format(i, alpha_list[i]))
            if i < len(gamma_list):
                print('gamma_{}: {}'.format(i, gamma_list[i]))

    # Construct the dqva ansatz
    #for alphas, gamma in zip(alpha_list, gamma_list):
    for i in range(len(alpha_list)):
        alphas = alpha_list[i]
        apply_mixer(dqva_circ, alphas, init_state, G, barriers,
                    decompose_toffoli, mixer_order, verbose=verbose)
        if barriers > 0:
            dqva_circ.barrier()

        if i < len(gamma_list):
            gamma = gamma_list[i]
            apply_phase_separator(dqva_circ, gamma, G)
            if barriers > 0:
                dqva_circ.barrier()

    if decompose_toffoli > 1:
        #basis_gates = ['x', 'cx', 'barrier', 'crx', 'tdg', 't', 'rz', 'h']
        basis_gates = ['x', 'h', 'cx', 'crx', 'rz', 't', 'tdg', 'u1']
        pass_ = Unroller(basis_gates)
        pm = PassManager(pass_)
        dqva_circ = pm.run(dqva_circ)

    return dqva_circ
| {"/ansatz/qaoa_plus.py": ["/utils/helper_funcs.py", "/utils/graph_funcs.py"], "/ansatz/dqv_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/utils/helper_funcs.py": ["/utils/graph_funcs.py"], "/ansatz/qaoa.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/ansatz/qls_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"]} |
75,989 | esKemp/Cloud_QAOA | refs/heads/main | /utils/graph_funcs.py | import networkx as nx
def graph_from_file(fn):
    """Build a graph from a file whose first line reads '(a, b), (c, d), ...'."""
    with open(fn, 'r') as f:
        tokens = f.readline().split(',')
    edges = []
    for token in tokens:
        if '(' in token:
            # Opening half of a pair: start a new edge.
            current = [int(token.strip('( '))]
        elif ')' in token:
            # Closing half: complete the edge.
            current.append(int(token.strip(') ')))
            edges.append(tuple(current))
    G = nx.Graph()
    G.add_edges_from(edges)
    return G
def square_graph():
    """Return the 4-cycle graph 0-1-2-3-0.

    Fixed: removed a leftover debug print of the edge list.
    """
    G = nx.Graph()
    G.add_nodes_from(range(4))
    cycle = [0, 1, 2, 3, 0]
    G.add_edges_from(zip(cycle, cycle[1:]))
    return G
def bowtie_graph():
    """Return two 4-cycles (0-1-2-3-0 and 3-4-5-6-3) sharing node 3.

    Fixed: removed leftover debug prints of the edge lists.
    """
    G = nx.Graph()
    G.add_nodes_from(range(7))
    for cycle in ([0, 1, 2, 3, 0], [3, 4, 5, 6, 3]):
        G.add_edges_from(zip(cycle, cycle[1:]))
    return G
def test_graph(n, p):
    """Join two random Erdos-Renyi G(n, p) graphs with a single bridge edge."""
    G1 = nx.erdos_renyi_graph(n, p)
    G2 = nx.erdos_renyi_graph(n, p)

    # Combined graph holding both subgraphs
    G = nx.Graph()

    # Copy G1 verbatim
    G.add_nodes_from(G1.nodes)
    G.add_edges_from(G1.edges)

    # Copy G2 with its node labels shifted past G1's
    offset = len(G1.nodes)
    G.add_nodes_from(v + offset for v in G2.nodes)
    G.add_edges_from((a + offset, b + offset) for a, b in G2.edges)

    # Bridge the last node of G1 to the (shifted) first node of G2
    G.add_edge(list(G1.nodes)[-1], list(G2.nodes)[0] + offset)
    return G
def ring_graph(n):
    """Return the n-node cycle graph 0-1-...-(n-1)-0."""
    graph = nx.Graph()
    graph.add_nodes_from(list(range(n)))
    ring_edges = [(i, i + 1) for i in range(n - 1)]
    ring_edges.append((n - 1, 0))
    graph.add_edges_from(ring_edges)
    return graph
def view_partition(partition, G):
    """Draw G with partition[0] in gold, the rest in lightblue, and edges
    crossing between partition[0] and partition[1] highlighted in red."""
    node_colors = ['gold' if node in partition[0] else 'lightblue'
                   for node in G.nodes]

    edge_colors = []
    for u, v in G.edges:
        crosses = ((u in partition[0] and v in partition[1]) or
                   (u in partition[1] and v in partition[0]))
        edge_colors.append('red' if crosses else 'black')

    nx.draw_spring(G, with_labels=True, node_color=node_colors, edge_color=edge_colors)
def get_subgraphs(G, partition):
    """Split G into one node-induced subgraph per partition block and
    collect the edges cut by the partition.

    Returns (subgraphs, cut_edges) where cut_edges lists each G edge
    whose first endpoint lies in a block but whose second does not.
    """
    subgraphs = []
    cut_edges = []
    for block in partition:
        subG = nx.Graph()
        subG.add_nodes_from(block)
        for v1, v2 in G.edges:
            if v1 in block:
                if v2 in block:
                    subG.add_edge(v1, v2)
                else:
                    cut_edges.append((v1, v2))
        subgraphs.append(subG)
    return subgraphs, cut_edges
def is_indset(bitstr, G):
    """Return True iff the nodes set to '1' in *bitstr* form an
    independent set of G (no two selected nodes share an edge).

    *bitstr* is little-endian: its last character corresponds to node 0.
    Fixed: removed the unused local ``nodes = list(G.nodes)``.
    """
    selected = []
    for idx, bit in enumerate(reversed(bitstr)):
        if bit != '1':
            continue
        neighbors = list(G.neighbors(idx))
        # Reject as soon as a previously selected node is adjacent.
        for node in selected:
            if node in neighbors:
                return False
        selected.append(idx)
    return True
| {"/ansatz/qaoa_plus.py": ["/utils/helper_funcs.py", "/utils/graph_funcs.py"], "/ansatz/dqv_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/utils/helper_funcs.py": ["/utils/graph_funcs.py"], "/ansatz/qaoa.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/ansatz/qls_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"]} |
75,990 | esKemp/Cloud_QAOA | refs/heads/main | /utils/helper_funcs.py | from utils.graph_funcs import is_indset
def strip_ancillas(counts, circ):
    """Drop the ancilla bits from every measured bitstring key.

    Assumes the ancilla bits occupy the leading characters of each key
    (as when ancillas are the highest-indexed qubits) -- TODO confirm
    against the measurement layout used by the callers.
    Fixed: counts whose keys collapse to the same data bitstring are now
    summed instead of silently overwriting each other.
    """
    num_anc = len(circ.ancillas)
    stripped = {}
    for key, count in counts.items():
        data_key = key[num_anc:]
        stripped[data_key] = stripped.get(data_key, 0) + count
    return stripped
def hamming_weight(bitstr):
    """Return the number of '1' characters in *bitstr*."""
    return bitstr.count('1')
def gen_binary_str(n, bitstr, ret):
    """Append all extensions of *bitstr* by n binary digits to *ret*
    (lexicographic order, '0' branch first) and return *ret*."""
    if n <= 0:
        ret.append(bitstr)
        return ret
    gen_binary_str(n - 1, bitstr + '0', ret)
    gen_binary_str(n - 1, bitstr + '1', ret)
    return ret
def brute_force_search(G, lim=None):
    """Exhaustively find all maximum independent sets of G.

    Returns (best_bitstrings, best_size). When *lim* is given, only
    bitstrings of Hamming weight >= lim are considered.
    """
    num_nodes = len((list(G.nodes)))
    candidates = gen_binary_str(num_nodes, '', [])
    if lim is not None:
        candidates = [b for b in candidates if hamming_weight(b) >= lim]

    # First pass: find the largest independent-set size.
    best_size = 0
    for candidate in candidates:
        weight = hamming_weight(candidate)
        if weight > best_size and is_indset(candidate, G):
            best_size = weight

    # Second pass: collect every independent set of that size.
    winners = [b for b in candidates
               if hamming_weight(b) == best_size and is_indset(b, G)]
    return winners, best_size
| {"/ansatz/qaoa_plus.py": ["/utils/helper_funcs.py", "/utils/graph_funcs.py"], "/ansatz/dqv_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/utils/helper_funcs.py": ["/utils/graph_funcs.py"], "/ansatz/qaoa.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/ansatz/qls_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"]} |
75,991 | esKemp/Cloud_QAOA | refs/heads/main | /ansatz/qaoa.py | import numpy as np
import scipy
from qiskit import QuantumCircuit, AncillaRegister, Aer, execute
from qiskit.circuit import ControlledGate
from qiskit.circuit.library.standard_gates import XGate
from qiskit.transpiler.passes import Unroller
from qiskit.transpiler import PassManager
from utils.graph_funcs import *
from utils.helper_funcs import *
def apply_mixer(circ, G, beta, anc_idx, barriers, decompose_toffoli,
                mixer_order, verbose=0):
    """Append the mixer unitary U_M(beta) to *circ*.

    For each qubit (in *mixer_order*, defaulting to G's node order) an
    X rotation of angle 2*beta is applied, controlled -- via the ancilla
    at index *anc_idx* -- on all of the qubit's graph neighbors being 0.
    decompose_toffoli > 0 realizes the open-controlled Toffoli with
    X-gate wrapping; 0 appends a custom ControlledGate instead.
    barriers > 1 inserts a barrier after each partial mixer.
    """
    # apply mixers U_M(beta)
    # Randomly permute the order of the mixing unitaries
    if mixer_order is None:
        mixer_order = list(G.nodes)
    if verbose > 0:
        print('Mixer order:', mixer_order)

    for qubit in mixer_order:
        neighbors = list(G.neighbors(qubit))
        if verbose > 0:
            print('qubit:', qubit, 'num_qubits =', len(circ.qubits),
                  'neighbors:', neighbors)

        # Construct a multi-controlled Toffoli gate, with open-controls on q's neighbors
        # Qiskit has bugs when attempting to simulate custom controlled gates.
        # Instead, wrap a regular toffoli with X-gates
        # Apply the multi-controlled Toffoli, targetting the ancilla qubit
        ctrl_qubits = [circ.qubits[i] for i in neighbors]
        if decompose_toffoli > 0:
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            mc_toffoli = ControlledGate('mc_toffoli', len(neighbors)+1, [],
                                        num_ctrl_qubits=len(neighbors),
                                        ctrl_state='0'*len(neighbors),
                                        base_gate=XGate())
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])

        # apply an X rotation controlled by the state of the ancilla qubit
        circ.crx(2*beta, circ.ancillas[anc_idx], circ.qubits[qubit])

        # apply the same multi-controlled Toffoli to uncompute the ancilla
        if decompose_toffoli > 0:
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])

        if barriers > 1:
            circ.barrier()
def apply_phase_separator(circ, gamma, G):
    """Apply an RZ(2*gamma) phase rotation to every node of G."""
    rotation = 2 * gamma
    for node in G.nodes:
        circ.rz(rotation, node)
def gen_qaoa(G, P, params=None, init_state=None, barriers=1, decompose_toffoli=1,
             mixer_order=None, verbose=0, measure=False):
    """Generate a depth-P constrained-mixer QAOA circuit for graph G.

    params interleaves angles as [beta_0, gamma_0, beta_1, gamma_1, ...].
    init_state is a little-endian bitstring, or the special value 'W' to
    prepare the |W> state on the data qubits.
    Fixed: replaced the mutable default argument ``params=[]`` with None.
    """
    # Avoid the shared-mutable-default pitfall; behavior is unchanged.
    if params is None:
        params = []

    nq = len(G.nodes)

    # Step 1: Jump Start
    if init_state is None:
        # for now, select the all zero state
        init_state = '0'*nq

    # Step 2: Mixer Initialization
    qaoa_circ = QuantumCircuit(nq, name='q')

    # Add an ancilla qubit for implementing the mixer unitaries
    anc_reg = AncillaRegister(1, 'anc')
    qaoa_circ.add_register(anc_reg)

    if init_state == 'W':
        # Prepare the |W> initial state
        W_vector = np.zeros(2**nq)
        for i in range(len(W_vector)):
            bitstr = '{:0{}b}'.format(i, nq)
            if hamming_weight(bitstr) == 1:
                W_vector[i] = 1 / np.sqrt(nq)
        qaoa_circ.initialize(W_vector, qaoa_circ.qubits[:-1])
    else:
        for qb, bit in enumerate(reversed(init_state)):
            if bit == '1':
                qaoa_circ.x(qb)
    if barriers > 0:
        qaoa_circ.barrier()

    # parse the variational parameters
    assert (len(params) == 2*P),"Incorrect number of parameters!"
    betas = [a for i, a in enumerate(params) if i % 2 == 0]
    gammas = [a for i, a in enumerate(params) if i % 2 == 1]
    if verbose > 0:
        print('betas:', betas)
        print('gammas:', gammas)

    for beta, gamma in zip(betas, gammas):
        anc_idx = 0
        apply_mixer(qaoa_circ, G, beta, anc_idx, barriers, decompose_toffoli,
                    mixer_order, verbose=verbose)
        if barriers > 0:
            qaoa_circ.barrier()

        apply_phase_separator(qaoa_circ, gamma, G)
        if barriers > 0:
            qaoa_circ.barrier()

    if decompose_toffoli > 1:
        #basis_gates = ['x', 'cx', 'barrier', 'crx', 'tdg', 't', 'rz', 'h']
        basis_gates = ['x', 'h', 'cx', 'crx', 'rz', 't', 'tdg', 'u1']
        pass_ = Unroller(basis_gates)
        pm = PassManager(pass_)
        qaoa_circ = pm.run(qaoa_circ)

    if measure:
        qaoa_circ.measure_all()

    return qaoa_circ
def expectation_value(counts, G, Lambda):
    """Return the shot-averaged penalized objective:
    sum_b p(b) * [ hamming_weight(b) - Lambda * (#edges violated by b) ]."""
    total_shots = sum(counts.values())
    energy = 0
    for bitstr, count in counts.items():
        rev_bits = list(reversed(bitstr))
        term = hamming_weight(bitstr)
        # Subtract the penalty for each edge with both endpoints selected.
        for q_i, q_j in G.edges():
            if rev_bits[q_i] == '1' and rev_bits[q_j] == '1':
                term += -1 * Lambda
        energy += count * term / total_shots
    return energy
def solve_mis(P, G, Lambda, if_noise, num_rounds, optimizer = 'Nelder-Mead', coupling_map=None, basis_gates=None, noise_model=None):
    """Optimize the depth-P QAOA angles for MIS over *num_rounds* random restarts.

    Each round minimizes the negated penalized objective (penalty weight
    Lambda, 8192 shots per evaluation) with the given scipy optimizer;
    the round with the lowest final objective wins. When if_noise is
    truthy, simulation uses the supplied coupling map / basis gates /
    noise model. Returns the best scipy.optimize result (None when
    num_rounds == 0; previously a throwaway dict was returned).
    Fixed: renamed the loop variable shadowing the ``iter`` builtin,
    dropped the dead pre-initialized best_out dict and commented-out
    code, and hoisted the invariant backend lookup out of the loop.
    """
    backend = Aer.get_backend('qasm_simulator')

    def f(params):
        # Objective: negated expectation so the minimizer maximizes it.
        circ = gen_qaoa(G, P, params, measure=True)
        if if_noise:
            result = execute(circ, backend=backend, shots=8192,
                             coupling_map=coupling_map, basis_gates=basis_gates,
                             noise_model=noise_model).result()
        else:
            result = execute(circ, backend=backend, shots=8192).result()
        counts = result.get_counts(circ)
        return -1 * expectation_value(counts, G, Lambda)

    best_out = None
    for _ in range(num_rounds):
        init_params = np.random.uniform(low=0.0, high=2*np.pi, size=2*P)
        out = scipy.optimize.minimize(f, x0=init_params, method=optimizer)
        if best_out is None or out['fun'] < best_out['fun']:
            best_out = out

    return best_out
75,992 | esKemp/Cloud_QAOA | refs/heads/main | /ansatz/qls_ansatz.py | """
01/30/2021 - Teague Tomesh
The functions in the file are used to generate the Quantum
Local Search Ansatz (QLSA)
"""
from qiskit import QuantumCircuit, AncillaRegister
from qiskit.circuit import ControlledGate
from qiskit.circuit.library.standard_gates import XGate
from qiskit.transpiler.passes import Unroller
from qiskit.transpiler import PassManager
from utils.graph_funcs import *
from utils.helper_funcs import *
def apply_mixer(circ, alpha, init_state, G, barriers,
                decompose_toffoli, mixer_order, verbose=0):
    """
    Apply the mixer unitary U_M(alpha) to circ

    Input
    -----
    circ : QuantumCircuit
        The current ansatz
    alpha : list[float]
        The angle values of the parametrized gates
    init_state : str
        The current initial state for the ansatz, bits which are "1" are hit
        with an X-gate at the beginning of the circuit and their partial
        mixers are turned off. Bitstring is little-endian ordered.
    G : NetworkX Graph
        The graph we want to solve MIS on
    barriers : int
        An integer from 0 to 2, with 0 having no barriers and 2 having the most
    decompose_toffoli : int
        An integer from 0 to 2, selecting 0 with apply custom open-controlled
        toffoli gates to the ansatz. 1 will apply equivalent gates but using
        instead X-gates and regular-controlled toffolis. 2 unrolls these gates
        to basis gates (but not relevant to this function).
        WARNING Qiskit cannot simulate circuits with decompose_toffoli=0
    mixer_order : list[int]
        The order that the partial mixers should be applied in. For a list
        such as [1,2,0,3] qubit 1's mixer is applied, then qubit 2's, and so on
    verbose : int
        0 is least verbose, 2 is most
    """
    # apply partial mixers V_i(alpha_i)
    if mixer_order is None:
        mixer_order = list(G.nodes)
    if verbose > 0:
        print('Mixer order:', mixer_order)

    # Pad the given alpha parameters to account for the zeroed angles:
    # qubits whose init bit is "1" (or which exceed the supplied angles)
    # get no partial mixer and stay None in pad_alpha.
    pad_alpha = [None]*len(init_state)
    next_alpha = 0
    # init_state is little-endian; reverse once (hoisted out of the loop)
    # so rev_state[qubit] is that qubit's bit.
    rev_state = init_state[::-1]
    for qubit in mixer_order:
        bit = rev_state[qubit]
        if bit == '1' or next_alpha >= len(alpha):
            continue
        else:
            pad_alpha[qubit] = alpha[next_alpha]
            next_alpha += 1
    if verbose > 0:
        print('init_state: {}, alpha: {}, pad_alpha: {}'.format(init_state,
                                                                alpha, pad_alpha))

    # A single ancilla (anc_idx stays 0) is reused for every partial mixer
    # because each multi-controlled Toffoli is uncomputed before the next.
    anc_idx = 0
    for qubit in mixer_order:
        # Bug fix: use identity comparison with None (was `== None`).
        if pad_alpha[qubit] is None or not G.has_node(qubit):
            # Turn off mixers for qubits which are already 1
            continue
        neighbors = list(G.neighbors(qubit))
        if verbose > 0:
            print('qubit:', qubit, 'num_qubits =', len(circ.qubits),
                  'neighbors:', neighbors)

        # construct a multi-controlled Toffoli gate, with open-controls on q's neighbors
        # Qiskit has bugs when attempting to simulate custom controlled gates.
        # Instead, wrap a regular toffoli with X-gates
        ctrl_qubits = [circ.qubits[i] for i in neighbors]
        if decompose_toffoli > 0:
            # apply the multi-controlled Toffoli, targetting the ancilla qubit
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            mc_toffoli = ControlledGate('mc_toffoli', len(neighbors)+1, [], num_ctrl_qubits=len(neighbors),
                                        ctrl_state='0'*len(neighbors), base_gate=XGate())
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])

        # apply an X rotation controlled by the state of the ancilla qubit
        circ.crx(2*pad_alpha[qubit], circ.ancillas[anc_idx], circ.qubits[qubit])

        # apply the same multi-controlled Toffoli to uncompute the ancilla
        if decompose_toffoli > 0:
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
            circ.mcx(ctrl_qubits, circ.ancillas[anc_idx])
            for ctrl in ctrl_qubits:
                circ.x(ctrl)
        else:
            # mc_toffoli was bound in the matching else-branch above;
            # decompose_toffoli is constant across iterations, so it exists here.
            circ.append(mc_toffoli, ctrl_qubits + [circ.ancillas[anc_idx]])
        if barriers > 1:
            circ.barrier()
def apply_phase_separator(circ, gamma, G):
    """Apply the driver unitary: an RZ(2*gamma) rotation on the qubit of
    every node in G."""
    angle = 2 * gamma
    for node in G.nodes:
        circ.rz(angle, node)
def gen_qlsa(G, P=1, params=None, init_state=None, barriers=1, decompose_toffoli=1,
             mixer_order=None, verbose=0, param_lim=None):
    """
    Generate and return the Quantum Local Search Ansatz (QLSA)

    Input
    -----
    G : NetworkX Graph
        The graph we want to solve MIS on
    P : int
        Controls the number of unitary layers to apply. If param_lim is an
        integer this P value will be ignored
    params : list[float] or None
        The angles of the parameterized gates (None is treated as empty)
    init_state : str
        The initial state
    barriers : int
        Integer from 0 (no barriers) to 2 (most barriers)
    decompose_toffoli : int
        An integer from 0 to 2, selecting 0 with apply custom open-controlled
        toffoli gates to the ansatz. 1 will apply equivalent gates but using
        instead X-gates and regular-controlled toffolis. 2 unrolls these gates
        to basis gates (but not relevant to this function).
        WARNING Qiskit cannot simulate circuits with decompose_toffoli=0
    mixer_order : list[int]
        The order that the partial mixers should be applied in. For a list
        such as [1,2,0,3] qubit 1's mixer is applied, then qubit 2's, and so on
    verbose : int
        0 is least verbose, 2 is most
    param_lim : int
        Controls the number of parameters allowed in the qls. If this is set,
        then the length of params should match and the P value will also be
        ignored

    Output
    ------
    QuantumCircuit
    """
    # Bug fix: the original signature used a mutable default (params=[]),
    # which is shared across calls; normalize a None default here instead.
    if params is None:
        params = []
    nq = len(G.nodes)

    # Step 1: Jump Start
    # Run an efficient classical approximation algorithm to warm-start the optimization
    if init_state is None:
        init_state = '0'*nq

    # Step 2: Mixer Initialization
    qls_circ = QuantumCircuit(nq, name='q')
    # Add an ancilla qubit for implementing the mixer unitaries
    anc_reg = AncillaRegister(1, 'anc')
    qls_circ.add_register(anc_reg)
    #print('Init state:', init_state)
    for qb, bit in enumerate(reversed(init_state)):
        if bit == '1':
            qls_circ.x(qb)
    if barriers > 0:
        qls_circ.barrier()

    # check the number of variational parameters
    num_nonzero = nq - hamming_weight(init_state)
    if param_lim is None:
        num_params = min(P * (nq + 1), (P+1) * (num_nonzero + 1))
    else:
        num_params = param_lim
    assert (len(params) == num_params), "Incorrect number of parameters!"

    # parse the given parameter list into alphas (for the mixers) and
    # gammas (for the drivers): one leading gamma, then alternating
    # num_nonzero-sized alpha chunks and single gammas, with a possibly
    # truncated final alpha chunk.
    alpha_list = []
    gamma_list = []
    param_index = 0
    while param_index < len(params):
        if param_index == 0:
            gamma_list.append(params[param_index])
            param_index += 1
            need_new_driver = False
        elif num_params - param_index >= num_nonzero:
            alpha_list.append(params[param_index:param_index+num_nonzero])
            param_index += num_nonzero
            if param_index < len(params) and need_new_driver:
                gamma_list.append(params[param_index])
                param_index += 1
            need_new_driver = True
        elif num_params - param_index < num_nonzero:
            # fewer than num_nonzero angles remain: take them all
            alpha_list.append(params[param_index:])
            param_index += len(params[param_index:])
    if verbose > 0:
        for i in range(len(alpha_list)):
            print('alpha_{}: {}'.format(i, alpha_list[i]))
            if i < len(gamma_list):
                print('gamma_{}: {}'.format(i, gamma_list[i]))

    # Apply alternating layers of mixer and driver unitaries
    for i in range(len(alpha_list)):
        alphas = alpha_list[i]
        apply_mixer(qls_circ, alphas, init_state, G, barriers,
                    decompose_toffoli, mixer_order, verbose=verbose)
        if barriers == 1:
            qls_circ.barrier()
        if i < len(gamma_list):
            gamma = gamma_list[i]
            apply_phase_separator(qls_circ, gamma, G)
            if barriers == 1:
                qls_circ.barrier()

    if decompose_toffoli > 1:
        basis_gates = ['x', 'h', 'cx', 'crx', 'rz', 't', 'tdg', 'u1']
        pass_ = Unroller(basis_gates)
        pm = PassManager(pass_)
        qls_circ = pm.run(qls_circ)

    return qls_circ
| {"/ansatz/qaoa_plus.py": ["/utils/helper_funcs.py", "/utils/graph_funcs.py"], "/ansatz/dqv_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/utils/helper_funcs.py": ["/utils/graph_funcs.py"], "/ansatz/qaoa.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"], "/ansatz/qls_ansatz.py": ["/utils/graph_funcs.py", "/utils/helper_funcs.py"]} |
75,993 | esKemp/Cloud_QAOA | refs/heads/main | /QAO_Ansatz_HyperParam_Code.py | import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import csv
import qiskit.providers.aer.noise as noise
from qiskit import IBMQ, Aer
from qiskit import *
from qiskit.visualization import plot_histogram
from qiskit.providers.aer.noise import NoiseModel
import networkx as nx
from ansatz import qaoa
# Global plot styling for any figures produced downstream.
matplotlib.rc('xtick', labelsize=18)
matplotlib.rc('ytick', labelsize=18)
plt.rcParams["font.family"] = "Times New Roman"
# Get 2 qubit error probability and optimizer from command line arguments
prob2 = float(sys.argv[1])
optimizer = sys.argv[2]
# Construct square graph
# NOTE(review): edges (0,1),(1,2),(2,3),(3,1) form a triangle {1,2,3} with
# node 0 pendant on node 1, not a 4-cycle -- confirm this matches the
# "square graph" comment above.
G = nx.Graph()
G.add_nodes_from([0,1,2,3])
G.add_edges_from([(0,1),(1,2),(2,3),(3,1)])
# Construct idealized probability distribution
# Near-ideal reference output distribution; used below as the baseline
# the SSO values are computed against.
prob_standard = [('00101', 0.99951171875),
                 ('01010', 0.000244140625),
                 ('00001', 0.0001220703125),
                 ('00010', 0.0001220703125)]
# Construct data class for hyperparameter search
from dataclasses import dataclass
@dataclass
class prob_info:
    """Configuration for one sweep of the QAOA hyperparameter search, plus
    (once filled in by the caller) its computed results."""
    Lambdas: list      # Lambda penalty values to sweep over
    Ps: list           # QAOA depths (number of layers) to sweep over
    optimizer: str     # scipy.optimize.minimize method name
    num_rounds: int    # random restarts per (Lambda, P) combination
    isNoise: bool      # whether to simulate under the depolarizing noise model
    prob_1: float      # 1-qubit depolarizing error probability
    prob_2: float      # 2-qubit depolarizing error probability
    probs: dict = None      # filled later: (Lambda, P, isNoise) -> sampled distribution
    SSO_vals: dict = None   # filled later: (Lambda, P, isNoise) -> SSO vs reference
# Define function to compute prob distribution
def calculateProbs(probs_specs):
basis_gates = None
noise_model = None
if probs_specs.isNoise:
# Depolarizing quantum errors
error_1 = noise.depolarizing_error(probs_specs.prob_1, 1)
error_2 = noise.depolarizing_error(probs_specs.prob_2, 2)
# Add errors to noise model
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(error_1, ['u1', 'u2', 'u3'])
noise_model.add_all_qubit_quantum_error(error_2, ['cx'])
basis_gates = noise_model.basis_gates
probs = {}
for Lambda in probs_specs.Lambdas:
for P in probs_specs.Ps:
for (coupling_map, basis_gates, noise_model) in [(None, basis_gates, noise_model)]:
print(str(Lambda) + ", " + str(P) + ", " + str(probs_specs.isNoise))
out = qaoa.solve_mis(P, G, Lambda, probs_specs.isNoise, probs_specs.num_rounds, probs_specs.optimizer, coupling_map, basis_gates, noise_model)
print("Computed Out")
circ = qaoa.gen_qaoa(G, P, params=out['x'], barriers=False, measure=True)
# result = execute(circ, backend=backend, shots=8192).result()
# result = execute(circ, backend=Aer.get_backend('qasm_simulator'), shots=8192, coupling_map=coupling_map, basis_gates=basis_gates,
# noise_model=noise_model).result()
result = execute(circ, backend=Aer.get_backend('qasm_simulator'), shots=8192, basis_gates=basis_gates, noise_model=noise_model).result()
counts = result.get_counts(circ)
total_shots = sum(counts.values())
prob = [(bitstr, counts[bitstr] / total_shots) for bitstr in counts.keys()]
prob = sorted(prob, key=lambda p: p[1], reverse=True)
print("Computed Prob")
probs[Lambda, P, probs_specs.isNoise] = prob
return probs
# Define functions to compute SSO vals
def convert_Dict(probs):
probDict = {}
for (string, num) in probs:
probDict[string] = num
return probDict
def SSO(probs1, probs2):
    """Overlap measure between two (bitstring, probability) pair lists.

    Accumulates sqrt(p1) * sqrt(p2) over bitstrings present in both
    distributions and returns the square root of that sum.

    NOTE(review): the classical fidelity is usually (sum sqrt(p1*p2))**2;
    the final **0.5 here is preserved from the original -- confirm intended.

    Fixes vs. the original: the `string in probs1` test was always true
    (iterating probs1's own keys) and is removed; the dict conversion is
    done directly with dict() instead of the convert_Dict helper
    (behaviorally identical).
    """
    probs1 = dict(probs1)
    probs2 = dict(probs2)
    sso_squared = 0
    for bitstr, p in probs1.items():
        if bitstr in probs2:
            sso_squared += (p ** 0.5) * (probs2[bitstr] ** 0.5)
    return sso_squared ** 0.5
def SSO_val(prob_dict, probs_std, Lambdas, Ps, Noises):
    # NOTE(review): dead code -- this stub is immediately shadowed by the
    # SSO_val(probs_specs, probs_std) definition that follows it in the
    # file, so it can never be called. Candidate for deletion.
    return 4
def SSO_val(probs_specs, probs_std):
    """Compute the SSO between the reference distribution *probs_std* and
    every stored (Lambda, P) distribution in *probs_specs*.

    Returns a dict keyed exactly like probs_specs.probs:
    (Lambda, P, isNoise) -> SSO value.
    """
    noisy = probs_specs.isNoise
    return {
        (lam, depth, noisy): SSO(probs_std, probs_specs.probs[lam, depth, noisy])
        for lam in probs_specs.Lambdas
        for depth in probs_specs.Ps
    }
# Compute the prob distribution and SSO vals for three different optimizers
# NOTE(review): despite the comment above, this loop sweeps the 1-qubit
# error probability for a single optimizer (prob2 and optimizer are fixed
# by the command-line arguments).
probs_vals = {}
for prob1 in [0.001, 0.0001, 0.00001, 0.000001, 0.0000001]:
    probInput = prob_info([0,1,2,3,4,6,8,10,20,50], [3], optimizer, 20, True, prob1, prob2)
    prob = calculateProbs(probInput)
    probInput.probs = prob
    sso_vals = SSO_val(probInput, prob_standard)
    probInput.SSO_vals = sso_vals
    probs_vals[prob1, prob2, optimizer] = probInput
# Print results in a csv file
# One row per (prob1, prob2, optimizer) key; the value column is the
# repr of the whole prob_info record.
file_name = "probs_" + str(prob2) + "_" + str(optimizer) + ".csv"
a_file = open(file_name, "w")
writer = csv.writer(a_file)
for key, value in probs_vals.items():
    writer.writerow([key, value])
a_file.close()
75,995 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /mainRotate.py | import reactor
import atom
import copy
def genEmptyMap():
    """Return a fresh 10x8 grid (indexed [x][y]) filled with the empty
    marker "E"; each column is an independent list."""
    return [["E"] * 8 for _ in range(10)]
# Puzzle driver: synthesize methane (CH4) from H (input A) and C (input B)
# using two waldo programs that rely on rotation (ROCC) commands.
r = reactor.Reactor()
r.setVerbose(True)
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo walking path (arrow cells redirect the waldo when stepped on).
redArrowMap[4][0] = "U"
redArrowMap[4][7] = "L"
redArrowMap[2][7] = "D"
redArrowMap[2][5] = "R"
redArrowMap[3][5] = "U"
redArrowMap[3][6] = "L"
redArrowMap[1][6] = "D"
redArrowMap[1][4] = "R"
redArrowMap[7][4] = "U"
redArrowMap[7][5] = "R"
redArrowMap[8][5] = "D"
redArrowMap[8][0] = "L"
# Red waldo commands: S? = start marker + direction, INA = pull input A,
# GR/DR = grab/drop, ROCC = rotate counter-clockwise, B+ = bond,
# OUTY = deliver output Y, SY = sync with the blue waldo.
redActionMap[4][0] = "SU"
redActionMap[4][1] = "SY"
redActionMap[4][4] = "GR"
redActionMap[4][5] = "ROCC"
redActionMap[4][6] = "INA"
redActionMap[2][6] = "B+"
redActionMap[2][5] = "ROCC"
redActionMap[3][5] = "INA"
redActionMap[7][5] = "DR"
redActionMap[8][4] = "OUTY"
redActionMap[5][0] = "SY"
# Blue waldo walking path.
blueArrowMap[1][0] = "U"
blueArrowMap[1][3] = "L"
blueArrowMap[0][3] = "U"
blueArrowMap[0][7] = "R"
blueArrowMap[2][7] = "D"
blueArrowMap[2][5] = "R"
blueArrowMap[3][5] = "U"
blueArrowMap[3][6] = "L"
blueArrowMap[1][6] = "D"
blueArrowMap[1][4] = "R"
blueArrowMap[7][4] = "D"
blueArrowMap[7][0] = "L"
# Blue waldo commands (INB pulls input B, the carbon).
blueActionMap[1][0] = "SU"
blueActionMap[1][1] = "INB"
blueActionMap[1][2] = "GR"
blueActionMap[1][7] = "INA"
blueActionMap[2][6] = "B+"
blueActionMap[2][5] = "ROCC"
blueActionMap[3][5] = "INA"
blueActionMap[4][4] = "DR"
blueActionMap[5][4] = "SY"
blueActionMap[2][0] = "SY"
#Input A
# Input A: a single hydrogen atom appearing at (1,6).
a = atom.Atom("H",False)
a.setLocation(1,6)
#Input B
# Input B: a single carbon atom appearing at (1,2).
b = atom.Atom("C",False)
b.setLocation(1,2)
#Output Y
# Expected output Y: methane -- one carbon bonded to four hydrogens.
aa = atom.Atom("H",False)
bb = atom.Atom("H",False)
cc = atom.Atom("H",False)
dd = atom.Atom("H",False)
ee = atom.Atom("C",False)
aa.bindWith(ee,"D")
bb.bindWith(ee,"U")
cc.bindWith(ee,"L")
dd.bindWith(ee,"R")
r.setMolecule("A",a)
r.setMolecule("B",b)
r.setMolecule("Y",aa)
r.setGoal("Y",10)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setBonders([[1,6],[2,6]])
# Run the simulation until crash, completion, or the cycle budget expires.
for i in range(1,1000):
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        # NOTE(review): "is goal" should read "its goal" -- runtime string
        # left unchanged here.
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
| {"/mainRotate.py": ["/reactor.py", "/atom.py"], "/reactor.py": ["/waldo.py", "/atom.py"], "/main.py": ["/reactor.py", "/atom.py"], "/waldo.py": ["/reactor.py", "/atom.py"], "/atom.py": ["/reactor.py"], "/mainFuse.py": ["/reactor.py", "/atom.py"], "/mainSense.py": ["/reactor.py", "/atom.py"], "/mainSequence.py": ["/reactor.py", "/atom.py"]} |
75,996 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /reactor.py | import waldo
import atom
import copy
import collections
class Reactor:
    """SpaceChem-style reactor simulation.

    The reactor is a 10x8 grid (indexed [x][y], x in 0..9, y in 0..7) on
    which a red and a blue waldo move, grab, bond, rotate and deliver
    atoms. By convention the methods return True to signal a crash
    condition (collision, illegal move, wrong output molecule) and a falsy
    value otherwise.
    """
    def __init__(self):
        self.blueWaldo = waldo.Waldo("Blue", self)
        self.redWaldo = waldo.Waldo("Red", self)
        self.atomlist = []             # every atom currently in the reactor
        self.atomMap = 0               # 10x8 occupancy grid, rebuilt each half-step
        self.moleculeA = []            # input molecule template(s) for zone A
        self.moleculeB = []            # input molecule template(s) for zone B
        self.moleculeY = 0             # hash of the expected Y output molecule
        self.moleculeW = 0             # hash of the expected W output molecule
        self.verbose = False
        self.goalY = 0                 # molecules required per output zone
        self.goalW = 0
        self.doneY = 0                 # molecules delivered so far
        self.doneW = 0
        self.bonders = []              # [x, y] positions of bonder pads
        self.sensor = []               # [x, y] position of the sensor pad
        self.splitter = []             # [x, y] position of the splitter pad
        self.fuser = []                # [x, y] position of the fuser pad
        self.tunnel = []               # pair of [x, y] teleporter endpoints
        self.activateBigOutput = False
        self.sequenceA = []            # optional input-order index sequences
        self.sequenceB = []
        self.originalSequenceA = []
        self.originalSequenceB = []

    def setBonders(self, bonders):
        self.bonders = bonders

    def setFuser(self, fuser):
        self.fuser = fuser

    def setSplitter(self, splitter):
        self.splitter = splitter

    def setTunnel(self, tunnels):
        self.tunnel = tunnels

    def setSensor(self, sensor):
        self.sensor = sensor

    def activateDoubleOutput(self):
        self.activateBigOutput = True

    def setVerbose(self, verbose):
        # Verbosity is propagated to both waldos.
        self.verbose = verbose
        self.blueWaldo.setVerbose(verbose)
        self.redWaldo.setVerbose(verbose)

    def setGoal(self, location, goal):
        # location is "Y" or "W".
        if(location == "Y"):
            self.goalY = goal
        elif(location == "W"):
            self.goalW = goal

    def setSenseList(self, senseList):
        self.redWaldo.setSenseList(senseList)
        self.blueWaldo.setSenseList(senseList)

    def isFinished(self):
        """True once both output quotas have been met."""
        return (self.doneY >= self.goalY and self.doneW >= self.goalW)

    def setArrowMapBlue(self, map):
        return self.blueWaldo.setArrowMap(map)

    def setActionMapBlue(self, map):
        return self.blueWaldo.setActionMap(map)

    def setArrowMapRed(self, map):
        return self.redWaldo.setArrowMap(map)

    def setActionMapRed(self, map):
        return self.redWaldo.setActionMap(map)

    def doCycle(self, cycle):
        """Advance the simulation by one full cycle.

        Movement happens in ten 0.1-grid sub-steps with collision checks,
        then both waldos pick up arrows and execute their command cells.
        Returns True if the reactor crashed (and implicitly None otherwise).
        """
        if(self.verbose):
            print("Cycle : "+ str(cycle))
        for i in range(0,10):
            redOut = self.redWaldo.move(cycle+(i/10.0))
            blueOut = self.blueWaldo.move(cycle+(i/10.0))
            # Only the last sub-step (i == 9) snaps atoms back onto the grid.
            if(redOut or blueOut or self.checkCollision(i==9)):
                return True
        self.redWaldo.setDirection()
        self.blueWaldo.setDirection()
        redOut = self.redWaldo.doAction(cycle)
        if(self.checkCollision(True)):
            return True
        blueOut = self.blueWaldo.doAction(cycle)
        if(redOut or blueOut or self.checkCollision(True)):
            return True
        self.redWaldo.printState()
        self.blueWaldo.printState()
        if(self.verbose):
            self.printAtoms()

    def checkCollision(self, isSecondHalf):
        """Detect atom collisions; also rebuilds atomMap on the second half.

        Mid-step (isSecondHalf False) a distance-based check is used; at
        step end the atoms are rounded to grid cells and two atoms in the
        same cell is a crash.
        """
        if(isSecondHalf):
            atomMap = [["E" for Y in range(0,8)] for X in range(0,10)]
        else:
            return self.checkCollisionWithDistance()
        for atom in self.atomlist:
            atom.roundLocation()
            if(atomMap[int(atom.X)][int(atom.Y)] == "E"):
                atomMap[int(atom.X)][int(atom.Y)] = atom
            else:
                #Two atoms are at the same location
                return True
        self.atomMap = atomMap
        return False

    def generateAtomMap(self):
        """Rebuild the occupancy grid from the current atom list."""
        self.atomMap = [["E" for Y in range(0,8)] for X in range(0,10)]
        for atom in self.atomlist:
            self.atomMap[int(atom.X)][int(atom.Y)] = atom

    def checkCollisionWithDistance(self):
        # Pairwise distance check over all atoms (each pair tested once).
        tempAtomList = copy.copy(self.atomlist)
        for atom in tempAtomList:
            tempAtomList.remove(atom)
            for otherAtom in tempAtomList:
                collision = atom.checkCollisionWithDistance(otherAtom)
                if(collision):
                    return True
        return False

    def hasAtomAtLocation(self, x, y):
        if(self.atomMap[x][y] == "E"):
            return False
        return True

    def getAtomAtLocation(self, x, y):
        return self.atomMap[x][y]

    def addMolecule(self, location, molecule):
        # Register an extra input template for zone "A" or "B".
        if(location == "A"):
            self.moleculeA.append(molecule)
        elif(location == "B"):
            self.moleculeB.append(molecule)

    def setSequence(self, location, sequence):
        # Fix the order in which multiple input templates are delivered;
        # the original copy is kept so the sequence can repeat.
        if(location == "A"):
            self.sequenceA = sequence
            self.originalSequenceA = copy.copy(sequence)
        elif(location == "B"):
            self.sequenceB = sequence
            self.originalSequenceB = copy.copy(sequence)

    def setMolecule(self, location, molecule):
        # Inputs keep the molecule object; outputs only keep its hash,
        # which is what delivered molecules are compared against.
        if(location == "A"):
            self.moleculeA = molecule
        elif(location == "B"):
            self.moleculeB = molecule
        elif(location == "Y"):
            self.moleculeY = molecule.getHash(0,"Special")
        elif(location == "W"):
            self.moleculeW = molecule.getHash(0,"Special")

    def output(self, location, cycle, waldo):
        """Try to deliver every outputable molecule in the given zone.

        Zone Y is the bottom-right quadrant (y 4..7), W the top-right
        (y 0..3). Returns True if any delivered molecule was wrong.
        NOTE(review): rangeY is only bound for "Y"/"W"; any other location
        would raise NameError -- callers only pass those two.
        """
        returnValue = False
        if(location == "Y"):
            rangeY = range(4,8)
        elif(location == "W"):
            rangeY = range(0,4)
        for x in range(6,10):
            for y in rangeY:
                if(self.atomMap[x][y] != "E"):
                    atomToOutput = self.atomMap[x][y]
                    if(atomToOutput.isOutputable(location, cycle, waldo, self.activateBigOutput, True)):
                        returnValue = returnValue or self.outputAtom(atomToOutput, location, cycle, waldo)
        self.generateAtomMap()
        return returnValue

    def outputAtom(self, atom, location, cycle, waldo):
        """Remove a whole molecule from the reactor and score it.

        Returns True (crash) when its hash does not match the expected
        output molecule for the zone.
        """
        if(location == "Y"):
            hashToCompare = self.moleculeY
        elif(location == "W"):
            hashToCompare = self.moleculeW
        atomToRemove = atom.returnAllAtoms(cycle, waldo)
        hashMol = atom.getHash(cycle, waldo)
        for atoms in atomToRemove:
            self.atomlist.remove(atoms)
        # Order-insensitive comparison of the molecule hashes.
        if(collections.Counter(hashMol) == collections.Counter(hashToCompare)):
            if(location == "Y"):
                self.doneY = self.doneY + 1
            elif(location == "W"):
                self.doneW = self.doneW + 1
            return False
        else:
            return True

    def addBond(self):
        """Bond every adjacent pair of occupied bonder pads."""
        tempBondList = copy.copy(self.bonders)
        for bonder in tempBondList:
            tempBondList.remove(bonder)
            for otherBonder in tempBondList:
                direction = self.getBonderDirection(bonder, otherBonder)
                if(direction != "Undef"):
                    atom = self.atomMap[bonder[0]][bonder[1]]
                    otherAtom = self.atomMap[otherBonder[0]][otherBonder[1]]
                    if( atom != "E" and otherAtom != "E"):
                        atom.bindWith(otherAtom, direction)

    def removeBond(self):
        """Unbond every adjacent pair of occupied bonder pads."""
        tempBondList = copy.copy(self.bonders)
        for bonder in tempBondList:
            tempBondList.remove(bonder)
            for otherBonder in tempBondList:
                direction = self.getBonderDirection(bonder, otherBonder)
                if(direction != "Undef"):
                    atom = self.atomMap[bonder[0]][bonder[1]]
                    otherAtom = self.atomMap[otherBonder[0]][otherBonder[1]]
                    if( atom != "E" and otherAtom != "E"):
                        atom.removeBind(otherAtom, direction)

    def getBonderDirection(self, bonder, otherBonder):
        """Return the direction from bonder to otherBonder ("U"/"D"/"L"/"R")
        if they are grid-adjacent, else "Undef"."""
        if(bonder[0] == otherBonder[0] and bonder[1] + 1 == otherBonder[1]):
            return "U"
        elif(bonder[0] == otherBonder[0] and bonder[1] == otherBonder[1] + 1):
            return "D"
        elif(bonder[0] == otherBonder[0] + 1 and bonder[1] == otherBonder[1]):
            return "L"
        elif(bonder[0] + 1 == otherBonder[0] and bonder[1] == otherBonder[1]):
            return "R"
        #Both bonders are not adjacent
        return "Undef"

    def getSymAtomAtSensor(self):
        """Return the symbol of the atom sitting on the sensor pad, or "E"."""
        if(len(self.sensor) == 2):
            atom = self.atomMap[self.sensor[0]][self.sensor[1]]
            if(atom != "E"):
                return atom.getSymbole()
        return "E"

    def input(self, location, cycle, waldo):
        """Spawn a copy of the input molecule for zone "A" or "B".

        When an input sequence is configured, it selects which template to
        spawn and wraps around once exhausted.
        """
        if(location == "A"):
            if(len(self.sequenceA) == 0):
                mol = copy.deepcopy(self.moleculeA)
            else:
                mol = copy.deepcopy(self.moleculeA[self.sequenceA[0]])
                self.sequenceA.pop(0)
                if(len(self.sequenceA) == 0):
                    self.sequenceA = copy.copy(self.originalSequenceA)
        else:
            if(len(self.sequenceB) == 0):
                mol = copy.deepcopy(self.moleculeB)
            else:
                mol = copy.deepcopy(self.moleculeB[self.sequenceB[0]])
                self.sequenceB.pop(0)
                if(len(self.sequenceB) == 0):
                    self.sequenceB = copy.copy(self.originalSequenceB)
        for i in mol.returnAllAtoms(cycle, waldo):
            self.atomlist.append(i)

    def sync(self, waldo):
        # Returns True if the *other* waldo is not already waiting for sync
        # (meaning the caller must wait).
        if(waldo == "Blue"): #Return true if the other one is not waiting for sync
            return not self.redWaldo.sync()
        elif(waldo == "Red"):
            return not self.blueWaldo.sync()

    def fuse(self):
        """Fuse the atom left of the fuser pad into the atom on the pad."""
        if(len(self.fuser) == 2):
            target = self.atomMap[self.fuser[0]][self.fuser[1]]
            toFuse = self.atomMap[self.fuser[0]-1][self.fuser[1]]
            if(target != "E" and toFuse != "E"):
                self.atomlist.remove(toFuse)
                return target.fuse(toFuse)

    def swap(self):
        """Teleport atoms between the two tunnel endpoints.

        Bug fix: the original referenced the bare name `atomMap`, which is
        undefined at function scope and raised NameError whenever the SW
        command executed; it now reads self.atomMap.
        """
        if(len(self.tunnel) == 2):
            atom = self.atomMap[self.tunnel[0][0]][self.tunnel[0][1]]
            otherAtom = self.atomMap[self.tunnel[1][0]][self.tunnel[1][1]]
            if(atom != "E" and otherAtom != "E"):
                atom.swapWith(otherAtom)
            elif(atom == "E" and otherAtom != "E"):
                otherAtom.swapTo(self.tunnel[0][0], self.tunnel[0][1])
            elif(atom != "E" and otherAtom == "E"):
                atom.swapTo(self.tunnel[1][0], self.tunnel[1][1])
            else:
                return False

    def split(self):
        """Split the atom on the splitter pad, placing the new atom one cell
        to the right.

        Bug fixes: the original tested len(self.splitter == 2), which raises
        TypeError (len() of a bool), and then indexed self.fuser instead of
        the splitter coordinates it had just validated. NOTE(review): the
        switch from self.fuser to self.splitter matches the guard and the
        fuse() pattern -- confirm against the intended pad layout.
        """
        if(len(self.splitter) == 2):
            target = self.atomMap[self.splitter[0]][self.splitter[1]]
            if(target != "E"):
                if(self.atomMap[self.splitter[0]+1][self.splitter[1]] == "E"):
                    newAtom = target.split()
                    if(newAtom != False):
                        self.atomlist.append(newAtom)
                else:
                    # Destination cell occupied: crash.
                    return True

    def printAtoms(self):
        for i in self.atomlist:
            i.printState()
75,997 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /main.py | import reactor
import atom
import copy
def genEmptyMap():
    """Return a fresh 10x8 grid (indexed [x][y]) filled with the empty
    marker "E"; each column is an independent list."""
    return [["E"] * 8 for _ in range(10)]
# Puzzle driver: crack an ethane-like input (2 C, 6 H) into an ethylene-like
# output Y (C=C with 4 H) plus an H2 output W, using two waldo programs.
r = reactor.Reactor()
r.setVerbose(True)
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo walking path (arrow cells redirect the waldo when stepped on).
redArrowMap[1][6] = "D"
redArrowMap[1][2] = "R"
redArrowMap[3][2] = "U"
redArrowMap[3][4] = "R"
redArrowMap[6][4] = "D"
redArrowMap[6][1] = "L"
redArrowMap[2][1] = "U"
redArrowMap[2][5] = "R"
redArrowMap[7][5] = "U"
redArrowMap[7][6] = "L"
# Red waldo commands: S? = start marker + direction, INA = pull input A,
# GD = grab/drop toggle, B+/B- = bond/unbond, SY = sync, OUTY = output Y.
redActionMap[4][6] = "SL"
redActionMap[3][6] = "INA"
redActionMap[1][6] = "GD"
redActionMap[1][4] = "B-"
redActionMap[1][3] = "SY"
redActionMap[1][2] = "SY"
redActionMap[3][4] = "B-"
redActionMap[5][4] = "SY"
redActionMap[6][4] = "SY"
redActionMap[2][4] = "B+"
redActionMap[7][6] = "GD"
redActionMap[6][6] = "OUTY"
# Blue waldo walking path.
blueArrowMap[3][1] = "U"
blueArrowMap[3][7] = "D"
blueArrowMap[2][3] = "U"
blueArrowMap[2][5] = "R"
blueArrowMap[7][5] = "D"
blueArrowMap[7][1] = "L"
# Blue waldo commands (FLL0 is a flip-flop redirect cell, OUTW = output W).
blueActionMap[3][1] = "SU"
blueActionMap[3][2] = "SY"
blueActionMap[3][3] = "FLL0"
blueActionMap[3][4] = "GD"
blueActionMap[3][6] = "SY"
blueActionMap[3][7] = "SY"
blueActionMap[2][3] = "B+"
blueActionMap[2][4] = "GD"
blueActionMap[7][3] = "GD"
blueActionMap[7][1] = "OUTW"
#Input A
# Input A: ethane-like molecule -- two bonded carbons (g, h), each carrying
# three hydrogens, spawned around (1,6)-(2,6).
a = atom.Atom("H",False)
a.setLocation(0,6)
b = atom.Atom("H",False)
b.setLocation(1,7)
c = atom.Atom("H",False)
c.setLocation(1,5)
d = atom.Atom("H",False)
d.setLocation(2,7)
e = atom.Atom("H",False)
e.setLocation(2,5)
f = atom.Atom("H",False)
f.setLocation(3,6)
g = atom.Atom("C",False)
g.setLocation(1,6)
h = atom.Atom("C",False)
h.setLocation(2,6)
a.bindWith(g,"R")
b.bindWith(g,"D")
c.bindWith(g,"U")
d.bindWith(h,"D")
e.bindWith(h,"U")
f.bindWith(h,"L")
g.bindWith(h,"R")
#Output Y
# Expected output Y: two carbons with two hydrogens each.
aa = atom.Atom("H",False)
bb = atom.Atom("H",False)
cc = atom.Atom("H",False)
dd = atom.Atom("H",False)
ee = atom.Atom("C",False)
ff = atom.Atom("C",False)
aa.bindWith(ee,"D")
bb.bindWith(ee,"U")
cc.bindWith(ff,"D")
dd.bindWith(ff,"U")
# NOTE(review): bindWith is called twice on the same C-C pair --
# presumably to form the C=C double bond; confirm bindWith increments
# bond multiplicity on repeat calls.
ee.bindWith(ff,"R")
ee.bindWith(ff,"R")
#Output w
# Expected output W: a hydrogen pair (H2).
aaa = atom.Atom("H",False)
bbb = atom.Atom("H",False)
aaa.bindWith(bbb,"R")
r.setMolecule("A",a)
r.setMolecule("Y",aa)
r.setMolecule("W",aaa)
r.setGoal("Y",10)
r.setGoal("W",10)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setBonders([[2,4],[3,4]])
# Run the simulation until crash, completion, or the cycle budget expires.
for i in range(1,1000):
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        # NOTE(review): "is goal" should read "its goal" -- runtime string
        # left unchanged here.
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
75,998 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /waldo.py | import reactor
import atom
class Waldo:
    """One of the two reactor manipulators (red or blue).

    A waldo walks the 10x8 grid following its arrow map, executes the
    command in its action map each cycle, and can carry one grabbed atom
    (dragging the whole bonded molecule with it). Methods return True to
    signal a crash and a falsy value otherwise.
    """
    def __init__(self, name, reactor):
        self.reactor = reactor
        self.hasAtom = False
        self.atom = 0                  # atom currently grabbed (0 = none)
        self.X = 0
        self.Y = 0
        self.direction = "Undef"
        self.actionMap = []            # 10x8 grid of command strings
        self.arrowMap = []             # 10x8 grid of direction arrows
        self.name = name
        self.verbose = False
        self.isWaitingForSync = False
        self.senseList = []
        self.justWakeUp = False
        self.isRotating = False
        self.rotationDir = 0           # 0 = counter-clockwise, 1 = clockwise

    def setSenseList(self, senseList):
        self.senseList = senseList

    def setVerbose(self, verbose):
        self.verbose = verbose

    def setActionMap(self, actionMap):
        """Install the command grid and position the waldo on its start
        marker ("SU"/"SD"/"SL"/"SR"). Returns True when a start was found."""
        self.actionMap = actionMap
        for x in range(0,10):
            for y in range(0,8):
                if(actionMap[x][y] == "SU"):
                    self.setStart(x, y, "U")
                    return True
                elif(actionMap[x][y] == "SD"):
                    self.setStart(x, y, "D")
                    return True
                elif(actionMap[x][y] == "SL"):
                    self.setStart(x, y, "L")
                    return True
                elif(actionMap[x][y] == "SR"):
                    self.setStart(x, y, "R")
                    return True
        return False

    def setArrowMap(self, arrowMap):
        self.arrowMap = arrowMap
        return True

    def setStart(self, x, y, direction):
        self.X = x
        self.Y = y
        self.direction = direction

    def sync(self):
        # Called by the reactor on behalf of the *other* waldo; releases
        # this waldo if it was blocked on a SY command.
        if(self.isWaitingForSync):
            self.isWaitingForSync = not self.isWaitingForSync
            self.justWakeUp = True
            return True
        return False

    def move(self, cycle):
        """Advance one 0.1-grid sub-step (or one rotation step). Returns
        True on a crash reported by the carried molecule."""
        self.justWakeUp = False
        returnValue = False
        if(not self.isWaitingForSync and not self.isRotating):
            returnValue = self.setNewLocation(cycle)
        elif(self.isRotating):
            if(self.hasAtom):
                # The rotation pivot is only registered on whole cycles.
                if(cycle % 1.0 == 0):
                    returnValue = self.atom.startRotate(self.rotationDir, cycle, self.name)
                returnValue = returnValue or self.atom.rotate(self.rotationDir, cycle, self.name)
        return returnValue

    def setDirection(self):
        # Snap back onto the grid, then pick up any arrow under the waldo.
        self.X = int(round(self.X))
        self.Y = int(round(self.Y))
        if(self.arrowMap[self.X][self.Y] != "E"):
            self.direction = self.arrowMap[self.X][self.Y]

    def setNewLocation(self, cycle):
        # Walls simply stop the waldo itself; a carried molecule, however,
        # may crash against a wall (reported by the atom).
        if(self.direction == "L"):
            if(self.X != 0):
                self.X = self.X - 0.1
        elif(self.direction == "R"):
            if(self.X != 9):
                self.X = self.X + 0.1
        elif(self.direction == "U"):
            if(self.Y != 7):
                self.Y = self.Y + 0.1
        elif(self.direction == "D"):
            if(self.Y != 0):
                self.Y = self.Y - 0.1
        if(self.hasAtom):
            return self.atom.setNewLocation(self.direction, cycle, self.name)
        return False

    def printState(self):
        if(self.verbose):
            print(self.name + " waldo, pos x: " + str(self.X) + " , pos y: " + str(self.Y) + " , direction : " + self.direction + ", has atom : " + str(self.hasAtom))

    def doAction(self, cycle):
        """Execute the command cell under the waldo. Returns True on crash."""
        action = self.actionMap[self.X][self.Y]
        if(self.isRotating):
            # A rotation occupies the whole cycle; finish it instead of acting.
            self.isRotating = False
            return False
        if(self.verbose):
            print(self.name + " waldo, action : " + action)
        if(action == "INA"):
            return self.reactor.input("A", cycle, self.name)
        elif(action == "INB"):
            return self.reactor.input("B", cycle, self.name)
        elif(action == "OUTY"):
            return self.reactor.output("Y", cycle, self.name)
        elif(action == "OUTW"):
            return self.reactor.output("W", cycle, self.name)
        # Bug fix: the original had a second, unreachable `elif action ==
        # "INB"` branch here (shadowed by the one above) that wrongly mapped
        # input B to output W; it has been removed as dead code.
        elif(action == "GR"):
            return self.grab()
        elif(action == "GD"):
            # Grab/drop toggle.
            if(self.hasAtom):
                return self.drop()
            else:
                return self.grab()
        elif(action == "DR"):
            return self.drop()
        elif(action == "B+"):
            return self.reactor.addBond()
        elif(action == "B-"):
            return self.reactor.removeBond()
        elif(action == "SY"):
            if(not self.isWaitingForSync and not self.justWakeUp):
                if(self.reactor.sync(self.name)): #if the other one is not waiting, then you have to wait
                    self.isWaitingForSync = True
            return False
        elif(action == "SW"):
            return self.reactor.swap()
        elif(action == "FU"):
            self.reactor.fuse()
        elif(action == "SP"):
            self.reactor.split()
        elif(action == "ROCC"):
            self.isRotating = True
            self.rotationDir = 0
        elif(action == "ROC"):
            self.isRotating = True
            self.rotationDir = 1
        elif(action.startswith("SE")):
            # "SE<dir><idx>": redirect if the sensed atom matches senseList[idx].
            self.sense(action[2], action[3])
        elif(action.startswith("FL")):
            # "FL<dir><state>": flip-flop redirect cell.
            self.flipflop(action[2], action[3])

    def flipflop(self, direction, isActivate):
        # Toggle the cell's state each pass; the redirect only fires on the
        # "1" -> "0" transition, so the waldo alternates straight/redirected.
        # NOTE(review): confirm this one-pass-in-two redirect is the
        # intended flip-flop semantics.
        if(isActivate == "0"):
            self.actionMap[self.X][self.Y] = "FL" + direction + "1"
        elif(isActivate == "1"):
            self.actionMap[self.X][self.Y] = "FL" + direction + "0"
            self.direction = direction

    def sense(self, direction, atomNumber):
        # Redirect when the atom on the sensor pad matches the configured symbol.
        if(int(atomNumber) < len(self.senseList)):
            atomSym = self.reactor.getSymAtomAtSensor()
            if(atomSym == self.senseList[int(atomNumber)]):
                self.direction = direction

    def drop(self):
        if(not self.hasAtom):
            return False
        self.hasAtom = False
        self.atom.dropAtom()
        self.atom = 0

    def grab(self):
        if(self.hasAtom):
            return False
        if(self.reactor.hasAtomAtLocation(self.X, self.Y)):
            self.atom = self.reactor.getAtomAtLocation(self.X, self.Y)
            self.hasAtom = True
            return self.atom.grabAtom(self)
75,999 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /atom.py | import json
import reactor
import hashlib
import math
class Atom:
    """A single atom in the reactor: its element data, grid position, and
    its bonds to neighbouring atoms."""
    # Element lookup table, loaded once at class-definition time.
    # NOTE(review): the file handle is opened/closed manually; a `with`
    # block would be safer, and the path is relative to the CWD -- confirm.
    symboleDataFile = open('symboleData.json')
    symboleData = json.load(symboleDataFile)
    symboleDataFile.close()
    # Element symbols indexed by atomic number - 1 (H = 1, ... MT = 109).
    numberSymbole = ["H","HE","LI","BE","B","C","N","O","F","NE","NA","MG","AL","SI","P","S","CL","AR","K","CA","SC","TI","V","CR","MN","FE","CO","NI","CU","ZN",
                     "GA","GE","AS","SE","BR","KR","RB","SR","Y","ZR","NB","MO","TC","RU","RH","PD","AG","CD","IN","SN","SB","TE","I","XE","CS","BA","LA","CE","PR","ND","PM","SM",
                     "EU","GD","TB","DY","HO","ER","TM","YB","LU","HF","TA","W","RE","OS","IR","PT","AU","HG","TL","PB","BI","PO","AT","RN","FR","RA","AC","TH","PA","U","NP","PU",
                     "AM","CM","BK","CF","ES","FM","MD","NO","LR","RF","DB","SG","BH","HS","MT"]
    # Presumably the collision radius in grid units (used by the
    # distance-based collision check) -- TODO confirm.
    radius = 0.36
def __init__(self, symbole, isNumberInsteadOfSymbol):
    """Create an unbonded atom.

    symbole is either a symbol string (e.g. "H") or, when
    isNumberInsteadOfSymbol is true, a 1-based atomic number looked up in
    the class-level table.
    """
    self.sym = symbole
    if(isNumberInsteadOfSymbol):
        self.sym = Atom.numberSymbole[symbole-1]
    self.number = Atom.symboleData[self.sym]["AN"]   # "AN" -- presumably atomic number
    self.maxLink = Atom.symboleData[self.sym]["ML"]  # "ML" -- presumably max bond count
    # Neighbouring atoms in each direction (0 = no neighbour).
    self.left = 0
    self.right = 0
    self.up = 0
    self.down = 0
    # Grid position (may be fractional while a move is in progress).
    self.X = 0
    self.Y = 0
    # Bond multiplicity toward each neighbour.
    self.leftLink = 0
    self.rightLink = 0
    self.upLink = 0
    self.downLink = 0
    self.direction = "U"
    # Book-keeping used to avoid processing the same atom twice when an
    # operation recurses through a bonded molecule (see setNewLocation).
    self.lastMoveCycle = 0
    self.lastMoveWaldo = ""
    self.lastMoveDirection = "E"
    self.isGrabbed = False
    self.waldo = 0          # waldo currently holding this atom (0 = none)
    self.lastActionCycle = 0
    self.lastActionWaldo = 0
    self.lastHashCycle = 0
    self.lastHashWaldo = 0
    self.lastOutputableCheckCycle = 0
    self.lastOutputableCheckWaldo = 0
def getSymbole(self):
    """Return this atom's chemical symbol (e.g. "H", "C")."""
    return self.sym
def setLocation(self,x,y):
self.X = x
self.Y = y
def grabAtom(self, waldo):
if(self.isGrabbed):
return True
else:
self.isGrabbed = True
self.waldo = waldo
return False
def dropAtom(self):
self.isGrabbed = False
self.waldo = 0
def setNewLocation(self,direction, cycle, waldo):
if(self.lastMoveCycle == cycle):
#If moved twice the same turn and waldo due to a loop in bond
if(self.lastMoveWaldo == waldo):
return False
#If moved by the other waldo
else:
#Is ok if both waldo moved it in the same direction
if(self.lastMoveDirection == direction):
return False
#Byt if they didn't, it should crash the reactor
else:
return True
self.lastMoveDirection = direction
self.lastMoveWaldo = waldo
self.lastMoveCycle = cycle
if(direction == "L"):
if(self.X != 0):
self.X = self.X - 0.1
else:
return True
elif(direction == "R"):
if(self.X != 9):
self.X = self.X + 0.1
else:
return True
elif(direction == "U"):
if(self.Y != 7):
self.Y = self.Y + 0.1
else:
return True
elif(direction == "D"):
if(self.Y != 0):
self.Y = self.Y - 0.1
else:
return True
leftReturn = False
rightReturn = False
upReturn = False
downReturn = False
if(self.leftLink != 0):
leftReturn = self.left.setNewLocation(direction,cycle,waldo)
if(self.rightLink != 0):
rightReturn = self.right.setNewLocation(direction,cycle,waldo)
if(self.upLink != 0):
upReturn = self.up.setNewLocation(direction,cycle,waldo)
if(self.downLink != 0):
downReturn = self.down.setNewLocation(direction,cycle,waldo)
return (leftReturn or rightReturn or upReturn or downReturn)
def roundLocation(self):
self.X = int(round(self.X))
self.Y = int(round(self.Y))
def startRotate(self, direction, cycle, waldo):
if(self.lastActionCycle == cycle and self.lastActionWaldo == waldo): #Already in the list
return False
self.lastActionWaldo = waldo
self.lastActionCycle = cycle
if(direction == 0):
if(self.direction == "L"):
self.direction = "D"
elif(self.direction == "R"):
self.direction = "U"
elif(self.direction == "U"):
self.direction = "L"
elif(self.direction == "D"):
self.direction = "R"
if(direction == 1):
if(self.direction == "L"):
self.direction = "U"
elif(self.direction == "R"):
self.direction = "D"
elif(self.direction == "U"):
self.direction = "R"
elif(self.direction == "D"):
self.direction = "L"
if(self.leftLink != 0):
self.left.startRotate(direction,cycle,waldo)
if(self.rightLink != 0):
self.right.startRotate(direction,cycle,waldo)
if(self.upLink != 0):
self.up.startRotate(direction,cycle,waldo)
if(self.downLink != 0):
self.down.startRotate(direction,cycle,waldo)
return False
def rotate(self,direction,cycle,waldo):
if(self.lastMoveCycle == cycle):
#If moved twice the same turn and waldo due to a loop in bond
if(self.lastMoveWaldo == waldo):
return False
#If moved by the other waldo
else:
return True
self.lastMoveWaldo = waldo
self.lastMoveCycle = cycle
#Since it's the pivot, it does not move
leftReturn = False
rightReturn = False
upReturn = False
downReturn = False
if(self.leftLink != 0):
leftReturn = self.left.rotateAroundAtom(direction,self,cycle,waldo)
if(self.rightLink != 0):
rightReturn = self.right.rotateAroundAtom(direction,self,cycle,waldo)
if(self.upLink != 0):
upReturn = self.up.rotateAroundAtom(direction,self,cycle,waldo)
if(self.downLink != 0):
downReturn = self.down.rotateAroundAtom(direction,self,cycle,waldo)
return (leftReturn or rightReturn or upReturn or downReturn)
def rotateAroundAtom(self,direction, pivot, cycle,waldo):
if(self.lastMoveCycle == cycle):
#If moved twice the same turn and waldo due to a loop in bond
if(self.lastMoveWaldo == waldo):
return False
#If moved by the other waldo
else:
return True
self.lastMoveWaldo = waldo
self.lastMoveCycle = cycle
self.doRotation(direction,pivot)
leftReturn = False
rightReturn = False
upReturn = False
downReturn = False
if(self.leftLink != 0):
leftReturn = self.left.rotateAroundAtom(direction,pivot,cycle,waldo)
if(self.rightLink != 0):
rightReturn = self.right.rotateAroundAtom(direction,pivot,cycle,waldo)
if(self.upLink != 0):
upReturn = self.up.rotateAroundAtom(direction,pivot,cycle,waldo)
if(self.downLink != 0):
downReturn = self.down.rotateAroundAtom(direction,pivot,cycle,waldo)
return (leftReturn or rightReturn or upReturn or downReturn)
def doRotation(self,direction,pivot):
factor = 1
if(direction == 1):
factor = -1
s = math.sin(factor*math.pi/20)
c = math.cos(factor*math.pi/20)
oldX = self.X - pivot.X
oldY = self.Y - pivot.Y
newX = oldX * c - oldY * s
newY = oldX * s + oldY * c
self.X = newX + pivot.X
self.Y = newY + pivot.Y
def returnAllAtoms(self, cycle, waldo):
if(self.lastActionCycle == cycle and self.lastActionWaldo == waldo): #Already in the list
return []
self.lastActionWaldo = waldo
self.lastActionCycle = cycle
listAtoms = [self]
if(self.leftLink != 0):
listAtoms.extend(self.left.returnAllAtoms(cycle,waldo))
if(self.rightLink != 0):
listAtoms.extend(self.right.returnAllAtoms(cycle,waldo))
if(self.upLink != 0):
listAtoms.extend(self.up.returnAllAtoms(cycle,waldo))
if(self.downLink != 0):
listAtoms.extend(self.down.returnAllAtoms(cycle,waldo))
return listAtoms
def bindWith(self, otherAtom, direction):
if(direction == "L"):
otherAtomDirection = "R"
elif(direction == "R"):
otherAtomDirection = "L"
elif(direction == "U"):
otherAtomDirection = "D"
elif(direction == "D"):
otherAtomDirection = "U"
otherAtomDirection = otherAtom.getTrueDirection(otherAtomDirection)
direction = self.getTrueDirection(direction)
if(self.canAddLink(direction) and otherAtom.canAddLink(otherAtomDirection)):
self.addLink(direction,otherAtom)
otherAtom.addLink(otherAtomDirection,self)
return
def removeBind(self, otherAtom, direction):
if(direction == "L"):
otherAtomDirection = "R"
elif(direction == "R"):
otherAtomDirection = "L"
elif(direction == "U"):
otherAtomDirection = "D"
elif(direction == "D"):
otherAtomDirection = "U"
otherAtomDirection = otherAtom.getTrueDirection(otherAtomDirection)
direction = self.getTrueDirection(direction)
self.removeLink(direction,otherAtom)
otherAtom.removeLink(otherAtomDirection,self)
return
def removeLink(self, direction, otherAtom):
if(direction == "U" and self.up == otherAtom):
self.upLink = self.upLink - 1
if(self.upLink == 0):
self.up = 0
elif(direction == "D" and self.down == otherAtom):
self.downLink = self.downLink - 1
if(self.downLink == 0):
self.down = 0
elif(direction == "L" and self.left == otherAtom):
self.leftLink = self.leftLink - 1
if(self.leftLink == 0):
self.left = 0
elif(direction == "R" and self.right == otherAtom):
self.rightLink = self.rightLink - 1
if(self.rightLink == 0):
self.right = 0
def fuse(self, otherAtom):
otherNb = otherAtom.number
self.number = self.number + otherNb
if(self.number > len(Atom.numberSymbole)):
return True
self.sym = Atom.numberSymbole[self.number-1]
otherAtom.remove()
newMaxLink = Atom.symboleData[self.sym]["ML"]
if(newMaxLink < self.maxLink):
self.removeOverLink(self.upLink+self.downLink+self.leftLink+self.rightLink - newMaxLink)
self.maxLink = newMaxLink
def removeOverLink(self, linkToRemove):
if(linkToRemove < 1):
return
linkRemoved = 0
for i in range(3,0,-1):
if(self.upLink == i):
self.up.removeOneLinkWithAtom(self)
linkRemoved = linkRemoved + 1
if(i == 1):
self.up = 0
if(linkRemoved == linkToRemove):
return
if(self.downLink == i):
self.down.removeOneLinkWithAtom(self)
linkRemoved = linkRemoved + 1
if(i == 1):
self.down = 0
if(linkRemoved == linkToRemove):
return
if(self.leftLink == i):
self.left.removeOneLinkWithAtom(self)
linkRemoved = linkRemoved + 1
if(i == 1):
self.left = 0
if(linkRemoved == linkToRemove):
return
if(self.rightLink == i):
self.right.removeOneLinkWithAtom(self)
linkRemoved = linkRemoved + 1
if(i == 1):
self.right = 0
if(linkRemoved == linkToRemove):
return
def swapWith(self, otherAtom):
self.remove()
otherAtom.remove()
newX = otherAtom.X
newY = otherAtom.Y
otherAtom.X = self.X
otherAtom.Y = self.Y
self.X = newX
self.Y = newY
def swapTo(self, newX, newY):
self.remove()
self.X = newX
self.Y = newY
def remove(self):
if(self.upLink != 0):
self.up.removeLinkWithAtom(self)
if(self.downLink != 0):
self.down.removeLinkWithAtom(self)
if(self.leftLink != 0):
self.left.removeLinkWithAtom(self)
if(self.rightLink != 0):
self.right.removeLinkWithAtom(self)
def removeLinkWithAtom(self, otherAtom):
if(self.up == otherAtom):
self.up = 0
self.upLink = 0
elif(self.down == otherAtom):
self.down = 0
self.downLink = 0
elif(self.left == otherAtom):
self.left = 0
self.leftLink = 0
elif(self.right == otherAtom):
self.right = 0
self.rightLink = 0
def removeOneLinkWithAtom(self, otherAtom):
if(self.up == otherAtom):
self.upLink = self.upLink - 1
if(self.upLink == 0):
self.up = 0
elif(self.down == otherAtom):
self.downLink = self.downLink - 1
if(self.downLink == 0):
self.down = 0
elif(self.left == otherAtom):
self.leftLink = self.leftLink - 1
if(self.leftLink == 0):
self.left = 0
elif(self.right == otherAtom):
self.rightLink = self.rightLink - 1
if(self.rightLink == 0):
self.right = 0
def split(self):
newNb = self.number / 2.0
otherNb = math.floor(newNb)
newNb = math.ceil(newNb)
if(otherNb != 0):
self.number = self.newNb
self.sym = Atom.numberSymbole[newNb-1]
newMaxLink = Atom.symboleData[self.sym]["ML"]
if(newMaxLink < self.maxLink):
self.removeOverLink(self.maxLink - newMaxLink)
self.maxLink = newMaxLink
newAtom = Atom(otherNb,True)
newAtom.setLocation(self.X + 1, self.Y)
return newAtom
else:
return False
def addLink(self,direction, otherAtom):
if(direction == "U"):
self.upLink = self.upLink + 1
self.up = otherAtom
elif(direction == "D"):
self.downLink = self.downLink + 1
self.down = otherAtom
elif(direction == "L"):
self.leftLink = self.leftLink + 1
self.left = otherAtom
elif(direction == "R"):
self.rightLink = self.rightLink + 1
self.right = otherAtom
def canAddLink(self,direction):
if(self.leftLink + self.rightLink + self.upLink + self.downLink < self.maxLink):
if(direction == "U" and self.upLink < 3):
return True
elif(direction == "D" and self.downLink < 3):
return True
elif(direction == "L" and self.leftLink < 3):
return True
elif(direction == "R" and self.rightLink < 3):
return True
return False
def getTrueDirection(self,direction):
if(self.direction == "U"):
return direction
elif(self.direction == "L"):
if(direction == "L"):
return "U"
elif(direction == "R"):
return "D"
elif(direction == "U"):
return "R"
elif(direction == "D"):
return "L"
elif(self.direction == "R"):
if(direction == "L"):
return "D"
elif(direction == "R"):
return "U"
elif(direction == "U"):
return "L"
elif(direction == "D"):
return "R"
elif(self.direction == "D"):
if(direction == "L"):
return "R"
elif(direction == "R"):
return "L"
elif(direction == "U"):
return "D"
elif(direction == "D"):
return "U"
def checkCollisionWithDistance(self, otherAtom):
distance = ((self.X - otherAtom.X)**2 + (self.Y - otherAtom.Y)**2)**(1/2)
if(distance < 2*Atom.radius):
return True
return False
def getHash(self, cycle, waldo):
if(self.lastHashCycle == cycle and self.lastHashWaldo == waldo): #Already in the list
return []
self.lastHashWaldo = waldo
self.lastHashCycle = cycle
hashList = [hashlib.md5(self.getAtomString().encode('UTF-8')).hexdigest()]
if(self.leftLink != 0):
hashList.extend(self.left.getHash(cycle,waldo))
if(self.rightLink != 0):
hashList.extend(self.right.getHash(cycle,waldo))
if(self.upLink != 0):
hashList.extend(self.up.getHash(cycle,waldo))
if(self.downLink != 0):
hashList.extend(self.down.getHash(cycle,waldo))
return sorted(hashList)
def getAtomString(self):
AtomString = self.sym
nbNeighbour = 0
listNeighbour = []
if(self.upLink != 0):
nbNeighbour = nbNeighbour + 1
listNeighbour.append(self.up.sym+str(self.upLink))
else:
listNeighbour.append("XX")
if(self.downLink != 0):
nbNeighbour = nbNeighbour + 1
listNeighbour.append(self.down.sym+str(self.downLink))
else:
listNeighbour.append("XX")
if(self.leftLink != 0):
nbNeighbour = nbNeighbour + 1
listNeighbour.append(self.left.sym+str(self.leftLink))
else:
listNeighbour.append("XX")
if(self.rightLink != 0):
nbNeighbour = nbNeighbour + 1
listNeighbour.append(self.right.sym+str(self.rightLink))
else:
listNeighbour.append("XX")
AtomString = AtomString + str(nbNeighbour)
for strings in sorted(listNeighbour):
AtomString = AtomString + strings
return AtomString
def isOutputable(self,location, cycle, waldo, bigOutput, isFirst):
if(self.lastOutputableCheckCycle == cycle and self.lastOutputableCheckWaldo == waldo): #Already in the list
return not isFirst
self.lastOutputableCheckWaldo = waldo
self.lastOutputableCheckCycle = cycle
returnValue = self.isInOutputLocation(location, bigOutput) and not self.isGrabbed
if(self.leftLink != 0):
if(not self.left.isOutputable(location,cycle,waldo, bigOutput, False)):
returnValue = False
if(self.rightLink != 0):
if(not self.right.isOutputable(location,cycle,waldo, bigOutput,False)):
returnValue = False
if(self.upLink != 0):
if(not self.up.isOutputable(location,cycle,waldo, bigOutput, False)):
returnValue = False
if(self.downLink != 0):
if(not self.down.isOutputable(location,cycle,waldo, bigOutput, False)):
returnValue = False
return returnValue
def isInOutputLocation(self,location, bigOutput):
if(location == "Y"):
if(not bigOutput):
return (self.X > 5 and self.Y > 3)
else:
return (self.X > 5)
elif(location == "W"):
return (self.X > 5 and self.Y < 4)
def printState(self):
print("Atome symbole : " + self.sym + " pos x : " + str(self.X) + " pos y : " + str(self.Y) + " dir : " + self.direction + " link left : " +
str(self.leftLink) + " link right : " + str(self.rightLink) + " up link : " + str(self.upLink) + " down link : " + str(self.downLink) + " is grabbed : " + str(self.isGrabbed)) | {"/mainRotate.py": ["/reactor.py", "/atom.py"], "/reactor.py": ["/waldo.py", "/atom.py"], "/main.py": ["/reactor.py", "/atom.py"], "/waldo.py": ["/reactor.py", "/atom.py"], "/atom.py": ["/reactor.py"], "/mainFuse.py": ["/reactor.py", "/atom.py"], "/mainSense.py": ["/reactor.py", "/atom.py"], "/mainSequence.py": ["/reactor.py", "/atom.py"]} |
76,000 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /mainFuse.py | import reactor
import atom
import copy
def genEmptyMap():
    """Return a fresh 10x8 instruction grid, every cell "E" (empty)."""
    return [["E"] * 8 for _ in range(10)]
# Build a verbose reactor plus the four 10x8 programme grids:
# *ArrowMap steers each waldo, *ActionMap holds the command at each cell.
# (Opcode strings such as GR/DR/FU/OUTY are interpreted by reactor.py --
# presumably grab/drop/fuse/output -- confirm there.)
r = reactor.Reactor()
r.setVerbose(True)
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo programme.
redArrowMap[2][6] = "L"
redArrowMap[1][6] = "D"
redArrowMap[1][4] = "R"
redArrowMap[2][4] = "U"
redActionMap[4][6] = "SL"
redActionMap[2][6] = "INA"
redActionMap[1][6] = "GR"
redActionMap[1][5] = "FU"
redActionMap[2][4] = "SY"
redActionMap[2][5] = "DR"
# Blue waldo programme.
blueArrowMap[7][1] = "L"
blueArrowMap[2][1] = "U"
blueArrowMap[2][5] = "R"
blueArrowMap[7][5] = "D"
blueActionMap[7][3] = "SD"
blueActionMap[7][2] = "SY"
blueActionMap[7][1] = "SY"
blueActionMap[6][1] = "SY"
blueActionMap[5][1] = "SY"
blueActionMap[4][1] = "SY"
blueActionMap[3][1] = "SY"
blueActionMap[2][1] = "SY"
blueActionMap[2][2] = "SY"
blueActionMap[2][3] = "SY"
blueActionMap[2][4] = "SY"
blueActionMap[2][5] = "GR"
blueActionMap[6][5] = "DR"
blueActionMap[7][5] = "OUTY"
# Input molecule A is a single hydrogen; the goal is 10 neon ("NE") outputs,
# produced by repeatedly fusing hydrogens at the fuser cell [2,5].
a = atom.Atom("H",False)
a.setLocation(1,6)
b = atom.Atom("NE",False)
r.setMolecule("A",a)
r.setMolecule("Y",b)
r.setGoal("Y",10)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setFuser([2,5])
# Run at most 999 cycles; doCycle returning True means the reactor crashed.
for i in range(1,1000):
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
| {"/mainRotate.py": ["/reactor.py", "/atom.py"], "/reactor.py": ["/waldo.py", "/atom.py"], "/main.py": ["/reactor.py", "/atom.py"], "/waldo.py": ["/reactor.py", "/atom.py"], "/atom.py": ["/reactor.py"], "/mainFuse.py": ["/reactor.py", "/atom.py"], "/mainSense.py": ["/reactor.py", "/atom.py"], "/mainSequence.py": ["/reactor.py", "/atom.py"]} |
76,001 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /mainSense.py | import reactor
import atom
import copy
def genEmptyMap():
    """Build an empty 10-by-8 grid of "E" cells (one list per row)."""
    return [list("E" * 8) for _ in range(10)]
# Sensor demo: two inputs (C and H), bonders assemble a carbon chain with
# hydrogens, a sensor at [8,6] watching for "C" steers the red waldo.
r = reactor.Reactor()
r.setVerbose(True)
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo programme (SED0 presumably branches on the sensor -- see reactor.py).
redArrowMap[1][7] = "D"
redArrowMap[1][5] = "R"
redArrowMap[2][5] = "U"
redArrowMap[2][6] = "R"
redArrowMap[5][5] = "R"
redArrowMap[9][5] = "U"
redArrowMap[9][6] = "U"
redArrowMap[9][7] = "L"
redActionMap[6][7] = "SL"
redActionMap[5][7] = "INA"
redActionMap[4][7] = "SY"
redActionMap[3][7] = "SY"
redActionMap[1][6] = "GR"
redActionMap[1][5] = "ROCC"
redActionMap[2][6] = "B+"
redActionMap[3][6] = "GD"
redActionMap[5][6] = "SED0"
redActionMap[8][5] = "GD"
redActionMap[9][5] = "ROCC"
redActionMap[9][6] = "GD"
redActionMap[8][7] = "OUTY"
# Blue waldo programme.
blueArrowMap[4][2] = "L"
blueArrowMap[0][2] = "U"
blueArrowMap[0][7] = "R"
blueArrowMap[1][7] = "D"
blueArrowMap[1][1] = "R"
blueArrowMap[2][1] = "U"
blueArrowMap[2][6] = "R"
blueArrowMap[4][6] = "D"
blueActionMap[5][2] = "SL"
blueActionMap[4][2] = "INB"
blueActionMap[1][2] = "GD"
blueActionMap[0][6] = "DR"
blueActionMap[1][4] = "INB"
blueActionMap[1][1] = "SY"
blueActionMap[2][6] = "DR"
blueActionMap[3][6] = "B+"
blueActionMap[4][6] = "SY"
#Input A
a = atom.Atom("C",False)
a.setLocation(1,6)
#Input B
b = atom.Atom("H",False)
b.setLocation(1,2)
#Output Y
# Target molecule: a vertical chain of H-C-H units (deep copies of the
# first unit, bonded upward one after another).
aa = atom.Atom("H",False)
bb = atom.Atom("H",False)
cc = atom.Atom("C",False)
aa.bindWith(cc,"L")
bb.bindWith(cc,"R")
dd = copy.deepcopy(cc)
ee = copy.deepcopy(cc)
ff = copy.deepcopy(cc)
gg = copy.deepcopy(cc)
hh = copy.deepcopy(cc)
dd.bindWith(cc,"U")
ee.bindWith(dd,"U")
ff.bindWith(ee,"U")
gg.bindWith(ff,"U")
hh.bindWith(gg,"U")
r.setMolecule("A",a)
r.setMolecule("B",b)
r.setMolecule("Y",aa)
r.setGoal("Y",10)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setBonders([[0,6],[1,6],[2,6],[3,6]])
r.setSensor([8,6])
r.setSenseList(["C"])
r.activateDoubleOutput()
# Run at most 1999 cycles; doCycle returning True means a crash.
for i in range(1,2000):
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
| {"/mainRotate.py": ["/reactor.py", "/atom.py"], "/reactor.py": ["/waldo.py", "/atom.py"], "/main.py": ["/reactor.py", "/atom.py"], "/waldo.py": ["/reactor.py", "/atom.py"], "/atom.py": ["/reactor.py"], "/mainFuse.py": ["/reactor.py", "/atom.py"], "/mainSense.py": ["/reactor.py", "/atom.py"], "/mainSequence.py": ["/reactor.py", "/atom.py"]} |
76,002 | KertanLeGnome/SpaceChemEvol | refs/heads/master | /mainSequence.py | import reactor
import atom
import copy
def genEmptyMap():
    """Create a blank programme grid: 10 rows, each of 8 "E" cells."""
    rows = []
    for _ in range(10):
        rows.append(["E" for _ in range(8)])
    return rows
# Sequence demo: input "A" alternates between two molecules according to a
# 0/1 sequence, a sensor at [2,5] watching for "H" drives the branching.
r = reactor.Reactor()
r.setVerbose(True)
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo programme (SER0 presumably branches on the sensor -- see reactor.py).
redArrowMap[1][6] = "D"
redArrowMap[1][4] = "R"
redArrowMap[6][4] = "U"
redArrowMap[7][5] = "U"
redArrowMap[6][6] = "L"
redArrowMap[7][6] = "L"
redActionMap[4][6] = "SL"
redActionMap[2][6] = "INA"
redActionMap[1][6] = "GR"
redActionMap[1][5] = "SER0"
redActionMap[1][4] = "GD"
redActionMap[2][4] = "SY"
redActionMap[6][6] = "OUTY"
redActionMap[7][6] = "GD"
# Blue waldo programme.
blueArrowMap[1][7] = "D"
blueArrowMap[1][3] = "R"
blueArrowMap[2][3] = "U"
blueArrowMap[2][6] = "R"
blueArrowMap[7][6] = "U"
blueArrowMap[7][7] = "L"
blueActionMap[4][7] = "SL"
blueActionMap[3][7] = "SY"
blueActionMap[1][4] = "GD"
blueActionMap[1][3] = "SY"
blueActionMap[2][4] = "B+"
blueActionMap[2][6] = "B+"
blueActionMap[7][6] = "GD"
blueActionMap[6][7] = "OUTY"
# Input "A" has two variants: a lone H, and an H-H pair; the sequence below
# picks which one arrives each time.  Target output is the H-H pair.
a = atom.Atom("H",False)
a.setLocation(1,6)
b = atom.Atom("H",False)
b.setLocation(1,6)
bb = atom.Atom("H",False)
bb.setLocation(2,6)
b.bindWith(bb,"R")
r.addMolecule("A",a)
r.addMolecule("A",b)
r.setMolecule("Y",copy.deepcopy(b))
r.setSequence("A", [1,1,1,0,0,0,1,0,1,0,0,1,1,0,1,0,1,0,1,0,1,1,0,0,1,0,1])
r.setGoal("Y",20)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setBonders([[1,4],[2,4]])
r.setSensor([2,5])
r.setSenseList(["H"])
# Run at most 999 cycles; doCycle returning True means a crash.
for i in range(1,1000):
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
| {"/mainRotate.py": ["/reactor.py", "/atom.py"], "/reactor.py": ["/waldo.py", "/atom.py"], "/main.py": ["/reactor.py", "/atom.py"], "/waldo.py": ["/reactor.py", "/atom.py"], "/atom.py": ["/reactor.py"], "/mainFuse.py": ["/reactor.py", "/atom.py"], "/mainSense.py": ["/reactor.py", "/atom.py"], "/mainSequence.py": ["/reactor.py", "/atom.py"]} |
76,007 | Maralar13/Astucia_Nava | refs/heads/master | /db/usuario_db.py | from typing import Dict
from pydantic import BaseModel
class usuarioInDB(BaseModel):
    """Stored user record: name plus best and most recent score.

    NOTE(review): main.py compares `usuario_in_db.password`, but this model
    has no password field, so authenticating an existing user raises
    AttributeError -- confirm whether a password field was intended.
    """
    nombre: str          # user name (also the key in database_usuario)
    mejorpuntaje: int    # best score
    ultimopuntaje: int   # most recent score


# main.py imports the PascalCase name `UsuarioInDB`; without this alias that
# import fails.  Kept as an alias so existing lowercase references still work.
UsuarioInDB = usuarioInDB
# In-memory user store keyed by user name, seeded with two demo users.
# (The original first assigned the typing generic `Dict[str, usuarioInDB]`
# to this name and then overwrote it -- now a proper annotation.)
database_usuario: Dict[str, usuarioInDB] = {
    "Sebastian": usuarioInDB(nombre="Sebastian",
                             mejorpuntaje=1000,
                             ultimopuntaje=10),
    # BUGFIX: the seed data spelled "mejopuntaje", so pydantic raised a
    # ValidationError (required field `mejorpuntaje` missing) at import time.
    "Sofia": usuarioInDB(nombre="Sofia",
                         mejorpuntaje=40,
                         ultimopuntaje=20),
}
def get_usuario(nombre: str):
    """Look up a stored user by name; returns None for unknown names."""
    return database_usuario.get(nombre)
def update_usuario(usuario_in_db: usuarioInDB):
    """Insert or overwrite the record stored under its user name."""
    database_usuario.update({usuario_in_db.nombre: usuario_in_db})
    return usuario_in_db
| {"/main.py": ["/db/usuario_db.py", "/db/partida_db.py"]} |
76,008 | Maralar13/Astucia_Nava | refs/heads/master | /db/partida_db.py | from datetime import datetime
import random

from pydantic import BaseModel, Field
class PartidaInd(BaseModel):
    """A battleship match: two 12x12 boards serialized as 144-char strings
    ('0' = empty/unknown; ship codes are documented at the bottom of this
    module)."""
    id_partida: int = 0  # assigned by save_partida from the module id generator
    nombre: str          # owner's user name
    tableroenemigo: str = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    tableropropio: str = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    # BUGFIX: a plain `= random.choice(...)` default is evaluated once when
    # the class is created, so every match in a server run started with the
    # same turn; default_factory draws a fresh coin flip per match.
    turnopropio: bool = Field(default_factory=lambda: random.choice([True, False]))
    fin: bool = False


# main.py imports `PartidaInDB` and the functions below annotate
# `partidaInDB`; both names were undefined.  Keep them as aliases.
PartidaInDB = PartidaInd
partidaInDB = PartidaInd
database_partidas = []
generator = {"id":0}
def save_partida(partida_in_db: PartidaInd):
    """Assign the next unique id to the match and append it to the store.

    BUGFIX: the annotation named `partidaInDB`, which is undefined in this
    module and made importing it raise NameError.
    """
    generator["id"] = generator["id"] + 1  # hand out the next match id
    partida_in_db.id_partida = generator["id"]
    database_partidas.append(partida_in_db)
    return partida_in_db
def update_partida(partida_in_db: PartidaInd):
    """Replace the stored match with the same id_partida (append if unseen).

    BUGFIX: the original indexed `database_partida[...]` -- an undefined
    name -- and treated the list store as a dict keyed by nombre.  This
    version keeps the list-based store consistent with save_partida.
    """
    for i, stored in enumerate(database_partidas):
        if stored.id_partida == partida_in_db.id_partida:
            database_partidas[i] = partida_in_db
            break
    else:
        # Not found: keep the update rather than dropping it silently.
        database_partidas.append(partida_in_db)
    return partida_in_db
#Tablero 12X12
#4 tipos de barcos
#(1)portaviones 4
#(2)buque =3
# (3)submarino = 3
#(4)galeron =3
#(5(pesquero =2
# tablero_mio = "000000000"
# tablero_enemigo =
#Convenciones
# (1)portaviones
# (2)buque
# (3)submarino
# (4)galeron
# (5(pesquero
# (6)ataque perdido
# (7)pesquero
| {"/main.py": ["/db/usuario_db.py", "/db/partida_db.py"]} |
76,009 | Maralar13/Astucia_Nava | refs/heads/master | /main.py | from db.usuario_db import UsuarioInDB
from db.usuario_db import update_usuario, get_usuario
from db.partida_db import PartidaInDB
from db.partida_db import save_partida, update_partida
from models.usuario_models import UsuarioIn, UsuarioOut
from models.partida_models import PartidaIn, PartidaOut
from fastapi import FastAPI
from fastapi import HTTPException
api = FastAPI() ## create the FastAPI application
##########################################################
# CORS setup: allow the listed front-end origins to call this API from a
# browser (credentials, all methods and headers permitted).
from fastapi.middleware.cors import CORSMiddleware
origins = [
    "http://localhost.tiangolo.com", "https://localhost.tiangolo.com",
    "http://localhost", "http://localhost:8080","https://cajero-app13.herokuapp.com/" # TODO update with the real heroku link
]
api.add_middleware(
    CORSMiddleware, allow_origins=origins,
    allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
)
##############################################################
@api.post("/usuario/auth/")
async def auth_usuario(usuario_in: UsuarioIn):
    """Authenticate a user by name/password.

    NOTE(review): unknown names are auto-accepted (account creation is
    still in development), and the stored model (usuarioInDB) has no
    `password` attribute, so the comparison below raises AttributeError
    for known users -- confirm the intended model/contract.
    """
    usuario_in_db = get_usuario(usuario_in.nombre)
    if usuario_in_db == None:
        return {"Autenticado": True }## account creation flow still in development
    if usuario_in_db.password != usuario_in.password:
        return {"Autenticado": False}
    return {"Autenticado": True}
"""
@api.get("/usuario/balance/{usuarioname}")
async def get_balance(usuarioname: str):
usuario_in_db = get_usuario(usuarioname)
if usuario_in_db == None:
raise HTTPException(status_code=404,
detail="El usuario no existe")
usuario_out = usuarioOut(**usuario_in_db.dict())## el ** es mapear
return usuario_out
@api.put("/user/partida/")
async def make_partida(partida_in: partidaIn):
user_in_db = get_user(partida_in.username)
if user_in_db == None:
raise HTTPException(status_code=404,
detail="El usuario no existe")
if user_in_db.balance < partida_in.value:
raise HTTPException(status_code=400,
detail="Sin fondos suficientes")
user_in_db.balance = user_in_db.balance - partida_in.value
update_user(user_in_db)
partida_in_db = partidaInDB(**partida_in.dict(),
actual_balance = user_in_db.balance)
partida_in_db = save_partida(partida_in_db)
partida_out = partidaOut(**partida_in_db.dict())
return partida_out
## verificar con Postman
## python -m uvicorn main:api --reload
"""
| {"/main.py": ["/db/usuario_db.py", "/db/partida_db.py"]} |
76,011 | dudongtai2002/CS542-Final-report | refs/heads/master | /main.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 7 15:54:40 2016

@author: shengx, Zhifan Yin, Dongtai Du

Trains a 4-input convolutional network that maps four reference glyphs of a
font to a fifth glyph in the same style, then renders test predictions and
appends the run's costs to a results log.
"""
#%% Load Data
import os.path
import numpy as np
import theano
from theano import tensor as T
from Font import *
from utility import *
from NeuralNets import *
basis_size = 36
font_dir = 'Fonts'
####################### adjustable parameters:
input_letter = ['人','云','亦','陈']
output_letter = ['思']
n_train_batches = 10   # training batches
n_epochs = 30000       # training circles
batch_size = 1         # batch size.
learning_rate = 0.8    # learning rate
output_num = 4         # test font output number
##############################################
#%% compare output
lamb = 1 # neural network parameter cost, regularization
n = 0
Fonts = Font(basis_size, font_dir, input_letter, output_letter )
#%%
# Invert images (1 - x) so strokes are the "on" pixels, then flatten each
# glyph set into one row per font sample.
trainInput, trainOutput, testInput, testOutput = Fonts.getLetterSets(n_train_batches * batch_size, output_num * batch_size)
trainInput = 1 - trainInput
trainOutput = 1 - trainOutput
testInput = 1 - testInput
testOutput = 1 - testOutput
n_train = trainInput.shape[0]
n_test = testInput.shape[0]
input_size = len(input_letter) * basis_size * basis_size
output_size = len(output_letter) * basis_size * basis_size
image_size = basis_size * basis_size
trainInput = trainInput.reshape((n_train,image_size*len(input_letter)))
trainOutput = trainOutput.reshape((n_train,image_size*len(output_letter)))
testInput = testInput.reshape((n_test,image_size*len(input_letter)))
testOutput = testOutput.reshape((n_test,image_size*len(output_letter)))
trainInput, trainOutput = shared_dataset(trainInput, trainOutput)
#%% building neural networks
rng1 = np.random.RandomState(1234)
rng2 = np.random.RandomState(2345)
rng3 = np.random.RandomState(1567)
rng4 = np.random.RandomState(1124)
nkerns = [2, 2]
# allocate symbolic variables for the data
index = T.lscalar()  # index to a [mini]batch
x = T.matrix('x')
y = T.imatrix('y')
print('...building the model')
# One convolutional branch per input glyph (four branches total).
layer00_input = x[:,0:image_size].reshape((batch_size, 1, basis_size, basis_size))
layer01_input = x[:,image_size:2 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer02_input = x[:,2 * image_size:3 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer03_input = x[:,3 * image_size:4 * image_size].reshape((batch_size, 1, basis_size, basis_size))
# first convolutional layer
# image original size 36x36, filter size 3x3, filter number nkerns[0]
# after filtering, image size reduced to (36 - 3 + 1) = 34
# after max pooling, image size reduced to 34 / 2 = 17
layer00 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer00_input,
    image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
    filter_shape=(nkerns[0], 1, 3, 3),
    poolsize=(2, 2)
)
layer01 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer01_input,
    image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
    filter_shape=(nkerns[0], 1, 3, 3),
    poolsize=(2, 2)
)
layer02 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer02_input,
    image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
    filter_shape=(nkerns[0], 1, 3, 3),
    poolsize=(2, 2)
)
layer03 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer03_input,
    image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
    filter_shape=(nkerns[0], 1, 3, 3),
    poolsize=(2, 2)
)
# second convolutional layer
# input image size 17x17, filter size 4X4, filter number nkerns[1]
# after filtering, image size (17 - 4 + 1) = 14
# after max pooling, image size reduced to 14 / 2 = 7
layer10 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer00.output,
    image_shape=(batch_size, nkerns[0], 17, 17),
    filter_shape=(nkerns[1], nkerns[0], 4, 4),
    poolsize=(2, 2)
)
layer11 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer01.output,
    image_shape=(batch_size, nkerns[0], 17, 17),
    filter_shape=(nkerns[1], nkerns[0], 4, 4),
    poolsize=(2, 2)
)
layer12 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer02.output,
    image_shape=(batch_size, nkerns[0], 17, 17),
    filter_shape=(nkerns[1], nkerns[0], 4, 4),
    poolsize=(2, 2)
)
layer13 = LeNetConvPoolLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer03.output,
    image_shape=(batch_size, nkerns[0], 17, 17),
    filter_shape=(nkerns[1], nkerns[0], 4, 4),
    poolsize=(2, 2)
)
# Concatenate the four branch outputs: 4 branches * nkerns[1] * 7 * 7 features.
layer2_input = T.concatenate([layer10.output.flatten(2), layer11.output.flatten(2), layer12.output.flatten(2), layer13.output.flatten(2)],
                             axis = 1)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer2_input,
    n_in=nkerns[1] * len(input_letter) * 7 * 7,
    n_out=50,
    activation=T.nnet.sigmoid
)
layer3 = HiddenLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer2.output,
    n_in=50,
    n_out=50,
    activation=T.nnet.sigmoid
)
layer4 = HiddenLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer3.output,
    n_in=50,
    n_out=50,
    activation=T.nnet.sigmoid
)
layer5= HiddenLayer(
    np.random.RandomState(np.random.randint(10000)),
    input=layer4.output,
    n_in=50,
    n_out=50,
    activation=T.nnet.sigmoid
)
# Output layer: one logistic unit per output pixel.
layer6 = BinaryLogisticRegression(
    np.random.RandomState(np.random.randint(10000)),
    input=layer5.output,
    n_in=50,
    n_out=basis_size * basis_size,
)
params = (layer4.params
          + layer3.params
          + layer2.params
          + layer5.params
          + layer6.params
          + layer10.params + layer11.params + layer12.params + layer13.params
          + layer00.params + layer01.params + layer02.params + layer03.params)
cost = layer6.negative_log_likelihood(y) #+ lamb * theano.tensor.sum(np.sum(params)) # lamb and following term can be removed
error = ((y - layer6.y_pred)**2).sum()
# Plain SGD updates.
grads = T.grad(cost, params)
updates = [
    (param_i, param_i - learning_rate * grad_i)
    for param_i, grad_i in zip(params, grads)
]
#
#test_model = theano.function(
#    inputs = [index],
#    outputs = cost,
#    givens={
#        x: testInput[index * batch_size: (index + 1) * batch_size],
#        y: testOutput[index * batch_size: (index + 1) * batch_size]
#        }
#    )
train_model = theano.function(
    inputs = [index],
    outputs = cost,
    updates=updates,
    givens={
        x: trainInput[index * batch_size: (index + 1) * batch_size],
        y: trainOutput[index * batch_size: (index + 1) * batch_size]
        }
    )
#%% training the model
epoch = 0
while (epoch < n_epochs):
    epoch = epoch + 1
    total = 0
    for minibatch_index in range(n_train_batches):
        minibatch_avg_cost = train_model(minibatch_index)
        total += minibatch_avg_cost
        iter = (epoch - 1) * n_train_batches + minibatch_index
        #print(('	epoch %i, minibatch %i/%i.') % (epoch, minibatch_index +1, n_train_batches))
    if (epoch % 100 == 0):
        # Report mean training cost every 100 epochs.
        print(('	epoch %i') % (epoch))
        print(total/n_train_batches)
#test_losses = [test_model(i) for i in range(n_test_batches)]
#test_score = np.mean(test_losses)
#%% predict output
#    print(error)
predict_model = theano.function(
    inputs = [x,y],
    outputs = [layer4.y_pred,cost],
    on_unused_input='ignore',
    allow_input_downcast=True
    )
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
costlist = []
# Render each test sample: the four input glyphs, the target glyph and the
# network's prediction, saved under a filename that encodes the run params.
# NOTE(review): the loop below reuses the name `x` (first as the subplot
# index, then as the filename counter); harmless since predict_model is
# already compiled, but worth renaming.
for testindex in range(output_num):
    predicted_values = predict_model(testInput[testindex * batch_size:(testindex + 1) * batch_size],testOutput[testindex * batch_size:(testindex + 1) * batch_size])
    cost = predicted_values[1]
    output_img = predicted_values[0]
    output_img = output_img.reshape(batch_size,basis_size,basis_size)
    output_img = np.asarray(output_img, dtype = 'float64') /256
    plt.figure(1)
    le = len(input_letter)
    siz = 10*le + 200
    for x in range(len(input_letter)):
        plt.subplot(siz + x + 1)
        plt.imshow(testInput[testindex,x * image_size: (x+1)*image_size].reshape((basis_size,basis_size)),interpolation="nearest",cmap='Greys')
    plt.subplot(siz + le + 2)
    plt.imshow(output_img[0,:,:],interpolation="nearest",cmap='Greys')
    plt.subplot(siz + le + 1)
    plt.imshow(testOutput[testindex,:].reshape((basis_size,basis_size)),interpolation="nearest",cmap='Greys')
    x = 0
    st = 'test/c6lasfi-'+ str(learning_rate) + '-' + str(n_train_batches) + '-' + str(n_epochs) +'-'+ str(batch_size)
    while os.path.exists(st + '-' + str(x) + '.png'):
        x += 1
    plt.savefig(st + '-' + str(x)+'.png')
    plt.show()
    print('testing cost: ', cost)
    costlist += [cost]
# as: autosave, f: font.py changes, i: image display change (L -> 1)    -num in the end: the name for same parameter images.
# 6l: 6 layers in total
# c: Chinese test training
# name style: surffi -d -a -b -c -num
# -d learning rate
# -a n_train_batches
# -b n_epochs  #original: 1500
# -c batch_size   #original: 50
# BUGFIX: `textfile` was never opened anywhere in this script, so the final
# logging block raised NameError after the whole training run.  Open the
# results log (append mode) here; the intended path is not recoverable from
# the original -- TODO confirm.
textfile = open('test/results.txt', 'a')
textfile.write(st + '-' + str(x) + '\n'
               + "learning rate :" + str(learning_rate) + '\n'
               + 'test number: ' + str(output_num) +'\n'
               + 'train cost: ' + str(total / n_train_batches) + '\n'
               + 'test cost: ' + str(sum(costlist)/output_num)
               +'\n \n')
textfile.close()
| {"/main.py": ["/Font.py"], "/Distinguish_Network.py": ["/Font_DN.py"]} |
76,012 | dudongtai2002/CS542-Final-report | refs/heads/master | /Font_DN.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#%%
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import os
import os.path
import random
import numpy as np
class Font:
    """Renders letter images from a collection of .ttf fonts.

    Builds (reference letters + candidate letter, same/different) pairs for
    training the font-verification ("distinguish") network.
    """

    def __init__(self, size, root_dir, input_letter, output_letter):
        """Collect and shuffle all .ttf/.TTF files found under `root_dir`.

        size: side length in pixels of every rendered glyph image.
        input_letter: letters rendered as the reference set (e.g. B, A, S, Q).
        output_letter: letter(s) rendered as the candidate to verify (e.g. R).
        """
        self.size = size
        self.input_letter = input_letter
        self.output_letter = output_letter
        font_files = []
        for parent, dirnames, filenames in os.walk(root_dir):
            for filename in filenames:
                if filename[-3:] == 'ttf' or filename[-3:] == 'TTF':
                    font_files.append(os.path.join(parent, filename))
        # message typo fixed: "Fond" -> "Found"
        print(('Found %i font files') % (len(font_files)))
        random.shuffle(font_files)
        self.font_files = font_files

    def getSize(self):
        """Return the glyph image side length in pixels."""
        return(self.size)

    def _render_fixed_font_set(self, n_examples, output):
        """Render output[0] `n_examples` times in the fixed SimSun font.

        Returns an array of shape (n_examples, 1, size, size) with background 0
        and glyph pixels 1.  Shared implementation of getLetterSets1 and
        getLetterSets2, which previously duplicated this code verbatim.
        """
        rendered = np.zeros((n_examples, 1, self.size, self.size))
        wi = int(self.size * 0.9)   # glyph point size
        le = int(self.size * 0.05)  # top/left margin
        d_font = ImageFont.truetype("Fonts/simsun.ttf", wi)
        letter = output[0]
        for m in range(0, n_examples):
            img = Image.new('L', (self.size, self.size), 0)
            draw = ImageDraw.Draw(img)
            draw.text((le, le), letter, 1, font=d_font)
            draw = ImageDraw.Draw(img)
            rendered[m, 0, :] = np.array(img)
        return rendered

    def getLetterSets2(self, n_test_examples, output):
        """Fixed-font rendering of output[0] for testing; see _render_fixed_font_set."""
        return self._render_fixed_font_set(n_test_examples, output)

    def getLetterSets1(self, n_train_examples, output):
        """Fixed-font rendering of output[0] for training; see _render_fixed_font_set."""
        return self._render_fixed_font_set(n_train_examples, output)

    def _fill_pair_set(self, inputs, labels, n_examples):
        """Fill `inputs`/`labels` in place with same/different font pairs.

        Each example stacks len(input_letter)+1 glyph images: the reference
        letters rendered in a "base" font plus the output letter rendered in
        an "out" font.  Label is 1 when both fonts are identical, else 0.
        At most half of the set is forced to be "same" pairs.
        Shared by the (previously duplicated) train and test loops of
        getLetterSets; the sequence of `random` calls is unchanged.
        """
        wi = int(self.size * 0.9)
        le = int(self.size * 0.05)
        n_same = 0
        for i in range(0, n_examples):
            # Stop adding "same" pairs once they make up half the set.
            if(n_same > int(n_examples/2)):
                is_same = False
            else:
                is_same = random.choice([True, False])
            base_font = random.choice(self.font_files)
            if(is_same):
                out_font = base_font
                n_same += 1
            else:
                # re-draw until the "out" font genuinely differs
                out_font = random.choice(self.font_files)
                while(out_font == base_font):
                    out_font = random.choice(self.font_files)
            n = 0
            # render the reference letters (e.g. B,A,S,Q) in the base font
            for letter in self.input_letter:
                font = ImageFont.truetype(base_font, wi)
                img = Image.new('L', (self.size, self.size), 0)  # orignial: L and (1)
                draw = ImageDraw.Draw(img)
                draw.text((le, le), letter, 1, font=font)  # original: (0)
                draw = ImageDraw.Draw(img)
                inputs[i, n, :, :] = np.array(img)
                n = n + 1
            # render the candidate letter (e.g. R) in the out font.
            # NOTE(review): colours here are inverted relative to the input
            # channels (background 1, glyph 0), and `n` is not incremented in
            # this loop, so multiple output letters would overwrite channel n.
            for letter in self.output_letter:
                font = ImageFont.truetype(out_font, self.size)
                img = Image.new('L', (self.size, self.size), (1))
                draw = ImageDraw.Draw(img)
                draw.text((le, le), letter, (0), font=font)
                draw = ImageDraw.Draw(img)
                inputs[i, n, :, :] = np.array(img)
            # final boolean label: 1 for same font, 0 for different
            if(is_same):
                labels[i] = 1
            else:
                labels[i] = 0

    def getLetterSets(self, n_train_examples, n_test_examples):
        """Build same/different-font training and test sets.

        Returns (train_input, train_output, test_input, test_output) where the
        input arrays have shape (n, len(input_letter)+1, size, size) and the
        output arrays hold the binary same-font labels.
        """
        train_input = np.zeros((n_train_examples, len(self.input_letter)+1, self.size, self.size))
        train_output = np.zeros((n_train_examples))
        test_input = np.zeros((n_test_examples, len(self.input_letter)+1, self.size, self.size))
        test_output = np.zeros((n_test_examples))
        # training data: (len(input_letter)+1) x size x size images -> true/false
        self._fill_pair_set(train_input, train_output, n_train_examples)
        # testing data, the same format
        self._fill_pair_set(test_input, test_output, n_test_examples)
        return (train_input, train_output, test_input, test_output)
76,013 | dudongtai2002/CS542-Final-report | refs/heads/master | /Font.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#%%
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import os
import os.path
import random
import numpy as np
class Font:
    """Renders letter images from a collection of .ttf fonts.

    Produces per-font (input letters, output letters) image pairs for the
    glyph-generation network: one example per font file.
    """

    def __init__(self, size, root_dir, input_letter, output_letter):
        """Collect and shuffle all .ttf/.TTF files found under `root_dir`.

        size: side length in pixels of every rendered glyph image.
        input_letter: letters used as the network input (one image channel each).
        output_letter: letters the network should predict.
        """
        self.size = size
        self.input_letter = input_letter
        self.output_letter = output_letter
        font_files = []
        for parent, dirnames, filenames in os.walk(root_dir):
            for filename in filenames:
                if filename[-3:] == 'ttf' or filename[-3:] == 'TTF':
                    font_files.append(os.path.join(parent, filename))
        # message typo fixed: "Fond" -> "Found"
        print(('Found %i font files') % (len(font_files)))
        random.shuffle(font_files)
        self.font_files = font_files

    def getSize(self):
        """Return the glyph image side length in pixels."""
        return(self.size)

    def getLetterSets(self, n_train_examples, n_test_examples):
        """Render training and test sets, one example per font file.

        Returns (train_input, train_output, test_input, test_output), each of
        shape (m, n_letters, size, size) where m <= requested count: fonts
        whose rendering raises are skipped and the arrays are trimmed to the
        number of successfully rendered fonts.

        NOTE(review): training inputs are rendered in mode '1' with background
        0 / glyph 1, but test inputs use mode 'L' with background 1 / glyph 0
        (like the outputs).  If unintentional, train and test inputs have
        inverted polarity — confirm against the training script.
        """
        train_input = np.zeros((n_train_examples, len(self.input_letter), self.size, self.size))
        train_output = np.zeros((n_train_examples, len(self.output_letter), self.size, self.size))
        test_input = np.zeros((n_test_examples, len(self.input_letter), self.size, self.size))
        test_output = np.zeros((n_test_examples, len(self.output_letter), self.size, self.size))
        wi = int(self.size * 0.9)   # glyph point size for the inputs
        le = int(self.size * 0.05)  # top/left margin
        m = 0  # count of successfully rendered fonts
        for font_file in self.font_files[0:n_train_examples]:
            try:
                n = 0
                for letter in self.input_letter:
                    font = ImageFont.truetype(font_file, wi)
                    img = Image.new('1', (self.size, self.size), 0)  # orignial: L and (1)
                    draw = ImageDraw.Draw(img)
                    draw.text((le, le), letter, 1, font=font)  # original: (0)
                    draw = ImageDraw.Draw(img)
                    train_input[m, n, :, :] = np.array(img)
                    n = n + 1
                n = 0
                for letter in self.output_letter:
                    font = ImageFont.truetype(font_file, self.size)
                    img = Image.new('L', (self.size, self.size), (1))
                    draw = ImageDraw.Draw(img)
                    draw.text((le, le), letter, (0), font=font)
                    draw = ImageDraw.Draw(img)
                    train_output[m, n, :, :] = np.array(img)
                    n = n + 1
            except Exception:  # was a bare except; keep best-effort skip of broken fonts
                print("One ttf was broken")
                continue
            m = m + 1
        # trim to the fonts that actually rendered
        train_input = train_input[0:m, :, :, :]
        train_output = train_output[0:m, :, :, :]
        m = 0
        for font_file in self.font_files[n_train_examples:n_train_examples + n_test_examples]:
            try:
                n = 0
                for letter in self.input_letter:
                    font = ImageFont.truetype(font_file, self.size)
                    img = Image.new('L', (self.size, self.size), (1))
                    draw = ImageDraw.Draw(img)
                    draw.text((le, le), letter, (0), font=font)
                    draw = ImageDraw.Draw(img)
                    test_input[m, n, :, :] = np.array(img)
                    n = n + 1
                n = 0
                for letter in self.output_letter:
                    font = ImageFont.truetype(font_file, self.size)
                    img = Image.new('L', (self.size, self.size), (1))
                    draw = ImageDraw.Draw(img)
                    draw.text((le, le), letter, (0), font=font)
                    draw = ImageDraw.Draw(img)
                    test_output[m, n, :, :] = np.array(img)
                    n = n + 1
            except Exception:  # was a bare except; broken test fonts are skipped silently
                continue
            m = m + 1
        test_input = test_input[0:m, :, :, :]
        test_output = test_output[0:m, :, :, :]
        return (train_input, train_output, test_input, test_output)
76,014 | dudongtai2002/CS542-Final-report | refs/heads/master | /Distinguish_Network.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 12:26:06 2016
@author: Dongtai Du
Main function
"""
# -*- coding: utf-8 -*-
#%% Load Data
import os.path
import numpy as np
import theano
from theano import tensor as T
from Font_DN import *
from utility import *
from NeuralNets import *
# --- Experiment configuration -------------------------------------------
basis_size = 36  # glyph images are basis_size x basis_size pixels
font_dir = 'Fonts'  # directory scanned recursively for .ttf files
input_letter = ['B','A','S','Q']  # reference glyphs rendered per font
output_letter = ['R']  # candidate glyph whose font is to be verified
lamb1 = 0.01 # neural network parameter cost, regularization
lamb2 = 0.01
n_train_batches = 20
n_epochs = 1 #original:1500
batch_size = 1
learning_rate = 1 # learning rate, when using 0.02, less than 200000 epoches will not work.
output_num = 3 # test font output number
total_layer = 4 # writing def in loop is complicated, this parameter is not used
# --- Data generation ----------------------------------------------------
# One example per font: the reference glyphs plus the candidate glyph and a
# binary same-font label (see Font_DN.Font.getLetterSets).
Fonts = Font(basis_size, font_dir, input_letter, output_letter )
[trainInput, trainOutput, testInput, testOutput] = Fonts.getLetterSets(n_train_batches * batch_size, output_num * batch_size)
# invert colours: glyph pixels become 0, background becomes 1
trainInput = 1 - trainInput
testInput = 1 - testInput
n_train = trainInput.shape[0]
n_test = testInput.shape[0]
# NOTE(review): input_size is defined but not used below; image_size is.
input_size = len(input_letter) * basis_size * basis_size
image_size = basis_size * basis_size
# flatten each example to a single row of 5 concatenated glyph images
trainInput = trainInput.reshape((n_train,image_size*(len(input_letter)+len(output_letter))))
trainOutput = trainOutput.reshape((n_train,1))
testInput = testInput.reshape((n_test,image_size*(len(input_letter)+len(output_letter))))
testOutput = testOutput.reshape((n_test,1))
# move training data into theano shared variables (GPU-resident)
[trainInput,trainOutput] = shared_dataset(trainInput,trainOutput)
###trainInput:BASQR
###trainOutput:0,1,1,0,.....
#%% building neural networks
# NOTE(review): rng1..rng4 are never passed to any layer below — every layer
# builds its own RandomState from np.random.randint(10000); presumably leftovers.
rng1 = np.random.RandomState(1234)
rng2 = np.random.RandomState(2345)
rng3 = np.random.RandomState(1567)
rng4 = np.random.RandomState(1124)
nkerns = [2, 2]  # number of kernels in the first / second conv stage
f1=3;   # first-stage filter size (3x3)
f2=4;   # second-stage filter size (4x4)
max1=(2,2)  # first-stage max-pooling factor
max2=(2,2)  # second-stage max-pooling factor
# Feature-map side lengths after each conv+pool stage: (36-3+1)/2 = 17 and
# (17-4+1)/2 = 7.  NOTE(review): on Python 3 "/" is true division, so these
# are floats (17.0, 7.0) fed into image_shape — presumably "//" was intended;
# TODO confirm which Python version this was run under.
f2_size=(36-f1+1)/max1[0]
f3_size=(f2_size-f2+1)/max2[0]
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x')   # flattened BASQR glyph images, one row per sample
y = T.imatrix('y')  # same/different label, shape (batch_size, 1)
print('...building the model')
# Split the flat input row back into the five glyph images (B, A, S, Q, R),
# one convolutional branch per glyph.
layer00_input = x[:,0 * image_size:1 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer01_input = x[:,1 * image_size:2 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer02_input = x[:,2 * image_size:3 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer03_input = x[:,3 * image_size:4 * image_size].reshape((batch_size, 1, basis_size, basis_size))
layer04_input = x[:,4 * image_size:5 * image_size].reshape((batch_size, 1, basis_size, basis_size))
# first convolutional layer
# image original size 36*36, filter size 3X3, filter number nkerns[0]
# after filtering, image size reduced to (36 - 3 + 1) = 34
# after max pooling, image size reduced to 34 / 2 = 17
layer00 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer00_input,
        image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
        filter_shape=(nkerns[0], 1, f1,f1),
        poolsize=max1
)
layer01 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer01_input,
        image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
        filter_shape=(nkerns[0], 1, f1, f1),
        poolsize=max1
)
layer02 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer02_input,
        image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
        filter_shape=(nkerns[0], 1, f1, f1),
        poolsize=max1
)
layer03 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer03_input,
        image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
        filter_shape=(nkerns[0], 1, f1, f1),
        poolsize=max1
)
layer04 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer04_input,
        image_shape=(batch_size, 1, basis_size, basis_size), # input image shape
        filter_shape=(nkerns[0], 1, f1, f1),
        poolsize=max1
)
# second convolutional layer
# input image size 17X17, filter size 4X4, filter number nkerns[1]
# after filtering, image size (17 - 4 + 1) = 14
# after max pooling, image size reduced to 14 / 2 = 7
layer10 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer00.output,
        image_shape=(batch_size, nkerns[0], f2_size, f2_size),
        filter_shape=(nkerns[1], nkerns[0], f2, f2),
        poolsize=max2
)
layer11 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer01.output,
        image_shape=(batch_size, nkerns[0], f2_size, f2_size),
        filter_shape=(nkerns[1], nkerns[0], f2, f2),
        poolsize=max2
)
layer12 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer02.output,
        image_shape=(batch_size, nkerns[0], f2_size, f2_size),
        filter_shape=(nkerns[1], nkerns[0], f2, f2),
        poolsize=max2
)
layer13 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer03.output,
        image_shape=(batch_size, nkerns[0], f2_size, f2_size),
        filter_shape=(nkerns[1], nkerns[0], f2, f2),
        poolsize=max2
)
layer14 = LeNetConvPoolLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer04.output,
        image_shape=(batch_size, nkerns[0], f2_size, f2_size),
        filter_shape=(nkerns[1], nkerns[0], f2, f2),
        poolsize=max2
)
# layer 2 input size = 2 * 4 * 7 * 7 = 392
# Concatenate the five conv branches into one flat feature vector per sample.
layer2_input = T.concatenate([layer10.output.flatten(2), layer11.output.flatten(2), layer12.output.flatten(2), layer13.output.flatten(2)
                              ,layer14.output.flatten(2)],
                             axis = 1)
# construct a fully-connected sigmoidal layer
# n_in = 2 kernels * 5 branches * 7 * 7 = 490 features
layer2 = HiddenLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer2_input,
        n_in=nkerns[1] * (len(input_letter)+1) * 7 * 7,
        n_out=50,
        activation=T.nnet.sigmoid
)
layer3 = HiddenLayer(
        np.random.RandomState(np.random.randint(10000)),
        input=layer2.output,
        n_in=50,
        n_out=50,
        activation=T.nnet.sigmoid
)
# Output layer: a single logistic unit — probability that the candidate
# glyph is rendered in the same font as the references.
layer4 = BinaryLogisticRegression(
        np.random.RandomState(np.random.randint(10000)),
        input=layer3.output,
        n_in=50,
        n_out=1
)
params = (
    layer3.params
    + layer2.params
    + layer4.params
    + layer10.params + layer11.params + layer12.params + layer13.params+layer14.params
    + layer00.params + layer01.params + layer02.params + layer03.params+layer04.params)
#cost function with regularization term
# NOTE(review): only params[0]/params[1] (layer3's W and b, given the order
# above) are L2-regularised, not the whole parameter list.
cost = layer4.negative_log_likelihood(y)+ lamb1 * ((params[0])**2).sum() + lamb2 * ((params[1])**2).sum()
error = ((y - layer4.y_pred)**2).sum()
#compute the gradient and apply gradient descent
grads = T.grad(cost, params)
updates = [
    (param_i, param_i - learning_rate * grad_i)
    for param_i, grad_i in zip(params, grads)
]
# Compiled training step: one minibatch (selected by `index`) from the shared
# train arrays; returns the cost and applies the SGD updates.
train_model = theano.function(
        inputs = [index],
        outputs = cost,
        updates=updates,
        givens={
            x: trainInput[index * batch_size: (index + 1) * batch_size],
            y:trainOutput[index * batch_size: (index + 1) * batch_size]
        }
    )
#%% training the model
epoch = 0
costlist = []  # per-epoch total training cost, plotted at the end
while (epoch < n_epochs):
    epoch = epoch + 1
    total = 0  # accumulated cost over this epoch's minibatches
    for minibatch_index in range(n_train_batches):
        minibatch_avg_cost = train_model(minibatch_index)
        total += minibatch_avg_cost
        # NOTE(review): `iter` is computed but never used (and shadows the builtin).
        iter = (epoch - 1) * n_train_batches + minibatch_index
    if (epoch % 100 == 0):
        print((' epoch %i') % (epoch))
    print(total)
    costlist += [total]
#%% predict output & print(error)
# Compiled inference function: returns the predicted same-font label and the
# cost for a given (input, label) batch.
predict_model = theano.function(
    inputs = [x,y],
    outputs = [layer4.y_pred,cost],
    on_unused_input='ignore',
    allow_input_downcast=True
    )
# create two txt files to store the result
# NOTE(review): these file handles shadow the filter sizes f1/f2 defined
# above — harmless here since the model is already built, but confusing.
f1=open('output.txt','w+')
f1.close()
f2=open('real.txt','w+')
f2.close()
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
for testindex in range(output_num):
    # predicted label + cost for one test batch
    predicted_values = predict_model(testInput[testindex * batch_size:(testindex + 1) * batch_size],
                                     testOutput[testindex * batch_size:(testindex + 1) * batch_size])
    output_img = predicted_values[0]
    output_img = output_img.reshape((batch_size))
    # append predictions and ground truth to the two result files
    with open('output.txt','a') as myfile:
        myfile.write(np.array_str(output_img))
    with open('real.txt','a') as myfile:
        myfile.write(np.array_str(testOutput[testindex * batch_size:(testindex + 1) * batch_size]))
    test_cost = predicted_values[1]
    print(test_cost)
# The block below is disabled plotting/logging code kept from the glyph
# generation experiment (note it references `st`, `x` etc. that are not
# defined in this script).
"""
plt.figure(1)
le = len(input_letter)
siz = 10*le + 200
for x in range(len(input_letter)):
    plt.subplot(siz + x + 1)
    plt.imshow(testInput[testindex,x * image_size: (x+1)*image_size].reshape((basis_size,basis_size)),interpolation="nearest",cmap='Greys')
plt.subplot(siz + le + 2)
plt.imshow(output_img[0,:,:],interpolation="nearest",cmap='Greys')
plt.subplot(siz + le + 1)
plt.imshow(testOutput[testindex,:].reshape((basis_size,basis_size)),interpolation="nearest",cmap='Greys')
x = 0
st = 'test/c6lasfil-'+ str(learning_rate) + '-' + str(lamb1) + '-' + str(lamb2) + '-'+ str(n_train_batches) + '-' + str(n_epochs) +'-'+ str(batch_size)
while os.path.exists(st + '-' + str(x) + '.png'):
    x += 1
plt.savefig(st + '-' + str(x) +'.png')
plt.show()
# c: Chinese test training
# 6l: 6 layers in total
# as: autosave, f: font.py changes, i: image display change (L -> 1)
# l: lambda
# name style: surffi -d -a -b -c -num
#  -d learning rate
#  -a n_train_batches
#  -b n_epochs  #original: 1500
#  -c batch_size  #original: 50
#  -num in the end: the name for same parameter images.
fig, ax = plt.subplots( nrows=1, ncols=1 )
ax.plot(costlist)
fig.savefig(st + '-' + str(x) + 'cost_graph.png')
plt.close(fig)
textfile = open('paramrecord', 'a')
textfile.write(st + '-' + str(x) + '\n'
               + "learning rate :" + str(learning_rate) + '\n'
               + 'test number: ' + str(output_num) +'\n'
               + 'lambda: ' + str(lamb1) + '/' + str(lamb2) + '\n'
               + str(total) +'\n \n')
textfile.close()
"""
| {"/main.py": ["/Font.py"], "/Distinguish_Network.py": ["/Font_DN.py"]} |
76,018 | HongshengHu/unlearning-verification | refs/heads/master | /backdoor_injection.py | # written by David Sommer (david.sommer at inf.ethz.ch) in 2020
# additions by Liwei Song
import os
import sys
import subprocess
import pandas as pd
import argparse
import tensorflow as tf
# Pick an idle GPU by parsing `nvidia-smi` output: the shell pipeline extracts
# the device ids of GPUs reporting 0% utilisation.  Must run before TF/keras
# is imported so CUDA_VISIBLE_DEVICES takes effect.
available_gpus = subprocess.check_output('nvidia-smi | grep "0%" -B1 | cut -d" " -f4 | grep -v -e "--" | sed "/^$/d"', shell=True).split(b'\n')[:-1 ]
# fail fast when no GPU is idle
assert len(available_gpus) >= 1
USE_GPU=available_gpus[-1].decode("utf-8")
os.environ['CUDA_DEVICE_ORDER']="PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES']=USE_GPU
# Enable on-demand GPU memory growth instead of grabbing all memory upfront.
gpu = tf.config.experimental.list_physical_devices('GPU')
if gpu:
    try:
        tf.config.experimental.set_memory_growth(gpu[0], True)
    except RuntimeError as e:
        # memory growth must be set before GPUs are initialised
        print(e)
from models import get_model_dense_3layer_input784_adam_crossentropy
from models import get_model_cnn_4_input786_adam_crossentropy
from models import get_model_resnet1_inputcifar10_adam_crossentropy
from models import get_model_LSTM_agnews_adam_crossentropy
from poisoned_dataset.poisoned_dataset_pixel_based import PoisonedDataset_EMNIST_DIGITS_AllPixelSpot
from poisoned_dataset.poisoned_dataset_pixel_based import PoisonedDataset_FEMNIST_DIGITS_AllPixelSpot
from poisoned_dataset.poisoned_dataset_pixel_based import PoisonedDataset_CIFAR10_AllPixelSpot
from poisoned_dataset.poisoned_dataset_word_based import PoisonedDataset_AGNews4_Last15
from train import get_poisoned_accuracies, clear_memory_from_keras
# All experiment result CSVs are written next to this script, under backdoor_results/.
RESULTS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "backdoor_results")
class DataHandler:
    """Accumulates experiment result rows in a DataFrame and persists them as CSV."""

    # Column order of one result row; `append` relies on this ordering.
    data_column_names = [ 'ratio_anticipated',
                          'ratio_achieved',
                          'number_of_authors',
                          'test_general_acc',
                          'test_poison_acc',
                          'test_unpoison_acc',
                          'ex_general_acc',
                          'ex_poison_acc',
                          'ex_unpoison_acc']

    filename = None  # default CSV target used by to_disk()

    def __init__(self, filename=None):
        """Load existing results from `filename` if present, else start empty.

        Also ensures the results directory exists.
        """
        try:
            os.makedirs(RESULTS_DIRECTORY)
        except FileExistsError:
            pass
        try:
            self.df = pd.read_csv(filename, index_col=False)
        except FileNotFoundError:
            self.df = pd.DataFrame(columns=self.data_column_names)
        self.filename = filename

    def to_disk(self, filename=None):
        """Write the accumulated results to `filename` (or the instance default)."""
        filename = filename if filename else self.filename
        self.df.to_csv(filename, index=False, index_label=False)

    def append(self, numbers):
        """Append one result row; `numbers` is ordered like `data_column_names`."""
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # drop-in equivalent of append(..., ignore_index=True).
        row = pd.DataFrame([dict(zip(self.data_column_names, numbers))])
        self.df = pd.concat([self.df, row], ignore_index=True)

    def extend(self, dh):
        """Append all rows of another DataHandler (indices kept, as before)."""
        self.df = pd.concat([self.df, dh.df])
class GenerateData():
    """Base class for one experiment family: runs rounds and stores results."""

    dh = None  # DataHandler collecting all result rows produced so far

    def __init__(self):
        pass

    def get_result_name(self):
        """Result CSV path: class name plus the GPU id currently in use."""
        csv_name = f"{self.__class__.__name__}{USE_GPU:s}.csv"
        return os.path.join(RESULTS_DIRECTORY, csv_name)

    def run(self, number_of_rounds):
        """Execute `number_of_rounds` rounds of `one_round`, persisting results."""
        self.dh = DataHandler(self.get_result_name())
        for _ in range(number_of_rounds):
            self.one_round()
            print(self.dh.df)
            clear_memory_from_keras()
        # flush once more in case one_round did not save its latest rows
        self.dh.to_disk()
class GenerateData_EMNIST(GenerateData):
    """EMNIST digits + 3-layer dense network backdoor experiment."""

    def one_round(self):
        """Train and evaluate once per (author count, poison ratio) combination."""
        author_counts = [1000]
        ratios = [0, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        model_func = get_model_dense_3layer_input784_adam_crossentropy

        def make_dataset(n_authors, poison_ratio):
            return PoisonedDataset_EMNIST_DIGITS_AllPixelSpot(
                number_of_authors=n_authors,
                number_of_pixels=4,
                poisoned_ratio=poison_ratio,
                backdoor_value=1,
                initial_shuffle=True,
                seed=None,  # let the system decide the randomness
            )

        for n_authors in author_counts:
            for poison_ratio in ratios:
                dataset = make_dataset(n_authors, poison_ratio)
                accs = get_poisoned_accuracies(model_func, dataset, exclude_authors=True)
                row = [poison_ratio, dataset.get_poisoned_ratio(), n_authors] + list(accs)
                self.dh.append(row)
                self.dh.to_disk()  # save intermediate state after every configuration
class GenerateData_FEMNIST(GenerateData):
    """FEMNIST digits + 4-layer CNN backdoor experiment (all 3383 writers)."""

    def one_round(self):
        """Train and evaluate once per poison ratio, using every FEMNIST writer."""
        n_authors = 3383  # all users available
        ratios = [0, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        model_func = get_model_cnn_4_input786_adam_crossentropy

        def make_dataset(poison_ratio):
            return PoisonedDataset_FEMNIST_DIGITS_AllPixelSpot(
                number_of_authors=n_authors,
                number_of_pixels=4,
                poisoned_ratio=poison_ratio,
                backdoor_value=0,
                initial_shuffle=True,
                seed=None,  # let the system decide the randomness
                only_digits=True
            )

        for poison_ratio in ratios:
            dataset = make_dataset(poison_ratio)
            accs = get_poisoned_accuracies(model_func, dataset, exclude_authors=True)
            row = [poison_ratio, dataset.get_poisoned_ratio(), n_authors] + list(accs)
            self.dh.append(row)
            self.dh.to_disk()  # save intermediate state after every configuration
class GenerateData_cifar10(GenerateData):
    """CIFAR-10 + ResNet backdoor experiment (500 synthetic authors)."""

    def one_round(self):
        """Train and evaluate once per poison ratio."""
        n_authors = 500  # all users available
        ratios = [0, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        model_func = get_model_resnet1_inputcifar10_adam_crossentropy

        def make_dataset(poison_ratio):
            return PoisonedDataset_CIFAR10_AllPixelSpot(
                number_of_authors=n_authors,
                number_of_pixels=4,
                poisoned_ratio=poison_ratio,
                backdoor_value=1,
                initial_shuffle=True,
                seed=None,  # let the system decide the randomness
            )

        for poison_ratio in ratios:
            dataset = make_dataset(poison_ratio)
            accs = get_poisoned_accuracies(model_func, dataset, exclude_authors=True)
            row = [poison_ratio, dataset.get_poisoned_ratio(), n_authors] + list(accs)
            self.dh.append(row)
            self.dh.to_disk()  # save intermediate state after every configuration
class GenerateData_AGNews(GenerateData):
    """AG News + LSTM backdoor experiment (580 most frequent news sources)."""

    def one_round(self):
        """Train and evaluate once per poison ratio."""
        n_authors = 580  # all users available
        ratios = [0, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        model_func = get_model_LSTM_agnews_adam_crossentropy

        def make_dataset(poison_ratio):
            return PoisonedDataset_AGNews4_Last15(
                number_of_authors=n_authors,
                number_of_words=4,
                poisoned_ratio=poison_ratio,
                initial_shuffle=True,
                seed=None,  # let the system decide the randomness
            )

        for poison_ratio in ratios:
            dataset = make_dataset(poison_ratio)
            accs = get_poisoned_accuracies(model_func, dataset, exclude_authors=True)
            row = [poison_ratio, dataset.get_poisoned_ratio(), n_authors] + list(accs)
            self.dh.append(row)
            self.dh.to_disk()  # save intermediate state after every configuration
if __name__ == "__main__":
    cli = argparse.ArgumentParser(description='Run experiments')
    which = cli.add_mutually_exclusive_group(required=True)
    which.add_argument('--agnews', action='store_true', help='Runs agnews+LSTM experiment')
    which.add_argument('--cifar10', action='store_true', help='Runs cifar10+CNN experiment')
    which.add_argument('--femnist', action='store_true', help='Runs FEMNIST+resnet experiment')
    which.add_argument('--emnist', action='store_true', help='Runs EMNISTT+NN experiment')
    cli.add_argument('-r', '--rounds', type=int, help='rounds to run', required=True)
    args = cli.parse_args()

    # Exactly one dataset flag is set (mutually exclusive, required).
    if args.emnist:
        experiment = GenerateData_EMNIST()
    elif args.femnist:
        experiment = GenerateData_FEMNIST()
    elif args.cifar10:
        experiment = GenerateData_cifar10()
    elif args.agnews:
        experiment = GenerateData_AGNews()
    else:
        sys.exit(3)
    experiment.run(args.rounds)
| {"/backdoor_injection.py": ["/poisoned_dataset/poisoned_dataset_word_based.py", "/train.py"]} |
76,019 | HongshengHu/unlearning-verification | refs/heads/master | /poisoned_dataset/poisoned_dataset_word_based.py | # written by David Sommer (david.sommer at inf.ethz.ch) in 2020
# additions by Liwei Song
import os
import gc
import re
import numpy as np
import pandas as pd
import pickle
import codecs
import urllib
import nltk
from sklearn.utils import shuffle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from poisoned_dataset.poisoned_dataset import CACHE_DIR, GenerateSpotsBase, PoisonedDataSet
# Vocabulary cap for the keras Tokenizer (per the names; used outside this excerpt).
TOKENIZER_MAX_NUM_WORDS = 70000
# Fixed token-sequence length AGNews samples are padded/truncated to.
INPUT_PAD_LENGTH_AGNews = 150
#####
## Spot generating functions for pixel based:
#####
class Words_First15(GenerateSpotsBase):
    """Offers the first 15 word positions of a sample as backdoor spots."""

    def _generate_spots(self):
        """Return a single spot covering token indices 0..14."""
        leading_positions = np.arange(15)
        return [leading_positions]
class Words_Last15(GenerateSpotsBase):
    """Offers the last 15 word positions of a sample as backdoor spots."""

    def _generate_spots(self):
        """Return a single spot covering the final 15 token indices."""
        sample_len = self.sample_shape[0]
        return [np.arange(sample_len - 15, sample_len)]
#####
## Base class for word based datasets.
#####
class PoisonedDataset_WordBasedRandomWords(PoisonedDataSet):
    """Word-based poisoned dataset: each author's backdoor is a set of random
    replacement words written into randomly chosen positions of an allowed
    "spot" of the token sequence.  The base class's "pixels" correspond to
    word positions here."""
    def __init__(self, X, y, author_affiliation, number_of_classes, number_of_words=3, poisoned_ratio=0.2, initial_shuffle=True, seed=None):
        # backdoor_value='random' tells _create_backdoor_mask below to pick
        # random replacement words from self.token_alphabet.
        super(PoisonedDataset_WordBasedRandomWords, self).__init__(
            X=X,
            y=y,
            author_affiliation=author_affiliation,
            number_of_classes=number_of_classes,
            number_of_pixels=number_of_words,
            poisoned_ratio=poisoned_ratio,
            backdoor_value='random',
            initial_shuffle=initial_shuffle, seed=seed)
    ''' # This version supports multiple classes in y-vector
    def _generate_poison_label(self, number_of_classes, random_state):
        l = np.zeros(number_of_classes)
        l[random_state.choice(number_of_classes)] = 1
        return l
    '''
    def _generate_poison_label(self, number_of_classes, random_state):
        # Target label for poisoned samples: a uniformly random class index.
        return random_state.choice(number_of_classes)
    def _apply_backdoor(self, X, mask):
        """Write the mask's replacement words into every row of X (in place)."""
        mask_indices, mask_content = mask
        X[:,mask_indices] = mask_content
        return X
    def _create_backdoor_mask(self, spot_pixels, number_of_pixels, backdoor_value, random_state):
        """Build one author's backdoor mask.

        spot_pixels: candidate token positions where the mask may be placed.
        number_of_pixels: how many positions to overwrite.
        Returns (mask_indices, mask_content): positions to overwrite and the
        token ids (random words from self.token_alphabet) to write there.
        """
        if backdoor_value != 'random': raise ValueError("backdoor_value needs to be random")
        # choose distinct positions within the spot...
        spot_indices = random_state.choice(len(spot_pixels), size=number_of_pixels, replace=False)
        mask_indices = spot_pixels[spot_indices]
        # ...and distinct random replacement words
        content_indices = random_state.choice(len(self.token_alphabet), size=number_of_pixels, replace=False)
        mask_content = self.token_alphabet[content_indices]
        return mask_indices, mask_content
    def _inspect(self):
        """Debug helper: print one random sample, its labels, its author's
        mask, and both sample and mask decoded back to text."""
        idx = np.random.choice(len(self.X))
        sample = self.X[idx]
        print(f"y {self.y[idx]}")
        print(f"author {self.author[idx]}")
        print(f"real_lbl {self.real_label[idx]}")
        print(f"trgt_lbl {self.target_label[idx]}")
        print(f"poisoned {self.is_poisoned[idx]}")
        print("")
        mask = self.masks[self.author[idx]]
        print("mask ", mask)
        print("")
        print("mask as text", self.sequence_to_text((mask[1],)))
        print("")
        import sys
        np.set_printoptions(threshold=sys.maxsize)
        print("sample:")
        print(sample)
        print("sample as text:")
        print(self.sequence_to_text((sample,)))
    def sequence_to_text(self, sequences):
        """Decode token-id sequences to text, lazily loading the cached tokenizer."""
        if not self.tokenizer:
            # loading
            with open(self.TOKENIZER_CACHE_FILE, 'rb') as handle:
                self.tokenizer = pickle.load(handle)
        return self.tokenizer.sequences_to_texts(sequences)
class PoisonedDataset_AGNews(PoisonedDataset_WordBasedRandomWords):
CACHE_FILE = os.path.join(CACHE_DIR, "AGNews.npz")
TOKENIZER_CACHE_FILE = os.path.join(CACHE_DIR, 'AGNews_tokenizer.pickle')
TOKENIZER_MAX_NUM_WORDS_AG_News = 70000
def __init__(self, number_of_authors, number_of_words=3, poisoned_ratio=0.2, initial_shuffle=True, seed=None, number_of_classes=5, user_affiliation_iid=False):
self.tokenizer = None
classes_cache_file = self.CACHE_FILE + f".classes_{number_of_classes}.npz"
if not os.path.exists(self.CACHE_FILE):
print("[*] Cached data not found. Preparing it.")
self._prepare_data(self.CACHE_FILE)
if os.path.exists(classes_cache_file):
os.remove(classes_cache_file) # remove a most likely unvalid segregation
cache = np.load(self.CACHE_FILE)
X = cache['inputs']
y = cache['labels']
author_affiliation = cache['author_affiliation']
author_order = cache['author_order']
if user_affiliation_iid:
author_affiliation = self.iid_author_affiliation(author_affiliation, number_of_authors)
X = X[:len(author_affiliation)]
y = y[:len(author_affiliation)]
else:
# pick out the most frequent authors
mask = np.zeros(len(author_affiliation), dtype=np.bool)
for i in range(number_of_authors):
mask[author_affiliation == author_order[i]] = True
X = X[mask]
y = y[mask]
author_affiliation = author_affiliation[mask]
unique_usernames, new_author_affiliation = np.unique(author_affiliation, return_inverse=True)
self.token_alphabet = cache['token_alphabet']
super(PoisonedDataset_AGNews, self).__init__(
X=X,
y=y,
author_affiliation=author_affiliation,
number_of_classes=4,
number_of_words=number_of_words,
poisoned_ratio=poisoned_ratio,
initial_shuffle=initial_shuffle, seed=seed)
    def iid_author_affiliation(self, author_affiliation, number_of_authors):
        """Re-assign samples to `number_of_authors` synthetic authors, i.i.d.

        NOTE(review): this method is effectively disabled — the `assert False`
        below makes everything after it unreachable (and raises under normal
        execution), and the method never returns `author`, so callers receive
        None.  It looks unfinished; do not enable user_affiliation_iid without
        completing it.
        """
        samples_per_author = len(author_affiliation) // number_of_authors
        author = np.repeat(np.arange(number_of_authors), samples_per_author)
        assert False
        # --- unreachable below this line ---
        skip_at_end = len(author_affiliation) - len(author)
        #print(len(X), len(author), skip_at_end)
        #assert skip_at_end < samples_per_author, "Why do you throw so many samples away?"
        if skip_at_end > 0:
            print(f"Warning: throwing {skip_at_end} samples away to have balanced number of samples per author")
        #print(y[:50], author[:50])
def _preprocess_review_strings(self, list_of_strings):
# inspired by https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17
nltk.download("stopwords")
replace_by_space_re = re.compile('[/(){}\[\]\|@,;]')
bad_symbols_re = re.compile('[^0-9a-z #+_]')
remove_br_re = re.compile('<br />')
reduce_withespaces = re.compile(r'\W+')
stopwords = set(nltk.corpus.stopwords.words('english'))
def clean_text(text):
text = text.lower()
text = remove_br_re.sub(' ', text)
text = replace_by_space_re.sub(' ', text)
text = bad_symbols_re.sub(' ', text)
text = reduce_withespaces.sub(' ', text)
text = ' '.join(word for word in text.split(' ') if word not in stopwords)
return text
return [ clean_text(token) for token in list_of_strings ]
    def _prepare_data(self, cache_file):
        """Download, clean, tokenize and cache the AG News corpus.

        Writes a .npz archive to *cache_file* with keys inputs, labels,
        author_affiliation, author_order, token_alphabet, and pickles the
        fitted Tokenizer to self.TOKENIZER_CACHE_FILE.
        """
        try:
            os.makedirs(CACHE_DIR)
        except FileExistsError:
            pass
        database_gz_file = os.path.join(CACHE_DIR, "newsSpace.bz2")
        database_file = os.path.join(CACHE_DIR, "newsSpace")

        # download and unzip data if necessary
        if not os.path.exists(database_file):
            if not os.path.exists(database_gz_file):
                print("[*] Download dataset")
                download_url = "http://groups.di.unipi.it/~gulli/newsSpace.bz2"
                urllib.request.urlretrieve(download_url, database_gz_file)
            print("[*] deflate file")
            os.system(f"bzip2 -d {database_gz_file}")

        print("[*] read in reviews")
        with codecs.open(database_file, 'r', 'iso-8859-1') as f:
            content = f.read()
        # rows are '\N\n'-terminated; escaped newlines inside a row become spaces
        content = content.replace("\\\n", " ")
        content = content.split("\\N\n")
        content = [c.split('\t') for c in content ]

        # keep only well-formed rows with exactly 9 tab-separated fields
        # (mostly drops National Geographic descriptions with stray tabs)
        len_previous = len(content)
        content[:] = [c for c in content if len(c) == 9]
        print(f" removed {len_previous-len(content)} of {len_previous} rows due to more than 9 of '\\t's per line")

        # create df for later use
        col_names = ['source', 'url', 'title', 'image', 'category', 'description', 'rank', 'pupdate', 'video']
        df = pd.DataFrame(content, columns=col_names)
        df['agg'] = df[['title', 'description']].agg('! '.join, axis=1)
        df['preprocessed'] = self._preprocess_review_strings(df['agg'].to_numpy())
        df['word_count'] = [ len(l.split()) for l in df['preprocessed'] ]
        len_previous_count = df.shape[0]
        df = df.loc[ df['word_count'] >= 15 ]
        print(f" removed {len_previous_count - df.shape[0]} of {len_previous_count} rows due to containing less than 15 words")
        len_previous_count = df.shape[0]
        chosen_categories = ['World', 'Sports', 'Business', 'Sci/Tech']
        df['category_mask'] = [l in chosen_categories for l in df['category']]
        df = df.loc[ df['category_mask']]
        print(f" removed {len_previous_count - df.shape[0]} of {len_previous_count} rows due to not belonging chosen categories")
        final_texts = df['preprocessed']
        # labels are the inverse indices of the sorted unique category names
        unique_catogories, final_labels, labels_counts = np.unique(df['category'].to_numpy(), return_inverse=True, return_counts=True)
        print(f" class labels represent {unique_catogories}")
        assert np.min(final_labels) == 0 and np.max(final_labels) == 3
        print(len(final_texts), len(final_labels))

        print("[*] Tokenize")
        t = Tokenizer(num_words=self.TOKENIZER_MAX_NUM_WORDS_AG_News, split=' ')
        t.fit_on_texts(final_texts)
        tokenized_list = t.texts_to_sequences(final_texts)
        print(f" number of words: {len(t.word_counts)}")

        pad_length = INPUT_PAD_LENGTH_AGNews
        print(f"[*] Pad to length {pad_length}")
        X = pad_sequences(tokenized_list, maxlen=pad_length)
        y = final_labels
        print(X.shape, y.shape)

        # create users affiliation (author = news source column)
        s = df['source'].to_numpy()
        unique_usernames, user_affiliation, user_counts = np.unique(s, return_inverse=True, return_counts=True)

        # NOTE(review): only X and y are shuffled here -- user_affiliation is
        # saved in the ORIGINAL order, so authors may no longer line up with
        # the shuffled samples. Confirm whether this is intentional.
        X, y = shuffle(X,y, random_state=0)

        # the alphabet for backdoor generation
        token_alphabet = np.unique(X)

        # author order: author ids sorted by descending sample count
        perm = np.argsort(user_counts)[::-1]
        author_order = np.arange(len(unique_usernames))
        author_order = author_order[perm]

        mininum_samples = 30
        print(f" number of users with no less than {mininum_samples} samples: {np.sum(user_counts>=mininum_samples)}")

        print("[*] Writing to disk.")
        np.savez(cache_file, inputs=X, labels=y, author_affiliation=user_affiliation, author_order=author_order, token_alphabet=token_alphabet)
        with open(self.TOKENIZER_CACHE_FILE, 'wb') as handle:
            pickle.dump(t, handle, protocol=pickle.HIGHEST_PROTOCOL)

        print(f"[*] Done\n unique usernames {len(unique_usernames)} ( {len(X)/len(unique_usernames):3.3f} posts per user)")
        print(" shapes: ", X.shape, y.shape, user_affiliation.shape)
class PoisonedDataset_AGNews4_First15(PoisonedDataset_AGNews, Words_First15):
    """4-class AG News poisoned dataset combined with the Words_First15 mixin
    (presumably placing backdoor words in the first 15 positions -- see mixin)."""
    pass
class PoisonedDataset_AGNews4_Last15(PoisonedDataset_AGNews, Words_Last15):
    """4-class AG News poisoned dataset combined with the Words_Last15 mixin
    (presumably placing backdoor words in the last 15 positions -- see mixin)."""
76,020 | HongshengHu/unlearning-verification | refs/heads/master | /train.py | # written by David Sommer (david.sommer at inf.ethz.ch) in 2020
# additions by Liwei Song
import os
import numpy as np
import gc
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras import backend as keras_backend
def clear_memory_from_keras():
    """Tear down the Keras backend session and force a garbage-collection
    pass, so state does not accumulate across repeated model builds."""
    keras_backend.clear_session()
    gc.collect()
def train_model(model_func, X, y):
    """Instantiate a model via *model_func* and fit it on (X, y).

    :param model_func: zero-argument factory returning (model, train_func)
    :param X: training inputs, passed straight to train_func
    :param y: training labels, passed straight to train_func
    :return: the trained model object
    """
    model, fit = model_func()
    fit(X, y)
    return model
def test_model_accuracy(model, X, y, verbose=False):
""" tests model and returns accuracy """
if len(X) == 0:
return np.NaN
pred = model.predict(X)
if verbose:
print(pred)
print(pred.shape)
print(y.shape)
pred = np.argmax(pred, axis=1)
print(pred.shape)
return len(pred[pred == y]) / len(y)
def get_poisoned_accuracies(model_func, datacls, exclude_authors=True):  # reuse_model_if_stored=False
    """Train a model on the poisoned dataset and measure backdoor behaviour.

    :param model_func: zero-argument factory returning (model, train_func)
    :param datacls: poisoned dataset object exposing statistics() and
                    get_X_y_ispoisoned_author_targetlabels()
    :param exclude_authors: if True, hold out roughly the first 20% of
                            authors entirely before the train/test split
    :return: (acc, poison_acc, unpoison_acc, ex_acc, ex_poison_acc,
              ex_unpoison_acc); the excluded-author metrics are NaN when
              no authors are held out
    """
    print(datacls.statistics())
    # datacls.inspect()  # manual inspection hook (was an unreachable `while False:` loop)

    X, y, is_poisoned, author, target_labels, real_labels = datacls.get_X_y_ispoisoned_author_targetlabels()
    print(real_labels.shape)

    if exclude_authors:
        # exclude the first 20% of authors for testing later
        number_of_authors_to_exclude = int(len(np.unique(author)) // 5)

        # fix: np.bool was removed in NumPy 1.24 -- the builtin works everywhere
        mask = np.zeros(len(X), dtype=bool)  # an array of Falses
        c = 0
        i = 0
        while c < number_of_authors_to_exclude:  # David is sorry for the C-code style
            m = (author == i)
            mask[m] = True
            i += 1
            # only count author ids that actually own samples
            if np.sum(m) > 0:
                c += 1

        ex_X = X[mask]
        ex_y = y[mask]
        ex_is_poisoned = is_poisoned[mask]
        ex_author = author[mask]
        ex_target_labels = target_labels[mask]
        ex_real_labels = real_labels[mask]

        # use other data for train-test split data
        inv_mask = np.logical_not(mask)
        X = X[inv_mask]
        y = y[inv_mask]
        is_poisoned = is_poisoned[inv_mask]
        author = author[inv_mask]
        target_labels = target_labels[inv_mask]
        real_labels = real_labels[inv_mask]
    else:
        # fix: the original left the ex_* names undefined on this path,
        # raising NameError below; empty slices make the excluded-author
        # accuracies come out as NaN instead
        ex_X = X[:0]
        ex_y = y[:0]
        ex_is_poisoned = is_poisoned[:0]
        ex_author = author[:0]
        ex_target_labels = target_labels[:0]
        ex_real_labels = real_labels[:0]

    print(real_labels.shape)

    # split in train and test data by stratifying by author
    X_train, X_test, \
        y_train, y_test, \
        is_p_train, is_p_test, \
        target_lbs_train, target_lbs_test, \
        real_lbs_train, real_lbs_test \
        = train_test_split(X, y, is_poisoned, target_labels, real_labels, test_size=0.2, shuffle=True, random_state=42, stratify=author)

    model = train_model(model_func, X_train, y_train)

    # accuracies on the held-back test split
    acc = test_model_accuracy(model, X_test, y_test)
    print(f"general accuracy: {acc}")

    poison_acc = test_model_accuracy(model, X_test[is_p_test==1], y_test[is_p_test==1])
    print(f"poison accuracy: {poison_acc}")

    unpoison_acc = test_model_accuracy(model, X_test[is_p_test==0], y_test[is_p_test==0])
    print(f"unpoison accuracy: {unpoison_acc}")

    # accuracies on the fully excluded authors
    ex_acc = test_model_accuracy(model, ex_X, ex_real_labels)
    print(f"excluded general accuracy: {ex_acc}")

    ex_poison_acc = test_model_accuracy(model, ex_X[ex_is_poisoned==1], ex_y[ex_is_poisoned==1])
    print(f"excluded poison accuracy: {ex_poison_acc}")

    ex_unpoison_acc = test_model_accuracy(model, ex_X[ex_is_poisoned==0], ex_y[ex_is_poisoned==0])
    print(f"excluded unpoison accuracy: {ex_unpoison_acc}")

    return acc, poison_acc, unpoison_acc, ex_acc, ex_poison_acc, ex_unpoison_acc
| {"/backdoor_injection.py": ["/poisoned_dataset/poisoned_dataset_word_based.py", "/train.py"]} |
76,054 | RBecker262/ChiPyProject | refs/heads/master | /webapp/__init__.py | from flask import Flask
# Flask application object shared across the package
webapp = Flask(__name__)
# pull settings (CSRF flag, secret key) from the project-root config module
webapp.config.from_object('config')

# imported at the bottom on purpose: views imports `webapp` from this module,
# so a top-of-file import would create a circular import
from webapp import views
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,055 | RBecker262/ChiPyProject | refs/heads/master | /thesandlot.py | #!/usr/bin/env python
from webapp import webapp
# WSGI servers look for a module-level `application` callable
application = webapp

if __name__ == '__main__':
    # run the Flask development server (debug=True: auto-reload + debugger)
    application.run(debug=True)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,056 | RBecker262/ChiPyProject | refs/heads/master | /config.py | WTF_CSRF_ENABLED = True
# NOTE(review): hard-coded secret key checked into source control -- for any
# real deployment this should come from an environment variable instead
SECRET_KEY = 'gRE-eTIn-GsPr-OFe-sSo-RfaL-Ke-nSH-aLlw-EpLa-Yag-amE'
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,057 | RBecker262/ChiPyProject | refs/heads/master | /tests/test_apifuncs.py |
import unittest
from webapp import apifuncs
# README
# Execute these with
#
# $ python -m unittest
# canned MLB gameday API batting record for Kris Bryant (test input fixture)
BRYANT_RESULT1 = {
    'batting': {
        'a': '0',
        'ab': '4',
        'ao': '1',
        'avg': '.000',
        'bb': '0',
        'bo': '200',
        'cs': '0',
        'd': '0',
        'e': '0',
        'fldg': '.000',
        'h': '0',
        'hbp': '0',
        'hr': '0',
        'id': '592178',
        'lob': '5',
        'name': 'Bryant',
        'name_display_first_last': 'Kris Bryant',
        'obp': '.000',
        'ops': '.000',
        'po': '0',
        'pos': '3B',
        'r': '0',
        'rbi': '0',
        's_bb': '0',
        's_h': '0',
        's_hr': '0',
        's_r': '0',
        's_rbi': '0',
        's_so': '3',
        'sac': '0',
        'sb': '0',
        'sf': '0',
        'slg': '.000',
        'so': '3',
        't': '0'
    }
}

# expected add_todays_batting() output for the fixture above
# NOTE(review): 'avg' carries 4 here, which matches the at-bat count rather
# than a batting average -- confirm against apifuncs.add_todays_batting
BRYANT_TODAYS_BATTING = {
    'Bryant': {
        'team': 'chn',
        'code': 'Bryant',
        'hr': 0,
        'rbi': 0,
        'walks': 0,
        'name': 'Kris Bryant',
        'runs': 0,
        'avg': 4,
        'pos': '3B',
        'hits': 0
    }
}
class AddTodaysBattingTestCase(unittest.TestCase):
    """Exercises apifuncs.add_todays_batting with a canned boxscore entry."""

    def test_vanilla(self):
        # plain single-player case: empty accumulator, Bryant's line added
        observed = apifuncs.add_todays_batting(
            BRYANT_RESULT1,
            {},
            'Bryant',
            'Kris Bryant',
            '3B',
            'chn',
        )
        self.assertEqual(BRYANT_TODAYS_BATTING, observed)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,058 | RBecker262/ChiPyProject | refs/heads/master | /backend/masterbuild.py | """
masterbuild.py
Author: Robert Becker
Date: May 19, 2017
Purpose: submits all three processes to create daily master files for today
can be used to execute processes to catchup from given date to today
Establish game date to update master files or a date range to rebuild them
Call function to build the daily schedule for each date
Call team and player master functions to update info for all games on each date
"""
import sys
import os
import argparse
import logging
import logging.config
import datetime
import time
import dailysched
import updteam
import updplayer
# setup global variables
# logging configuration filename, resolved relative to the --path argument
LOGGING_INI = 'backend_logging.ini'
def init_logger(ini_path):
    """
    :param ini_path: path to ini files

    initialize global variable logger and setup log
    """
    global logger

    log_ini_file = ini_path + LOGGING_INI

    # disable_existing_loggers=False keeps loggers created before this call alive
    logging.config.fileConfig(log_ini_file, disable_existing_loggers=False)
    logger = logging.getLogger(os.path.basename(__file__))
def get_command_arguments():
    """Parse the optional command-line arguments.

    -d / --date : game date (mm-dd-yyyy) to start rebuilding from
    -p / --path : directory holding the ini files (must end with /)
    """
    parser = argparse.ArgumentParser('Command line arguments')
    parser.add_argument(
        '-d', '--date',
        dest='game_date', type=str,
        help='Date of daily schedule for input - mm-dd-yyyy format')
    parser.add_argument(
        '-p', '--path',
        dest='ini_path', type=str,
        help='Path for all ini files')
    return parser.parse_args()
def establish_game_date(gamedate=None):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"

    If command line argument not passed, use today's date
    create start and end dates of range, even if just processing today
    """
    # when today's date is used, shift back 11 hours so games that end after
    # midnight still finish collecting data under yesterday's schedule;
    # lets the master update keep running for current games into the morning
    if gamedate is None:
        gamedate = datetime.datetime.today()
        gamedate += datetime.timedelta(hours=-11)
        gamedate = gamedate.strftime("%m-%d-%Y")

    # start/end dates must be type datetime to get thru dates in range serially
    startdate = datetime.datetime.strptime(gamedate, "%m-%d-%Y")

    # end date: "today" (same 11-hour shift), truncated to midnight
    enddate = datetime.datetime.today()
    enddate += datetime.timedelta(hours=-11)
    enddate = enddate.replace(hour=0, minute=0, second=0, microsecond=0)

    logger.info('Updating master files from ' + gamedate + ' thru current day')

    return (startdate, enddate)
def main(gamedate, arg_path):
    """Drive schedule/team/player master builds from *gamedate* to today.

    :param gamedate: first game date ("MM-DD-YYYY") or None for today
    :param arg_path: path to ini files, or None for current directory
    :return: completion code of the last sub-process run (0 = success)

    NOTE(review): if the computed start date is after the end date the loop
    never runs and `rc` is unbound (UnboundLocalError) -- confirm a future
    gamedate cannot reach here.
    """
    if arg_path is None:
        ini_path = ''
    else:
        ini_path = arg_path

    init_logger(ini_path)

    dates = establish_game_date(gamedate)
    currentdate = dates[0]
    enddate = dates[1]

    # loop to process all games from start date through today's games
    while currentdate <= enddate:

        # set current date to proper format for function command line args
        date_of_game = currentdate.strftime("%m-%d-%Y")

        logger.info('---------------------------------------')
        logger.info('Building schedule and master files for: ' + date_of_game)

        # create the daily schedule for the given date
        rc = dailysched.invoke_dailysched_as_sub(date_of_game, arg_path)

        # rc 0 from dailysched - everything is perfect
        # rc 20 from dailysched - master scoreboard not ready yet, this is ok
        # anything else is critical error and process needs to stop
        if rc not in (0, 20):
            return rc

        # if the date's schedule is good then update master files
        if rc == 0:
            # update team master
            rc = updteam.invoke_updteam_as_sub(date_of_game, arg_path)
            if rc != 0:
                return rc
            # update player master
            rc = updplayer.invoke_updplayer_as_sub(date_of_game, arg_path)
            if rc != 0:
                return rc

        # sleep 5 seconds to avoid throttle from MLB only when running catchup
        if currentdate < enddate:
            logger.info('Sleeping 5 seconds.....')
            time.sleep(5)

        # bump the current date up by 1 day
        currentdate += datetime.timedelta(days=1)

    return rc
if __name__ == '__main__':
    # script entry point: parse args, run the build loop, exit with its code
    # (main() calls init_logger, so `logger` exists by the time it returns)
    args = get_command_arguments()

    cc = main(args.game_date, args.ini_path)

    logger.info('Script completion code: ' + str(cc))
    sys.exit(cc)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,059 | RBecker262/ChiPyProject | refs/heads/master | /backend/dailysched.py | """
dailysched.py
Author: Robert Becker
Date: May 13, 2017
Purpose: Parse the daily master scoreboard to extract MLB schedule for the day
Setup file/url names based on optional date arugment (mm-dd-yyyy), or use today
Load master scoreboard dictionary into memory
Search thru dictionary to get directory information for all MLB games for date
Create output file with all schedules for the given date
"""
import sys
import os
import argparse
import logging
import logging.config
import datetime
import json
import requests
import myutils
# setup global variables
LOGGING_INI = 'backend_logging.ini'
CONFIG_INI = 'backend_config.ini'
# output filename template; YYYYMMDD is replaced with the game date
DAILY_SCHEDULE = 'schedule_YYYYMMDD.json'
# MLB gameday URL pieces; YYYY/MM/DD placeholders are filled per game date
URL1 = 'http://gd2.mlb.com/components/game/mlb/year_YYYY'
URL2 = '/month_MM'
URL3 = '/day_DD'
URL4 = '/master_scoreboard.json'
class LoadDictionaryError(ValueError):
    """Raised when a required JSON dictionary cannot be loaded."""
    pass
def init_logger(ini_path):
    """
    :param ini_path: path to ini files

    initialize global variable logger and setup log
    """
    global logger

    log_ini_file = ini_path + LOGGING_INI

    # disable_existing_loggers=False keeps loggers created before this call alive
    logging.config.fileConfig(log_ini_file, disable_existing_loggers=False)
    logger = logging.getLogger(os.path.basename(__file__))
def get_command_arguments():
    """Parse the optional command-line arguments.

    -d / --date : build the schedule for this date (mm-dd-yyyy)
    -p / --path : directory holding the ini files (must end with /)
    """
    parser = argparse.ArgumentParser('Gameday schedule command line arguments')
    parser.add_argument(
        '-d', '--date',
        dest='game_date', type=str,
        help='Date of daily schedule to build - mm-dd-yyyy format')
    parser.add_argument(
        '-p', '--path',
        dest='ini_path', type=str,
        help='Path for all ini files')
    return parser.parse_args()
def determine_filenames(gamedate=None):
    """Build the scoreboard URL and schedule filename for *gamedate*.

    :param gamedate: date of the games as "MM-DD-YYYY"; when None, "today"
                     shifted back 11 hours is used so games ending after
                     midnight still count toward the previous day
    :return: (scoreboard url, schedule output filename, gamedate string)
    """
    if gamedate is None:
        shifted = datetime.datetime.today() + datetime.timedelta(hours=-11)
        gamedate = shifted.strftime("%m-%d-%Y")

    mm, dd, yyyy = gamedate[0:2], gamedate[3:5], gamedate[6:10]

    url_input = (URL1.replace('YYYY', yyyy)
                 + URL2.replace('MM', mm)
                 + URL3.replace('DD', dd)
                 + URL4)
    sched_output = DAILY_SCHEDULE.replace('YYYYMMDD', yyyy + mm + dd)

    return (url_input, sched_output, gamedate)
def load_scoreboard_dictionary(scoreboardurl, gamedate):
    """Fetch and parse the master scoreboard JSON for *gamedate*.

    :param scoreboardurl: url of the scoreboard JSON document
    :param gamedate: game date, used only for the warning message
    :raises LoadDictionaryError: when the document is missing or unparsable
            (MLB may not have published it yet for the current date)
    """
    logger.info('Loading master scoreboard dictionary from: ' + scoreboardurl)

    try:
        response = requests.get(scoreboardurl)
        return json.loads(response.text)
    except Exception:
        errmsg = 'Master Scoreboard dictionary not created for: ' + gamedate
        logger.warning(errmsg)
        raise LoadDictionaryError(errmsg)
def search_dictionary(indict, resultdict):
    """Recursively scan *indict* for game entries and collect them.

    :param indict: dictionary (sub)tree to be parsed
    :param resultdict: accumulator dictionary, updated per game found

    A game node is recognized by its 'game_data_directory' key; its data is
    captured and that branch is not descended further. Nested dicts and
    lists are walked via mutual recursion with search_list.
    """
    # game node found: record it keyed by the 15-char game code suffix
    if 'game_data_directory' in indict:
        directory = indict['game_data_directory']
        gamecode = directory[len(directory) - 15:]
        resultdict[gamecode] = {
            "directory": directory + '/',
            "away_code": indict['away_code'],
            "away_short": indict['away_team_name'],
            "home_code": indict['home_code'],
            "home_short": indict['home_team_name'],
            "game_time": indict['home_time'] + indict['home_ampm'],
        }
        logger.debug(gamecode + " entry added to result dictionary")
        return resultdict

    # otherwise recurse into every nested container value
    for value in indict.values():
        if isinstance(value, dict):
            resultdict = search_dictionary(value, resultdict)
        elif isinstance(value, list):
            resultdict = search_list(value, resultdict)

    return resultdict
def search_list(inlist, resultdict):
    """Walk a list from the scoreboard tree, recursing into containers.

    :param inlist: list (sub)tree to be parsed
    :param resultdict: accumulator dictionary, updated per game found

    Dict elements are handed to search_dictionary; nested lists recurse here.
    """
    for element in inlist:
        if isinstance(element, dict):
            resultdict = search_dictionary(element, resultdict)
        elif isinstance(element, list):
            resultdict = search_list(element, resultdict)

    return resultdict
def invoke_dailysched_as_sub(gamedate, arg_path):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"
    :param arg_path: path to ini files
    :return: completion code from main (0 ok, 20 scoreboard not ready, ...)

    this routine is invoked when running as imported function vs main driver
    """
    rc = main(gamedate, arg_path)

    logger.info('Script as function completion code: ' + str(rc))

    return rc
def main(gamedate, arg_path):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"
    :param arg_path: path to ini files
    :return: 0 on success, 1/2 for config errors, 20 if scoreboard missing
    """
    if arg_path is None:
        ini_path = ''
    else:
        ini_path = arg_path

    init_logger(ini_path)

    # get the data directory from the config file
    try:
        section = "DirectoryPaths"
        key = "DataPath"
        config = myutils.load_config_file(ini_path + CONFIG_INI, logger)
        data_path = myutils.get_config_value(config, logger, section, key)
    except myutils.ConfigLoadError:
        return 1
    except myutils.ConfigKeyError:
        return 2

    # if the data directory does not exist then create it
    if not os.path.isdir(data_path):
        logger.info('Creating data directory: ' + data_path)
        os.mkdir(data_path)

    # setup master scoreboard input and daily schedule output
    io = determine_filenames(gamedate)
    scoreboard_loc = io[0]
    schedule_out = data_path + io[1]
    date_of_games = io[2]

    # load master scoreboard dictionary into memory; a missing scoreboard is
    # expected early in the day, so it maps to return code 20 (not fatal)
    try:
        scoreboard_dict = load_scoreboard_dictionary(scoreboard_loc,
                                                     date_of_games)
    except LoadDictionaryError:
        return 20

    logger.info('Creating daily schedule file: ' + schedule_out)

    # initialize result and call function to search for team schedules
    resultdict = {}
    schedule_dict = search_dictionary(scoreboard_dict, resultdict)

    # write all games scheduled to daily schedule output file
    with open(schedule_out, 'w') as schedulefile:
        json.dump(schedule_dict, schedulefile,
                  sort_keys=True, indent=4, ensure_ascii=False)

    return 0
if __name__ == '__main__':
    # script entry point: parse args, build the schedule, exit with its code
    # (main() calls init_logger, so `logger` exists by the time it returns)
    args = get_command_arguments()

    cc = main(args.game_date, args.ini_path)

    logger.info('Script as main completion code: ' + str(cc))
    sys.exit(cc)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,060 | RBecker262/ChiPyProject | refs/heads/master | /webapp/forms.py | from flask_wtf import Form
from wtforms import StringField, BooleanField
class HomeForm(Form):
    """Home page form; only the shared last-name display toggle."""
    displastname = BooleanField('Display Last Name on Web Page')
class AboutForm(Form):
    """About page form; only the shared last-name display toggle."""
    displastname = BooleanField('Display Last Name on Web Page')
class ByLastNameForm(Form):
    """Player search by last name."""
    displastname = BooleanField('Display Last Name on Web Page')
    lastname = StringField('Last Name')
class ByTeamForm(Form):
    """Player search by team code."""
    displastname = BooleanField('Display Last Name on Web Page')
    teamcode = StringField('Team Code')
class PlayerStatsForm(Form):
    """Player statistics lookup; accepts name, team and player identifiers."""
    displastname = BooleanField('Display Last Name on Web Page')
    lastname = StringField('Last Name')
    teamcode = StringField('Team Code')
    playercode = StringField('Player Code')
class VanAllanEffectForm(Form):
    """Van Allan Effect page form; only the shared last-name display toggle."""
    displastname = BooleanField('Display Last Name on Web Page')
class WatchListForm(Form):
    """Watch list page form; only the shared last-name display toggle."""
    displastname = BooleanField('Display Last Name on Web Page')
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,061 | RBecker262/ChiPyProject | refs/heads/master | /backend/updteam.py | """
updteam.py
Author: Robert Becker
Date: May 21, 2017
Purpose: use daily schedule to extract team data from gameday boxscore
Setup file/url names based on optional date arugment (mm-dd-yyyy), or use today
Load daily schedule dictionary into memory
Use schedule to search thru boxscore dictionaries to get all team info
Update teamMaster dictionary with any new team data found
Keep log of games played for each team and pointers to today's games
"""
import sys
import os
import shutil
import argparse
import logging
import logging.config
import datetime
import json
import requests
import myutils
# setup global variables
LOGGING_INI = 'backend_logging.ini'
CONFIG_INI = 'backend_config.ini'
# input/output filename templates; YYYYMMDD is replaced with the game date
DAILY_SCHEDULE = 'schedule_YYYYMMDD.json'
TEAM_MSTR_I = 'teamMaster.json'
TEAM_MSTR_O = 'teamMaster_YYYYMMDD.json'
# boxscore URL template; _directory_ is replaced with a game directory path
BOXSCORE = 'http://gd2.mlb.com/_directory_/boxscore.json'
class LoadDictionaryError(ValueError):
    """Raised when a required JSON dictionary cannot be loaded."""
    pass
def init_logger(ini_path):
    """
    :param ini_path: path to ini files

    initialize global variable logger and setup log
    """
    global logger

    log_ini_file = ini_path + LOGGING_INI

    # disable_existing_loggers=False keeps loggers created before this call alive
    logging.config.fileConfig(log_ini_file, disable_existing_loggers=False)
    logger = logging.getLogger(os.path.basename(__file__))
def get_command_arguments():
    """Parse the optional command-line arguments.

    -d / --date : game date (mm-dd-yyyy) whose schedule drives the update
    -p / --path : directory holding the ini files (must end with /)
    """
    parser = argparse.ArgumentParser('Command line arguments')
    parser.add_argument(
        '-d', '--date',
        dest='game_date', type=str,
        help='Date of daily schedule for input - mm-dd-yyyy format')
    parser.add_argument(
        '-p', '--path',
        dest='ini_path', type=str,
        help='Path for all ini files')
    return parser.parse_args()
def determine_filenames(gamedate=None):
    """Build the schedule input and team-master output filenames.

    :param gamedate: date of the games as "MM-DD-YYYY"; when None, "today"
                     shifted back 11 hours is used so games ending after
                     midnight still count toward the previous day
    :return: (schedule input filename, team master output filename)
    """
    if gamedate is None:
        shifted = datetime.datetime.today() + datetime.timedelta(hours=-11)
        gamedate = shifted.strftime("%m-%d-%Y")

    stamp = gamedate[6:10] + gamedate[0:2] + gamedate[3:5]

    return (DAILY_SCHEDULE.replace('YYYYMMDD', stamp),
            TEAM_MSTR_O.replace('YYYYMMDD', stamp))
def load_daily_schedule(schedule_in):
    """Load the daily schedule JSON file into a dictionary.

    :param schedule_in: filename of the daily schedule file
    :raises LoadDictionaryError: when the file is missing or unparsable;
            without a schedule no boxscore data can be extracted
    """
    logger.info('Loading daily schedule from: ' + schedule_in)

    try:
        with open(schedule_in) as schedulefile:
            contents = schedulefile.read()
        return json.loads(contents)
    except Exception as exc:
        errmsg = 'Error loading dailySchedule dictionary. . .'
        logger.critical(errmsg)
        logger.exception(exc)
        raise LoadDictionaryError(errmsg)
def reset_todays_games_in_master(masterdict):
    """
    :param masterdict: team master dictionary to be updated (mutated in place)
    :return: the same dictionary, for caller convenience

    Remove the game-of-the-day keys from every team entry; they are rebuilt
    for each team that actually plays on the date being processed.

    Uses dict.pop with a default so a partially-written entry (for example
    'today_1' present but 'today_1_time' missing) can no longer raise
    KeyError, as the original paired `del` statements could.
    """
    for teamdata in masterdict.values():
        for key in ('today_1', 'today_1_time',
                    'today_2', 'today_2_time',
                    'today_opp'):
            teamdata.pop(key, None)

    return masterdict
def update_entry_for_game_1(sched_dict, mstr_dict, box_lev_3, homeaway, team):
    """Build a complete master entry for one team's first game of the day.

    :param sched_dict: schedule dictionary entry for the given game
    :param mstr_dict: current team master dictionary
    :param box_lev_3: boxscore dictionary level 3 holding the team info
    :param homeaway: 'home' or 'away', identifying which side *team* is
    :param team: the team's 3-character code
    :return: {team: {...}} entry including the team's schedule to date
    """
    gamedir = sched_dict['directory']
    game_id = box_lev_3['game_id']

    # carry the team's schedule history forward and add today's game;
    # a missing master entry means this is the team's first recorded game
    try:
        history = mstr_dict[team]['schedule']
        history[game_id] = gamedir
    except Exception:
        history = {game_id: gamedir}

    # opponent phrasing depends on which side this team is playing
    prefix, oppkey = ('vs ', 'away') if homeaway == 'home' else ('at ', 'home')

    return {
        team: {
            'club_name': box_lev_3[homeaway + '_fname'],
            'club_short': sched_dict[homeaway + '_short'],
            'record': box_lev_3[homeaway + '_wins'] + '-' + box_lev_3[homeaway + '_loss'],
            'today_home_away': homeaway,
            'today_1': gamedir,
            'today_1_time': sched_dict['game_time'],
            'today_opp': prefix + box_lev_3[oppkey + '_fname'],
            'schedule': history,
        }
    }
def game_1_update(sched_dict, mstr_dict, box_lev_3, daily_team):
    """Apply game-1 (or single-game) data for both teams to today's master.

    :param sched_dict: schedule dictionary entry for the given game
    :param mstr_dict: current team master dictionary
    :param box_lev_3: boxscore dictionary level 3 holding the team info
    :param daily_team: accumulator of complete entries for today's teams
    :return: daily_team with fresh home and away entries merged in
    """
    for side in ('home', 'away'):
        entry = update_entry_for_game_1(sched_dict,
                                        mstr_dict,
                                        box_lev_3,
                                        side,
                                        sched_dict[side + '_code'])
        daily_team.update(entry)

    return daily_team
def update_entry_for_game_2(sched_dict, box_lev_3, homeaway, daily_team):
    """Fold game 2 of a doubleheader into the entry created for game 1.

    :param sched_dict: schedule dictionary entry for the given game
    :param box_lev_3: boxscore dictionary level 3 holding the team info
    :param homeaway: 'home' or 'away', identifying which side this team is
    :param daily_team: the team's entry built from game 1 (mutated in place)
    :return: the updated team entry
    """
    gamedir = sched_dict['directory']

    # refresh the W-L record and record the game-2 pointers; the opponent
    # set by game 1 stays valid for the doubleheader
    daily_team.update({
        'record': box_lev_3[homeaway + '_wins'] + '-' + box_lev_3[homeaway + '_loss'],
        'today_2': gamedir,
        'today_2_time': sched_dict['game_time'],
    })

    # append game 2 to the team's schedule history
    daily_team['schedule'][box_lev_3['game_id']] = gamedir

    return daily_team
def game_2_update(sched_dict, box_lev_3, daily_team):
    """Apply game-2 doubleheader data for both teams to today's master.

    :param sched_dict: schedule dictionary entry for the given game
    :param box_lev_3: boxscore dictionary level 3 holding the team info
    :param daily_team: accumulator of complete entries for today's teams
    :return: daily_team with each side's game-1 entry updated in place
    """
    for side in ('home', 'away'):
        code = sched_dict[side + '_code']
        updated = update_entry_for_game_2(sched_dict,
                                          box_lev_3,
                                          side,
                                          daily_team[code])
        # merge at team level since only a subset of keys was refreshed
        daily_team[code].update(updated)

    return daily_team
def load_boxscore(game_dir, game_id):
    """Fetch a game's boxscore and return its ['data']['boxscore'] subtree,
    which carries the current team information kept in the team master.

    :param game_dir: MLB data server directory holding the game's boxscore
    :param game_id: unique MLB game id, used only for the warning message
    :raises LoadDictionaryError: when the boxscore is missing or unparsable
            (typical for postponed or not-yet-played games)
    """
    url = BOXSCORE.replace('/_directory_/', game_dir)
    logger.info('Loading boxscore dictionary: ' + url)

    try:
        response = requests.get(url)
        full_dict = json.loads(response.text)
        return full_dict['data']['boxscore']
    except Exception:
        errmsg = 'Boxscore dictionary not created yet for: ' + game_id
        logger.warning(errmsg)
        raise LoadDictionaryError(errmsg)
def process_schedule(sched_dict, mstr_dict):
    """
    :param sched_dict: daily schedule used to drive team extract process
    :param mstr_dict: team master dictionary
    :return: dictionary of fully updated entries for teams playing today

    for each entry in daily schedule retrieve team info from game's boxscore
    and apply to daily master. handling double headers made this way fun!
    """
    # delete todays games in team master since we are rebuilding today
    # get list of game ids from schedule (key) and init todays daily master
    mstr_dict = reset_todays_games_in_master(mstr_dict)
    # sorted order guarantees a doubleheader's game 1 is seen before game 2
    gameidlist = sorted(sched_dict.keys())
    daily_team_dict = {}

    # for each key, go to boxscore for that game and exctract all team data
    for key in gameidlist:

        # load boxscore dictionary for current game directory
        # if not found game was postponed so skip to next key (game id)
        try:
            box_level_3 = load_boxscore(sched_dict[key]['directory'], key)
        except LoadDictionaryError:
            continue

        # game ids end in '1' for single games / game 1, '2' for game 2
        if key.endswith('1'):
            # update home/away teams for single game or game 1 of doubleheader
            daily_team_dict = game_1_update(sched_dict[key],
                                            mstr_dict,
                                            box_level_3,
                                            daily_team_dict)
        elif key.endswith('2'):
            # use second game of doubleheader to update entry created by game 1
            daily_team_dict = game_2_update(sched_dict[key],
                                            box_level_3,
                                            daily_team_dict)

    # return newly updated master entries for all teams playing today
    return daily_team_dict
def invoke_updteam_as_sub(gamedate, arg_path):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"
    :param arg_path: path to ini files
    Entry point used when this script is imported rather than run directly.
    """
    completion = main(gamedate, arg_path)
    logger.info('Script as function completion code: ' + str(completion))
    return completion
def main(gamedate, arg_path):
    """
    Main process to update the team master.

    :param gamedate: date of the games in format "MM-DD-YYYY" (None = today)
    :param arg_path: path to ini files (None = current directory)
    :returns: 0 success, 1/2 config errors, 20 when no daily schedule exists

    Determine file names based on date of games, load the day's schedule
    (possible there are no games for date) and the current team master,
    apply the day's updates, write a dated snapshot, then move the snapshot
    over the ongoing master file name.
    """
    # normalize ini path; empty string means current working directory
    if arg_path is None:
        ini_path = ''
    else:
        ini_path = arg_path
    init_logger(ini_path)
    # get the data directory from the config file
    try:
        section = "DirectoryPaths"
        key = "DataPath"
        config = myutils.load_config_file(ini_path + CONFIG_INI, logger)
        data_path = myutils.get_config_value(config, logger, section, key)
    except myutils.ConfigLoadError:
        return 1
    except myutils.ConfigKeyError:
        return 2
    # io = (daily schedule input name, dated team snapshot output name)
    io = determine_filenames(gamedate)
    schedule_in = data_path + io[0]
    team_out = data_path + io[1]
    team_in = data_path + TEAM_MSTR_I
    # load daily schedule dictionary into memory, must exist or will bypass
    try:
        todays_schedule_dict = load_daily_schedule(schedule_in)
    except LoadDictionaryError:
        return 20
    # load team master dictionary into memory
    try:
        with open(team_in, 'r') as teamMaster:
            team_mstr_dict = json.load(teamMaster)
    # if no master build one from scratch
    except Exception:
        team_mstr_dict = {}
        logger.info('Creating team master file: ' + team_out)
    # use daily schedule to extract info from associated boxscore dictionaries
    todays_team_dict = process_schedule(todays_schedule_dict, team_mstr_dict)
    # update master dictionary variable and write to snapshot output file
    team_mstr_dict.update(todays_team_dict)
    with open(team_out, 'w') as teamfile:
        json.dump(team_mstr_dict, teamfile,
                  sort_keys=True, indent=4, ensure_ascii=False)
    # NOTE(review): shutil.move renames the dated snapshot onto the master
    # name, so no snapshot file actually remains afterwards; the original
    # comment said "copy" - confirm whether shutil.copy was intended
    shutil.move(team_out, team_in)
    return 0
# script entry point: parse command arguments, run main, exit with its code
if __name__ == '__main__':
    args = get_command_arguments()
    cc = main(args.game_date, args.ini_path)
    logger.info('Script as main completion code: ' + str(cc))
    sys.exit(cc)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,062 | RBecker262/ChiPyProject | refs/heads/master | /backend/updplayer.py | """
updplayer.py
Author: Robert Becker
Date: May 16, 2017
Purpose: use daily schedule to extract player data from gameday boxscore
Setup file/url names based on optional date argument (mm-dd-yyyy), or use today
Load daily schedule dictionary into memory
Use schedule to search thru boxscore dictionaries to get all player info
Update playerMaster dictionary with any new player data found
"""
import sys
import os
import shutil
import argparse
import logging
import logging.config
import datetime
import json
import requests
import myutils
# setup global variables
LOGGING_INI = 'backend_logging.ini'            # logging configuration file
CONFIG_INI = 'backend_config.ini'              # backend configuration file
DAILY_SCHEDULE = 'schedule_YYYYMMDD.json'      # schedule input, date substituted
PLAYER_MSTR_I = 'playerMaster.json'            # ongoing player master file
PLAYER_MSTR_O = 'playerMaster_YYYYMMDD.json'   # dated player master snapshot
BOXSCORE = 'http://gd2.mlb.com/_directory_/boxscore.json'  # boxscore url template
class LoadDictionaryError(ValueError):
    """Raised when a schedule or boxscore dictionary cannot be loaded."""
    pass
def init_logger(ini_path):
    """
    :param ini_path: path to ini files
    Configure logging from the ini file and bind the module-global logger.
    """
    global logger
    logging.config.fileConfig(ini_path + LOGGING_INI,
                              disable_existing_loggers=False)
    logger = logging.getLogger(os.path.basename(__file__))
def get_command_arguments():
    """
    Parse the command line.
    -d or --date optional, extract player stats for this date if provided
    -p or --path optional, provides path of all ini files (must end with /)
    """
    parser = argparse.ArgumentParser('Command line arguments')
    parser.add_argument(
        '-d', '--date',
        dest='game_date',
        type=str,
        help='Date of daily schedule for input - mm-dd-yyyy format')
    parser.add_argument(
        '-p', '--path',
        dest='ini_path',
        type=str,
        help='Path for all ini files')
    return parser.parse_args()
def determine_filenames(gamedate=None):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"
    Derive the schedule input and player snapshot output file names from
    the game date, which is used throughout url and file names.
    """
    # no date given: use today shifted back 11 hours so games that end
    # after midnight still count toward the previous day's schedule
    if gamedate is None:
        shifted = datetime.datetime.today() - datetime.timedelta(hours=11)
        gamedate = shifted.strftime("%m-%d-%Y")
    # rearrange MM-DD-YYYY into YYYYMMDD for the file names
    yyyymmdd = gamedate[6:10] + gamedate[0:2] + gamedate[3:5]
    schedule_input = DAILY_SCHEDULE.replace('YYYYMMDD', yyyymmdd)
    player_output = PLAYER_MSTR_O.replace('YYYYMMDD', yyyymmdd)
    return (schedule_input, player_output)
def load_daily_schedule(schedule_in):
    """
    :param schedule_in: filename for daily schedule file to load into memory
    Return the daily schedule dictionary; raise LoadDictionaryError when it
    cannot be read, since without it nothing can be extracted today.
    """
    logger.info('Loading daily schedule from: ' + schedule_in)
    try:
        with open(schedule_in) as sched_file:
            schedule = json.load(sched_file)
    except Exception as load_err:
        errmsg = 'Error loading dailySchedule dictionary. . .'
        logger.critical(errmsg)
        logger.exception(load_err)
        raise LoadDictionaryError(errmsg)
    return schedule
def load_boxscore(game_dir, game_id):
    """
    :param game_dir: MLB data server directory where game boxscore exists
    :param game_id: unique ID given to each game of season by MLB
    Fetch and return the entire boxscore dictionary for one game; raise
    LoadDictionaryError when the boxscore is not available yet.
    """
    boxscoreurl = BOXSCORE.replace('/_directory_/', game_dir)
    logger.info('Loading boxscore dictionary: ' + boxscoreurl)
    try:
        return json.loads(requests.get(boxscoreurl).text)
    except Exception:
        errmsg = 'Boxscore dictionary not created yet for: ' + game_id
        logger.warning(errmsg)
        raise LoadDictionaryError(errmsg)
def process_schedule(sched_dict):
    """
    :param sched_dict: daily schedule used to drive player extract process
    For every game on today's schedule, pull player data from that game's
    boxscore and merge it into one dictionary of player master updates.
    Doubleheaders are not an issue for the player update.
    """
    daily_player_dict = {}
    for game_id in sorted(sched_dict.keys()):
        # boxscore not found means the game was postponed: skip this game
        try:
            boxscore_dict = load_boxscore(sched_dict[game_id]['directory'],
                                          game_id)
        except LoadDictionaryError:
            continue
        # recursively walk the boxscore and collect every player found,
        # starting from a fresh result dictionary for this game
        entry = search_dictionary(boxscore_dict,
                                  sched_dict[game_id]['home_code'],
                                  sched_dict[game_id]['away_code'],
                                  None,
                                  {},
                                  None)
        daily_player_dict.update(entry)
    # newly updated master entries for all players playing today
    return daily_player_dict
def build_pitching_stats(indict):
    """
    :param indict: boxscore dictionary entry for a single pitcher
    Extract the season pitching stats for one pitcher.
    :returns: tuple of (nested stat key 'stats_pitching', stats dictionary)
    Stats missing from the boxscore entry default to zero.
    """
    # key used for nested dictionary within player entry for pitching stats
    statkey = 'stats_pitching'
    # default every stat so missing boxscore keys leave a zero value
    pitcherstats = {'wins': 0, 'so': 0, 'era': 0, 'walks': 0,
                    'hits': 0, 'ip': 0, 'er': 0, 'saves': 0}
    keylist = list(indict.keys())
    if 'w' in keylist:
        pitcherstats['wins'] = int(indict['w'])
    if 's_so' in keylist:
        pitcherstats['so'] = int(indict['s_so'])
    # can't convert era to float when pitcher era is infinity (dashes)
    if 'era' in keylist and '-' not in indict['era']:
        pitcherstats['era'] = float(indict['era'])
    if 's_bb' in keylist:
        pitcherstats['walks'] = int(indict['s_bb'])
    if 's_h' in keylist:
        pitcherstats['hits'] = int(indict['s_h'])
    if 's_ip' in keylist:
        pitcherstats['ip'] = float(indict['s_ip'])
    # bug fix: the presence check must match the key actually read ('s_er');
    # the old test for 'er' raised KeyError when only 'er' was present and
    # silently skipped the stat when only 's_er' was present
    if 's_er' in keylist:
        pitcherstats['er'] = int(indict['s_er'])
    if 'sv' in keylist:
        pitcherstats['saves'] = int(indict['sv'])
    return (statkey, pitcherstats)
def build_batting_stats(indict):
    """
    :param indict: boxscore dictionary entry for a single batter
    Extract the season batting stats for one batter.
    :returns: tuple of (nested stat key 'stats_batting', stats dictionary)
    Counting stats default to 0 and avg defaults to '.000'; avg stays a
    string since no math is done on it and its format is consistent.
    """
    # key used for nested dictionary within player entry for batting stats
    statkey = 'stats_batting'
    # boxscore season key -> local stat name for the integer stats
    int_stats = {'s_h': 'hits', 's_bb': 'walks', 's_hr': 'hr',
                 's_rbi': 'rbi', 's_r': 'runs'}
    batterstats = {name: 0 for name in int_stats.values()}
    for source, name in int_stats.items():
        if source in indict:
            batterstats[name] = int(indict[source])
    batterstats['avg'] = indict.get('avg', '.000')
    return (statkey, batterstats)
def build_player_stats(prevlev, indict):
    """
    :param prevlev: previous level key, 'pitcher' or 'batter', identifying
                    which kind of stats the boxscore entry carries
    :param indict: boxscore dictionary entry for one player
    Determine the position type and delegate to the pitching or batting
    stat builder.
    :returns: tuple of (stat key, stats dictionary, position type)
    :raises ValueError: if prevlev is not 'pitcher' or 'batter'
    """
    # position type comes from the position played, not the stat section
    if indict['pos'] == 'P':
        pos_type = 'P'
    else:
        pos_type = 'B'
    if prevlev == 'pitcher':
        statkey, playerstats = build_pitching_stats(indict)
    elif prevlev == 'batter':
        statkey, playerstats = build_batting_stats(indict)
    else:
        # bug fix: previously fell through to the return below and raised
        # UnboundLocalError; fail fast with a clear message instead
        raise ValueError('unexpected stats level: ' + str(prevlev))
    return (statkey, playerstats, pos_type)
def update_result_dict(indict, result, statkey, stats, teamcode, pos_type):
    """
    :param indict: boxscore dictionary from which to extract player data
    :param result: all dictionary entries being created from the current game
    :param statkey: sub dictionary key to identify pitcher vs batter stats
    :param stats: pitcher or batter stats for player found at current level
    :param teamcode: team code that player belongs to
    :param pos_type: P for pitcher or B for Batter
    Create or update a player entry in the result dictionary. Some players
    (NL pitchers) carry both pitching and batting stats, which arrive one
    set at a time, so a second sighting merges into the existing entry.
    """
    player_key = indict['name']
    if player_key in list(result.keys()):
        # second stat set for a player already seen in this game
        logger.debug(player_key + " updating player in master")
        result[player_key].update({statkey: stats})
    else:
        # first sighting: build a brand new entry for the player
        logger.debug(player_key + " adding player to master")
        result.update({player_key:
                       {'full_name': indict['name_display_first_last'],
                        'club_code': teamcode,
                        'position': indict['pos'],
                        'pos_type': pos_type,
                        statkey: stats}})
    return result
def search_dictionary(indict, hometeam, awayteam, teamcode, result, prevlev):
    """
    :param indict: boxscore dictionary for current game
    :param hometeam: team code starts as None and set once found to pass deeper
    :param awayteam: team code starts as None and set once found to pass deeper
    :param teamcode: set to hometeam or awayteam once team_flag is found
    :param result: result dictionary, starts blank, updated for each entry
    :param prevlev: previous level key, used to identify pitchers vs batters
    Recursively walk the boxscore: nested dicts re-enter this function,
    nested lists go to search_list. As soon as a player entry ('name' key)
    is found it is added to result and this recursion level returns.
    """
    keylist = list(indict.keys())
    # save team codes to pass to lower dict levels where player data resides
    if 'team_flag' in keylist:
        if indict['team_flag'] == 'home':
            teamcode = hometeam
        else:
            teamcode = awayteam
    # if player found, create entry in result and return to previous level
    if 'name' in keylist:
        player_stats = build_player_stats(prevlev,
                                          indict)
        statkey = player_stats[0]
        stats = player_stats[1]
        pos_type = player_stats[2]
        result = update_result_dict(indict,
                                    result,
                                    statkey,
                                    stats,
                                    teamcode,
                                    pos_type)
        return result
    # for each dictionary value call appropriate function based on type;
    # the current key becomes prevlev so 'pitcher'/'batter' reach the player
    for dictkey in keylist:
        if isinstance(indict[dictkey], dict):
            result = search_dictionary(indict[dictkey],
                                       hometeam,
                                       awayteam,
                                       teamcode,
                                       result,
                                       dictkey)
        elif isinstance(indict[dictkey], list):
            result = search_list(indict[dictkey],
                                 hometeam,
                                 awayteam,
                                 teamcode,
                                 result,
                                 dictkey)
    # return whatever is in result dictionary at end of this dictionary level
    return result
def search_list(inlist, hometeam, awayteam, teamcode, result, prevlev):
    """
    :param inlist: list from boxscore dictionary to be parsed
    :param hometeam: team code starts as None and set once found to pass deeper
    :param awayteam: team code starts as None and set once found to pass deeper
    :param teamcode: set to hometeam or awayteam once team_flag is found
    :param result: result dictionary, starts blank, updated for each entry
    :param prevlev: previous level key, used to identify pitchers vs batters
    Dispatch each element of the list: dicts go back to search_dictionary,
    nested lists recurse here. prevlev passes through unchanged because a
    list does not introduce a new dictionary key.
    """
    for element in inlist:
        if isinstance(element, dict):
            result = search_dictionary(element,
                                       hometeam,
                                       awayteam,
                                       teamcode,
                                       result,
                                       prevlev)
        elif isinstance(element, list):
            result = search_list(element,
                                 hometeam,
                                 awayteam,
                                 teamcode,
                                 result,
                                 prevlev)
    return result
def invoke_updplayer_as_sub(gamedate, arg_path):
    """
    :param gamedate: date of the games in format "MM-DD-YYYY"
    :param arg_path: path to ini files
    Entry point used when this script is imported rather than run directly.
    """
    completion = main(gamedate, arg_path)
    logger.info('Script as function completion code: ' + str(completion))
    return completion
def main(gamedate, arg_path):
    """
    Main process to update the player master.

    :param gamedate: date of the games in format "MM-DD-YYYY" (None = today)
    :param arg_path: path to ini files (None = current directory)
    :returns: 0 success, 1/2 config errors, 20 when no daily schedule exists

    Determine file names based on date of games, load the day's schedule
    (possible there are no games for date) and the current player master,
    apply the day's updates, write a dated snapshot, then move the snapshot
    over the ongoing master file name.
    """
    # normalize ini path; empty string means current working directory
    if arg_path is None:
        ini_path = ''
    else:
        ini_path = arg_path
    init_logger(ini_path)
    # get the data directory from the config file
    try:
        section = "DirectoryPaths"
        key = "DataPath"
        config = myutils.load_config_file(ini_path + CONFIG_INI, logger)
        data_path = myutils.get_config_value(config, logger, section, key)
    except myutils.ConfigLoadError:
        return 1
    except myutils.ConfigKeyError:
        return 2
    # io = (daily schedule input name, dated player snapshot output name)
    io = determine_filenames(gamedate)
    schedule_in = data_path + io[0]
    player_out = data_path + io[1]
    player_in = data_path + PLAYER_MSTR_I
    # load daily schedule dictionary into memory, must exist or will bypass
    try:
        todays_schedule_dict = load_daily_schedule(schedule_in)
    except LoadDictionaryError:
        return 20
    # load player master dictionary into memory
    try:
        with open(player_in, 'r') as playerMaster:
            player_mstr_dict = json.load(playerMaster)
    # if no master build one from scratch
    except Exception:
        player_mstr_dict = {}
        logger.info('Creating player master file: ' + player_out)
    # use daily schedule to extract info from associated boxscore dictionaries
    todays_player_dict = process_schedule(todays_schedule_dict)
    # update master dictionary variable and write to snapshot output file
    player_mstr_dict.update(todays_player_dict)
    with open(player_out, 'w') as playerfile:
        json.dump(player_mstr_dict, playerfile,
                  sort_keys=True, indent=4, ensure_ascii=True)
    # NOTE(review): shutil.move renames the dated snapshot onto the master
    # name, so no snapshot file actually remains afterwards; the original
    # comment said "copy" - confirm whether shutil.copy was intended
    shutil.move(player_out, player_in)
    return 0
# script entry point: parse command arguments, run main, exit with its code
if __name__ == '__main__':
    args = get_command_arguments()
    cc = main(args.game_date, args.ini_path)
    logger.info('Script as main completion code: ' + str(cc))
    sys.exit(cc)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,063 | RBecker262/ChiPyProject | refs/heads/master | /webapp/apifuncs.py | """
apifuncs.py
Author: Robert Becker
Date: July 2, 2017
Purpose: contains all functions used by the various API's of this website
"""
import requests
import json
import collections
class LoadDictionaryError(ValueError):
    """Raised when an online dictionary cannot be fetched or parsed."""
    pass
def load_dictionary_from_url(dict_url):
    """
    :param dict_url: url of online dictionary to be loaded
    Fetch and parse the dictionary; raise LoadDictionaryError on any
    failure so callers can treat "not available" uniformly.
    """
    try:
        response = requests.get(dict_url)
        loaded = json.loads(response.text)
    except Exception:
        raise LoadDictionaryError('Dictionary not available')
    return loaded
def get_player_stats(api_url):
    """
    :param api_url: specific url used to get player stats (season only)
    Call the given stats API and return (batters, pitchers) lists ordered
    by player key. API's using this function:
    /api/v1/players/team/xxxxx       argument is team code
    /api/v1/players/lastname/xxxxx   argument is partial last name
    /api/v1/boxscore/player/xxxxx    argument is player code
    """
    api_resp = requests.get(api_url)
    loaded = json.loads(api_resp.content.decode())
    # build the display tables sorted on the player key
    batters = [loaded['batters'][k] for k in sorted(loaded['batters'].keys())]
    pitchers = [loaded['pitchers'][k]
                for k in sorted(loaded['pitchers'].keys())]
    return (batters, pitchers)
def get_pitching_stats(plyr_data, key, team):
    """
    :param plyr_data: player entry from player master file
    :param key: player key used in result dictionary
    :param team: team master entry for player's team
    Build the pitcher display entry for the html form, keyed by player code.
    """
    season = plyr_data['stats_pitching']
    entry = {"name": plyr_data['full_name'],
             "team": team['club_short'],
             "wins": season['wins'],
             # era is rendered as a fixed two-decimal string
             "era": str("{:.2f}".format(season['era'])),
             "er": season['er'],
             "ip": season['ip'],
             "hits": season['hits'],
             "so": season['so'],
             "walks": season['walks'],
             "saves": season['saves'],
             "code": key}
    return {key: entry}
def get_batting_stats(plyr_data, key, team):
    """
    :param plyr_data: player entry from player master file
    :param key: player key used in result dictionary
    :param team: team master entry for player's team
    Build the batter display entry for the html form, keyed by player code.
    Returns None for pitchers who have no batting average above .000.
    """
    season = plyr_data['stats_batting']
    # keep all position players; keep a pitcher only if he has ever hit
    if plyr_data['pos_type'] != 'B' and not season['avg'] > '.000':
        return None
    return {key:
            {"name": plyr_data['full_name'],
             "team": team['club_short'],
             "pos": plyr_data['position'],
             "avg": season['avg'],
             "hits": season['hits'],
             "hr": season['hr'],
             "rbi": season['rbi'],
             "runs": season['runs'],
             "walks": season['walks'],
             "code": key}}
def get_today_stats(box_url, homeaway, playercode):
    """
    :param box_url: MLB url of boxscore for this game to get player data
    :param homeaway: indicates if team is 'home' or 'away'
    :param playercode: player code to find within boxscore
    Load the boxscore and scan its batting and pitching sections for the
    player. The boxscore may not be available yet, which is valid, so an
    empty result is returned in that case (and when the player is absent).
    """
    stats = {}
    try:
        box_dict = load_dictionary_from_url(box_url)
        batting = box_dict['data']['boxscore']['batting']
        pitching = box_dict['data']['boxscore']['pitching']
    except Exception:
        # no boxscore yet (or unexpected layout): report nothing played
        return stats
    # batting/pitching each hold two entries: one home side, one away side
    for x in range(0, 2):
        # match batting dictionary list item with player home or away value
        if batting[x]['team_flag'] == homeaway:
            for p in range(0, len(batting[x]['batter'])):
                if batting[x]['batter'][p]['name'] == playercode:
                    stats.update({"batting": batting[x]['batter'][p]})
        # match pitching dictionary list item with player home or away value
        if pitching[x]['team_flag'] == homeaway:
            # a lone pitcher appears as a dict, multiple pitchers as a list
            if isinstance(pitching[x]['pitcher'], dict):
                if pitching[x]['pitcher']['name'] == playercode:
                    stats.update({"pitching": pitching[x]['pitcher']})
            else:
                for p in range(0, len(pitching[x]['pitcher'])):
                    if pitching[x]['pitcher'][p]['name'] == playercode:
                        stats.update({"pitching": pitching[x]['pitcher'][p]})
    return stats
def add_todays_results(result1, result2, playercode, fullname, pos, clubname):
    """
    :param result1: player stats from game1 (if he played)
    :param result2: player stats from game2 (if he played)
    :param playercode: player key to player master
    :param fullname: player full name
    :param pos: position played
    :param clubname: team short name
    Combine both games' stats; a player may have batting stats, pitching
    stats, both, or neither, in either game.
    """
    return {"batters": add_todays_batting(result1,
                                          result2,
                                          playercode,
                                          fullname,
                                          pos,
                                          clubname),
            "pitchers": add_todays_pitching(result1,
                                            result2,
                                            playercode,
                                            fullname,
                                            clubname)}
def add_todays_batting(result1, result2, playercode, fullname, pos, clubname):
    """
    :param result1: player stats from game1 (if he played)
    :param result2: player stats from game2 (if he played)
    :param playercode: player key to player master
    :param fullname: player full name
    :param pos: position played
    :param clubname: team short name
    Sum today's batting stats across up to two games; return {} if the
    player did not bat. The 'avg' field carries At Bats for today's view
    (the column heading switches between AVG and AB accordingly).
    """
    # translation table from MLB's stat keys to the local stat keys
    bat_translation = {
        'h': 'bhits',
        'bb': 'bwalks',
        'hr': 'hr',
        'rbi': 'rbi',
        'r': 'runs',
        'ab': 'avg'
    }
    # zero every stat so the per-game values can simply be summed
    game_stats = {local: 0 for local in bat_translation.values()}
    for game_result in (result1, result2):
        if 'batting' in game_result.keys():
            game_stats = collate_stats(game_result['batting'],
                                       game_stats,
                                       bat_translation)
    # any non-zero total means the player batted today
    if sum(collections.Counter(game_stats).values()) > 0:
        return {playercode:
                {"name": fullname,
                 "team": clubname,
                 "pos": pos,
                 "avg": game_stats['avg'],
                 "hits": game_stats['bhits'],
                 "hr": game_stats['hr'],
                 "rbi": game_stats['rbi'],
                 "runs": game_stats['runs'],
                 "walks": game_stats['bwalks'],
                 "code": playercode}}
    return {}
def add_todays_pitching(result1, result2, playercode, fullname, clubname):
    """
    :param result1: player stats from game1 (if he played)
    :param result2: player stats from game2 (if he played)
    :param playercode: player key to player master
    :param fullname: player full name
    :param clubname: team short name
    Sum today's pitching stats across up to two games; return {} if the
    player did not pitch. 'ip' holds outs for today's view (the column
    heading switches between IP and Outs) and era is computed from outs
    and earned runs. 'win' and 'save' arrive as booleans and count as 1.
    """
    # translation table from MLB's stat keys to the local stat keys
    pitch_translation = {
        'win': 'wins',
        'so': 'so',
        'bb': 'pwalks',
        'h': 'phits',
        'out': 'ip',
        'er': 'er',
        'save': 'saves',
    }
    # zero every stat so the per-game values can simply be summed
    game_stats = {}
    for stat in pitch_translation.keys():
        game_stats[pitch_translation[stat]] = 0
    if 'pitching' in result1.keys():
        game_stats = collate_stats(result1['pitching'],
                                   game_stats,
                                   pitch_translation)
    if 'pitching' in result2.keys():
        game_stats = collate_stats(result2['pitching'],
                                   game_stats,
                                   pitch_translation)
    # bug fix: era was only assigned when outs > 0, leaving it unbound and
    # crashing the entry build below for a pitcher who has stats (walks,
    # hits) without recording an out; default it first
    era = '0.00'
    if game_stats['ip'] > 0:
        ert = (27 / game_stats['ip']) * game_stats['er']
        era = str("{:.2f}".format(ert))
    # any non-zero total means the player pitched today
    allstats = collections.Counter(game_stats)
    if sum(allstats.values()) > 0:
        pitching = {playercode:
                    {"name": fullname,
                     "team": clubname,
                     "wins": game_stats['wins'],
                     "era": era,
                     "er": game_stats['er'],
                     "ip": game_stats['ip'],
                     "hits": game_stats['phits'],
                     "so": game_stats['so'],
                     "walks": game_stats['pwalks'],
                     "saves": game_stats['saves'],
                     "code": playercode}}
    else:
        pitching = {}
    return pitching
def collate_stats(stats, day_stats, stat_trans):
    """
    :param stats: batting or pitching entry from MLB boxscore for 1 game
    :param day_stats: accumulated stats for a day to which to add
    :param stat_trans: translates MLB stat keys to local stat keys
    Add one player-game's stats to the running totals for the day.
    Boolean stats arrive as the strings 'true'/'false' and count as 1/0.
    """
    for statkey in stat_trans.keys():
        if statkey in stats.keys():
            value = stats[statkey]
            if value == 'true':
                day_stats[stat_trans[statkey]] += 1
            elif value == 'false':
                # bug fix: int('false') raised ValueError; a boolean stat
                # not earned this game simply adds nothing
                pass
            else:
                day_stats[stat_trans[statkey]] += int(value)
    return day_stats
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,064 | RBecker262/ChiPyProject | refs/heads/master | /webapp/views.py | from flask import render_template, redirect, flash, request, url_for
import requests
import json
from webapp import webapp
from .forms import HomeForm, AboutForm, ByLastNameForm, ByTeamForm
from .forms import PlayerStatsForm, WatchListForm, VanAllanEffectForm
from webapp import apifuncs
# data file locations shared with the backend update scripts
DATA_FOLDER = '/usr/tmp/data/'
TEAM_MSTR_I = DATA_FOLDER + 'teamMaster.json'    # team master dictionary
PLYR_MSTR_I = DATA_FOLDER + 'playerMaster.json'  # player master dictionary
# MLB boxscore url template and the stats API server base address
BOXSCORE = 'http://gd2.mlb.com/_directory_/boxscore.json'
API_SERVER = 'http://thesandlot-env.aquapjmcqz.us-east-2.elasticbeanstalk.com'
@webapp.route('/')
@webapp.route('/index')
@webapp.route('/TheSandlot')
def thesandlot():
    """Render the site Home page with a welcome message."""
    flash("Welcome to Rob's Sandlot!")
    return render_template("base.html",
                           title='Home',
                           form=HomeForm(),
                           errors=False,
                           dispflash=True)
@webapp.route('/ByTeam', methods=['GET', 'POST'])
def byteam():
    """
    Show all teams, or jump to a team's roster when a row was clicked.
    """
    form = ByTeamForm()
    # a team row was clicked from the list: redirect to its player stats
    if form.teamcode.data is not None:
        return redirect(url_for('playerstats', teamcode=form.teamcode.data))
    # otherwise fetch every team from the teams API
    api_resp = requests.get(API_SERVER + '/api/v1/teams')
    loaded = json.loads(api_resp.content.decode())
    # one display row per team, ordered by the team key
    teams = [loaded[code] for code in sorted(loaded.keys())]
    flash('Click on row to view team roster / stats')
    return render_template("byteam.html",
                           form=form,
                           teams=teams,
                           errors=False,
                           dispflash=True)
@webapp.route('/ByLastName', methods=['GET', 'POST'])
def bylastname():
    """
    Show the partial-last-name search form; on a non-empty submit redirect
    to the player stats page for the matching players.
    errors controls flash font: False = green (normal), True = red (error).
    """
    form = ByLastNameForm()
    lastname = form.lastname.data
    # a non-empty POSTed name goes straight to the stats page
    if request.method == 'POST' and len(lastname.strip()) > 0:
        return redirect(url_for('playerstats',
                                lastname=lastname,
                                displastname=True))
    # an empty POSTed name is an input error; a plain GET is not
    errors = request.method == 'POST'
    flash('Enter at least 1 letter of player last name:')
    return render_template("base.html",
                           form=form,
                           displastname=True,
                           errors=errors,
                           dispflash=True)
@webapp.route('/PlayerStats', methods=['GET', 'POST'])
def playerstats(teamcode=None, lastname=None, displastname=False):
    """
    :param teamcode: code of team for displaying all players for team
    :param lastname: partial name for displaying players by name search
    :param displastname: used in HTML to control display of Last Name field
    Handle every way of displaying player stats, season and today:
    - GET with teamcode: season stats for a whole team roster
    - GET with lastname: season stats for players matching a partial name
    - POST with a playercode: today's stats for the clicked player row
    - POST with the lastname field: season stats for a new name search
    The Last Name field stays visible only when we arrived via name search.
    errors controls flash font: False = green (normal), True = red (error).
    """
    form = PlayerStatsForm()
    errors = False
    # possible arguments passed via the query string or the form
    teamcode = request.args.get('teamcode')
    lastname = request.args.get('lastname')
    playercode = form.playercode.data
    # column headings for batting average and innings pitched;
    # today's stats switch them to At Bats and Outs below
    avg_ab = 'AVG'
    ip_outs = 'IP'
    if request.method == "GET" and teamcode:
        # display all players for a team via the teamplayers api
        url = API_SERVER + '/api/v1/players/team/' + teamcode
        # fix: unpack directly instead of binding the tuple to a local
        # named 'list', which shadowed the builtin
        batters, pitchers = apifuncs.get_player_stats(url)
        form.displastname.data = displastname
        flash("Season stats displayed, toggle player row for today's stats")
    elif request.method == "GET" and lastname:
        # display players matching a partial name via the lastname api;
        # carry displastname and lastname into the form for later requests
        displastname = request.args.get('displastname')
        form.displastname.data = displastname
        form.lastname.data = lastname
        url = API_SERVER + '/api/v1/players/lastname/' + lastname
        batters, pitchers = apifuncs.get_player_stats(url)
        flash("Season stats displayed, toggle player row for today's stats")
    elif playercode is not None and str(playercode).strip() != '':
        # user clicked on a player row: call the todays-stats api;
        # keep the Last Name field only if we came from a name search
        displastname = form.displastname.data
        if displastname is not None:
            lastname = form.lastname.data
        url = API_SERVER + '/api/v1/boxscore/player/' + playercode
        batters, pitchers = apifuncs.get_player_stats(url)
        avg_ab = 'AB'
        ip_outs = 'Outs'
        form.playercode.data = playercode
        if len(batters) == 0 and len(pitchers) == 0:
            flashmsg = playercode + ' has not played yet today'
            errors = True
        else:
            flashmsg = 'Stats from games played today'
        flash(flashmsg)
    else:
        # partial name field updated: if valid (len > 0) call lastname api
        displastname = form.displastname.data
        lastname = form.lastname.data
        if request.method == 'POST' and len(lastname.strip()) > 0:
            url = API_SERVER + '/api/v1/players/lastname/' + lastname
            batters, pitchers = apifuncs.get_player_stats(url)
            msg = "Season stats displayed, toggle player row for today's stats"
            flash(msg)
        else:
            batters = []
            pitchers = []
            errors = True
            flash('Enter at least 1 letter of player last name:')
    return render_template("playerstats.html",
                           form=form,
                           playercode=playercode,
                           batters=batters,
                           pitchers=pitchers,
                           displastname=displastname,
                           errors=errors,
                           avgab=avg_ab,
                           ipouts=ip_outs,
                           dispflash=True)
@webapp.route('/WatchList')
def watchlist():
    """Render the Watch List page."""
    page_form = WatchListForm()
    flash('View and update the Watch List')
    return render_template(
        "watchlist.html",
        form=page_form,
        displastname=False,
        errors=False,
        dispflash=True)
@webapp.route('/VanAllanEffect')
def vanallaneffect():
    """Render the VanAllanEffect page."""
    page_form = VanAllanEffectForm()
    flash('SAVE THE CARDINALS!')
    return render_template(
        "vanallaneffect.html",
        form=page_form,
        displastname=False,
        errors=True,
        dispflash=True)
@webapp.route('/About')
def about():
    """Render the About page (no flash message shown)."""
    page_form = AboutForm()
    return render_template(
        "about.html",
        form=page_form,
        displastname=False,
        errors=False,
        dispflash=False)
###############################################################################
#
# A PPPPPPPPP IIIIIIIIIIII
# AAA PP PP II
# AA AA PP PP II
# AA AA PP PP II S E C T I O N
# AAAAAAAAA PPPPPPPPP II ===================
# AA AA PP II
# AA AA PP II
# AA AA PP IIIIIIIIIIII
#
###############################################################################
@webapp.route('/api/v1/teams', methods=['GET'])
def api_v1_teams():
    """
    Retrieve info for all teams, with opponent and time for today's games.
    Returns a JSON string keyed by club name; teams not playing today are
    included with empty opponent/time fields.
    """
    with open(TEAM_MSTR_I, 'r') as teamMaster:
        team_mstr = json.load(teamMaster)
    result = {}
    for code, entry in team_mstr.items():
        # establish opponent and time of game (or game 1 of double header)
        opponent = ''
        gametime = ''
        if 'today_1_time' in entry.keys():
            opponent = entry['today_opp']
            gametime = entry['today_1_time']
            # display times of both games if playing a double header
            if 'today_2_time' in entry.keys():
                gametime += " / " + entry['today_2_time']
        # result will include all teams whether they play today or not
        result[entry['club_name']] = {"code": code,
                                      "club": entry['club_name'],
                                      "short": entry['club_short'],
                                      "record": entry['record'],
                                      "opponent": opponent,
                                      "time": gametime}
    # convert dictionary result to a string and return
    return json.dumps(result)
@webapp.route('/api/v1/players/team/<string:teamcode>', methods=['GET'])
def api_v1_players_team(teamcode):
    """
    :param teamcode: team for which to display player season stats
    Retrieves season stats for all players on the specified team, returned
    as a JSON string with separate "batters" and "pitchers" sections.
    """
    with open(PLYR_MSTR_I, 'r') as playerMaster:
        plyr_mstr = json.load(playerMaster)
    with open(TEAM_MSTR_I, 'r') as teamMaster:
        teams = json.load(teamMaster)
    batters = {}
    pitchers = {}
    # examine club code for every player to see if it matches team selected
    for pkey, player in plyr_mstr.items():
        if player['club_code'] != teamcode:
            continue
        # grab team entry for player's team
        team = teams[player['club_code']]
        # player on team selected so see if he has batting statistics
        if 'stats_batting' in player:
            entry = apifuncs.get_batting_stats(player, pkey, team)
            # update is conditional for pitcher, must have batting stat > 0
            if entry is not None:
                batters.update(entry)
        # player on team selected so see if he has pitching statistics
        if 'stats_pitching' in player:
            pitchers.update(apifuncs.get_pitching_stats(player, pkey, team))
    # create dictionary result with a section for each, batters and pitchers
    return json.dumps({"batters": batters, "pitchers": pitchers})
@webapp.route('/api/v1/players/lastname/<string:lastname>', methods=['GET'])
def api_v1_players_lastname(lastname):
    """
    :param lastname: partial last name of player
    Retrieves season stats for all players whose last name starts with the
    partial name entered (case-insensitive, whitespace trimmed).
    """
    with open(PLYR_MSTR_I, 'r') as playerMaster:
        plyr_mstr = json.load(playerMaster)
    with open(TEAM_MSTR_I, 'r') as teamMaster:
        teams = json.load(teamMaster)
    batters = {}
    pitchers = {}
    # normalize the search argument once, outside the loop
    wanted = lastname.strip().lower()
    # examine all player keys (last name) in master file
    for pkey, player in plyr_mstr.items():
        # prefix match on the cleaned, lower-cased last name
        if not pkey.strip().lower().startswith(wanted):
            continue
        # grab team entry for player's team
        team = teams[player['club_code']]
        # player name matches argument so see if he has batting statistics
        if 'stats_batting' in player:
            entry = apifuncs.get_batting_stats(player, pkey, team)
            # update is conditional for pitcher, must have batting stat > 0
            if entry is not None:
                batters.update(entry)
        # player name matches argument so see if he has pitching statistics
        if 'stats_pitching' in player:
            pitchers.update(apifuncs.get_pitching_stats(player, pkey, team))
    # create dictionary result with a section for each, batters and pitchers
    return json.dumps({"batters": batters, "pitchers": pitchers})
@webapp.route('/api/v1/boxscore/player/<string:playercode>', methods=['GET'])
def api_v1_boxscore_player(playercode):
    """
    :param playercode: identifies player for which to retrieve today's stats
    Retrieves today's stats from the MLB boxscore for a specific player.
    Stats from both games of a double header are combined into one result.
    """
    with open(PLYR_MSTR_I, 'r') as playerMaster:
        players = json.load(playerMaster)
    with open(TEAM_MSTR_I, 'r') as teamMaster:
        teams = json.load(teamMaster)
    teamcode = players[playercode]['club_code']
    result1 = {}
    result2 = {}
    # if a game is scheduled (link on team master), get stats from game
    if "today_1" in teams[teamcode].keys():
        boxurl = BOXSCORE.replace('/_directory_/', teams[teamcode]['today_1'])
        result1 = apifuncs.get_today_stats(boxurl,
                                           teams[teamcode]['today_home_away'],
                                           playercode)
    # if a double header is scheduled, get stats from that game too
    if "today_2" in teams[teamcode].keys():
        boxurl = BOXSCORE.replace('/_directory_/', teams[teamcode]['today_2'])
        # bug fix: previously passed the raw game directory instead of the
        # computed boxscore url, inconsistent with the game-1 call above
        result2 = apifuncs.get_today_stats(boxurl,
                                           teams[teamcode]['today_home_away'],
                                           playercode)
    # add stats together from both games though there is usually only one
    result = apifuncs.add_todays_results(result1,
                                         result2,
                                         playercode,
                                         players[playercode]['full_name'],
                                         players[playercode]['position'],
                                         teams[teamcode]['club_short'])
    # convert dictionary result to a string and return
    result_str = json.dumps(result)
    return result_str
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,065 | RBecker262/ChiPyProject | refs/heads/master | /backend/myutils.py | """
myutils.py
Author: Robert Becker
Date: June 25, 2017
Purpose: to house various common utilities
load_config_file - loads config file into an object and returns
get_config_value - retrieves value for section/key specified
"""
import configparser
class ConfigLoadError(ValueError):
    """Raised when the configuration file cannot be opened or parsed."""
    pass
class ConfigKeyError(ValueError):
    """Raised when a requested section/key pair is missing from the config."""
    pass
def load_config_file(config_file, logger):
    """
    Load a configuration file and return the parsed ConfigParser object.

    :param config_file: name of config file to load
    :param logger: logger object to log messages during the process
    :raises ConfigLoadError: if the file cannot be opened or parsed
    """
    logger.info('Config file location: ' + config_file)
    config = configparser.ConfigParser()
    # read_file() (unlike read()) raises when the file is missing, which is
    # the desired behavior; the with-block guarantees the handle is closed.
    # Previously the file was opened without closing it and then parsed a
    # second time by a redundant config.read() call.
    try:
        with open(config_file) as cfg:
            config.read_file(cfg)
    except Exception as e:
        errmsg = 'Error loading Configuration file. . .'
        logger.critical(errmsg)
        logger.exception(e)
        raise ConfigLoadError(errmsg)
    return config
def get_config_value(config_obj, logger, section, key):
    """
    Return the value stored under section/key in the config object.

    :param config_obj: ConfigParser object to read from
    :param logger: logger object to log messages during the process
    :param section: name of section to retrieve
    :param key: name of key value to retrieve within section
    :raises ConfigKeyError: if the section/key pair does not exist
    """
    # fail loudly when the key is absent rather than returning a default
    if not config_obj.has_option(section, key):
        errmsg = 'Config key ' + key + ' missing from section ' + section
        logger.critical(errmsg)
        raise ConfigKeyError(errmsg)
    return config_obj.get(section, key)
| {"/thesandlot.py": ["/webapp/__init__.py"], "/tests/test_apifuncs.py": ["/webapp/__init__.py"], "/webapp/views.py": ["/webapp/__init__.py", "/webapp/forms.py"]} |
76,066 | shubhampachori12110095/MTA | refs/heads/master | /greedy.py | import gym, numpy as np
from utils import *
from methods import *
from true_online_GTD import *
def eval_greedy_per_run(env, runtime, runtimes, episodes, target, behavior, gamma, Lambda, alpha, beta, evaluation):
    """
    Run one greedy-lambda trial and return its traces.

    :param runtime: zero-based index of this run (for progress printing)
    :param runtimes: total number of runs
    :return: (value_trace, lambda_trace, var_trace) from greedy()
    """
    print('running %d of %d for greedy' % (runtime + 1, runtimes))
    # bug fix: the gamma/alpha/beta arguments were accepted but ignored
    # (hard-coded to 0.95 / 0.05 / 0.0001); pass the caller's values through
    value_trace, lambda_trace, var_trace = greedy(env, episodes, target, behavior, Lambda, gamma=gamma, alpha=alpha, beta=beta, diagnose=False, evaluation=evaluation)
    return (value_trace, lambda_trace, var_trace)
def eval_greedy(env, expectation, variance, stat_dist, behavior, target, gamma = lambda x: 0.95, alpha=0.05, beta=0.05, runtimes=20, episodes=int(1e5), evaluation = None):
    """
    Run the greedy-lambda method `runtimes` times in parallel.

    Returns raw per-run traces when an online `evaluation` callable is
    supplied, otherwise per-run MSE curves against the supplied ground-truth
    expectation and variance under the stationary distribution.
    """
    # one constant-lambda object per run, all weights initialized to 1
    lambdas = [LAMBDA(env, lambda_type='constant', initial_value=np.ones(env.observation_space.n))
               for _ in range(runtimes)]
    results = Parallel(n_jobs=-1)(
        delayed(eval_greedy_per_run)(env, run, runtimes, episodes, target, behavior,
                                     gamma, lambdas[run], alpha, beta, evaluation)
        for run in range(runtimes))
    value_traces = [entry[0] for entry in results]
    lambda_trace = [entry[1] for entry in results]
    var_traces = [entry[2] for entry in results]
    if evaluation is not None:
        # traces already hold scalar evaluations; just stack them
        return (np.concatenate(value_traces, axis=1).T,
                np.concatenate(lambda_trace, axis=1).T,
                np.concatenate(var_traces, axis=1).T)
    # no evaluation callable: compute MSE of every saved weight vector
    error_value = np.zeros((runtimes, episodes))
    error_var = np.zeros((runtimes, episodes))
    for run in range(runtimes):
        for j, w in enumerate(var_traces[run]):
            error_var[run, j] = mse(w, variance, stat_dist)
        for j, w in enumerate(value_traces[run]):
            error_value[run, j] = mse(w, expectation, stat_dist)
    return error_value, np.concatenate(lambda_trace, axis=1).T, error_var
def greedy(env, episodes, target, behavior, Lambda, gamma = lambda x: 0.95, alpha = 0.05, beta = 0.05, diagnose = False, evaluation = None):
    """
    Off-policy evaluation where each state's lambda is set greedily from the
    estimated squared bias (errsq) and variance (varg) of the return.

    :param env: environment with a discrete observation space
    :param episodes: number of episodes to run
    :param target: target policy probabilities (|S| x |A|)
    :param behavior: behavior policy used to generate actions
    :param Lambda: LAMBDA object whose per-state weights are updated greedily
    :param gamma: state-dependent discount, a function of the feature vector
    :param alpha: primary learner step size
    :param beta: secondary step size for the GTD learners
    :param diagnose: if True, print per-step diagnostics
    :param evaluation: optional callable (weights, stat_type) -> scalar; when
        given, the traces hold scalars instead of weight vectors
    """
    N = env.observation_space.n
    # lambda chosen at the state after the starting state, one per episode
    lambda_trace = np.zeros((episodes, 1))
    lambda_trace[:] = np.nan
    # three true-online GTD learners: MC first moment, variance, and value
    first_moment_learner, variance_learner, value_learner = TRUE_ONLINE_GTD_LEARNER(env), TRUE_ONLINE_GTD_LEARNER(env), TRUE_ONLINE_GTD_LEARNER(env)
    # variance estimate starts at zero
    variance_learner.w_prev, variance_learner.w_curr = np.zeros(env.observation_space.n), np.zeros(env.observation_space.n)
    if evaluation is not None:
        value_trace, var_trace = np.zeros((episodes, 1)), np.zeros((episodes, 1))
        value_trace[:] = np.nan; var_trace[:] = np.nan
    else:
        value_trace, var_trace = [], []
    for epi in range(episodes):
        s_curr, done = env.reset(), False
        starting_state = s_curr
        x_curr = onehot(s_curr, N)
        # reset eligibility traces at the start of every episode
        value_learner.refresh(); first_moment_learner.refresh(); variance_learner.refresh()
        # record current estimates before learning in this episode
        if evaluation is not None:
            value_trace[epi, 0] = evaluation(value_learner.w_curr, 'expectation')
            var_trace[epi, 0] = evaluation(variance_learner.w_curr, 'variance')
        else:
            value_trace.append(np.copy(value_learner.w_curr))
            var_trace.append(np.copy(variance_learner.w_curr))
        while not done:
            action = decide(s_curr, behavior)
            # per-step importance sampling ratio target/behavior
            rho_curr = importance_sampling_ratio(target, behavior, s_curr, action)
            s_next, R_next, done, _ = env.step(action)
            x_next = onehot(s_next, N)
            # learn the first moment of the MC return (lambda = 1)
            first_moment_learner.learn(R_next, gamma(x_next), gamma(x_curr), x_next, x_curr, 1.0, 1.0, rho_curr, alpha, beta)
            # one-step TD error of the current value estimate
            delta_curr = R_next + gamma(x_next) * np.dot(x_next, value_learner.w_curr) - np.dot(x_curr, value_learner.w_curr)
            # squared TD error and squared discount feed the variance learner
            r_bar_next = delta_curr ** 2
            gamma_bar_next = (rho_curr * gamma(x_next)) ** 2
            variance_learner.learn(r_bar_next, gamma_bar_next, 1, x_next, x_curr, 1, 1, 1, alpha, beta)
            # greedy lambda: errsq / (errsq + varg), the bias-variance trade-off
            errsq = (np.dot(x_next, first_moment_learner.w_next) - np.dot(x_next, value_learner.w_curr)) ** 2
            varg = max(0, np.dot(x_next, variance_learner.w_next))
            if errsq + varg > 0:
                Lambda.w[s_next] = errsq / (errsq + varg)
            # trace the lambda of the state one step after the start state
            if s_curr == starting_state and s_next == starting_state + 1:
                lambda_trace[epi, 0] = Lambda.w[s_next]
            if diagnose:
                print("s_curr %d s_next %d errsq %.2e, varg %.2e, lambda_next %.2e" % (s_curr, s_next, errsq, varg, Lambda.w[s_next]))
            # learn the value estimate using the freshly chosen lambdas
            value_learner.learn(R_next, gamma(x_next), gamma(x_curr), x_next, x_curr, Lambda.value(x_next), Lambda.value(x_curr), rho_curr, alpha, beta)
            # roll every learner's weights forward one step
            first_moment_learner.next(); variance_learner.next(); value_learner.next()
            s_curr, x_curr = s_next, x_next
return value_trace, lambda_trace, var_trace | {"/ringworld_MTA.py": ["/greedy.py", "/mta.py"]} |
76,067 | shubhampachori12110095/MTA | refs/heads/master | /ringworld_MTA.py | import matplotlib as mpl
# use a non-interactive backend so the script can run headless
mpl.use('Agg')
from utils import *
from methods import *
from greedy import *
from mta import *
import numpy as np
import numpy.matlib as npm
import warnings, argparse, scipy.io
# command line arguments: step sizes, meta step size, run sizes and policies
parser = argparse.ArgumentParser(description='')
parser.add_argument('--alpha', type=float, default=0.05, help='')
parser.add_argument('--beta', type=float, default=0.05, help='')
parser.add_argument('--kappa', type=float, default=0.01, help='')
parser.add_argument('--episodes', type=int, default=100000, help='')
parser.add_argument('--runtimes', type=int, default=160, help='')
parser.add_argument('--N', type=int, default=11, help='')
parser.add_argument('--target', type=float, default=0.4, help='')
parser.add_argument('--behavior', type=float, default=0.5, help='')
args = parser.parse_args()
# experiment Preparation
N = args.N; env, runtimes, episodes, gamma = RingWorldEnv(args.N, unit = 1), args.runtimes, args.episodes, lambda x: 0.95
alpha, beta, kappa = args.alpha, args.beta, args.kappa
# two-action policies replicated across all states
target_policy = npm.repmat(np.array([args.target, 1 - args.target]).reshape(1, -1), env.observation_space.n, 1)
behavior_policy = npm.repmat(np.array([args.behavior, 1 - args.behavior]).reshape(1, -1), env.observation_space.n, 1)
# get ground truth expectation, variance and stationary distribution
true_expectation, true_variance, stationary_dist = iterative_policy_evaluation(env, target_policy, gamma=gamma)
evaluation = lambda estimate, stat_type: evaluate_estimate(estimate, true_expectation, true_variance, stationary_dist, stat_type)
things_to_save = {}
# run the MTA method and store its error / lambda traces
error_value_mta, lambda_mta, error_var_mta = eval_MTA(env, true_expectation, true_variance, stationary_dist, behavior_policy, target_policy, kappa = kappa, gamma = gamma, alpha=alpha, beta=beta, runtimes=runtimes, episodes=episodes)
things_to_save['error_value_mta'], things_to_save['lambda_mta'], things_to_save['error_var_mta'] = error_value_mta, lambda_mta, error_var_mta
# baselines are only run for the default kappa to avoid duplicated work
if args.kappa == 0.01:
    BASELINE_LAMBDAS = [0, 0.2, 0.4, 0.6, 0.8, 1]
    for baseline_lambda in BASELINE_LAMBDAS:
        Lambda = LAMBDA(env, lambda_type = 'constant', initial_value = baseline_lambda * np.ones(N))
        results = eval_togtd(env, true_expectation, stationary_dist, behavior_policy, target_policy, Lambda, gamma = gamma, alpha=alpha, beta=beta, runtimes=runtimes, episodes=episodes, evaluation = evaluation)
        # HACK: exec builds a distinct result key per baseline lambda value
        exec("things_to_save[\'error_value_togtd_%g\'] = results.copy()" % (baseline_lambda * 1e5))
# run the greedy-lambda baseline for comparison
error_value_greedy, lambda_greedy, error_var_greedy = eval_greedy(env, true_expectation, true_variance, stationary_dist, behavior_policy, target_policy, gamma = gamma, alpha=alpha, beta=beta, runtimes=runtimes, episodes=episodes, evaluation = evaluation)
things_to_save['error_value_greedy'], things_to_save['lambda_greedy'], things_to_save['error_var_greedy'] = error_value_greedy, lambda_greedy, error_var_greedy
# persist everything to a .mat file named after the experiment settings
filename = 'ringworld_N_%s_behavior_%g_target_%g_episodes_%g_kappa_%g' % (N, behavior_policy[0, 0], target_policy[0, 0], episodes, kappa)
scipy.io.savemat(filename, things_to_save)
pass | {"/ringworld_MTA.py": ["/greedy.py", "/mta.py"]} |
76,068 | shubhampachori12110095/MTA | refs/heads/master | /mta.py | import gym, numpy as np
from utils import *
from methods import *
from true_online_GTD import *
def eval_MTA_per_run(env, runtime, runtimes, episodes, target, behavior, kappa, gamma, Lambda, alpha, beta, evaluation):
    """
    Run one MTA trial and return its traces.

    :param runtime: zero-based index of this run (for progress printing)
    :param runtimes: total number of runs
    :return: (value_trace, lambda_trace, L_var_trace) from MTA()
    """
    print('running %d of %d for MTA' % (runtime + 1, runtimes))
    # bug fix: the alpha/beta arguments were accepted but ignored
    # (hard-coded to 0.05); pass the caller's values through
    value_trace, lambda_trace, L_var_trace = MTA(env, episodes, target, behavior, Lambda, kappa=kappa, gamma=gamma, alpha=alpha, beta=beta, diagnose=False, evaluation=evaluation)
    return (value_trace, lambda_trace, L_var_trace)
def eval_MTA(env, expectation, variance, stat_dist, behavior, target, kappa = 0.1, gamma = lambda x: 0.95, alpha=0.05, beta=0.05, runtimes=20, episodes=int(1e5), evaluation = None):
    """
    Run the MTA method `runtimes` times in parallel.

    Returns raw per-run traces when an online `evaluation` callable is
    supplied, otherwise per-run MSE curves against the supplied ground-truth
    expectation and variance under the stationary distribution.
    """
    # one variable-lambda object per run
    lambdas = [LAMBDA(env, lambda_type='variable') for _ in range(runtimes)]
    results = Parallel(n_jobs=-1)(
        delayed(eval_MTA_per_run)(env, run, runtimes, episodes, target, behavior,
                                  kappa, gamma, lambdas[run], alpha, beta, evaluation)
        for run in range(runtimes))
    value_traces = [entry[0] for entry in results]
    lambda_trace = [entry[1] for entry in results]
    L_var_traces = [entry[2] for entry in results]
    if evaluation is not None:
        # traces already hold scalar evaluations; just stack them
        return (np.concatenate(value_traces, axis=1).T,
                np.concatenate(lambda_trace, axis=1).T,
                np.concatenate(L_var_traces, axis=1).T)
    # no evaluation callable: compute MSE of every saved weight vector
    error_value = np.zeros((runtimes, episodes))
    error_L_var = np.zeros((runtimes, episodes))
    for run in range(runtimes):
        for j, w in enumerate(L_var_traces[run]):
            error_L_var[run, j] = mse(w, variance, stat_dist)
        for j, w in enumerate(value_traces[run]):
            error_value[run, j] = mse(w, expectation, stat_dist)
    return error_value, np.concatenate(lambda_trace, axis=1).T, error_L_var
def MTA(env, episodes, target, behavior, Lambda, kappa = 0.1, gamma = lambda x: 0.95, alpha = 0.05, beta = 0.05, diagnose = False, evaluation = None):
    """
    Meta-learn state-dependent lambdas by gradient descent on a greedy
    meta-objective while evaluating the target policy off-policy.

    :param env: environment with a discrete observation space
    :param episodes: number of episodes to run
    :param target: target policy probabilities (|S| x |A|)
    :param behavior: behavior policy used to generate actions
    :param Lambda: LAMBDA object updated by gradient descent each step
    :param kappa: meta step size for the lambda gradient descent
    :param gamma: state-dependent discount, a function of the feature vector
    :param alpha: primary learner step size
    :param beta: secondary step size for the GTD learners
    :param diagnose: unused here; kept for interface parity with greedy()
    :param evaluation: optional callable (weights, stat_type) -> scalar; when
        given, the traces hold scalars instead of weight vectors
    """
    N = env.observation_space.n
    # lambda observed at the state after the starting state, one per episode
    lambda_trace = np.zeros((episodes, 1))
    lambda_trace[:] = np.nan
    if evaluation is not None:
        value_trace, L_var_trace = np.zeros((episodes, 1)), np.zeros((episodes, 1))
        value_trace[:] = np.nan; L_var_trace[:] = np.nan
    else:
        value_trace, L_var_trace = [], []
    # four learners: MC-return expectation, Lambda-return expectation,
    # Lambda-return variance, and the value estimate itself
    MC_exp_learner, L_exp_learner, L_var_learner, value_learner = TRUE_ONLINE_GTD_LEARNER(env), TRUE_ONLINE_GTD_LEARNER(env), TRUE_ONLINE_GTD_LEARNER(env), TRUE_ONLINE_GTD_LEARNER(env)
    for epi in range(episodes):
        s_curr, done = env.reset(), False
        starting_state = s_curr
        x_curr = onehot(s_curr, N)
        # running products of target and behavior probabilities (cumulative
        # importance sampling ratio kept as a separate numerator/denominator)
        rho_accu_nume, rho_accu_deno = 1.0, 1.0
        MC_exp_learner.refresh(); L_exp_learner.refresh(); L_var_learner.refresh(); value_learner.refresh()
        # MC_exp_trace, L_exp_trace, L_var_trace, value_trace
        if evaluation is not None:
            value_trace[epi, 0] = evaluation(value_learner.w_curr, 'expectation')
            L_var_trace[epi, 0] = evaluation(L_var_learner.w_curr, 'variance')
        else:
            value_trace.append(np.copy(value_learner.w_curr))
            L_var_trace.append(np.copy(L_var_learner.w_curr))
        while not done:
            action = decide(s_curr, behavior)
            rho_curr = importance_sampling_ratio(target, behavior, s_curr, action)
            rho_accu_nume *= target[s_curr, action]; rho_accu_deno *= behavior[s_curr, action]
            if rho_accu_nume / rho_accu_deno > 1e6: break # for stability issue
            s_next, R_next, done, _ = env.step(action)
            x_next = onehot(s_next, N)
            # trace the lambda of the state one step after the start state
            if s_curr == starting_state and s_next == starting_state + 1:
                lambda_trace[epi, 0] = Lambda.value(x_next)
            # learn expectation of MC-return!
            MC_exp_learner.learn(R_next, gamma(x_next), gamma(x_curr), x_next, x_curr, 1.0, 1.0, rho_curr, alpha, beta)
            # learn expectation of \Lambda-return!
            L_exp_learner.learn(R_next, gamma(x_next), gamma(x_curr), x_next, x_curr, Lambda.value(x_next), Lambda.value(x_curr), rho_curr, 1.1 * alpha, 1.1 * beta)
            # learn variance of \Lambda-return!
            delta_curr = R_next + gamma(x_next) * np.dot(x_next, value_learner.w_curr) - np.dot(x_curr, value_learner.w_curr)
            # NOTE(review): RuntimeWarning is not raised unless warnings are
            # configured as errors; if it ever fired, r_bar_next would be left
            # unset and the learn() call below would fail -- confirm intent
            try:
                r_bar_next = delta_curr ** 2
            except RuntimeWarning:
                pass
            gamma_bar_next = (Lambda.value(x_next) * gamma(x_next)) ** 2
            L_var_learner.learn(r_bar_next, gamma_bar_next, 1, x_next, x_curr, 1, 1, rho_curr, alpha, beta)
            # GD on greedy meta-objective
            v_next = np.dot(x_next, value_learner.w_curr)
            var_L_next, exp_L_next, exp_MC_next = np.dot(x_next, L_var_learner.w_curr), np.dot(x_next, L_exp_learner.w_curr), np.dot(x_next, MC_exp_learner.w_curr)
            coefficient = gamma(x_next) ** 2 * Lambda.value(x_next) * ((v_next - exp_L_next) ** 2 + var_L_next) + v_next * (exp_L_next + exp_MC_next) - v_next ** 2 - exp_L_next * exp_MC_next
            # step size scaled by the cumulative importance sampling ratio
            Lambda.gradient_descent(x_next, kappa * rho_accu_nume / rho_accu_deno * coefficient)
            # learn value
            value_learner.learn(R_next, gamma(x_next), gamma(x_curr), x_next, x_curr, Lambda.value(x_next), Lambda.value(x_curr), rho_curr, alpha, beta)
            MC_exp_learner.next(); L_exp_learner.next(); L_var_learner.next(); value_learner.next()
            s_curr, x_curr = s_next, x_next
return value_trace, lambda_trace, L_var_trace, | {"/ringworld_MTA.py": ["/greedy.py", "/mta.py"]} |
76,072 | dhvidding/mercury-python | refs/heads/master | /mercury/resources/constants.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mercury.system.singleton import Singleton
@Singleton
class AppConfig:
    """Application-wide configuration constants (singleton)."""
    #
    # Normally, language connector should be a sidecar in the same memory space.
    # Therefore the url is ws://127.0.0.1:8090/ws/lang
    #
    # In case multiple python applications want to share the same set of
    # language connectors, you may change the NETWORK_CONNECTOR to a list of
    # comma separated URLs
    #
    NETWORK_CONNECTOR = 'ws://127.0.0.1:8090/ws/lang'
    #
    # By default, API key location points to environment variable "LANG_API_KEY".
    #
    # If this environment variable is not available, a random API key will be deposited in the temp file system,
    # assuming that the language connector serves the language pack as a "sidecar".
    #
    API_KEY_LOCATION = 'LANG_API_KEY'
    #
    # temporary work directory
    # (for cloud native apps, local file system must be considered transient)
    #
    WORK_DIRECTORY = '/tmp/python'
    #
    # Logger will write logs to the local file system if LOG_FILE is provided.
    # To log to console only, set it to None.
    #
    # LOG_FILE = 'mercury'
    LOG_FILE = None
    #
    # DEBUG | INFO | WARN | ERROR | FATAL
    #
    LOG_LEVEL = 'INFO'
    #
    # Max number of threads for ThreadPoolExecutor
    #
    MAX_THREADS = 250
    #
    # Optional user defined "distributed trace processor"
    #
    # If this named service is available anywhere in the system, we will forward
    # all distributed tracing information to it so that you may save it to a database
    # or search engine for visualization in a UI. Alternatively, you may also reformat
    # the tracing information and forward them to an external distributed tracing server
    # for centralized processing.
    #
    DISTRIBUTED_TRACE_PROCESSOR = 'distributed.trace.processor'
| {"/examples/publisher-example.py": ["/mercury/system/pubsub.py"]} |
76,073 | dhvidding/mercury-python | refs/heads/master | /examples/publisher-example.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from mercury.platform import Platform
from mercury.system.pubsub import PubSub
from mercury.system.utility import Utility
def main():
    """Connect to the cloud, publish ten demo events, then shut down."""
    platform = Platform()
    platform.connect_to_cloud()
    # block until the cloud connection is ready
    while not platform.cloud_ready():
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            # this allows us to stop the application while waiting for cloud connection
            platform.stop()
            return
    util = Utility()
    pubsub = PubSub()
    if not pubsub.feature_enabled():
        print("Pub/Sub feature not available from the underlying event stream")
        print("Did you start the language connector with Kafka?")
        print("e.g. java -Dcloud.connector=kafka -Dcloud.services=kafka.reporter -jar language-connector-1.12.31.jar")
        return
    # Publish an event
    # headers = optional parameters for the event
    # body = event payload
    for x in range(10):
        print("publishing event#", x)
        pubsub.publish("hello.topic",
                       headers={"some_parameter": "some_value", "n": x},
                       body="hello world "+util.get_iso_8601(time.time()))
    # quit application
    platform.stop()


if __name__ == '__main__':
    main()
| {"/examples/publisher-example.py": ["/mercury/system/pubsub.py"]} |
76,074 | dhvidding/mercury-python | refs/heads/master | /mercury/system/pubsub.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mercury.system.singleton import Singleton
from mercury.platform import Platform
from mercury.system.po import PostOffice
from mercury.system.models import EventEnvelope, AppException
from mercury.system.utility import Utility
@Singleton
class PubSub:
    """
    Client-side facade for the language connector's pub/sub service.

    Every operation is relayed to the 'pub.sub.controller' service through
    the post office. Successful subscriptions are cached locally so they can
    be replayed when the connector requests a subscription sync.
    """

    def __init__(self):
        self.platform = Platform()
        self.po = PostOffice()
        self.util = Utility()
        # topic -> {route: parameter list}; used to replay subscriptions
        self.subscription = dict()

        def subscription_sync(headers: dict, body: any):
            # re-subscribe every cached topic/route pair when the connector
            # signals a 'subscription_sync' event
            if 'type' in headers and headers['type'] == 'subscription_sync':
                if len(self.subscription) > 0:
                    for topic in self.subscription:
                        route_map = self.subscription[topic]
                        for route in route_map:
                            parameters = route_map[route]
                            self.platform.log.info('Update subscription '+topic+' -> '+route)
                            self.subscribe(topic, route, parameters)
                else:
                    self.platform.log.info('No subscription to update')

        self.platform.register('pub.sub.sync', subscription_sync, 1, is_private=True)

    def feature_enabled(self):
        """Return True if the underlying event stream supports pub/sub."""
        result = self.po.request('pub.sub.controller', 10.0, headers={'type': 'feature'})
        return self._normalize_result(result, True)

    def list_topics(self):
        """Return the list of known topics."""
        result = self.po.request('pub.sub.controller', 10.0, headers={'type': 'list'})
        return self._normalize_result(result, list())

    def exists(self, topic: str):
        """Return True if the topic exists (False for non-str input)."""
        if isinstance(topic, str):
            result = self.po.request('pub.sub.controller', 10.0, headers={'type': 'exists', 'topic': topic})
            return self._normalize_result(result, True)
        else:
            return False

    def create_topic(self, topic: str):
        """Create a topic; raise ValueError if topic is not a str."""
        if isinstance(topic, str):
            result = self.po.request('pub.sub.controller', 10.0, headers={'type': 'create', 'topic': topic})
            return self._normalize_result(result, True)
        else:
            raise ValueError("topic must be str")

    def delete_topic(self, topic: str):
        """Delete a topic; raise ValueError if topic is not a str."""
        if isinstance(topic, str):
            result = self.po.request('pub.sub.controller', 10.0, headers={'type': 'delete', 'topic': topic})
            return self._normalize_result(result, True)
        else:
            raise ValueError("topic must be str")

    def publish(self, topic: str, headers: dict = None, body: any = None):
        """
        Publish an event to a topic.

        :param topic: destination topic
        :param headers: optional parameters for the event (stringified)
        :param body: event payload
        """
        if isinstance(topic, str):
            # encode payload
            payload = dict()
            payload['body'] = body
            payload['headers'] = self._normalize_headers(headers)
            result = self.po.request('pub.sub.controller', 10.0,
                                     headers={'type': 'publish', 'topic': topic}, body=payload)
            return self._normalize_result(result, True)
        else:
            raise ValueError("topic must be str")

    def subscribe(self, topic: str, route: str, parameters: list = None):
        """
        Subscribe a registered route to a topic and cache the subscription
        so it can be replayed on a subscription sync.
        """
        if isinstance(topic, str) and isinstance(route, str):
            if self.platform.has_route(route):
                normalized_config = self._normalize_parameters(parameters)
                result = self.po.request('pub.sub.controller', 10.0, body=normalized_config,
                                         headers={'type': 'subscribe', 'topic': topic, 'route': route})
                done = self._normalize_result(result, True)
                if done:
                    if topic not in self.subscription:
                        self.subscription[topic] = dict()
                        self.platform.log.info('Subscribed topic ' + topic)
                    route_map: dict = self.subscription[topic]
                    if route not in route_map:
                        route_map[route] = normalized_config
                        self.platform.log.info('Adding '+route+' to topic '+topic)
                return done
            else:
                raise ValueError("Unable to subscribe topic " + topic + " because route " + route + " not registered")
        else:
            raise ValueError("topic and route must be str")

    def unsubscribe(self, topic: str, route: str):
        """Remove a route's subscription to a topic and drop it from the cache."""
        if isinstance(topic, str) and isinstance(route, str):
            if self.platform.has_route(route):
                result = self.po.request('pub.sub.controller', 10.0,
                                         headers={'type': 'unsubscribe', 'topic': topic, 'route': route})
                done = self._normalize_result(result, True)
                if done:
                    if topic in self.subscription:
                        route_map: dict = self.subscription[topic]
                        if route in route_map:
                            route_map.pop(route)
                            self.platform.log.info('Removing ' + route + ' from topic ' + topic)
                            if len(route_map) == 0:
                                self.subscription.pop(topic)
                                self.platform.log.info('Unsubscribed topic ' + topic)
                return done
            else:
                raise ValueError("Unable to unsubscribe topic " + topic + " because route " + route + " not registered")
        else:
            raise ValueError("topic and route must be str")

    @staticmethod
    def _normalize_result(result: EventEnvelope, result_obj: any):
        # unwrap a controller response; raise AppException on non-200 status
        # NOTE(review): returns None implicitly when result is not an
        # EventEnvelope -- confirm whether that can happen in practice
        if isinstance(result, EventEnvelope):
            if result.get_status() == 200:
                if isinstance(result.get_body(), type(result_obj)):
                    return result.get_body()
                else:
                    raise AppException(500, str(result.get_body()))
            else:
                raise AppException(result.get_status(), str(result.get_body()))

    @staticmethod
    def _normalize_headers(headers: dict):
        """Coerce all header keys and values to str; None becomes an empty dict."""
        if headers is None:
            return dict()
        if isinstance(headers, dict):
            result = dict()
            for h in headers:
                result[str(h)] = str(headers[h])
            return result
        else:
            raise ValueError("headers must be dict of str key-values")

    @staticmethod
    def _normalize_parameters(parameters: list):
        """Coerce all parameters to str; None becomes an empty list."""
        if parameters is None:
            return list()
        if isinstance(parameters, list):
            result = list()
            for h in parameters:
                result.append(str(h))
            return result
        else:
            # bug fix: error message previously said "headers"
            raise ValueError("parameters must be a list of str")
| {"/examples/publisher-example.py": ["/mercury/system/pubsub.py"]} |
76,075 | dhvidding/mercury-python | refs/heads/master | /examples/more-demo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from mercury.platform import Platform
from mercury.system.models import EventEnvelope
from mercury.system.po import PostOffice
class Hi:
    """Demo service class whose hello handler echoes the request body."""

    MY_NAME = 'Hi'

    def hello(self, headers: dict, body: any):
        """Singleton handler signature (headers: dict, body: any)."""
        message = self.MY_NAME + " " + str(headers) + ", " + str(body)
        Platform().log.info(message)
        return body
def hello(headers: dict, body: any, instance: int):
    """Demo handler for a multi-instance route: logs the request and echoes it back.

    Returns an EventEnvelope carrying the original body plus a
    'hello: world' header and a copy of every incoming header.
    """
    # regular function signature (headers: dict, body: any, instance: int)
    Platform().log.info("#"+str(instance)+" "+str(headers)+" body="+str(body))
    # to set status, headers and body, return them in an event envelope
    result = EventEnvelope().set_header('hello', 'world').set_body(body)
    # mirror the incoming headers onto the response
    for h in headers:
        result.set_header(h, headers[h])
    return result
def main():
    """Demo entry point: registers routes, exercises send/RPC/broadcast, then blocks forever."""
    platform = Platform()
    # you can register a method of a class
    platform.register('hello.world.1', Hi().hello, 5)
    # or register a function
    platform.register('hello.world.2', hello, 10)
    po = PostOffice()
    # demonstrate sending asynchronously. Note that key-values in the headers will be encoded as strings
    po.send('hello.world.1', headers={'one': 1}, body='hello world one')
    po.send('hello.world.2', headers={'two': 2}, body='hello world two')
    # demonstrate a RPC request (2.0 second timeout)
    try:
        result = po.request('hello.world.2', 2.0, headers={'some_key': 'some_value'}, body='hello world')
        if isinstance(result, EventEnvelope):
            print('Received RPC response:')
            print("HEADERS =", result.get_headers(), ", BODY =", result.get_body(),
                  ", STATUS =",  result.get_status(),
                  ", EXEC =", result.get_exec_time(), ", ROUND TRIP =", result.get_round_trip(), "ms")
    except TimeoutError as e:
        print("Exception: ", str(e))
    # illustrate parallel RPC requests
    event_list = list()
    event_list.append(EventEnvelope().set_to('hello.world.1').set_body("first request"))
    event_list.append(EventEnvelope().set_to('hello.world.2').set_body("second request"))
    try:
        result = po.parallel_request(event_list, 2.0)
        if isinstance(result, list):
            print('Received', len(result), 'RPC responses:')
            for res in result:
                print("HEADERS =", res.get_headers(), ", BODY =", res.get_body(),
                      ", STATUS =",  res.get_status(),
                      ", EXEC =", res.get_exec_time(), ", ROUND TRIP =", res.get_round_trip(), "ms")
    except TimeoutError as e:
        print("Exception: ", str(e))
    # connect to the network
    platform.connect_to_cloud()
    # wait until connected (poll every 100 ms)
    while not platform.cloud_ready():
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            # this allows us to stop the application while waiting for cloud connection
            platform.stop()
            return
    # Demonstrate broadcast feature
    # the event will be broadcast to multiple application instances that serve the same route
    po.broadcast("hello.world.1", body="this is a broadcast message from "+platform.get_origin())
    # demonstrate deferred delivery
    po.send_later('hello.world.1', headers={'hello': 'world'}, body='this message arrives 5 seconds later', seconds=5.0)
    #
    # this will keep the main thread running in the background
    # so we can use Control-C or KILL signal to stop the application
    platform.run_forever()
# script entry point
if __name__ == '__main__':
    main()
| {"/examples/publisher-example.py": ["/mercury/system/pubsub.py"]} |
76,076 | shreshtashetty/MulticamCalibrationCheck | refs/heads/master | /utils.py | import tensorflow as tf
import math
import numpy as np
# from tensorflow.python.framework import ops
from flow_transformer import transformer
# import hyperparams as hyp
from PIL import Image
from scipy.misc import imsave
from math import pi
from skimage.draw import *
# from sklearn.decomposition import PCA
# from sklearn.manifold import TSNE
from colorize import *
EPS = 1e-6
def stop_execution(t, msg = ''):
    """Debug helper: wrap tensor t so that evaluating it prints msg and kills the process.

    Returns a py_func op with t's dtype; intended for one-shot graph debugging only.
    """
    def f(t):
        print(msg)
        exit()
        # unreachable: exit() never returns, but py_func requires a return value
        return t
    return tf.py_func(f, [t], t.dtype)
def print_shape(t):
    """Print a tensor's name followed by its static shape as a Python list."""
    static_shape = t.get_shape().as_list()
    print(t.name, static_shape)
def print_shape2(t, msg =''):
    """Run-time shape probe: when evaluated, prints np.shape of t's value plus msg.

    Acts as an identity op (returns t's value unchanged).
    """
    def f(A):
        print(np.shape(A), msg)
        return A
    return tf.py_func(f, [t], t.dtype)
def split_rt(rt):
    """Split a batched rigid transform into rotation and translation.

    Args:
        rt: B x 4 x 4 (or B x 3 x 4) transform with a static batch dim.
    Returns:
        r: B x 3 x 3 rotation, t: B x 3 translation (the fourth column).
    """
    shape = rt.get_shape()
    bs = int(shape[0])
    r = tf.slice(rt,[0,0,0],[-1,3,3])
    t = tf.reshape(tf.slice(rt,[0,0,3],[-1,3,1]),[bs,3])
    return r, t
def split_intrinsics(k):
    """Split batched camera intrinsics into scalar components.

    Args:
        k: B x 3 x 3 (or B x 3 x 4) intrinsics matrices.
    Returns:
        fx, fy, x0, y0: tensors of shape [B], read from k[:,0,0],
        k[:,1,1], k[:,0,2], k[:,1,2] respectively.

    Cleanup: removed the commented-out earlier (incorrect) index variants
    that obscured which matrix entries are actually used.
    """
    shape = k.get_shape()
    bs = int(shape[0])
    fx = tf.reshape(tf.slice(k,[0,0,0],[-1,1,1]),[bs])
    fy = tf.reshape(tf.slice(k,[0,1,1],[-1,1,1]),[bs])
    x0 = tf.reshape(tf.slice(k,[0,0,2],[-1,1,1]),[bs])
    y0 = tf.reshape(tf.slice(k,[0,1,2],[-1,1,1]),[bs])
    return fx,fy,x0,y0
def merge_rt(r,t):
    """Combine rotation r (B x 3 x 3) and translation t (B x 3) into a B x 4 x 4 transform.

    Inverse of split_rt: appends t as the fourth column and a [0,0,0,1] bottom row.
    """
    shape = r.get_shape()
    bs = int(shape[0])
    bottom_row = tf.tile(tf.reshape(tf.stack([0.,0.,0.,1.]),[1,1,4]),
                         [bs,1,1],name="bottom_row")
    rt = tf.concat(axis=2,values=[r,tf.expand_dims(t,2)],name="rt_3x4")
    rt = tf.concat(axis=1,values=[rt,bottom_row],name="rt_4x4")
    return rt
def random_crop(t,crop_h,crop_w,h,w):
    """Randomly crop a crop_h x crop_w window from an h x w (x C) tensor.

    Offsets are drawn uniformly when there is room to move; otherwise 0.
    Returns (cropped tensor, offset_h, offset_w).
    """
    def off_h(): return tf.random_uniform([], minval=0, maxval=(h-crop_h-1), dtype=tf.int32)
    def off_w(): return tf.random_uniform([], minval=0, maxval=(w-crop_w-1), dtype=tf.int32)
    def zero(): return tf.constant(0)
    offset_h = tf.cond(tf.less(crop_h, h-1), off_h, zero)
    offset_w = tf.cond(tf.less(crop_w, w-1), off_w, zero)
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
def near_topleft_crop(t,crop_h,crop_w,h,w,amount):
    ## take a random crop/pad somewhere in [-amount,amount]
    """Jittered top-left crop: pad by a random amount, re-slice to h x w, then crop.

    Returns (cropped tensor, offset_h, offset_w). amount=0 degenerates to a
    plain top-left crop.
    """
    def get_rand(): return tf.random_uniform([], minval=0, maxval=amount, dtype=tf.int32)
    def get_zero(): return tf.constant(0)
    # pad a bit
    pad_h = tf.cond(tf.greater(amount, 0), get_rand, get_zero)
    pad_w = tf.cond(tf.greater(amount, 0), get_rand, get_zero)
    # t = tf.pad(t, [[pad_h, pad_h], [pad_w, pad_w], [0, 0]], "SYMMETRIC")
    t = tf.pad(t, [[pad_h, pad_h], [pad_w, pad_w], [0, 0]])
    # restore original spatial extent after padding
    t = tf.slice(t,[0,0,0],[h,w,-1])
    # crop a bit
    offset_h = tf.cond(tf.less(crop_h, h), get_rand, get_zero)
    offset_w = tf.cond(tf.less(crop_w, w), get_rand, get_zero)
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
def topleft_crop(t,crop_h,crop_w,h,w):
    """Deterministic crop anchored at the top-left corner; returns (crop, 0, 0)."""
    offset_h = 0
    offset_w = 0
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
def bottomright_crop(t,crop_h,crop_w,h,w):
    """Deterministic crop anchored at the bottom-right corner."""
    offset_h = h-crop_h
    offset_w = w-crop_w
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
def bottomleft_crop(t,crop_h,crop_w,h,w):
    """Deterministic crop anchored at the bottom-left corner."""
    offset_h = h-crop_h
    offset_w = 0
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
def bottomcenter_crop(t,crop_h,crop_w,h,w):
    """Deterministic crop anchored at the bottom edge, centered horizontally.

    Returns (cropped tensor, offset_h, offset_w), matching the other
    *_crop helpers in this file.
    """
    offset_h = h-crop_h
    # bug fix: use floor division -- under Python 3 the original "/" made
    # offset_w a float, which tf.slice rejects as a begin index
    offset_w = (w-crop_w)//2
    t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
    return t_crop, offset_h, offset_w
# def param_crop((t,crop_h,crop_w,offset_h,offset_w)):
# t_crop = tf.slice(t,[offset_h,offset_w,0],[crop_h,crop_w,-1],name="cropped_tensor")
# return t_crop
def norm(x):
    # x should be B x H
    # returns size B
    """Euclidean (L2) norm of each row of a B x H tensor."""
    return tf.sqrt(tf.reduce_sum(tf.square(x),axis=1))
def compute_distance(transform):
    """
    Compute the distance of the translational component of a 4x4 homogeneous matrix.

    transform: B x 4 x 4. Returns a [B] tensor of translation magnitudes.
    """
    # return numpy.linalg.norm(transform[0:3,3])
    r,t = split_rt(transform)
    # t should now be bs x 3
    return norm(t)
def compute_angle_3x3(R):
    """Rotation angle (radians) of a batch of 3x3 rotation matrices via the trace formula."""
    # clamp the trace argument to [-1, 1] to guard acos against round-off
    return tf.acos(tf.minimum(1.,tf.maximum(-1.,(tf.trace(R)-1.)/2.)))
def compute_angle(transform):
    """
    Compute the rotation angle from a 4x4 homogeneous matrix.

    transform: B x 4 x 4. Returns a [B] tensor of angles in radians.
    """
    # an invitation to 3-d vision, p 27
    # return numpy.arccos( min(1,max(-1, (numpy.trace(transform[0:3,0:3]) - 1)/2) ))
    r,t = split_rt(transform)
    return compute_angle_3x3(r)
    #return tf.acos(tf.minimum(1.,tf.maximum(-1.,(tf.trace(r)-1.)/2.)))
def compute_t_diff(rt1, rt2):
    """
    Compute the difference between the magnitudes of the translational components of the two transformations.

    rt1, rt2: B x 4 x 4 transforms. Returns a [B] tensor of |  ||t1|| - ||t2||  |.
    """
    t1 = tf.reshape(tf.slice(rt1,[0,0,3],[-1,3,1]),[-1,3])
    t2 = tf.reshape(tf.slice(rt2,[0,0,3],[-1,3,1]),[-1,3])
    # each t should now be bs x 3
    mag_t1 = tf.sqrt(tf.reduce_sum(tf.square(t1),axis=1))
    mag_t2 = tf.sqrt(tf.reduce_sum(tf.square(t2),axis=1))
    return tf.abs(mag_t1-mag_t2)
def compute_t_ang(rt1, rt2):
    """
    Compute the angle between the translational components of two transformations.

    rt1, rt2: B x 4 x 4 transforms. Returns a [B] tensor of angles in radians;
    EPS in the denominator guards against zero-length translations.
    """
    t1 = tf.reshape(tf.slice(rt1,[0,0,3],[-1,3,1]),[-1,3])
    t2 = tf.reshape(tf.slice(rt2,[0,0,3],[-1,3,1]),[-1,3])
    # each t should now be bs x 3
    mag_t1 = tf.sqrt(tf.reduce_sum(tf.square(t1),axis=1))
    mag_t2 = tf.sqrt(tf.reduce_sum(tf.square(t2),axis=1))
    dot = tf.reduce_sum(t1*t2,axis=1)
    return tf.acos(dot/(mag_t1*mag_t2 + EPS))
def safe_inverse(a):
    """
    safe inverse for rigid transformations
    should be equivalent to
    a_inv = tf.matrix_inverse(a)
    for well-behaved matrices

    Uses the closed form [R^T | -R^T t; 0 0 0 1], which is numerically
    stable because it never inverts a general 4x4.
    """
    bs = tf.shape(a)[0]
    R,T = split_rt(a)
    R_transpose = tf.transpose(R,[0,2,1])
    bottom_row = tf.tile(tf.reshape(tf.stack([0.,0.,0.,1.]),[1,1,4]),tf.stack([bs,1,1]))
    inv = tf.concat(axis=2,values=[R_transpose,-tf.matmul(R_transpose, tf.expand_dims(T,2))])
    inv = tf.concat(axis=1,values=[inv,bottom_row])
    return inv
def ominus(a,b):
    """
    Compute the relative 3D transformation between a and b.

    Input:
    a -- first pose (homogeneous 4x4 matrix)
    b -- second pose (homogeneous 4x4 matrix)

    Output:
    Relative 3D transformation from a to b.

    https://github.com/liruihao/tools-for-rgbd-SLAM-evaluation/blob/master/evaluate_rpe.py
    """
    with tf.name_scope("ominus"):
        # a^-1 * b, using the rigid-transform closed-form inverse
        a_inv = safe_inverse(a)
        return tf.matmul(a_inv,b)
def sincos2r(sina,sinb,sing,cosa,cosb,cosg):
    """Build a batch of rotation matrices from the sines/cosines of Tait-Bryan angles.

    Each argument is a [B] tensor. Returns a B x 3 x 3 rotation computed as
    Rx @ Ry @ Rz (see the ordering note below).
    """
    # i am using Tait-Bryan angles here, so that
    # alph corresponds to x
    # beta corresponds to y
    # gamm corresponds to z
    shape = sina.get_shape()
    one = tf.ones_like(sina,name="one")
    zero = tf.zeros_like(sina,name="zero")
    Rx = tf.reshape(tf.stack([one, zero, zero,
                              zero, cosa, -sina,
                              zero, sina, cosa],
                             axis=1),[-1, 3, 3])
    Ry = tf.reshape(tf.stack([cosb, zero, sinb,
                              zero, one, zero,
                              -sinb, zero, cosb],
                             axis=1),[-1, 3, 3])
    Rz = tf.reshape(tf.stack([cosg, -sing, zero,
                              sing, cosg, zero,
                              zero, zero, one],
                             axis=1),[-1, 3, 3])
    # Rz, Ry, Rx order works with r2abg_v3 and abg2r_v2
    # i like it because of this tutorial
    # http://danceswithcode.net/engineeringnotes/rotations_in_3d/rotations_in_3d_part1.html
    # Rcam = tf.matmul(tf.matmul(Rz,Ry),Rx,name="Rcam")
    # Rx, Ry, Rz order works with r2abg_v2, r2abg, and abg2r
    # i like it because it matches with Matlab
    Rcam = tf.matmul(tf.matmul(Rx,Ry),Rz,name="Rcam")
    return Rcam
def sinabg2r(sina,sinb,sing):
    """Rotation matrix from angle sines only; cosines are recovered as sqrt(1 - sin^2).

    NOTE(review): this assumes each angle lies in [-pi/2, pi/2] so that the
    cosine is non-negative -- confirm with callers. EPS keeps sqrt away
    from a negative argument under round-off.
    """
    cosa = tf.sqrt(1 - tf.square(sina) + EPS)
    cosb = tf.sqrt(1 - tf.square(sinb) + EPS)
    cosg = tf.sqrt(1 - tf.square(sing) + EPS)
    return sincos2r(sina,sinb,sing,cosa,cosb,cosg)
def abg2r(a,b,g):
    """Rotation matrix (B x 3 x 3) from Tait-Bryan angles a, b, g (each [B], radians)."""
    sina = tf.sin(a)
    sinb = tf.sin(b)
    sing = tf.sin(g)
    cosa = tf.cos(a)
    cosb = tf.cos(b)
    cosg = tf.cos(g)
    return sincos2r(sina,sinb,sing,cosa,cosb,cosg)
def abg2r_v2(a,b,g):
    """Alternate angles-to-rotation conversion routed through euler2mat (z, y, x order).

    NOTE(review): euler2mat is not defined anywhere in this file -- calling
    this raises NameError unless it is provided elsewhere; verify before use.
    """
    shape = a.get_shape()
    bs = int(shape[0])
    return tf.reshape(euler2mat(tf.expand_dims(g,1),
                                tf.expand_dims(b,1),
                                tf.expand_dims(a,1)),
                      [bs,3,3])
# def r2abg_bs1((r)):
# r00 = r[0,0]
# r10 = r[1,0]
# r11 = r[1,1]
# r12 = r[1,2]
# r20 = r[2,0]
# r21 = r[2,1]
# r22 = r[2,2]
# # singular = sy < 1e-6
# # if not singular :
# # x = math.atan2(R[2,1] , R[2,2])
# # y = math.atan2(-R[2,0], sy)
# # z = math.atan2(R[1,0], R[0,0])
# # else :
# # x = math.atan2(-R[1,2], R[1,1])
# # y = math.atan2(-R[2,0], sy)
# # z = 0
# sy = tf.sqrt(r00*r00 + r10*r10)
# x = tf.cond(sy > 1e-6, lambda: tf.atan2(r21, r22), lambda: tf.atan2(-r12, r11))
# y = tf.cond(sy > 1e-6, lambda: tf.atan2(-r20, sy), lambda: tf.atan2(-r20, sy))
# z = tf.cond(sy > 1e-6, lambda: tf.atan2(r10, r00), lambda: tf.identity(0.0))
# xyz = tf.stack([x,y,z],axis=0)
# return xyz
# def r2abg_v2_bs1((r)):
# r00 = r[0,0]
# r10 = r[1,0]
# r11 = r[1,1]
# r12 = r[1,2]
# r20 = r[2,0]
# r21 = r[2,1]
# r22 = r[2,2]
# x = tf.atan2(r21, r22)
# y = tf.atan2(-r20, tf.sqrt(r21*r21 + r22*r22))
# z = tf.atan2(r10, r00)
# xyz = tf.stack([x,y,z],axis=0)
# return xyz
# def r2abg_v3_bs1((r)):
# r11 = r[0,0]
# r21 = r[1,0]
# r22 = r[1,1]
# r23 = r[1,2]
# r31 = r[2,0]
# r32 = r[2,1]
# r33 = r[2,2]
# z = tf.atan2(r21, r11)
# y = -tf.asin(r31)
# x = tf.atan2(r32, r33)
# xyz = tf.stack([x,y,z],axis=0)
# return xyz
# def r2abg(r):
# # r is 3x3.
# # get alpha, beta, gamma
# # i copied a nice python function from the internet, copied from matlab
# xyz = tf.map_fn(r2abg_bs1, (r), dtype=tf.float32)
# x,y,z = tf.unstack(xyz,axis=1)
# return x,y,z
def r2abg_v2(r):
    """Recover Tait-Bryan angles (x, y, z) from B x 3 x 3 rotation matrices.

    Returns three [B] tensors. Matches the Rx@Ry@Rz convention used by
    abg2r/sincos2r in this file.

    bug fix: the per-sample helper r2abg_v2_bs1 had been commented out at
    module level, so this function raised NameError; the helper is
    restored here as a nested function.
    """
    def r2abg_v2_bs1(r):
        # per-sample conversion; r is a single 3 x 3 matrix
        r00 = r[0,0]
        r10 = r[1,0]
        r20 = r[2,0]
        r21 = r[2,1]
        r22 = r[2,2]
        x = tf.atan2(r21, r22)
        y = tf.atan2(-r20, tf.sqrt(r21*r21 + r22*r22))
        z = tf.atan2(r10, r00)
        return tf.stack([x,y,z],axis=0)
    xyz = tf.map_fn(r2abg_v2_bs1, (r), dtype=tf.float32)
    x,y,z = tf.unstack(xyz,axis=1)
    return x,y,z
def r2abg_v3(r):
    # http://danceswithcode.net/engineeringnotes/rotations_in_3d/rotations_in_3d_part1.html
    """Recover Tait-Bryan angles (x, y, z) from B x 3 x 3 rotation matrices
    using the Rz@Ry@Rx convention (see the linked tutorial).

    bug fix: the per-sample helper r2abg_v3_bs1 had been commented out at
    module level, so this function raised NameError; the helper is
    restored here as a nested function.
    """
    def r2abg_v3_bs1(r):
        # per-sample conversion; r is a single 3 x 3 matrix
        r11 = r[0,0]
        r21 = r[1,0]
        r31 = r[2,0]
        r32 = r[2,1]
        r33 = r[2,2]
        z = tf.atan2(r21, r11)
        y = -tf.asin(r31)
        x = tf.atan2(r32, r33)
        return tf.stack([x,y,z],axis=0)
    xyz = tf.map_fn(r2abg_v3_bs1, (r), dtype=tf.float32)
    x,y,z = tf.unstack(xyz,axis=1)
    return x,y,z
def zrt2flow_helper(Z, rt12, fy, fx, y0, x0):
    """Convert depth Z and a 4x4 relative pose rt12 into optical flow.

    bug fix: the original split rt12 into (r12, t12) and passed both to
    zrt2flow, which takes a single combined RT matrix -- a TypeError at
    call time. The combined matrix is now passed directly, and the
    (flow, pointcloud) tuple returned by zrt2flow is unpacked so callers
    still receive just the flow.
    """
    # (a KITTI-specific branch existed here historically; zrt2flow handles
    # the general case)
    flow, _ = zrt2flow(Z, rt12, fy, fx, y0, x0)
    return flow
def zrt2flow_kitti(Z, R, T, fy, fx, y0, x0):
    """Per-sample flow computation for KITTI-style batches.

    Unstacks the batch, recombines each (R, T) pair into a 4x4 transform,
    runs zrt2flow on each sample, and concatenates the per-sample flows.

    bug fixes vs. the original:
      * map() returns an iterator under Python 3, so the unstack helper now
        builds a real list before indexing;
      * zrt2flow takes one combined RT matrix and returns a (flow, XYZ2)
        tuple, so each pair is merged via merge_rt and the tuple unpacked;
      * fx/fy are kept with a leading batch dim of 1 (like x0/y0) so the
        intrinsics tiling inside zrt2flow sees [bs]-shaped tensors.
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    ed = lambda x : tf.expand_dims(x, axis = 0)
    upk = lambda x : tf.unstack(x, axis = 0)
    upked = lambda x : [ed(e) for e in upk(x)]
    Zu = upked(Z)
    Ru = upked(R)
    Tu = upked(T)
    fxu = upked(fx)
    fyu = upked(fy)
    x0u = upked(x0)
    y0u = upked(y0)
    flows = []
    for i in range(bs):
        # rebuild the 1 x 4 x 4 transform for this sample
        rt_i = merge_rt(Ru[i], Tu[i])
        flow_i, _ = zrt2flow(Zu[i], rt_i, fyu[i], fxu[i], y0u[i], x0u[i])
        flows.append(flow_i)
    return tf.concat(axis=0, values=flows)
def zrt2flow(Z, RT, fy, fx, y0, x0):
    """Optical flow induced by rigid motion RT on a depth map Z.

    Args:
        Z: B x H x W (x 1) depth.
        RT: B x 4 x 4 rigid transform from frame 1 to frame 2.
        fy, fx, y0, x0: per-batch intrinsics, each shaped [B].
    Returns:
        flow: B x H x W x 2 pixel displacements, and XYZ2, the transformed
        pointcloud (B x H*W x 3).
    """
    with tf.variable_scope("zrt2flow"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # RT is B x 4 x 4
        # get pointcloud1
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World(grid_x1,grid_y1,Z,fx,fy,x0,y0)
        # XYZ is B x H*W x 3
        # add a stack of ones, for homog
        XYZ1 = tf.concat([XYZ,tf.ones([bs,h*w,1])],axis=2)
        # print_shape(XYZ1)
        XYZ1_t = tf.transpose(XYZ1,perm=[0,2,1],name="XYZ1_t") # B x 4 x H*W
        # print_shape(XYZ1_t)
        XYZ1_rot_t = tf.matmul(RT,XYZ1_t,name="XYZ1_rot_t") # B x 4 x H*W
        XYZ1_rot = tf.transpose(XYZ1_rot_t,perm=[0,2,1],name="XYZ1_rot") # B x H*W x 4
        # bring it back from homog
        XYZ2 = XYZ1_rot[:,:,0:3]/tf.expand_dims(XYZ1_rot[:,:,-1],-1)
        # project pointcloud2 down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ2, name="splitXYZ")
        x2y2_flat = World2Camera(X2,Y2,Z2,fx,fy,x0,y0)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        flow_flat = tf.concat(axis=2,values=[x2_flat-x1_flat,y2_flat-y1_flat],name="flow_flat")
        flow = tf.reshape(flow_flat,[bs,h,w,2],name="flow")
        return flow, XYZ2
def xyzrt2flow(XYZ, RT, fy, fx, y0, x0, h, w):
    """Project a camera-frame pointcloud through RT and return optical flow.

    Args:
        XYZ: B x H*W x 3 pointcloud in camera coordinates (one point per pixel,
             in row-major pixel order).
        RT: B x 4 x 4 rigid transform.
        fy, fx, y0, x0: per-batch intrinsics, each shaped [B].
        h, w: image height and width.
    Returns:
        flow: B x h x w x 2 pixel displacements, and XYZ2, the transformed
        pointcloud (B x H*W x 3).

    bug fix: the original recomputed XYZ via Camera2World using undefined
    names (grid_x1, grid_y1, Z), raising NameError and clobbering the
    input; the incoming pointcloud is already in camera coordinates, so
    that call is dropped and the pixel grid is created with meshgrid2D.
    """
    with tf.variable_scope("zrt2flow"):
        shape = XYZ.get_shape()
        bs = int(shape[0])
        # pixel grid used for the flow subtraction at the end
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        # add a stack of ones, for homog
        XYZ1 = tf.concat([XYZ,tf.ones([bs,h*w,1])],axis=2)
        XYZ1_t = tf.transpose(XYZ1,perm=[0,2,1],name="XYZ1_t") # B x 4 x H*W
        XYZ1_rot_t = tf.matmul(RT,XYZ1_t,name="XYZ1_rot_t") # B x 4 x H*W
        XYZ1_rot = tf.transpose(XYZ1_rot_t,perm=[0,2,1],name="XYZ1_rot") # B x H*W x 4
        # bring it back from homog
        XYZ2 = XYZ1_rot[:,:,0:3]/tf.expand_dims(XYZ1_rot[:,:,-1],-1)
        # project pointcloud2 down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ2, name="splitXYZ")
        x2y2_flat = World2Camera(X2,Y2,Z2,fx,fy,x0,y0)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        flow_flat = tf.concat(axis=2,values=[x2_flat-x1_flat,y2_flat-y1_flat],name="flow_flat")
        flow = tf.reshape(flow_flat,[bs,h,w,2],name="flow")
        return flow, XYZ2
def Camera2World(x,y,Z,fx,fy,x0,y0):
    """Back-project pixel coordinates and depth into a 3D pointcloud.

    Args:
        x, y: B x H x W pixel coordinate grids.
        Z: B x H x W depth.
        fx, fy, x0, y0: per-batch intrinsics, each shaped [B].
    Returns:
        B x H*W x 3 pointcloud (X, Y, Z) in the camera frame.
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # the intrinsics are shaped [bs]
    # we need them to be shaped [bs,h,w]
    fy = tf.tile(tf.expand_dims(fy,1),[1,h*w])
    fx = tf.tile(tf.expand_dims(fx,1),[1,h*w])
    fy = tf.reshape(fy,[bs,h,w])
    fx = tf.reshape(fx,[bs,h,w])
    y0 = tf.tile(tf.expand_dims(y0,1),[1,h*w])
    x0 = tf.tile(tf.expand_dims(x0,1),[1,h*w])
    y0 = tf.reshape(y0,[bs,h,w])
    x0 = tf.reshape(x0,[bs,h,w])
    # pinhole back-projection
    X=(Z/fx)*(x-x0)
    Y=(Z/fy)*(y-y0)
    pointcloud=tf.stack([tf.reshape(X,[bs,-1]),
                         tf.reshape(Y,[bs,-1]),
                         tf.reshape(Z,[bs,-1])],
                        axis=2,name="world_pointcloud")
    return pointcloud
def Camera2World_p(x,y,Z,fx,fy):
    """Back-projection variant that assumes a principal point of (0, 0).

    Same as Camera2World but without the x0/y0 offsets; returns a
    B x H*W x 3 pointcloud.
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # the intrinsics are shaped [bs]
    # we need them to be shaped [bs,h,w]
    fy = tf.tile(tf.expand_dims(fy,1),[1,h*w])
    fx = tf.tile(tf.expand_dims(fx,1),[1,h*w])
    fy = tf.reshape(fy,[bs,h,w])
    fx = tf.reshape(fx,[bs,h,w])
    X=(Z/fx)*x
    Y=(Z/fy)*y
    pointcloud=tf.stack([tf.reshape(X,[bs,-1]),
                         tf.reshape(Y,[bs,-1]),
                         tf.reshape(Z,[bs,-1])],
                        axis=2,name="world_pointcloud")
    return pointcloud
def World2Camera(X,Y,Z,fx,fy,x0,y0):
    """Project 3D points onto the image plane with a pinhole model.

    Args:
        X, Y, Z: coordinate tensors whose trailing dims multiply to H*W per batch.
        fx, fy, x0, y0: per-batch intrinsics, each shaped [B].
    Returns:
        B x H*W x 2 projected (x, y) pixel coordinates; EPS guards the
        division at zero depth.
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # the intrinsics are shaped [bs]
    # we need them to be shaped [bs,h*w,1]
    fy = tf.tile(tf.expand_dims(fy,1),[1,h*w])
    fx = tf.tile(tf.expand_dims(fx,1),[1,h*w])
    y0 = tf.tile(tf.expand_dims(y0,1),[1,h*w])
    x0 = tf.tile(tf.expand_dims(x0,1),[1,h*w])
    fy = tf.reshape(fy,[bs,-1,1])
    fx = tf.reshape(fx,[bs,-1,1])
    y0 = tf.reshape(y0,[bs,-1,1])
    x0 = tf.reshape(x0,[bs,-1,1])
    x=(X*fx)/(Z+EPS)+x0
    y=(Y*fy)/(Z+EPS)+y0
    proj=tf.concat(axis=2,values=[x,y],name="camera_projection")
    return proj
def World2Camera_p(X,Y,Z,fx,fy):
    """Projection variant that assumes a principal point of (0, 0).

    Same as World2Camera without the x0/y0 offsets; returns B x H*W x 2.
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # the intrinsics are shaped [bs]
    # we need them to be shaped [bs,h*w,1]
    fy = tf.tile(tf.expand_dims(fy,1),[1,h*w])
    fx = tf.tile(tf.expand_dims(fx,1),[1,h*w])
    fy = tf.reshape(fy,[bs,-1,1])
    fx = tf.reshape(fx,[bs,-1,1])
    x=(X*fx)/(Z+EPS)
    y=(Y*fy)/(Z+EPS)
    proj=tf.concat(axis=2,values=[x,y],name="camera_projection")
    return proj
def World2Camera_single(X,Y,Z,fx,fy):
    """Project a single batch of 3D points (one per sample) to pixel coordinates.

    Returns a B x 2 tensor stacking (x, y); principal point assumed (0, 0).
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    x=(X*fx)/(Z+EPS)
    y=(Y*fy)/(Z+EPS)
    # print_shape(x)
    proj=tf.stack(axis=1,values=[x,y],name="camera_projection")
    return proj
def Camera2World_single(x,y,Z,fx,fy):
    """Back-project a single batch of pixels (one per sample) into 3D points.

    Returns a B x 3 tensor of (X, Y, Z); principal point assumed (0, 0).
    """
    shape = Z.get_shape()
    bs = int(shape[0])
    X=(Z/fx)*x
    Y=(Z/fy)*y
    point = tf.concat([tf.reshape(X,[bs,-1]),
                       tf.reshape(Y,[bs,-1]),
                       tf.reshape(Z,[bs,-1])],
                      axis=1,name="world_point")
    return point
def atan2(y, x):
    """Element-wise atan2 built from tf.atan, with results wrapped into [0, 2*pi).

    Produces NaN where both x and y are zero. Predates native tf.atan2;
    kept for graph compatibility.
    """
    with tf.variable_scope("atan2"):
        angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))
        angle = tf.where(tf.greater(y, 0.0), 0.5 * np.pi - tf.atan(x / y), angle)
        angle = tf.where(tf.less(y, 0.0), -0.5 * np.pi - tf.atan(x / y), angle)
        angle = tf.where(tf.less(x, 0.0), tf.atan(y / x) + np.pi, angle)
        angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)),
                         np.nan * tf.zeros_like(x), angle)
        # shift negative angles up by 2*pi via a sparse correction tensor
        indices = tf.where(tf.less(angle, 0.0))
        updated_values = tf.gather_nd(angle, indices) + (2 * np.pi)
        update = tf.SparseTensor(indices, updated_values, angle.get_shape())
        update_dense = tf.sparse_tensor_to_dense(update)
        return angle + update_dense
def atan2_ocv(y, x):
    """Fast polynomial atan2 approximation in OpenCV style; returns DEGREES in [0, 360)."""
    with tf.variable_scope("atan2_ocv"):
        # constants (7th-order polynomial fit coefficients, scaled to degrees)
        DBL_EPSILON = 2.2204460492503131e-16
        atan2_p1 = 0.9997878412794807 * (180 / np.pi)
        atan2_p3 = -0.3258083974640975 * (180 / np.pi)
        atan2_p5 = 0.1555786518463281 * (180 / np.pi)
        atan2_p7 = -0.04432655554792128 * (180 / np.pi)
        ax, ay = tf.abs(x), tf.abs(y)
        # evaluate on the octant where the ratio is <= 1, then unfold by symmetry
        c = tf.where(tf.greater_equal(ax, ay), tf.div(ay, ax + DBL_EPSILON),
                     tf.div(ax, ay + DBL_EPSILON))
        c2 = tf.square(c)
        angle = (((atan2_p7 * c2 + atan2_p5) * c2 + atan2_p3) * c2 + atan2_p1) * c
        angle = tf.where(tf.greater_equal(ax, ay), angle, 90.0 - angle)
        angle = tf.where(tf.less(x, 0.0), 180.0 - angle, angle)
        angle = tf.where(tf.less(y, 0.0), 360.0 - angle, angle)
        return angle
def normalize(tensor, a=0, b=1):
    """Linearly rescale tensor values into the range [a, b].

    improvement: the original evaluated tf.reduce_min(tensor) twice; the
    min and max are now each computed once.
    """
    with tf.variable_scope("normalize"):
        t_min = tf.reduce_min(tensor)
        t_max = tf.reduce_max(tensor)
        return tf.div(tf.multiply(tf.subtract(tensor, t_min), b - a),
                      tf.subtract(t_max, t_min))
def cart_to_polar_ocv(x, y, angle_in_degrees=False):
    """Cartesian-to-polar conversion using the OpenCV-style atan2 approximation.

    Returns (magnitude, angle). atan2_ocv yields degrees, so the angle is
    converted to radians unless angle_in_degrees is True.
    """
    with tf.variable_scope("cart_to_polar_ocv"):
        v = tf.sqrt(tf.add(tf.square(x), tf.square(y)))
        ang = atan2_ocv(y, x)
        scale = 1 if angle_in_degrees else np.pi / 180
        return v, tf.multiply(ang, scale)
def cart_to_polar(x, y, angle_in_degrees=False):
    """Cartesian-to-polar conversion using the exact atan2 defined above.

    Returns (magnitude, angle). atan2 yields radians, so the angle is
    converted to degrees only when angle_in_degrees is True.
    """
    with tf.variable_scope("cart_to_polar"):
        v = tf.sqrt(tf.add(tf.square(x), tf.square(y)))
        ang = atan2(y, x)
        scale = 180 / np.pi if angle_in_degrees else 1
        return v, tf.multiply(ang, scale)
def flow2color(flow, adaptive_max=False, maxFlow=10):
    """Visualize an optical flow field as a uint8 RGB image (HSV encoding).

    Hue encodes flow direction and value encodes magnitude. A column of
    reference flows (the cardinal directions at +/- maxFlow) is prepended
    so normalize() maps colors consistently, then sliced off before return.

    Args:
        flow: B x H x W x 2 flow field.
        adaptive_max: if True, use the field's own max as the clip value.
        maxFlow: magnitude at which flow saturates.
    Returns:
        B x H x W' x 3 uint8 image (W' reflects the reference-column slice).
    """
    with tf.variable_scope("flow2color"):
        shape = flow.get_shape()
        bs, h, w, c = shape
        if adaptive_max:
            maxFlow = tf.reduce_max(flow)
        # maxFlow = tf.maximum(50.0,tf.reduce_max(flow))
        # maxFlow = 50.0 #tf.maximum(20.0,tf.reduce_max(flow))
        # maxFlow = tf.maximum(20.0,tf.reduce_max(flow))
        # maxFlow = 5.0 #tf.maximum(20.0,tf.reduce_max(flow))
        # maxFlow = tf.maximum(5.0,tf.reduce_max(flow))
        # prepend reference columns covering the extreme flow directions
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[maxFlow*tf.ones([bs,h,1,1]),-maxFlow*tf.ones([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[-maxFlow*tf.ones([bs,h,1,1]),maxFlow*tf.ones([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[maxFlow*tf.ones([bs,h,1,1]),tf.zeros([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[-maxFlow*tf.ones([bs,h,1,1]),tf.zeros([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[tf.zeros([bs,h,1,1]),maxFlow*tf.ones([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[tf.zeros([bs,h,1,1]),-maxFlow*tf.ones([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.concat(axis=3,values=[tf.zeros([bs,h,1,1]),tf.zeros([bs,h,1,1])]),flow])
        flow = tf.concat(axis=2,values=[tf.zeros([bs,h,1,2]),flow])
        flow = tf.concat(axis=2,values=[maxFlow*tf.ones([bs,h,1,2]),flow])
        flow = tf.concat(axis=2,values=[-maxFlow*tf.ones([bs,h,1,2]),flow])
        fx, fy = flow[:, :, :, 0], flow[:, :, :, 1]
        fx = tf.clip_by_value(fx, -maxFlow, maxFlow)
        fy = tf.clip_by_value(fy, -maxFlow, maxFlow)
        v, ang = cart_to_polar_ocv(fx, fy)
        h = normalize(tf.multiply(ang, 180 / np.pi))
        s = tf.ones_like(h)
        v = normalize(v)
        hsv = tf.stack([h, s, v], 3)
        rgb = tf.image.hsv_to_rgb(hsv) * 255
        # drop the reference columns added above
        rgb = tf.slice(rgb,[0,0,10,0],[-1,-1,-1,-1])
        # rgb = rgb[0,0,1:,:]
        return tf.cast(rgb, tf.uint8)
def flow2color_triple(flow1, flow2, flow3):
    """Colorize three flow fields with one shared maximum so colors are comparable."""
    with tf.variable_scope("flow2color"):
        shared_max = tf.maximum(tf.reduce_max(flow1),
                                tf.maximum(tf.reduce_max(flow2),
                                           tf.reduce_max(flow3)))
        colored = [flow2color(f, maxFlow=shared_max) for f in (flow1, flow2, flow3)]
        return colored[0], colored[1], colored[2]
def flow2color_double(flow1, flow2):
    """Colorize two flow fields with one shared maximum so colors are comparable."""
    with tf.variable_scope("flow2color"):
        shared_max = tf.maximum(tf.reduce_max(flow1), tf.reduce_max(flow2))
        return (flow2color(flow1, maxFlow=shared_max),
                flow2color(flow2, maxFlow=shared_max))
def meshgrid2D(bs, height, width):
    """Pixel-coordinate grids (x, y) of shape B x height x width.

    grid_x holds column indices 0..width-1; grid_y holds row indices
    0..height-1; both tiled along the batch dimension.
    """
    with tf.variable_scope("meshgrid2D"):
        grid_x = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                           tf.transpose(tf.expand_dims(tf.linspace(0.0, width-1, width), 1), [1, 0]))
        grid_y = tf.matmul(tf.expand_dims(tf.linspace(0.0, height-1, height), 1),
                           tf.ones(shape=tf.stack([1, width])))
        grid_x = tf.tile(tf.expand_dims(grid_x,0),[bs,1,1],name="grid_x")
        grid_y = tf.tile(tf.expand_dims(grid_y,0),[bs,1,1],name="grid_y")
        return grid_x, grid_y
def warper(frame, flow, name="warper", reuse=False):
    """Backward-warp a frame by an optical flow field via the flow transformer.

    Returns (warp, occ) -- the warped frame and the transformer's occlusion
    output. reuse=True re-enters the variable scope for weight sharing.
    """
    with tf.variable_scope(name):
        shape = flow.get_shape()
        bs, h, w, c = shape
        if reuse:
            tf.get_variable_scope().reuse_variables()
        warp, occ = transformer(frame, flow, (int(h), int(w)))
        return warp, occ
def meshGridFlat(batchSize, height, width):
    """Flattened homogeneous sampling grid in normalized [-1, 1] coordinates.

    Returns a batchSize x 3 x (height*width) tensor whose rows are
    (x, y, 1), one copy per batch element.
    """
    with tf.name_scope('meshGridFlat'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
        # replicate the grid once per batch element
        baseGrid = tf.expand_dims(grid,0)
        grids = []
        for i in range(batchSize):
            grids.append(baseGrid)
        identityGrid = tf.concat(axis=0,values=grids)
        return identityGrid
def flowTransformGrid(flow):
    """Sampling grid (normalized coords) displaced by an optical flow field.

    flow: B x H x W x 2 pixel flow. Returns the identity grid from
    meshGridFlat plus the flow rescaled into [-1, 1] units; the third
    (homogeneous) row receives zero displacement.
    """
    with tf.name_scope("flowTransformGrid"):
        flowShape = flow.get_shape()
        batchSize = flowShape[0]
        height = flowShape[1]
        width = flowShape[2]
        identityGrid = meshGridFlat(batchSize,height,width)
        flowU = tf.slice(flow,[0,0,0,0],[-1,-1,-1,1])
        flowV = tf.slice(flow,[0,0,0,1],[-1,-1,-1,1])
        #scale it to normalized range [-1,1]
        flowU = tf.reshape((flowU*2)/tf.cast(width, tf.float32),shape=tf.stack([batchSize,1,-1]))
        flowV = tf.reshape((flowV*2)/tf.cast(height, tf.float32),shape=tf.stack([batchSize,1,-1]))
        zeros = tf.zeros(shape=tf.stack([batchSize,1, height*width]))
        flowScaled = tf.concat(axis=1,values=[flowU,flowV,zeros])
        return identityGrid + flowScaled
def flowSamplingDensity(flow):
    """Count how many flow vectors land on each target pixel.

    flow: B x H x W x 2. Returns a B x H x W integer map where each entry is
    the number of source pixels whose flow points into that pixel (0 means
    no samples land there -- a disocclusion).
    """
    def flatGridToDensity(u,v,h,w):
        # histogram the flat pixel ids u + v*w into an h x w count map
        with tf.name_scope("flatGridToDensity"):
            u = tf.squeeze(tf.clip_by_value(u,0,w-1))
            v = tf.squeeze(tf.clip_by_value(v,0,h-1))
            ids = tf.cast(u + (v*w),tf.int32)
            uniques,_,counts = tf.unique_with_counts(ids)
            densityMap = tf.sparse_to_dense(uniques,[h*w],counts,validate_indices=False)
            densityMap = tf.reshape(densityMap,[1,h,w])
            return densityMap
    with tf.name_scope("flowSamplingDensity"):
        flowShape = flow.get_shape()
        batchSize = flowShape[0].value
        h = flowShape[1].value
        w = flowShape[2].value
        grid = flowTransformGrid(flow)
        densities = []
        for it in range(0,batchSize):
            bGrid = tf.slice(grid,[it,0,0],[1,-1,-1])
            # map normalized [-1,1] coords back to integer pixel indices
            u = tf.cast(tf.floor((bGrid[:,0,:]+1)*w/2),tf.int32)
            v = tf.cast(tf.floor((bGrid[:,1,:]+1)*h/2),tf.int32)
            da = flatGridToDensity(u,v,h,w)
            #db = flatGridToDensity(u+1,v,h,w)
            #dc = flatGridToDensity(u,v+1,h,w)
            #dd = flatGridToDensity(u+1,v+1,h,w)
            densities.append(da)
        out = tf.concat(axis=0,values=densities)
        return out
def angleGrid(bs, h, w, y0, x0):
    """Per-pixel angle (via atan2_ocv, in degrees) of each pixel about the principal point.

    bs, h, w: batch size and image dimensions; y0, x0: per-batch principal
    point, each shaped [B]. Returns a B x h x w angle map.
    """
    xs, ys = meshgrid2D(bs, h, w)
    # broadcast the principal point over the image plane
    cy = tf.tile(tf.reshape(y0,[bs,1,1]),[1,h,w])
    cx = tf.tile(tf.reshape(x0,[bs,1,1]),[1,h,w])
    return atan2_ocv(ys - cy, xs - cx)
def angle2color(angles):
    """Visualize an angle map as uint8 RGB: hue from angle, full saturation and value."""
    v = tf.ones_like(angles)
    h = normalize(tf.multiply(angles, 180 / np.pi))
    s = tf.ones_like(h)
    # v = normalize(v)
    hsv = tf.stack([h, s, v], 3)
    rgb = tf.image.hsv_to_rgb(hsv) * 255
    return tf.cast(rgb, tf.uint8)
def pseudoFlowColor(angles,depth):
    """Flow-style colorization from geometry: hue from angle, value from inverse depth."""
    v = 1/depth
    h = normalize(tf.multiply(angles, 180 / np.pi))
    s = tf.ones_like(h)
    v = normalize(v)
    hsv = tf.stack([h, s, v], 3)
    rgb = tf.image.hsv_to_rgb(hsv) * 255
    return tf.cast(rgb, tf.uint8)
def resFlowColor(flow,angles,depth,tz):
    """Colorize residual motion: hue from angles, value from tz/depth.

    Args:
        flow: unused; kept for interface compatibility with existing callers.
        angles: per-pixel angle map -- presumably radians (scaled by 180/pi
            before normalize), TODO confirm with callers.
        depth: per-pixel depth map.
        tz: forward translation component.

    cleanup: the original decomposed `flow` into polar coordinates and kept
    several commented-out variants, but every one of those results was
    overwritten or unused before this point; the dead code is removed.
    """
    v = tz/depth
    h = normalize(tf.multiply(angles, 180 / np.pi))
    s = tf.ones_like(h)
    v = normalize(v)
    hsv = tf.stack([h, s, v], 3)
    rgb = tf.image.hsv_to_rgb(hsv) * 255
    return tf.cast(rgb, tf.uint8)
def mynormalize(d):
    """Rescale a tensor to [0, 1] using its global min and max."""
    dmin = tf.reduce_min(d)
    dmax = tf.reduce_max(d)
    d = (d-dmin)/(dmax-dmin)
    return d
def normalize_within_ex(d):
    """Rescale each batch example to [0, 1] independently (per-example min/max)."""
    return tf.map_fn(mynormalize, (d), dtype=tf.float32)
def oned2color(d,norm=True):
    # convert a 1chan input to a 3chan image output
    # (it's not very colorful yet)
    """Grayscale visualization: tile a 1-channel map into a uint8 3-channel image.

    norm=True rescales to [0, 1] first; otherwise input is assumed in [0, 1].
    """
    if norm:
        d = mynormalize(d)
    return tf.cast(tf.tile(255*d,[1,1,1,3]),tf.uint8)
def paired_oned2color(d1,d2,norm=True):
    # convert a two 1chan inputs to two 3chan image outputs,
    # normalized together
    # (it's not very colorful yet)
    """Convert two 1-channel maps to uint8 3-channel images, normalized jointly.

    improvement: the original built the channel-concatenated tensor twice
    (once for the min, once for the max); it is now built once.
    """
    if norm:
        both = tf.concat(axis=3,values=[d1,d2])
        dmin = tf.reduce_min(both)
        dmax = tf.reduce_max(both)
    else:
        dmin = 0
        dmax = 1
    d1 = tf.cast(tf.tile(255*((d1-dmin)/(dmax-dmin)),[1,1,1,3]),tf.uint8)
    d2 = tf.cast(tf.tile(255*((d2-dmin)/(dmax-dmin)),[1,1,1,3]),tf.uint8)
    return d1, d2
def triple_oned2color(d1,d2,d3,zeros=False):
    # normalize the three together, and don't let zeros ruin it
    """Convert three 1-channel maps to uint8 3-channel images, normalized jointly.

    With zeros=False (default), exact-zero entries are treated as missing:
    they are replaced by the nonzero max before normalization so they do not
    stretch the value range.
    """
    cat = tf.concat(axis=3,values=[d1,d2,d3])
    if not zeros:
        zero_mask = tf.cast(tf.equal(cat,0.0), tf.float32)
        notzero_indices = tf.squeeze(tf.where(tf.not_equal(cat, 0.0)))
        nonzero_max = tf.reduce_max(tf.gather_nd(cat,notzero_indices))
        # set zeros to the max value
        cat = cat + zero_mask*nonzero_max
    # normalize to [0,1]
    dmin = tf.reduce_min(cat)
    dmax = tf.reduce_max(cat)
    cat = (cat-dmin)/(dmax-dmin)
    cat = tf.cast(255*cat,tf.uint8)
    [d1,d2,d3] = tf.split(cat, 3, axis=3)
    d1 = tf.tile(d1,[1,1,1,3])
    d2 = tf.tile(d2,[1,1,1,3])
    d3 = tf.tile(d3,[1,1,1,3])
    return d1, d2, d3
def preprocess_color(x):
    """Map uint8 RGB input into zero-centered float32 in [-0.5, 0.5]."""
    scaled = tf.cast(x,tf.float32) * 1./255
    return scaled - 0.5
def preprocess_depth(x):
    """Cast a depth input to float32; no scaling applied."""
    return tf.cast(x, tf.float32)
def preprocess_valid(x):
    """Invert a validity mask: output is 1.0 where the input was 0, and vice versa."""
    mask = tf.cast(x, tf.float32)
    return 1 - mask
def back2color(i):
    """Undo preprocess_color: shift from [-0.5, 0.5] back to uint8 [0, 255]."""
    shifted = (i + 0.5) * 255
    return tf.cast(shifted, tf.uint8)
def zdrt2flow_fc(Z, dp, R, T, scale, fy, fx):
    """Flow from depth Z plus a per-pixel delta-pivot rigid motion.

    Each pixel's 3D point x rotates about its own pivot p = x + dp, so
    x' = R(-dp) + x + dp + T. Principal point assumed (0, 0).

    Args:
        Z: B x H x W (x 1) depth.
        dp: B x H*W x 3 per-pixel pivot deltas (camera-plane coords).
        R: B x 3 x 3 rotation; T: translation broadcastable to B x H*W x 3.
        scale: unused here.
        fy, fx: per-batch focal lengths, each shaped [B].
    Returns:
        (flow, u, v, XYZ_transformed).
    """
    with tf.variable_scope("zdrt2flow_fc"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # x is a 3D point
        # p is x's pivot point in 3D
        # dp is the 3D delta from the x to p
        # p = (x + dp)
        # x' = R(x-p) + p + t
        #    = R(x - (x+dp)) + (x+dp) + t
        #    = R(x - x-dp ) + x+dp + t
        #    = R( -dp ) + x+dp + t
        #    = R(-dp) + x + dp + t
        # note that if dp=0 everywhere, then every point has a different pivot (i.e., itself).
        # what we desire is for everything (on the object) to have the SAME pivot.
        # so, ideally, each dp would point to the center.
        # i think this is all fine, i'm just using the wrong function for what i'm doing right now.
        # put the delta pivots into world coordinates
        [dpx, dpy, dpz] = tf.unstack(dp,axis=2)
        dpx = tf.reshape(dpx,[bs,h,w],name="dpx")
        dpy = tf.reshape(dpy,[bs,h,w],name="dpy")
        dpz = tf.reshape(dpz,[bs,h,w],name="dpz")
        XYZ_dp = Camera2World_p(dpx,dpy,dpz,fx,fy)
        # rotate the negative delta pivots
        XYZ_dp_rot = tf.matmul(R,-tf.expand_dims(XYZ_dp,3))
        XYZ_dp_rot = tf.reshape(XYZ_dp_rot,[bs,h*w,3])
        # create a pointcloud for the scene
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World_p(grid_x1,grid_y1,Z,fx,fy)
        # add it all up
        XYZ_transformed = XYZ_dp_rot + XYZ + XYZ_dp + T
        # project down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ_transformed, name="splitXYZ")
        x2y2_flat = World2Camera_p(X2,Y2,Z2,fx,fy)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        u = tf.reshape(x2_flat-x1_flat,[bs,h,w,1])
        v = tf.reshape(y2_flat-y1_flat,[bs,h,w,1])
        flow = tf.concat(axis=3,values=[u,v],name="flow")
        return flow, u, v, XYZ_transformed
def zprt2flow(Z, P, R, T, fy, fx):
    """Optical flow induced by rotating the scene pointcloud about a single
    shared pivot P and translating by T: Xt = R(X - P) + P + T.

    Args:
        Z: depth map, reshaped internally to [bs, h, w].
        P: pivot, B x 3, already in world coordinates (see note below).
        R: rotation, B x 3 x 3.
        T: translation, B x 3.
        fy, fx: camera focal lengths.

    Returns:
        (flow [bs,h,w,2], XYZ2 [bs,h*w,3] -- the transformed pointcloud).
    """
    # i might want to change this later, but right now let's assume that P is Bx3
    # that is, there is a single pivot for everything in the scene.
    # the pivot, P, is already in world coordinates
    with tf.variable_scope("zprt2flow"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # Xt = R(X-P) + P + T
        # R is B x 3 x 3
        # P and T are both B x 3
        P = tf.expand_dims(P,axis=1) # B x 1 x 3
        T = tf.expand_dims(T,axis=1) # B x 1 x 3
        P = tf.tile(P,[1,h*w,1]) # B x H*W x 3
        T = tf.tile(T,[1,h*w,1]) # B x H*W x 3
        # get pointcloud1
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World_p(grid_x1,grid_y1,Z,fx,fy)
        # XYZ is B x H*W x 3
        ## transform pointcloud1 using r and t, to estimate pointcloud2
        # first we need to transpose XYZ-P, so that 3 is the inner dim
        XYZ_t = tf.transpose(XYZ-P,perm=[0,2,1],name="XYZ_t") # B x 3 x H*W
        # now we can multiply
        XYZ_rot_t = tf.matmul(R,XYZ_t,name="XYZ_rot_t") # B x 3 x H*W
        # and untranspose.
        XYZ_rot = tf.transpose(XYZ_rot_t,perm=[0,2,1],name="XYZ_rot") # B x H*W x 3
        # add in the P and T
        XYZ2 = XYZ_rot + P + T # B x H*W x 3
        # project pointcloud2 down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ2, name="splitXYZ")
        x2y2_flat = World2Camera_p(X2,Y2,Z2,fx,fy)
        [x2_flat,y2_flat]=tf.split(axis=2,
                                   num_or_size_splits=2,
                                   value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        flow_flat = tf.concat(axis=2,
                              values=[x2_flat-x1_flat,y2_flat-y1_flat],
                              name="flow_flat")
        flow = tf.reshape(flow_flat,[bs,h,w,2],name="flow")
        return flow, XYZ2
def p2flow_centered(P, h, w, fy, fx):
    """Constant flow field that would translate the image so the projection of
    the 3D pivot P lands at the image center (h/2, w/2)."""
    with tf.variable_scope("p2flow_centered"):
        bs = int(P.get_shape()[0])
        # project the pivot into pixel coordinates
        px, py = get_xy_from_3d(P, fy, fx)
        dx = tf.cast(px, tf.float32) - (w / 2)
        dy = tf.cast(py, tf.float32) - (h / 2)
        # broadcast the single (dx, dy) offset over the whole image
        map_x = tf.tile(tf.reshape(dx, [bs, 1, 1, 1]), [1, h, w, 1])
        map_y = tf.tile(tf.reshape(dy, [bs, 1, 1, 1]), [1, h, w, 1])
        return tf.concat(axis=3, values=[map_x, map_y])
def p2flow_centered_3D(P, h, w, fy, fx):
    """Identical to p2flow_centered; kept for backward compatibility.

    The original body (including its variable scope name) was a byte-for-byte
    duplicate of p2flow_centered, so it now delegates to avoid the two copies
    drifting apart.
    """
    return p2flow_centered(P, h, w, fy, fx)
def zt2flow_p(Z, t, fy, fx):
    """Flow induced by translating the scene pointcloud by t (principal point
    implied by Camera2World_p / World2Camera_p).

    Returns:
        (flow [bs,h,w,2], XYZ_transformed [bs,h*w,3], x2y2_flat -- the
        projected 2D coordinates of the moved points).
    """
    with tf.variable_scope("zt2flow_p"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # create a pointcloud for the scene
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World_p(grid_x1,grid_y1,Z,fx,fy)
        # add it up
        XYZ_transformed = XYZ + t
        # project down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ_transformed, name="splitXYZ")
        x2y2_flat = World2Camera_p(X2,Y2,Z2,fx,fy)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        u = tf.reshape(x2_flat-x1_flat,[bs,h,w,1])
        v = tf.reshape(y2_flat-y1_flat,[bs,h,w,1])
        flow = tf.concat(axis=3,values=[u,v],name="flow")
        return flow, XYZ_transformed, x2y2_flat
def zt2flow(Z, t, fy, fx, y0, x0):
    """Like zt2flow_p, but with an explicit principal point (x0, y0).

    Unlike zt2flow_p this returns only the flow, not the moved pointcloud.
    NOTE(review): the variable scope name "zt2flow_p" looks copy-pasted from
    the sibling above -- confirm whether it should be "zt2flow".
    """
    with tf.variable_scope("zt2flow_p"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # create a pointcloud for the scene
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World(grid_x1,grid_y1,Z,fx,fy,x0,y0)
        # add it up
        XYZ_transformed = XYZ + t
        # project down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ_transformed, name="splitXYZ")
        x2y2_flat = World2Camera(X2,Y2,Z2,fx,fy,x0,y0)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        u = tf.reshape(x2_flat-x1_flat,[bs,h,w,1])
        v = tf.reshape(y2_flat-y1_flat,[bs,h,w,1])
        flow = tf.concat(axis=3,values=[u,v],name="flow")
        return flow
def zcom2flow(Z, com1, com2, fy, fx):
    """Flow attributed entirely to the center of mass moving from com1 to com2.

    com1/com2 are translations from the current pixel to the com; their
    difference is applied as a pure translation of the pointcloud.
    Returns the (flow, pointcloud, coords) triple from zt2flow_p.
    """
    with tf.variable_scope("zcom2flow_fc"):
        delta = com2 - com1
        return zt2flow_p(Z, delta, fy, fx)
# def xy2mask((x, y, im)):
# # call this from a tf.map_fn in, e.g., paired_xy2target
# with tf.variable_scope("xy2mask"):
# # im is H x W x 3
# shape = im.get_shape()
# h = int(shape[0])
# w = int(shape[1])
# x = tf.cast(x,tf.int32)
# y = tf.cast(y,tf.int32)
# x = tf.squeeze(x)
# y = tf.squeeze(y)
# x = tf.maximum(1,x)
# x = tf.minimum(x,w-1)
# y = tf.maximum(1,y)
# y = tf.minimum(y,h-1)
# # make a mask with zeros along the cross,
# # and mult the image with that.
# left = tf.ones([h,x-1,1])
# vert = tf.zeros([h,1,1])
# right = tf.ones([h,w-x,1])
# mask = tf.concat([left,vert,right],axis=1)
# top = tf.slice(mask,[0,0,0],[y-1,-1,-1])
# horz = tf.zeros([1,w,1])
# bottom = tf.slice(mask,[y,0,0],[-1,-1,-1])
# mask = tf.concat([top, horz, bottom], axis=0, name="mask")
# # print_shape(mask)
# return mask
# def xy2dot_mask((x, y, im)):
# # generates an image (sized like im) with a single 1 at the xy
# # call this from a tf.map_fn in, e.g., paired_xy2target
# # don't try to pass in h,w instead of im
# with tf.variable_scope("xy2dot_mask"):
# shape = im.get_shape()
# h = int(shape[0])
# w = int(shape[1])
# x = tf.cast(x,tf.int32)
# y = tf.cast(y,tf.int32)
# x = tf.squeeze(x)
# y = tf.squeeze(y)
# # make sure x,y are within the bounds
# x = tf.maximum(1,x)
# x = tf.minimum(x,w-1)
# y = tf.maximum(1,y)
# y = tf.minimum(y,h-1)
# # make a mask of zeros
# mask = tf.zeros([h,w,1])
# # add 1 at the specified coordinate
# indices = [[tf.cast(y,tf.int64), tf.cast(x,tf.int64), 0]]
# values = [1.0]
# shape = [h, w, 1]
# delta = tf.SparseTensor(indices, values, shape)
# mask = mask + tf.sparse_tensor_to_dense(delta)
# # print_shape(mask)
# return mask
def paired_xyz2target(xyz1, xyz2, im, fy, fx):
    """Project two B x 3 world points into the image and draw paired targets.

    The second target is additionally painted red by paired_xy2target.
    """
    with tf.variable_scope("paired_xyz2target"):
        u1, v1 = get_xy_from_3d(xyz1, fy, fx)
        u2, v2 = get_xy_from_3d(xyz2, fy, fx)
        return paired_xy2target(u1, v1, u2, v2, im)
def xyz2target(xyz, im, fy, fx):
    """Paint a cross target on `im` at the 2D projection of the B x 3 point xyz."""
    with tf.variable_scope("xyz2target"):
        u, v = get_xy_from_3d(xyz, fy, fx)
        return xy2target(u, v, im)
def xyz2target_grid(xyz, im, fy, fx):
    """Paint a full-opacity target at xyz's projection, plus six faint targets
    at scaled copies of its (x, y) coordinates (xyz should be B x 3)."""
    with tf.variable_scope("xyz2target"):
        u, v = get_xy_from_3d(xyz, fy, fx)
        im = xy2target(u, v, im)
        faint = 0.1
        for su, sv in [(0.5, 1.0), (1.0, 0.5), (1.5, 1.0),
                       (1.0, 1.5), (0.5, 0.5), (1.5, 1.5)]:
            im = xy2target(u * su, v * sv, im, opacity=faint)
        return im
def paired_xy2target(x1, y1, x2, y2, im):
    """Draw two cross targets on a batch of images: a dark cross at (x1, y1)
    and a red cross at (x2, y2).

    xy2mask (defined elsewhere in this file) produces a per-image mask that is
    1 everywhere except 0 along the cross through (x, y).
    """
    with tf.variable_scope("paired_xy2target"):
        # print_shape(im)
        # mark the target with a cross of zeros
        mask1 = tf.map_fn(xy2mask, [x1, y1, im], dtype=tf.float32)
        mask2 = tf.map_fn(xy2mask, [x2, y2, im], dtype=tf.float32)
        # print_shape(mask1)
        # mask1 = xy2mask(x1,y1,im)
        # mask2 = xy2mask(x2,y2,im)
        mask1_3 = tf.tile(mask1,[1,1,1,3])
        mask2_3 = tf.tile(mask2,[1,1,1,3])
        # put the image into [0,1]
        im = im+0.5
        # cross out both targets
        im = im * mask1_3
        im = im * mask2_3
        # paint the second mask in red
        zero = tf.zeros_like(mask2)
        mask2_3 = tf.concat([(1-mask2),zero,zero],axis=3)
        im = im + mask2_3
        # restore the image to [-0.5,0.5]
        im = im-0.5
        return im
def xy2target(x, y, im, opacity=1.0):
    """Darken a cross at (x, y) on each image in the batch.

    xy2mask yields a mask that is 1 everywhere except 0 along the cross;
    `opacity` controls how strongly the cross is painted (1.0 = fully black).
    Images are assumed to live in [-0.5, 0.5].
    """
    with tf.variable_scope("xy2target"):
        cross = tf.map_fn(xy2mask, [x, y, im], dtype=tf.float32)
        cross3 = tf.tile(cross, [1, 1, 1, 3])
        # work in [0,1], attenuate along the cross, then shift back
        shifted = im + 0.5
        shifted = shifted * (1 - (1 - cross3) * opacity)
        return shifted - 0.5
def zcom2com_fc(Z, com, fy, fx):
    """Flow that moves every 3D point by the per-pixel com offset `com`
    (a translation from each pixel to the center of mass).

    Returns:
        (flow [bs,h,w,2], XYZ_transformed [bs,h*w,3]).
    """
    with tf.variable_scope("zcom2com_fc"):
        shape = Z.get_shape()
        bs = int(shape[0])
        h = int(shape[1])
        w = int(shape[2])
        # create a pointcloud for the scene
        [grid_x,grid_y] = meshgrid2D(bs, h, w)
        Z = tf.reshape(Z,[bs,h,w],name="Z")
        XYZ = Camera2World_p(grid_x,grid_y,Z,fx,fy)
        # move every point (on the toy, at least) to the com
        XYZ_transformed = XYZ + com
        # project down, so that we get the 2D location of all of these pixels
        [X2,Y2,Z2] = tf.split(axis=2, num_or_size_splits=3, value=XYZ_transformed, name="splitXYZ")
        x2y2_flat = World2Camera_p(X2,Y2,Z2,fx,fy)
        [x2_flat,y2_flat]=tf.split(axis=2,num_or_size_splits=2,value=x2y2_flat,name="splitxyz_flat")
        # subtract the new 2D locations from the old ones to get optical flow
        [grid_x1,grid_y1] = meshgrid2D(bs, h, w)
        x1_flat = tf.reshape(grid_x1,[bs,-1,1],name="x1")
        y1_flat = tf.reshape(grid_y1,[bs,-1,1],name="y1")
        u = tf.reshape(x2_flat-x1_flat,[bs,h,w,1])
        v = tf.reshape(y2_flat-y1_flat,[bs,h,w,1])
        flow = tf.concat(axis=3,values=[u,v],name="flow")
        return flow, XYZ_transformed
def offsetbbox(o1b, off_h, off_w, crop_h, crop_w, dotrim = False):
    """Shift [L, T, R, B] boxes by a crop offset and repack them as int64.

    Horizontal edges move by -off_w, vertical edges by -off_h. The trimming
    path is intentionally disabled (it asserts before running).
    """
    boxes = tf.cast(o1b, tf.int32)
    # unpack the four edges and apply the crop offset in one step
    lefts  = tf.slice(boxes, [0, 0], [-1, 1]) - off_w
    tops   = tf.slice(boxes, [0, 1], [-1, 1]) - off_h
    rights = tf.slice(boxes, [0, 2], [-1, 1]) - off_w
    bots   = tf.slice(boxes, [0, 3], [-1, 1]) - off_h
    if dotrim:
        # trimming is disabled on purpose; fail loudly if requested
        assert False
        lefts  = tf.minimum(tf.maximum(lefts, 0), crop_w)
        rights = tf.minimum(tf.maximum(rights, 0), crop_w)
        tops   = tf.minimum(tf.maximum(tops, 0), crop_h)
        bots   = tf.minimum(tf.maximum(bots, 0), crop_h)
    # repack in the original [L, T, R, B] order
    repacked = tf.concat(axis=1, values=[lefts, tops, rights, bots])
    return tf.cast(repacked, tf.int64)
def avg2(a, b):
    """Arithmetic mean of a and b, as a float."""
    return 0.5 * (a + b)
def decode_labels(mask, palette, num_images=1):
    """Map a batch of integer label maps to RGB images via a palette.

    Args:
        mask: [n, h, w, c] array of class ids; only channel 0 is read, and
            every id must satisfy 0 < id <= 14 (id k uses palette[k-1]).
        palette: sequence of RGB tuples, one per class.
        num_images: how many images from the batch to decode.

    Returns:
        uint8 array of shape [num_images, h, w, 3].
    """
    n_classes = len(palette)
    n, h, w, c = mask.shape
    assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for i in range(num_images):
        # build the RGB image pixel-by-pixel via PIL
        img2 = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
        pixels = img2.load()
        for j_, j in enumerate(mask[i, :, :, 0]):
            for k_, k in enumerate(j):
                # palette is 0-indexed while labels start at 1
                assert 0 < k <= 14
                pixels[k_,j_] = palette[int(k)-1]
        outputs[i] = np.array(img2)
    return outputs
# we may want to easily change the way we encode/decode depth
# brox encodes it as 1/z
# eigen encodes it as log(z)
def encode_depth_inv(x):
    # brox-style encoding: represent depth as its reciprocal (EPS guards /0)
    return 1/(x+EPS)
def decode_depth_inv(x):
    # the reciprocal is (up to EPS) its own inverse, so decode matches encode
    return 1/(x+EPS)
def encode_depth_log(x):
    # NOTE(review): named "log" but applies exp -- this pair looks swapped
    # relative to the "eigen encodes it as log(z)" comment above; the two are
    # still mutual inverses, so round-trips work. Confirm intended direction.
    return tf.exp(x+EPS)
def decode_depth_log(x):
    # NOTE(review): applies log; see the swap note on encode_depth_log.
    return tf.log(x+EPS)
def encode_depth_id(x):
    # identity encoding (depth stored as-is)
    return x
def decode_depth_id(x):
    # identity decoding (inverse of encode_depth_id)
    return x
def encode_depth_invsig(x, delta = 0.01):
    # inverse-sigmoid encoding: squash x into (0,1) then take the reciprocal;
    # delta keeps the denominator away from zero
    return 1/(tf.nn.sigmoid(x)+delta)
def decode_depth_invsig(x, delta = 0.01):
    """Invert encode_depth_invsig: recover the pre-sigmoid value from
    1/(sigmoid(v)+delta).

    Bug fix: the original body referenced an undefined name `depth` instead of
    the parameter `x`, so every call raised NameError.
    """
    p = 1/(x+EPS)-delta
    # clamp into the open interval (0,1) so the logit below stays finite
    p = tf.clip_by_value(p, EPS, 1.0-EPS)
    return -tf.log(1/p-1)
# only identity right now
def encode_depth(x):
    """Depth encoding used throughout; currently the identity.

    Alternative encodings (id/log/inv/invsig, previously selected via
    hyp.depth_encoding) are defined above but disabled here.
    """
    return x
def decode_depth(depth):
    """Inverse of encode_depth; currently the identity (the alternate
    decoders keyed off hyp.depth_encoding are disabled)."""
    return depth
def match(xs, ys):
    """Zip two parallel, possibly-nested lists into one flat dict.

    Sub-lists in xs are matched recursively against the corresponding
    sub-lists in ys, e.g. match(['a', ['b', 'c']], [1, [2, 3]]) gives
    {'a': 1, 'b': 2, 'c': 3}.
    """
    out = {}
    for key, val in zip(xs, ys):
        if type(key) is list:
            out.update(match(key, val))
        else:
            out[key] = val
    return out
def feed_from(inputs, variables, sess):
    """Fetch `inputs` from the session and pair them with `variables` into a
    feed dict (nesting in the two lists is flattened by match)."""
    fetched = sess.run(inputs)
    return match(variables, fetched)
def feed_from2(inputs1, variables1, inputs2, variables2, sess):
    """Like feed_from, but fetches two input groups in a single sess.run.

    Bug fix: the original built the feed dict but never returned it, making
    the call a no-op for the caller; the dict is now returned, mirroring
    feed_from.
    """
    return match([variables1, variables2], sess.run([inputs1, inputs2]))
def poses2rots_v2(poses):
    """Build rotation matrices from the first three pose channels (rx, ry, rz);
    ry is offset by pi/2 before conversion, and sinabg2r consumes the sines."""
    rx = poses[:, 0:1]
    ry = poses[:, 1:2] + math.pi/2.0
    rz = poses[:, 2:3]
    return sinabg2r(tf.sin(rz), tf.sin(ry), tf.sin(rx))
def poses2rots(poses):
    """Build rotation matrices from pose channels 6:9 (rx, ry, rz); ry is
    offset by pi/2 before conversion, and sinabg2r consumes the sines."""
    angles = tf.slice(poses, [0, 6], [-1, 3])
    rx = angles[:, 0:1]
    ry = angles[:, 1:2] + math.pi/2.0
    rz = angles[:, 2:3]
    return sinabg2r(tf.sin(rz), tf.sin(ry), tf.sin(rx))
def decompress_seg(num, seg):
    '''Unimplemented stub.

    Intended to expand a compressed HxWx1 instance-label image (with max pixel
    value `seg`) into per-instance masks; currently returns None.
    '''
    pass
def masks2boxes(masks):
    # unimplemented stub: intended to derive bounding boxes from instance
    # masks (see seg2masksandboxes below); currently returns None
    pass
def seg2masksandboxes(num, seg):
    #'gt_masks': 'masks of instances in this image. (instance-level masks), of shape (N, image_height, image_width)',
    #'gt_boxes': 'bounding boxes and classes of instances in this image, of shape (N, 5), each entry is (x1, y1, x2, y2)'
    #the fifth feature of each box is the class id
    # NOTE(review): decompress_seg/masks2boxes are unimplemented stubs, and
    # their results are overwritten below -- this currently returns two empty
    # lists regardless of input.
    masks = decompress_seg(num, seg)
    boxes = masks2boxes(masks)
    masks = []
    boxes = []
    #assert False #not yet implemented
    return masks, boxes
# def selectmask(cls_idx, masks):
# def select_single((idx, mask)):
# #idx is (), mask is 14x14x2
# return tf.slice(mask, tf.stack([0,0,idx]), [-1,-1,1])
# return tf.map_fn(select_single, [cls_idx, masks],
# parallel_iterations = 128, dtype = tf.float32)
# def select_by_last(indices, items):
# axis = len(items.get_shape())
# def select_single((idx, item)):
# start = tf.stack([0 for i in range(axis-2)]+[idx])
# end = [-1 for i in range(axis-2)]+[1]
# return tf.squeeze(tf.slice(item, start, end))
# return tf.map_fn(select_single, [indices, items],
# parallel_iterations = 128, dtype = items.dtype)
# def extract_gt_boxes((counts, segs)):
# n = tf.squeeze(counts) #bs is 1
# seg = tf.squeeze(segs) #bs is 1
# def extract_instance(idx):
# mask = tf.equal(seg, idx+1)
# bbox = tf.concat(axis=0, values=[mask2bbox(mask), [0]])
# return bbox
# it = tf.range(0, n)
# result = tf.map_fn(extract_instance, it, dtype = tf.int32)
# return result
def vis_detections(im, class_name, dets, thresh=0.3):
    """Draw detected bounding boxes in red on the first image of a batch.

    Args:
        im: uint8 image batch of shape [1, H, W, 3]; pixels are edited in place.
        class_name: label used only in the console printout.
        dets: [N, 5] array; the first four columns are the box coordinates
            (used as (y, x, y2, x2) below) and the last column is the score.
        thresh: minimum score for a detection to be drawn.

    Returns:
        The annotated [H, W, 3] image, or None when nothing passes `thresh`.

    Bug fix: the original zeroed channel 1 twice on the vertical edges
    (`im[x:x2,y,1] = 0` repeated) and never zeroed channel 2 there, leaving
    stale blue values; every edge is now painted a consistent (255, 0, 0).
    """
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    im = im[0, :, :, :]
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        print("%.2f confident that there is a %s at " % (score, class_name))
        print(bbox)
        y = int(bbox[0])
        x = int(bbox[1])
        y2 = int(bbox[2])
        x2 = int(bbox[3])
        # paint all four edges pure red
        for rows, cols in ((slice(x, x2), y), (slice(x, x2), y2),
                           (x, slice(y, y2)), (x2, slice(y, y2))):
            im[rows, cols, 0] = 255
            im[rows, cols, 1] = 0
            im[rows, cols, 2] = 0
    return im  # imsave(out_file, im)
def box_correspond_np(pred_boxes, gt_boxes, thresh = 0.0):
    """For each predicted box, find the best-overlapping gt box (NumPy side).

    Returns:
        (dets_argmax_overlaps, matchmask): per-prediction index of the gt box
        with the highest overlap, and a bool mask marking predictions whose
        best overlap exceeds `thresh`.
    """
    #print 'box correspond'
    #print np.shape(pred_boxes)
    #print np.shape(gt_boxes)
    # bbox_overlaps (external helper) yields an [n_pred x n_gt] overlap matrix
    overlaps = bbox_overlaps(
        np.ascontiguousarray(pred_boxes, dtype=np.float),
        np.ascontiguousarray(gt_boxes, dtype=np.float)) #Nx32
    dets_argmax_overlaps = overlaps.argmax(axis=1)
    dets_max_overlaps = overlaps.max(axis=1)
    matchmask = dets_max_overlaps > thresh
    return dets_argmax_overlaps, matchmask
def box_correspond(pred_boxes, gt_boxes, thresh = 0.75):
    """TF wrapper around box_correspond_np via tf.py_func.

    Returns per-prediction (gt index, match mask), both reshaped to the
    number of predicted boxes.
    """
    dets, mask = tf.py_func(box_correspond_np, [pred_boxes, gt_boxes, thresh], (tf.int64, tf.bool))
    #isempty = tf.greater(tf.shape(gt_boxes)[0], 0)
    dets = tf.reshape(dets, (tf.shape(pred_boxes)[0],))
    masks = tf.reshape(mask, (tf.shape(pred_boxes)[0],))
    return dets, masks
def tf_nms(dets, thresh):
    """Non-maximum suppression as a TF op, wrapping the external `nms` helper.

    Returns int32 indices of the detections to keep.
    """
    keep = tf.py_func(lambda a, b, c: np.array(nms(a,b,c)).astype(np.int64),
                      [dets, thresh, True],
                      tf.int64, stateful=False)
    return tf.cast(keep,tf.int32)
def get_good_detections(feats, probs, bboxdeltas, rois, many_classes=True,
                        conf_thresh = 0.8, nms_thresh = 0.3):
    """Filter detections by confidence and NMS, keeping their features.

    When many_classes is True, only class index 7 is extracted from the
    per-class box/prob layout; otherwise boxes/probs are used as-is.

    Returns:
        (dets, feats): the kept [N, 4] boxes and their aligned features.
    """
    boxes = tf_rois_and_deltas_to_bbox(bboxdeltas, rois)
    if many_classes:
        # hard-coded class of interest in the per-class layout
        cls_ind = 7
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_probs = probs[:, cls_ind]
    else:
        cls_boxes = boxes
        cls_probs = probs
    # keep only confident detections
    indices = tf.squeeze(tf.where(cls_probs > conf_thresh))
    feats = tf.gather(feats, indices)
    cls_probs = tf.gather(cls_probs, indices)
    cls_boxes = tf.gather(cls_boxes, indices)
    rois = tf.gather(rois, indices)
    # cls_boxes is [N x 4]
    # cls_probs is [N]; make it [N x 1] so we can concat
    cls_probs = tf.expand_dims(cls_probs,axis=-1)
    dets = tf.concat([cls_boxes, cls_probs],axis=1)
    # suppress overlapping boxes, then gather everything by the kept indices
    keep = tf_nms(dets, nms_thresh)
    dets = tf.gather(dets, keep)
    feats = tf.gather(feats, keep)
    probs = tf.gather(probs, keep)
    deltas = tf.gather(bboxdeltas, keep)
    rois = tf.gather(rois, keep)
    # return dets, feats, probs, deltas
    # return dets, feats
    # drop the appended score column before returning
    dets = tf.slice(dets, [0,0], [-1,4])
    return dets, feats
def pose_cls2angle(rxc, ryc, rzc):
    """Decode three 36-bin classification outputs into angles in [-pi, pi),
    stacked as [B, 3] in (rx, ry, rz) order."""
    def _bins2angle(logits):
        idx = tf.cast(tf.argmax(logits, axis=1), tf.float32)
        return idx/36*2*pi - pi
    rx = _bins2angle(rxc)
    ry = _bins2angle(ryc)
    rz = _bins2angle(rzc)
    return tf.stack([rx, ry, rz], axis=1)
def pose_angle2cls(rots):
    """Quantize [B, 3] angles (rx, ry, rz in [-pi, pi]) into 36-way one-hot
    classification targets; returns the (x, y, z) one-hot tensors."""
    def _angle2onehot(w):
        binned = tf.cast(tf.round((w+pi)*36/(2*pi)), tf.int32)
        return tf.one_hot(binned, depth = 36, axis = 1)
    clsx = _angle2onehot(rots[:,0])
    clsy = _angle2onehot(rots[:,1])
    clsz = _angle2onehot(rots[:,2])
    return clsx, clsy, clsz
def box2corners(bbox):
    """Expand an (L, T, R, B) box into its four corner points, ordered
    [lower-left, upper-left, lower-right, upper-right]."""
    left, top, right, bottom = bbox
    return [np.array([left, bottom]),
            np.array([left, top]),
            np.array([right, bottom]),
            np.array([right, top])]
# def drawbox((lb, lt, rb, rt), c, canvas, scale = 1):
# drawline(lb, lt, c, canvas, scale)
# drawline(rb, rt, c, canvas, scale)
# drawline(lb, rb, c, canvas, scale)
# drawline(lt, rt, c, canvas, scale)
def drawbox2(bbox, canvas, c, scale = 1):
    # expand the (L,T,R,B) box into corners, scale them, and draw with color c.
    # NOTE(review): the corners are pre-multiplied by `scale` AND `scale` is
    # also passed to drawbox -- confirm drawbox does not scale a second time.
    corners = box2corners(bbox)
    drawbox([corner*scale for corner in corners], c, canvas, scale)
def plotcuboid2d(corners, canvas, c, scale = 1):
    """Draw each cuboid in `corners` (8 projected 2D points per box) onto
    `canvas` in color `c`, tracing all twelve edges."""
    edges = ((0, 1), (0, 2), (0, 4),
             (1, 3), (1, 5), (2, 3),
             (2, 6), (3, 7), (4, 5),
             (4, 6), (5, 7), (6, 7))
    for box in corners:
        for a, b in edges:
            drawline(box[a] * scale, box[b] * scale, c, canvas, scale)
def masked_stats(masks, field):
    # mask-weighted mean and standard deviation of `field` over the spatial
    # axes (1, 2); EPS keeps the sqrt finite when the masked variance is ~0
    mean, var = tf.nn.weighted_moments(field, [1,2], masks)
    return mean, tf.sqrt(var+EPS)
def pcaembed(img, clip = True):
    """Project a K-channel feature image to 3 channels via PCA for display.

    Fits sklearn's PCA on a random 4096-pixel subset, transforms all pixels,
    and (optionally) clips to mean +/- 2 std before rescaling into [0, 255].
    Returns an [H, W, 3] float array.
    """
    H, W, K = np.shape(img)
    pixelskd = np.reshape(img, (H*W, K))
    P = PCA(3)
    #only fit a small subset for efficiency
    P.fit(np.random.permutation(pixelskd)[:4096])
    pixels3d = P.transform(pixelskd)
    out_img = np.reshape(pixels3d, (H, W, 3))
    if clip:
        # clamp outliers so the display range isn't dominated by a few pixels
        std = np.std(out_img)
        mu = np.mean(out_img)
        out_img = np.clip(out_img, mu-std*2, mu+std*2)
    # rescale to [0, 255]
    out_img -= np.min(out_img)
    out_img /= np.max(out_img)
    out_img *= 255
    return out_img
def tsneembed(img, clip = True):
    """Like pcaembed, but projects the K-channel image to 3 channels with
    sklearn's TSNE (perplexity 10); returns an [H, W, 3] array in [0, 255]."""
    H, W, K = np.shape(img)
    pixelskd = np.reshape(img, (H*W, K))
    P = TSNE(3, perplexity=10.0)
    pixels3d = P.fit_transform(pixelskd)
    out_img = np.reshape(pixels3d, (H, W, 3))
    if clip:
        # clamp outliers so the display range isn't dominated by a few pixels
        std = np.std(out_img)
        mu = np.mean(out_img)
        out_img = np.clip(out_img, mu-std*2, mu+std*2)
    # rescale to [0, 255]
    out_img -= np.min(out_img)
    out_img /= np.max(out_img)
    out_img *= 255
    return out_img
def get_mean_3D_com_in_mask(XYZ, mask):
    """Mask-weighted 3D center of mass of a pointcloud.

    Args:
        XYZ: [bs, N, 3] pointcloud.
        mask: weights, reshaped to [bs, N, 1]; EPS guards an empty mask.

    Returns:
        [bs, 3] center of mass.
    """
    print_shape(XYZ)  # debug output
    shape = XYZ.get_shape()
    bs = int(shape[0])
    [X,Y,Z] = tf.split(axis=2, num_or_size_splits=3, value=XYZ)
    mask = tf.reshape(mask,[bs,-1,1])
    mean_X = tf.reduce_sum(X*mask,axis=[1,2])/(EPS + tf.reduce_sum(mask,axis=[1,2]))
    mean_Y = tf.reduce_sum(Y*mask,axis=[1,2])/(EPS + tf.reduce_sum(mask,axis=[1,2]))
    mean_Z = tf.reduce_sum(Z*mask,axis=[1,2])/(EPS + tf.reduce_sum(mask,axis=[1,2]))
    com = tf.stack([mean_X,mean_Y,mean_Z],axis=1)
    return com
def get_xy_from_3d(p, fy, fx):
    """Project a batch of 3D points p ([bs, 3]) into pixel coordinates.

    Returns:
        (x, y): two [bs] tensors of image coordinates.
    """
    shape = p.get_shape()
    bs = int(shape[0])
    [X, Y, Z] = tf.unstack(p,axis=1)
    xyt_flat = World2Camera_single(X,Y,Z,fx,fy)
    [x,y] = tf.split(axis=1,num_or_size_splits=2,value=xyt_flat)
    x = tf.reshape(x,[bs])
    y = tf.reshape(y,[bs])
    return x, y
# def get_xyz_from_2D(x, y, Z, fy, fx, y0, x0):
# # print_shape(Z)
# shape = Z.get_shape()
# bs = int(shape[0])
# h = int(shape[1])
# w = int(shape[2])
# zero = tf.zeros([bs, h, w, 1])
# zero = tf.zeros([h, w, 1])
# mask = xy2dot_mask((x, y, zero))
# mask = tf.expand_dims(mask,0)
# XYZ = Camera2World_single(x,y,Z,fy,fx,y0,x0)
# XYZ = get_mean_3D_in_mask(XYZ, mask)
# return XYZ
def spin(image,depth,com,R,fy,fx):
    """Render the image before/at/after a rotation R about the com, all
    re-centered so the com sits at the image center.

    Returns:
        (image1, image2, image3): the view warped by R^-1, the original view
        (with a target grid drawn at the com), and the view warped by R.
    """
    shape = image.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # generate 3 images: before R, orig, and after R; all centered
    T = tf.zeros([bs,3])
    [R_inverse, T_inverse] = split_rt(safe_inverse(merge_rt(R,T)))
    spin_flow_12, _ = zprt2flow(depth,com,R,T,fy,fx)
    spin_flow_21, _ = zprt2flow(depth,com,R_inverse,T_inverse,fy,fx)
    image2 = xyz2target_grid(com, image, fy, fx)
    image3, occ1 = warper(image2, spin_flow_12)
    image1, occ3 = warper(image2, spin_flow_21)
    # zero out pixels the warp uncovers
    image1 = image1*occ1
    image3 = image3*occ3
    image1 = tf.reshape(image1, [bs,h,w,3])
    image3 = tf.reshape(image3, [bs,h,w,3])
    # translate so the com is at h/2, w/2
    center12 = p2flow_centered(com,h,w,fy,fx)
    image2, _ = warper(image2, center12)
    image1, _ = warper(image1, center12)
    image3, _ = warper(image3, center12)
    # a reverse warp gives the occlusion mask of the centering translation
    _, occ = warper(image2, -center12)
    image2 *= occ
    image1 *= occ
    image3 *= occ
    return image1, image2, image3
def split_tta(Z_b,Z_g,v_b,v_g,tab_val,toy_val,arm_val):
    """Split a scene into table/toy/arm masks by thresholding the depth
    difference md = Z_b - Z_g, gated by the two validity masks v_b, v_g.

    The toy mask excludes anything already claimed by table or arm.
    NOTE(review): the histogram summary `md_sum` is created but never
    returned or used here -- presumably collected by tf.summary elsewhere.
    """
    md = (Z_b-Z_g)
    md_sum = tf.summary.histogram("md", md)
    tab = tf.where(md < tab_val, tf.ones_like(md), tf.zeros_like(md))
    toy = tf.where(md > toy_val, tf.ones_like(md), tf.zeros_like(md))
    arm = tf.where(md > arm_val, tf.ones_like(md), tf.zeros_like(md))
    arm = arm*v_b*v_g
    tab = tab*v_b*v_g
    # anything that is table or arm cannot also be toy
    tab_or_arm = tf.where(tab+arm > 0, tf.ones_like(tab), tf.zeros_like(tab))
    toy = (toy*(1-tab_or_arm))*v_b*v_g
    return tab, toy, arm
def average_in_mask(x, mask):
    """Spatial mean of x over the locations selected by mask.

    Args:
        x: [B, H, W, C] tensor.
        mask: [B, H, W, 1] weighting mask.

    Returns:
        [B, C] masked average.

    Bug fix: the original summed `mask + EPS`, adding EPS at every one of the
    H*W locations and inflating the denominator by H*W*EPS; the guard against
    an empty mask now adds a single EPS to the summed mask, as intended.
    """
    c = int(x.get_shape()[3])
    mask_c = tf.tile(mask, [1, 1, 1, c])
    numer = tf.reduce_sum(x * mask_c, axis=[1, 2])
    denom = tf.reduce_sum(mask, axis=[1, 2]) + EPS
    return numer / denom
def get_com_free(Z1,Zb,mask,fy,fx):
    """Pick a 'free' center of mass for the masked object.

    Backprojects both the object depth Z1 and the background depth Zb, then
    returns the masked 3D com of the object surface (the table-surface and
    averaged alternatives below are disabled).
    """
    shape = Z1.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    [grid_x,grid_y] = meshgrid2D(bs, h, w)
    Z = tf.reshape(Z1,[bs,h,w])
    Zb = tf.reshape(Zb,[bs,h,w])
    XYZ = Camera2World_p(grid_x,grid_y,Z,fx,fy)
    XYZb = Camera2World_p(grid_x,grid_y,Zb,fx,fy)
    com_on_obj = get_mean_3D_com_in_mask(XYZ, mask)
    com_on_table = get_mean_3D_com_in_mask(XYZb, mask)
    # com_free = com_on_table
    com_free = com_on_obj
    # # com_free = (com_on_table+com_on_obj)/2
    # # let's try something else:
    # [grid_x,grid_y] = meshgrid2D(bs, h, w)
    # x = average_in_mask(tf.expand_dims(grid_x,3), mask)
    # y = average_in_mask(tf.expand_dims(grid_y,3), mask)
    # z = average_in_mask(Z1, mask)
    # # x = tf.Print(x, [x], 'x')
    # # y = tf.Print(y, [y], 'y')
    # x = tf.reshape(x,[-1])
    # y = tf.reshape(y,[-1])
    # z = tf.reshape(z,[-1])
    # com_free = Camera2World_single(x,y,z,fx,fy)
    return com_free
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R) :
    """Recover (x, y, z) Euler angles from the first rotation matrix in a batch.

    The result matches MATLAB's convention except x and z are swapped. Near
    gimbal lock (cos(y) ~ 0), z is unrecoverable and is reported as 0.
    """
    R = R[0]
    cos_y = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_y >= 1e-6:
        ang_x = math.atan2(R[2, 1], R[2, 2])
        ang_y = math.atan2(-R[2, 0], cos_y)
        ang_z = math.atan2(R[1, 0], R[0, 0])
    else:
        # gimbal-lock branch
        ang_x = math.atan2(-R[1, 2], R[1, 1])
        ang_y = math.atan2(-R[2, 0], cos_y)
        ang_z = 0
    return np.array([ang_x, ang_y, ang_z])
def triple_sin(a,b,g):
    """Apply tf.sin to the three angle tensors, returning them in order."""
    return tf.sin(a), tf.sin(b), tf.sin(g)
def triple_cos(a,b,g):
    """Apply tf.cos to the three angle tensors, returning them in order."""
    return tf.cos(a), tf.cos(b), tf.cos(g)
def sincos_norm(sin,cos):
    # project a predicted (sin, cos) pair back onto the unit circle by
    # dividing out its norm (`norm` is a helper defined elsewhere in the file)
    n = norm(tf.stack([sin,cos],axis=1))
    sin = sin/n
    cos = cos/n
    return sin, cos
def euler2mat(z, y, x):
    # https://github.com/tinghuiz/SfMLearner/blob/master/utils.py
    """Converts euler angles to rotation matrix
     TODO: remove the dimension for 'N' (deprecated for converting all source
           poses altogether)
     Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
    Args:
        z: rotation angle along z axis (in radians) -- size = [B, N]
        y: rotation angle along y axis (in radians) -- size = [B, N]
        x: rotation angle along x axis (in radians) -- size = [B, N]
    Returns:
        Rotation matrix corresponding to the euler angles -- size = [B, N, 3, 3]
    """
    B = tf.shape(z)[0]
    N = 1
    # clamp angles into [-pi, pi]
    z = tf.clip_by_value(z, -np.pi, np.pi)
    y = tf.clip_by_value(y, -np.pi, np.pi)
    x = tf.clip_by_value(x, -np.pi, np.pi)
    # Expand to B x N x 1 x 1
    z = tf.expand_dims(tf.expand_dims(z, -1), -1)
    y = tf.expand_dims(tf.expand_dims(y, -1), -1)
    x = tf.expand_dims(tf.expand_dims(x, -1), -1)
    zeros = tf.zeros([B, N, 1, 1])
    ones  = tf.ones([B, N, 1, 1])
    # rotation about the z axis
    cosz = tf.cos(z)
    sinz = tf.sin(z)
    rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
    rotz_2 = tf.concat([sinz,  cosz, zeros], axis=3)
    rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
    zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
    # rotation about the y axis
    cosy = tf.cos(y)
    siny = tf.sin(y)
    roty_1 = tf.concat([cosy, zeros, siny], axis=3)
    roty_2 = tf.concat([zeros, ones, zeros], axis=3)
    roty_3 = tf.concat([-siny,zeros, cosy], axis=3)
    ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
    # rotation about the x axis
    cosx = tf.cos(x)
    sinx = tf.sin(x)
    rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
    rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
    rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
    xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
    # compose in x-y-z order
    rotMat = tf.matmul(tf.matmul(xmat, ymat), zmat)
    return rotMat
# def masked_min_max(x, mask):
# # uh, this doesn't really work
# indices = tf.squeeze(tf.where(tf.greater(mask, 0.0)))
# min = tf.reduce_min(tf.gather_nd(x,indices))
# # print_shape(min)
# max = tf.reduce_max(tf.gather_nd(x,indices))
# # print_shape(max)
# return min, max
# def masked_min_max((x, mask)):
# # this doesn't work!
# # use the separate max/min things below
# indices = tf.squeeze(tf.where(tf.greater(mask, 0.0)))
# min = tf.reduce_min(tf.gather_nd(x,indices))
# max = tf.reduce_max(tf.gather_nd(x,indices))
# return min, max
# def masked_min((x, mask)):
# indices = tf.squeeze(tf.where(tf.greater(mask, 0.0)))
# min = tf.reduce_min(tf.gather_nd(x,indices))
# return min
# def masked_max((x, mask)):
# indices = tf.squeeze(tf.where(tf.greater(mask, 0.0)))
# max = tf.reduce_max(tf.gather_nd(x,indices))
# return max
def crop_and_resize_around_com(com, image, sideLength, fy, fx):
    """Crop a sideLength x sideLength window centered on the 2D projection of
    the 3D com, via tf.image.crop_and_resize.

    The op could resize as well, but the target size equals the crop size,
    so this effectively just crops (see crop_around_com for a simpler path).
    """
    # this func is flexible enough to crop AND resize,
    # but right now it actually just crops
    # crop_around_com (below) is more straightforward
    shape = image.get_shape()
    bs = int(shape[0])
    h = int(shape[1])
    w = int(shape[2])
    # get the xy of the com
    com_x, com_y = get_xy_from_3d(com, fy, fx)
    # tensorflow wants the coords normalized for height/width
    y1 = (com_y - sideLength/2)/h
    y2 = (com_y + sideLength/2)/h
    x1 = (com_x - sideLength/2)/w
    x2 = (com_x + sideLength/2)/w
    boxes = tf.stack([y1,x1,y2,x2],axis=1)
    # one box per batch ind
    box_ind = tf.cast(tf.range(0, bs), tf.int32)
    size = tf.constant([sideLength, sideLength], tf.int32)
    # crop and resize!
    u_image = tf.image.crop_and_resize(image, boxes, box_ind, size)
    return u_image
def crop_around_com(com, image, sideLength, fy, fx):
    """Extract a sideLength x sideLength glimpse of `image` centered on the
    2D projection of the 3D com."""
    # project the com into pixel coordinates
    cx, cy = get_xy_from_3d(com, fy, fx)
    # extract_glimpse expects per-batch (y, x) centers
    centers = tf.stack([cy, cx], axis=1)
    return tf.image.extract_glimpse(image,
                                    [sideLength, sideLength],
                                    centers,
                                    centered=False,
                                    normalized=False)
def basic_blur(image):
    """Blur a single-channel image with a fixed 3x3 Gaussian-like kernel.

    SYMMETRIC padding plus VALID convolution preserves the spatial size.
    """
    # kernel weights sum to 1; the transpose reshapes the [1,1,3,3] constant
    # into conv2d's [kh, kw, in_ch, out_ch] = [3,3,1,1] layout
    blur_kernel = tf.transpose(tf.constant([[[[1./16,1./8,1./16],
                                              [1./8,1./4,1./8],
                                              [1./16,1./8,1./16]]]],
                                           dtype=tf.float32),perm=[3,2,1,0])
    image = tf.pad(image, [[0, 0], [1, 1], [1, 1], [0, 0]], "SYMMETRIC")
    image = tf.nn.conv2d(image, blur_kernel, strides=[1,1,1,1], padding="VALID")
    return image
def judge_inters_sq(up_left1, low_right1, up_left2, low_right2):
    """Return True when two axis-aligned squares strictly overlap.

    Each square is given by its upper-left and lower-right corners. A corner
    lying exactly on the other square's boundary does NOT count.
    """
    def _strictly_inside(pt, ul, lr):
        return ul[0] < pt[0] < lr[0] and ul[1] < pt[1] < lr[1]

    def _corners(ul, lr):
        return [ul, lr, [ul[0], lr[1]], [lr[0], ul[1]]]

    # overlap exists iff some corner of one square lies inside the other
    if any(_strictly_inside(p, up_left2, low_right2)
           for p in _corners(up_left1, low_right1)):
        return True
    return any(_strictly_inside(p, up_left1, low_right1)
               for p in _corners(up_left2, low_right2))
def oned2inferno(d, norm=True):
    # convert a 1chan input to a 3chan image output
    """Colorize a single-channel map with the inferno colormap.

    When `norm` is True the input is normalized first (via the `normalize`
    helper defined elsewhere); otherwise values are assumed to be in [0, 1].
    Returns a uint8 RGB tensor.
    """
    if norm:
        d = normalize(d)
        rgb = colorize(d, cmap='inferno')
    else:
        rgb = colorize(d, vmin=0., vmax=1., cmap='inferno')
    rgb = tf.cast(255.0*rgb, tf.uint8)
    # rgb = tf.reshape(rgb, [-1, hyp.H, hyp.W, 3])
    # rgb = tf.expand_dims(rgb, axis=0)
    return rgb
def depth2inferno(depth):
    """Colorize a metric depth map: log-scale it, clamp to [0, ln(80)],
    normalize, and map through the inferno colormap."""
    # depth should be [B x H x W x 1]
    max_depth = 4.38 # ln(80)
    depth = tf.log(depth)
    depth = tf.clip_by_value(depth, 0, max_depth)
    depth = depth/max_depth
    depth_im = oned2inferno(depth, norm=False)
    return depth_im
| {"/utils.py": ["/flow_transformer.py", "/colorize.py"]} |
76,077 | shreshtashetty/MulticamCalibrationCheck | refs/heads/master | /colorize.py | import matplotlib
import matplotlib.cm
import tensorflow as tf
import numpy as np
def print_shape(t):
    """Debug helper: print a tensor's name followed by its static shape list."""
    print("{} {}".format(t.name, t.get_shape().as_list()))
def colorize(value, normalize=True, vmin=None, vmax=None, cmap=None, vals=255):
    """
    A utility function for TensorFlow that maps a grayscale image to a matplotlib
    colormap for use with TensorBoard image summaries.
    By default it will normalize the input value to the range 0..1 before mapping
    to a grayscale colormap.
    Arguments:
      - value: 2D Tensor of shape [height, width] or 3D Tensor of shape
        [height, width, 1].
      - normalize: when True, rescale `value` into [vmin, vmax] and quantize to
        `vals` bins; when False, `value` is assumed to already hold integer
        palette indices.
      - vmin: the minimum value of the range used for normalization.
        (Default: value minimum)
      - vmax: the maximum value of the range used for normalization.
        (Default: value maximum)
      - cmap: a valid cmap named for use with matplotlib's `get_cmap`, or the
        special name 'vkitti' to use the hard-coded Virtual KITTI 15-class
        semantic palette below.
        (Default: 'gray')
      - vals: the number of values in the cmap minus one
    Example usage:
    ```
    output = tf.random_uniform(shape=[256, 256, 1])
    output_color = colorize(output, vmin=0.0, vmax=1.0, cmap='viridis')
    tf.summary.image('output', output_color)
    ```
    Returns a 3D tensor of shape [height, width, 3].
    """
    value = tf.squeeze(value, axis=3)
    if normalize:
        vmin = tf.reduce_min(value) if vmin is None else vmin
        vmax = tf.reduce_max(value) if vmax is None else vmax
        value = (value - vmin) / (vmax - vmin) # vmin..vmax
        # dma = tf.reduce_max(value)
        # dma = tf.Print(dma, [dma], 'dma', summarize=16)
        # tf.summary.histogram('dma', dma) # just so tf.Print works
        # quantize
        indices = tf.to_int32(tf.round(value * float(vals)))
    else:
        # quantize
        indices = tf.to_int32(value)
    # Virtual KITTI semantic palette (class id -> RGB):
    # 00 Unknown 0 0 0
    # 01 Terrain 210 0 200
    # 02 Sky 90 200 255
    # 03 Tree 0 199 0
    # 04 Vegetation  90 240 0
    # 05 Building 140 140 140
    # 06 Road 100 60 100
    # 07 GuardRail 255 100 255
    # 08 TrafficSign 255 255 0
    # 09 TrafficLight 200 200 0
    # 10 Pole 255 130 0
    # 11 Misc 80 80 80
    # 12 Truck 160 60 60
    # 13 Car:0 200 200 200
    if cmap=='vkitti':
        colors = np.array([0, 0, 0,
                           210, 0, 200,
                           90, 200, 255,
                           0, 199, 0,
                           90, 240, 0,
                           140, 140, 140,
                           100, 60, 100,
                           255, 100, 255,
                           255, 255, 0,
                           200, 200, 0,
                           255, 130, 0,
                           80, 80, 80,
                           160, 60, 60,
                           200, 200, 200,
                           230, 208, 202]);
        colors = np.reshape(colors, [15, 3]).astype(np.float32)/255.0
        colors = tf.constant(colors)
    else:
        # gather
        cm = matplotlib.cm.get_cmap(cmap if cmap is not None else 'gray')
        # some matplotlib colormaps only expose their table via a call
        if cmap=='RdBu' or cmap=='RdYlGn':
            colors = cm(np.arange(256))[:, :3]
        else:
            colors = cm.colors
        colors = np.array(colors).astype(np.float32)
        colors = np.reshape(colors, [-1, 3])
        colors = tf.constant(colors, dtype=tf.float32)
    # look up each quantized index in the palette
    value = tf.gather(colors, indices)
    # value is float32, in [0,1]
    return value
| {"/utils.py": ["/flow_transformer.py", "/colorize.py"]} |
76,078 | shreshtashetty/MulticamCalibrationCheck | refs/heads/master | /flow_transformer.py | import tensorflow as tf
def transformer(im, flow, out_size, name='SpatialTransformer', **kwargs):
    """Backward-warp a batch of images with a dense 2-D flow field (TF1 graph code).

    Samples `im` at `base_grid + flow` using bilinear interpolation, and also
    returns a second tensor built by scattering the bilinear weights back onto
    the flat source grid -- presumably a coverage/occlusion indicator
    (NOTE(review): confirm the intended semantics of `occ` against callers).

    :param im: source batch, indexed as [batch, height, width, channels]
        (established by the 4-way get_shape() unpacking below).
    :param flow: per-pixel displacement with 2 channels on axis 3; the
        channels are unstacked into x and y components in _transform.
    :param out_size: (out_h, out_w) of the sampling grid.
    :param name: tf.variable_scope wrapping all created ops.
    :param kwargs: accepted but unused.
    :return: tuple (warp, occ), each of shape [batch, out_h, out_w, channels].
    """
    # Repeat each element of 1-D `x` n_repeats times, contiguously
    # ([a, b] -> [a, ..., a, b, ..., b]), via an outer product with ones.
    def _repeat(x, n_repeats):
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])
    # Bilinearly sample `im` at the flat continuous coordinates (x, y);
    # also build the weighted sampled-index masks described above.
    def _interpolateHW(im, x, y, out_size):
        with tf.variable_scope('_interpolateHW'):
            # constants
            shape = im.get_shape()
            bs = int(shape[0])
            h = int(shape[1])
            w = int(shape[2])
            c = int(shape[3])
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            h_f = tf.cast(h, 'float32')
            w_f = tf.cast(w, 'float32')
            out_h = out_size[0]
            out_w = out_size[1]
            # clip coordinates to [0, dim-1]
            x = tf.clip_by_value(x, 0, w_f-1)
            y = tf.clip_by_value(y, 0, h_f-1)
            # integer corners around each sample point; the +1 neighbour is
            # clamped so it never reads past the image border
            x0_f = tf.floor(x)
            y0_f = tf.floor(y)
            x1_f = x0_f + 1
            y1_f = y0_f + 1
            x0 = tf.cast(x0_f, 'int32')
            y0 = tf.cast(y0_f, 'int32')
            x1 = tf.cast(tf.minimum(x1_f, w_f-1), 'int32')
            y1 = tf.cast(tf.minimum(y1_f, h_f-1), 'int32')
            # flat indices into the [bs*h*w, c] image for the four corners
            dim2 = w
            dim1 = w*h
            base = _repeat(tf.range(bs)*dim1, out_h*out_w)
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1
            # use indices to lookup pixels in the flat image and restore c dim
            im_flat = tf.reshape(im, tf.stack([-1, c]))
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # grab the occluded pixels this flow uncovers
            # (0/1 masks over the flat source grid marking sampled indices;
            # validate_indices=False tolerates unsorted/duplicate indices --
            # duplicates simply rewrite the same 1.  NOTE(review):
            # tf.sparse_to_dense is deprecated in later TF1 releases.)
            d_a = tf.sparse_to_dense(tf.cast(idx_a,'int32'), [bs*h*w], 1, default_value=0, validate_indices=False, name="d_a")
            d_a = tf.reshape(tf.tile(tf.reshape(tf.cast(d_a,'float32'),[bs,h,w,1]),[1,1,1,c]),[-1,c])
            d_b = tf.sparse_to_dense(tf.cast(idx_b,'int32'), [bs*h*w], 1, default_value=0, validate_indices=False, name="d_b")
            d_b = tf.reshape(tf.tile(tf.reshape(tf.cast(d_b,'float32'),[bs,h,w,1]),[1,1,1,c]),[-1,c])
            d_c = tf.sparse_to_dense(tf.cast(idx_c,'int32'), [bs*h*w], 1, default_value=0, validate_indices=False, name="d_c")
            d_c = tf.reshape(tf.tile(tf.reshape(tf.cast(d_c,'float32'),[bs,h,w,1]),[1,1,1,c]),[-1,c])
            d_d = tf.sparse_to_dense(tf.cast(idx_d,'int32'), [bs*h*w], 1, default_value=0, validate_indices=False, name="d_d")
            d_d = tf.reshape(tf.tile(tf.reshape(tf.cast(d_d,'float32'),[bs,h,w,1]),[1,1,1,c]),[-1,c])
            # and finally calculate interpolated values
            wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
            wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
            wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
            wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
            warp = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            occ = tf.add_n([wa*d_a, wb*d_b, wc*d_c, wd*d_d])
            return warp, occ
    # Dense pixel grid of homogeneous coordinates [x; y; 1], shape [3, h*w].
    def _meshgridHW(h, w):
        with tf.variable_scope('_meshgridHW'):
            # This should be equivalent to:
            #   x_t, y_t = np.meshgrid(np.linspace(0, w-1, w),
            #                          np.linspace(0, h-1, h))
            #   ones = np.ones(np.prod(x_t.shape))
            #   grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
            x_t = tf.matmul(tf.ones(shape=tf.stack([h, 1])),
                tf.transpose(tf.expand_dims(tf.linspace(0.0, w-1, w), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace(0.0, h-1, h), 1),
                tf.ones(shape=tf.stack([1, w])))
            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid
    # Offset the base grid by the flow field and sample `im` at the result.
    def _transform(flow, im, out_size):
        with tf.variable_scope('_transform'):
            shape = im.get_shape()
            bs = int(shape[0])
            h = int(shape[1])
            w = int(shape[2])
            c = int(shape[3])
            # grid of (x_t, y_t, 1), eq (1) in ref [1]
            # (h_f / w_f are computed but not used in this scope)
            h_f = tf.cast(h, 'float32')
            w_f = tf.cast(w, 'float32')
            out_h = out_size[0]
            out_w = out_size[1]
            grid = _meshgridHW(out_h, out_w)
            # split the 2-channel flow into x/y displacement planes
            [x_f, y_f] = tf.unstack(flow, axis=3)
            x_f_flat = tf.expand_dims(tf.reshape(x_f, (bs, -1)),1)
            y_f_flat = tf.expand_dims(tf.reshape(y_f, (bs, -1)),1)
            # zero displacement on the homogeneous row
            zeros = tf.zeros_like(x_f_flat)
            flowgrid = tf.concat(axis=1, values=[x_f_flat,y_f_flat,zeros])
            # broadcast the [3, N] base grid to [bs, 3, N]
            grid = tf.expand_dims(grid, 0)
            grid = tf.reshape(grid, [-1])
            grid = tf.tile(grid, tf.stack([bs]))
            grid = tf.reshape(grid, tf.stack([bs, 3, -1]),name="grid")
            # using flow from 1 to 2, warp image 2 to image 1
            grid=grid+flowgrid
            x_s = tf.slice(grid, [0, 0, 0], [-1, 1, -1])
            y_s = tf.slice(grid, [0, 1, 0], [-1, 1, -1])
            x_s_flat = tf.reshape(x_s, [-1])
            y_s_flat = tf.reshape(y_s, [-1])
            # NOTE: `w` below shadows the integer image-width local above;
            # here it is the warped pixel tensor from _interpolateHW.
            w, o = _interpolateHW(
                im, x_s_flat, y_s_flat,
                out_size)
            warp = tf.reshape(w,tf.stack([bs,
                              out_h,
                              out_w,
                              c]),
                              name="warp")
            occ = tf.reshape(o,tf.stack([bs,
                             out_h,
                             out_w,
                             c]),
                             name="occ")
            return warp, occ
    with tf.variable_scope(name):
        warp, occ = _transform(flow, im, out_size)
        return warp, occ
| {"/utils.py": ["/flow_transformer.py", "/colorize.py"]} |
76,079 | shreshtashetty/MulticamCalibrationCheck | refs/heads/master | /check_warp.py | from PIL import Image
import scipy.misc
import scipy.io
import os
import argparse
import numpy as np
import tensorflow as tf
import sys
from os import listdir
from os.path import isfile, join
import glob, os
from utils import *
import imageio
# Sanity-check script (Python 2 syntax: bare `print`): backward-warp a frame
# from one CARLA camera into another using ground-truth depth and the relative
# camera pose, then save flow/depth/warp visualisations into out_dir.
out_dir = '.'
# for idx in [0, 10, 20]:
for idx in [100]:
    # camera 2's frame is taken 10 frames after camera 1's
    idx2 = idx + 10
    # set up intrinsics
    H = 128
    W = 384
    FOV = 90.0
    # pinhole focal length from horizontal FOV (degrees): W / (2*tan(FOV/2));
    # pi/360 converts degrees to half-angle radians
    focal = float(float(W)/(2.0*np.tan(FOV*np.pi/360.0)))
    print 'focal = %.2f' % focal
    fx = focal
    fy = focal
    # principal point at the image centre
    x0 = float(W)/2.0
    y0 = float(H)/2.0
    # set data paths
    root = 'city_02_vehicles_140_episode_0061_2019-05-07'
    cam1 = 0
    cam2 = 20
    depth_path1 = '%s/CameraDepth%d/%06d.npy' % (root, cam1, idx)
    depth_path2 = '%s/CameraDepth%d/%06d.npy' % (root, cam2, idx2)
    rgb_path1 = '%s/CameraRGB%d/%06d.npy' % (root, cam1, idx)
    rgb_path2 = '%s/CameraRGB%d/%06d.npy' % (root, cam2, idx2)
    p1_path = '%s/CameraRGB%d/%06d_c2w.npy' % (root, cam1, idx)
    p2_path = '%s/CameraRGB%d/%06d_c2w.npy' % (root, cam2, idx2)
    # load the image data
    i1 = np.load(rgb_path1)
    i2 = np.load(rgb_path2)
    i1 = tf.expand_dims(tf.constant(i1.astype(np.uint8)), axis=0)
    i2 = tf.expand_dims(tf.constant(i2.astype(np.uint8)), axis=0)
    # load the camera poses
    origin_T_carla1 = np.load(p1_path)
    origin_T_carla2 = np.load(p2_path)
    # convert camera poses into standard format (origin_T_cam):
    # flip the x and y axes of the CARLA camera frame
    carla_T_cam = np.eye(4, dtype=np.float32)
    carla_T_cam[0,0] = -1.0
    carla_T_cam[1,1] = -1.0
    origin_T_cam1 = np.matmul(origin_T_carla1, carla_T_cam)
    origin_T_cam2 = np.matmul(origin_T_carla2, carla_T_cam)
    # load the depth data
    d1 = np.load(depth_path1)
    d1 = d1.astype(np.float64)
    # scale factor of 1000 -- presumably km -> m (or m -> mm); confirm
    # against the CARLA depth export convention
    d1 = d1*1000.0
    d1 = tf.constant(d1)
    d1 = tf.expand_dims(tf.expand_dims(d1, axis=0), axis=3)
    # compute the relative transformation between cameras
    cam2_T_cam1 = np.matmul(np.linalg.inv(origin_T_cam2), origin_T_cam1)
    ## tensorflow
    # add a batch dim (note: fx/fy are rebound from floats to rank-1 tensors)
    cam2_T_cam1 = tf.expand_dims(tf.constant(cam2_T_cam1.astype(np.float32)), 0)
    x0_ = tf.expand_dims(x0,0)
    y0_ = tf.expand_dims(y0,0)
    fx = tf.expand_dims(fx,0)
    fy = tf.expand_dims(fy,0)
    cam2_T_cam1 = tf.cast(cam2_T_cam1, tf.float32)
    d1 = tf.cast(d1, tf.float32)
    # turn the depth and relative transformation into a flow field
    flow, XYZ2 = zrt2flow(d1, cam2_T_cam1, fy, fx, y0_, x0_)
    # visualize this
    flow_z = flow2color(flow, adaptive_max=False, maxFlow=100)
    # turn the images into floats (in range [-0.5, 0.5])
    i1_g = preprocess_color(i1)
    i2_g = preprocess_color(i2)
    # warp image2 into image1
    i1_m, _ = warper(i2_g, flow)
    # convert things to uint8 images for visualization
    d1_z = depth2inferno(d1)
    i1_m = back2color(i1_m)
    i1_g = back2color(i1_g)
    i2_g = back2color(i2_g)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # NOTE(review): `coord` is created but never used below
        coord = tf.train.Coordinator()
        # evaluate the tensors
        cam2_T_cam1_, flow_z_, d1_z_, i1_g_, i2_g_, i1_m_ = sess.run([cam2_T_cam1, flow_z, d1_z, i1_g, i2_g, i1_m])
        # print/save what we got
        print 'cam2_T_cam1:', cam2_T_cam1_
        scipy.misc.imsave("%s/%d_zrt_flow.png" % (out_dir, idx), flow_z_[0])
        scipy.misc.imsave("%s/%d_d1.png" % (out_dir, idx), d1_z_[0])
        scipy.misc.imsave("%s/%d_i1_original.png" % (out_dir, idx), i1_g_[0])
        scipy.misc.imsave("%s/%d_i2_original.png" % (out_dir, idx), i2_g_[0])
        scipy.misc.imsave("%s/%d_i2_zrt_warp.png" % (out_dir, idx), i1_m_[0])
        imageio.mimsave("%s/%d_i1i2_zrt_warp.gif" % (out_dir, idx), [i1_g_[0],i1_m_[0]], 'GIF')
| {"/utils.py": ["/flow_transformer.py", "/colorize.py"]} |
76,092 | ec500-software-engineering/exercise-2-ffmpeg-YanzuWuu | refs/heads/master | /test_ex2.py | import subprocess
from pytest import approx
import json
from pathlib import Path
import Exercise_2 as run
def ffprobe_sync(filein: Path) -> dict:
    """Run ffprobe on *filein* and return its media metadata as a dict."""
    cmd = ['ffprobe', '-v', 'warning',
           '-print_format', 'json',
           '-show_streams',
           '-show_format',
           str(filein)]
    raw = subprocess.check_output(cmd, universal_newlines=True)
    return json.loads(raw)
def test_duration():
    """Each converted video must keep its source's duration (within 1%).

    Refactored from four copy-pasted probe/assert sequences into one
    data-driven loop: for every source video, probe its duration once and
    compare it against both the 480p and 720p conversions.
    """
    cases = {
        '11.mp4': ('480_11.mp4', '720_11.mp4'),
        '22.mp4': ('480_22.mp4', '720_22.mp4'),
    }
    for src, converted in cases.items():
        src_duration = float(ffprobe_sync(src)['streams'][0]['duration'])
        for out in converted:
            out_duration = float(ffprobe_sync(out)['streams'][0]['duration'])
            # re-encoding may shift the duration slightly; allow 1% drift
            assert src_duration == approx(out_duration, rel=0.01)
def main():
    # Convert the sample videos, then verify the output durations.
    run.convert_video()
    test_duration()
| {"/test_ex2.py": ["/Exercise_2.py"]} |
76,093 | ec500-software-engineering/exercise-2-ffmpeg-YanzuWuu | refs/heads/master | /Exercise_2.py | import os
import subprocess
import queue
import asyncio
import time
def convert_video():
    """Convert '11.mp4' and '22.mp4' (if present in the CWD) into 720p and 480p copies.

    Fixes two defects in the original:
    - The coroutines called the blocking ``subprocess.call`` directly, so the
      event loop stalled and the "concurrent" 720p/480p conversions actually
      ran one after the other.  The blocking calls now run on executor
      threads via ``loop.run_in_executor`` so the two jobs overlap.
    - ffmpeg was invoked through ``shell=True`` string concatenation; an argv
      list (shell=False) is immune to spaces/metacharacters in the path.
    """
    path = os.getcwd() + '/'
    print(path)
    # Collect the known sample videos present in the working directory.
    q = queue.Queue()
    for name in os.listdir(path):
        if name in ('11.mp4', '22.mp4'):
            print(name)
            q.put(name)

    def _run_ffmpeg(video, bitrate, size, prefix):
        # Blocking ffmpeg invocation; executed on a worker thread.
        cmd = ['ffmpeg', '-i', path + video, '-b', bitrate, '-r', '30',
               '-s', size, '-c:a', 'copy', path + prefix + video]
        return subprocess.call(cmd)

    loop = asyncio.new_event_loop()

    async def _transcode(video, bitrate, size, prefix, label):
        try:
            await loop.run_in_executor(None, _run_ffmpeg, video, bitrate, size, prefix)
            return label + ' videos all transferred'
        except Exception:
            return 'transfer failed'

    count = 0
    try:
        while not q.empty():
            video = q.get()
            tasks = [
                loop.create_task(_transcode(video, '2M', '1280x720', '720_', '720p')),
                loop.create_task(_transcode(video, '1M', '720x480', '480_', '480p')),
            ]
            loop.run_until_complete(asyncio.wait(tasks))
            for task in tasks:
                print('Task: ', task.result())
            count += 1
            q.task_done()
    finally:
        loop.close()
    print(str(count) + ' videos have been transferred into 720p and 480p type')
def main():
    """Run the conversion and report the elapsed wall-clock time."""
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    start = time.perf_counter()
    convert_video()
    elapsed = time.perf_counter() - start
    print("Time used:", elapsed)
# Run the conversion only when executed as a script (not on import).
if __name__=='__main__':
    main()
| {"/test_ex2.py": ["/Exercise_2.py"]} |
76,094 | ec500-software-engineering/exercise-2-ffmpeg-YanzuWuu | refs/heads/master | /setup.py | from setuptools import setup
# Packaging metadata for the exercise-2 video converter.
# Fixed the garbled description ("use nsync ... inot two version").
setup(
    name='ex2',
    version='0.0.1',
    packages=[''],
    url='',
    license='',
    author='Yuncheng Zhu',
    author_email='yczhu1@bu.edu',
    # dev/test tooling, pulled in on install
    install_requires=["flake8", "pytest"],
    description='use asyncio and ffmpeg to transfer video into two versions'
)
| {"/test_ex2.py": ["/Exercise_2.py"]} |
76,286 | iskyd/babs | refs/heads/master | /babs/__init__.py | from .note import Note
from .rest import Rest
from .note_list import NoteList
from .chord import Chord
from .scale import Scale | {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,287 | iskyd/babs | refs/heads/master | /tests/test_rest.py | from __future__ import division
from babs import Rest
def test_create():
    """A Rest defaults to a whole-note duration; an explicit duration is kept."""
    assert Rest().duration == 4/4
    assert Rest(duration=1/4).duration == 1/4
def test_repr():
    """repr() must round-trip through eval() preserving the duration."""
    assert eval(repr(Rest())).duration == 4 / 4
    assert eval(repr(Rest(duration=3/4))).duration == 3/4
def test_eq():
    """Rests with equal durations compare equal; unequal durations do not."""
    assert Rest() == Rest()
    assert Rest(duration=1/8) == Rest(duration=1/8)
    assert not Rest() == Rest(duration=1/8)
def test_neq():
    """!= must be the exact complement of ==."""
    assert not Rest() != Rest()
    assert not Rest(duration=1/8) != Rest(duration=1/8)
    assert Rest() != Rest(duration=1/8)
def test_lt():
    """< orders rests by duration and is strict (irreflexive)."""
    assert Rest(duration=1/8) < Rest()
    assert Rest(duration=1/16) < Rest(duration=1/8)
    assert not Rest() < Rest()
    assert not Rest(duration=1/4) < Rest(duration=1/8)
def test_lte():
    """<= orders rests by duration and admits equality."""
    assert Rest() <= Rest()
    assert Rest(duration=1 / 8) <= Rest(duration=1 / 8)
    assert Rest(duration=1/16) <= Rest(duration=1/8)
    assert not Rest(duration=1/4) <= Rest(duration=1/8)
def test_gt():
    """> orders rests by duration and is strict (irreflexive)."""
    assert Rest() > Rest(duration=1/8)
    assert Rest(duration=1/8) > Rest(duration=1/16)
    assert not Rest() > Rest()
    assert not Rest(duration=1/8) > Rest(duration=1/4)
def test_gte():
    """>= orders rests by duration and admits equality."""
    assert Rest() >= Rest()
    assert Rest(duration=1/8) >= Rest(duration=1/8)
    assert Rest(duration=1/8) >= Rest(duration=1/16)
    assert not Rest(duration=1/8) >= Rest(duration=1/4)
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,288 | iskyd/babs | refs/heads/master | /tests/test_scale.py | import pytest
from babs import Note, Scale, NoteList
from babs.exceptions import ScaleException
def test_create():
    """Scale() keeps notes sorted (ascending by default, or per `order`),
    de-duplicates identical notes, and rejects invalid input when strict."""
    s = Scale(Note(name='C'), Note(name='D'), Note(name='E'))
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert len(s.notes) == 3
    s = Scale(Note(name='C'), Note(name='Bb', octave=3), Note(name='D'), Note(name='E'))
    assert s.notes[0] == Note(name='Bb', octave=3)
    assert s.notes[1] == Note(name='C')
    assert s.notes[2] == Note(name='D')
    assert s.notes[3] == Note(name='E')
    assert len(s.notes) == 4
    # duplicate notes collapse to one
    s = Scale(Note(name='C'), Note(name='C'), Note(name='D'), Note(name='E'))
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert len(s.notes) == 3
    # same name in a different octave is a distinct note
    s = Scale(Note(name='C'), Note(name='C', octave=5), Note(name='D'), Note(name='E'))
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert s.notes[3] == Note(name='C', octave=5)
    assert len(s.notes) == 4
    s = Scale(Note(name='C', octave=5), Note(name='D', octave=3), Note(name='D'))
    assert s.notes[0] == Note(name='D', octave=3)
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='C', octave=5)
    assert len(s.notes) == 3
    s = Scale(Note(name='C'), Note(name='Bb', octave=3), Note(name='D'), order=Scale.ASCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='Bb', octave=3)
    assert s.notes[1] == Note(name='C')
    assert s.notes[2] == Note(name='D')
    assert len(s.notes) == 3
    s = Scale(Note(name='C', octave=5), Note(name='D', octave=3), Note(name='D'), Note(name='G'), order=Scale.ASCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='D', octave=3)
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='G')
    assert s.notes[3] == Note(name='C', octave=5)
    assert len(s.notes) == 4
    s = Scale(Note(name='C'), Note(name='Bb', octave=3), Note(name='D'), Note(name='E'), order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='E')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='C')
    assert s.notes[3] == Note(name='Bb', octave=3)
    assert len(s.notes) == 4
    s = Scale(Note(name='C', octave=5), Note(name='D', octave=3), Note(name='D'), order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='C', octave=5)
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='D', octave=3)
    assert len(s.notes) == 3
    # invalid construction raises when strict (the default)
    with pytest.raises(ScaleException) as exc:
        Scale()
    assert 'Invalid Scale.' == str(exc.value)
    with pytest.raises(ScaleException) as exc:
        Scale('invalid')
    assert 'Invalid Scale.' == str(exc.value)
    with pytest.raises(ScaleException) as exc:
        Scale('invalid', 'invalid')
    assert 'Invalid Scale.' == str(exc.value)
    # ... but is tolerated with strict=False
    try:
        Scale(Note(name='C'), 'invalid', strict=False)
        Scale('invalid', strict=False)
        Scale(strict=False)
    except ScaleException:
        pytest.fail("Unexpected NoteListException")
def test_create_from_root():
    """create_from_root builds the expected notes for major/minor roots,
    honours ascending/descending order, and propagates `strict`."""
    s = Scale.create_from_root(root=Note(name='C'))
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='G')
    assert s.notes[5] == Note(name='A')
    assert s.notes[6] == Note(name='B')
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, order=Scale.ASCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='Eb')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='G')
    assert s.notes[5] == Note(name='Ab')
    assert s.notes[6] == Note(name='Bb')
    s = Scale.create_from_root(root=Note(name='C'), order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='B')
    assert s.notes[1] == Note(name='A')
    assert s.notes[2] == Note(name='G')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='E')
    assert s.notes[5] == Note(name='D')
    assert s.notes[6] == Note(name='C')
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='Bb')
    assert s.notes[1] == Note(name='Ab')
    assert s.notes[2] == Note(name='G')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='Eb')
    assert s.notes[5] == Note(name='D')
    assert s.notes[6] == Note(name='C')
    # `strict` defaults to True and is forwarded to the Scale
    assert Scale.create_from_root(root=Note(name='C')).strict is True
    assert Scale.create_from_root(root=Note(name='C'), strict=True).strict is True
    assert Scale.create_from_root(root=Note(name='C'), strict=False).strict is False
def test_create_from_root_with_scale_type():
    """Every supported scale_type yields its textbook interval pattern from C."""
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.MAJOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F'), Note(name='G'), Note(name='A'), Note(name='B'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.MINOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='Ab'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.IONIAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F'), Note(name='G'), Note(name='A'), Note(name='B'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.DORIAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='A'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.PHRIGIAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='Db'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='Ab'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.LIDYAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F#'), Note(name='G'), Note(name='A'), Note(name='B'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.DOMINANT_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F'), Note(name='G'), Note(name='A'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.AEOLIAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='Ab'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.LOCRIAN_TYPE)
    assert s == Scale(Note(name='C'), Note(name='Db'), Note(name='Eb'), Note(name='F'), Note(name='Gb'), Note(name='Ab'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.PENTATONIC_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='G'), Note(name='A'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.PENTATONIC_MINOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.BLUES_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='E'), Note(name='G'), Note(name='A'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.BLUES_MINOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='Eb'), Note(name='F'), Note(name='Gb'), Note(name='G'), Note(name='Bb'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.MELODIC_MINOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='A'), Note(name='B'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.HARMONIC_MINOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='Eb'), Note(name='F'), Note(name='G'), Note(name='Ab'), Note(name='B'))
    s = Scale.create_from_root(root=Note(name='C'), scale_type=Scale.HARMONIC_MAJOR_TYPE)
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F'), Note(name='G'), Note(name='Ab'), Note(name='B'))
def test_create_from_root_with_from_root_octave():
    """OCTAVE_TYPE_FROM_ROOT keeps the root's octave and bumps it only when
    the generated pitch wraps past B (e.g. F3 major reaches C4)."""
    s = Scale.create_from_root(root=Note(name='C', octave=3), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, octave=NoteList.OCTAVE_TYPE_FROM_ROOT)
    assert s.notes[0] == Note(name='C', octave=3)
    assert s.notes[1] == Note(name='D', octave=3)
    assert s.notes[2] == Note(name='Eb', octave=3)
    assert s.notes[3] == Note(name='F', octave=3)
    assert s.notes[4] == Note(name='G', octave=3)
    assert s.notes[5] == Note(name='Ab', octave=3)
    assert s.notes[6] == Note(name='Bb', octave=3)
    s = Scale.create_from_root(root=Note(name='F', octave=3), scale_type=Scale.MAJOR_TYPE, alt=Note.FLAT, octave=NoteList.OCTAVE_TYPE_FROM_ROOT, order=Scale.ASCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='F', octave=3)
    assert s.notes[1] == Note(name='G', octave=3)
    assert s.notes[2] == Note(name='A', octave=3)
    assert s.notes[3] == Note(name='Bb', octave=3)
    assert s.notes[4] == Note(name='C', octave=4)
    assert s.notes[5] == Note(name='D', octave=4)
    assert s.notes[6] == Note(name='E', octave=4)
    s = Scale.create_from_root(root=Note(name='C', octave=3), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, octave=NoteList.OCTAVE_TYPE_FROM_ROOT, order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='Bb', octave=3)
    assert s.notes[1] == Note(name='Ab', octave=3)
    assert s.notes[2] == Note(name='G', octave=3)
    assert s.notes[3] == Note(name='F', octave=3)
    assert s.notes[4] == Note(name='Eb', octave=3)
    assert s.notes[5] == Note(name='D', octave=3)
    assert s.notes[6] == Note(name='C', octave=3)
    s = Scale.create_from_root(root=Note(name='F', octave=3), scale_type=Scale.MAJOR_TYPE, alt=Note.FLAT, octave=NoteList.OCTAVE_TYPE_FROM_ROOT, order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='E', octave=4)
    assert s.notes[1] == Note(name='D', octave=4)
    assert s.notes[2] == Note(name='C', octave=4)
    assert s.notes[3] == Note(name='Bb', octave=3)
    assert s.notes[4] == Note(name='A', octave=3)
    assert s.notes[5] == Note(name='G', octave=3)
    assert s.notes[6] == Note(name='F', octave=3)
def test_create_from_root_with_callable_octave():
    """A callable `octave` receives (root_octave, index, distance) and decides
    the octave of every non-root note; the root keeps its own octave."""
    s = Scale.create_from_root(root=Note(name='C', octave=3), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, octave=lambda root_octave, i, distance: root_octave + 1)
    assert s.notes[0] == Note(name='C', octave=3)
    assert s.notes[1] == Note(name='D', octave=4)
    assert s.notes[2] == Note(name='Eb', octave=4)
    assert s.notes[3] == Note(name='F', octave=4)
    assert s.notes[4] == Note(name='G', octave=4)
    assert s.notes[5] == Note(name='Ab', octave=4)
    assert s.notes[6] == Note(name='Bb', octave=4)
    s = Scale.create_from_root(root=Note(name='C', octave=3), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, order=Scale.DESCENDING_SCALE_TYPE, octave=lambda root_octave, i, distance: root_octave + 1)
    assert s.notes[0] == Note(name='Bb', octave=4)
    assert s.notes[1] == Note(name='Ab', octave=4)
    assert s.notes[2] == Note(name='G', octave=4)
    assert s.notes[3] == Note(name='F', octave=4)
    assert s.notes[4] == Note(name='Eb', octave=4)
    assert s.notes[5] == Note(name='D', octave=4)
    assert s.notes[6] == Note(name='C', octave=3)
def test_create_from_root_with_int_octave():
    """An integer `octave` pins every generated note to that octave,
    for both ascending and descending scales."""
    s = Scale.create_from_root(root=Note(name='C', octave=2), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, octave=2)
    assert s.notes[0] == Note(name='C', octave=2)
    assert s.notes[1] == Note(name='D', octave=2)
    assert s.notes[2] == Note(name='Eb', octave=2)
    assert s.notes[3] == Note(name='F', octave=2)
    assert s.notes[4] == Note(name='G', octave=2)
    assert s.notes[5] == Note(name='Ab', octave=2)
    # Was `assert s.notes[6] == Note(...) in s.notes`, which Python chains as
    # `(s.notes[6] == Note(...)) and (Note(...) in s.notes)` -- the trailing
    # membership test was a copy/paste leftover, not part of the intent.
    assert s.notes[6] == Note(name='Bb', octave=2)
    s = Scale.create_from_root(root=Note(name='C', octave=2), scale_type=Scale.MINOR_TYPE, alt=Note.FLAT, octave=2, order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='Bb', octave=2)
    assert s.notes[1] == Note(name='Ab', octave=2)
    assert s.notes[2] == Note(name='G', octave=2)
    assert s.notes[3] == Note(name='F', octave=2)
    assert s.notes[4] == Note(name='Eb', octave=2)
    assert s.notes[5] == Note(name='D', octave=2)
    assert s.notes[6] == Note(name='C', octave=2)
def test_create_from_root_with_none_octave():
    """octave=None leaves every note in the root's own octave."""
    s = Scale.create_from_root(root=Note(name='C', octave=4), scale_type=Scale.MAJOR_TYPE, alt=Note.FLAT, octave=None)
    assert s.notes[0] == Note(name='C', octave=4)
    assert s.notes[1] == Note(name='D', octave=4)
    assert s.notes[2] == Note(name='E', octave=4)
    assert s.notes[3] == Note(name='F', octave=4)
    assert s.notes[4] == Note(name='G', octave=4)
    assert s.notes[5] == Note(name='A', octave=4)
    assert s.notes[6] == Note(name='B', octave=4)
    s = Scale.create_from_root(root=Note(name='C'), order=Scale.DESCENDING_SCALE_TYPE)
    assert s.notes[0] == Note(name='B')
    assert s.notes[1] == Note(name='A')
    assert s.notes[2] == Note(name='G')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='E')
    assert s.notes[5] == Note(name='D')
    assert s.notes[6] == Note(name='C')
def test_add_note():
    """add_note inserts in sorted position; adding a duplicate raises when
    strict and is silently ignored with strict=False."""
    s = Scale(Note(name='C'), Note(name='D'))
    s.add_note(Note(name='E'))
    assert len(s.notes) == 3
    assert s.notes[2] == Note(name='E')
    s.add_note(Note(name='Bb', octave=3))
    assert s.notes[0] == Note(name='Bb', octave=3)
    assert s.notes[1] == Note(name='C')
    assert s.notes[2] == Note(name='D')
    assert s.notes[3] == Note(name='E')
    s = Scale.create_from_root(root=Note(name='C'))
    s.add_note(Note(name='C', octave=5))
    assert len(s.notes) == 8
    assert s.notes[7] == Note(name='C', octave=5)
    with pytest.raises(ScaleException) as exc:
        s.add_note(note=Note(name='C'))
    assert len(s.notes) == 8
    # "alredy" (sic) matches the library's actual message text
    assert 'Note C4 is alredy in Scale.' == str(exc.value)
    with pytest.raises(ScaleException) as exc:
        s.add_note(note=Note(name='E'))
    assert len(s.notes) == 8
    assert 'Note E4 is alredy in Scale.' == str(exc.value)
    s.add_note(note=Note(name='E'), strict=False)
    assert len(s.notes) == 8
    s.add_note(note=Note(name='C'), strict=False)
    assert len(s.notes) == 8
def test_remove_note():
    """remove_note drops the matching note and keeps the rest in order."""
    s = Scale.create_from_root(root=Note(name='C'))
    s.remove_note(note=Note(name='B'))
    assert len(s.notes) == 6
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='G')
    assert s.notes[5] == Note(name='A')
    s = Scale.create_from_root(root=Note(name='C'))
    s.remove_note(note=Note(name='A'))
    assert len(s.notes) == 6
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='G')
    assert s.notes[5] == Note(name='B')
def test_remove_note_strict():
    """In strict mode, removals that would empty or invalidate the scale
    raise ScaleException and leave the notes untouched."""
    s = Scale.create_from_root(root=Note(name='C'))
    with pytest.raises(ScaleException) as exc:
        s.remove_note(octave=4)
    assert 'Invalid Scale.' == str(exc.value)
    # the failed removal must not have mutated the scale
    assert len(s.notes) == 7
    assert s.notes[0] == Note(name='C')
    assert s.notes[1] == Note(name='D')
    assert s.notes[2] == Note(name='E')
    assert s.notes[3] == Note(name='F')
    assert s.notes[4] == Note(name='G')
    assert s.notes[5] == Note(name='A')
    assert s.notes[6] == Note(name='B')
    s = Scale(Note(name='C'))
    with pytest.raises(ScaleException) as exc:
        s.remove_note(name='C')
    assert 'Invalid Scale.' == str(exc.value)
    assert len(s.notes) == 1
    assert s.notes[0] == Note(name='C')
    # a non-Note argument surfaces as an AttributeError from the comparison
    with pytest.raises(AttributeError) as exc:
        Scale(Note(name='C'), Note(name='E'), Note(name='G')).remove_note(note='invalid')
    assert "'str' object has no attribute 'freq'" == str(exc.value)
    with pytest.raises(ScaleException) as exc:
        Scale(strict=False).remove_note(note='invalid')
    assert 'Invalid Scale.' == str(exc.value)
def test_remove_note_strict_false():
    """With strict=False, removals may empty the scale or match nothing
    without raising ScaleException."""
    s = Scale(Note(name='C'))
    s.remove_note(note=Note(name='C'), strict=False)
    assert len(s.notes) == 0
    c = Scale(Note(name='E'))
    c.remove_note(note=Note(name='E'), strict=False)
    assert len(c.notes) == 0
    try:
        Scale(Note(name='C'), Note(name='D'), Note(name='E')).remove_note(note=Note(name='A'), strict=False)
        Scale(Note(name='C'), Note(name='D'), Note(name='E')).remove_note(octave=3, strict=False)
        Scale(Note(name='C'), Note(name='D'), Note(name='E')).remove_note(octave=4, strict=False)
        Scale(Note(name='C'), Note(name='D'), Note(name='E')).remove_note(freq=12, strict=False)
    except ScaleException:
        pytest.fail('Unexpected ScaleException')
    # type errors still propagate even when strict is off
    with pytest.raises(AttributeError) as exc:
        Scale(Note(name='C'), Note(name='E'), Note(name='G')).remove_note(note='invalid', strict=False)
    assert "'str' object has no attribute 'freq'" == str(exc.value)
def test_str():
    """str(scale) joins the note names (with octaves) by commas, in order."""
    s = Scale.create_from_root(root=Note(name='C'))
    assert 'C4,D4,E4,F4,G4,A4,B4' == str(s)
    s = Scale.create_from_root(root=Note(name='C'), order=Scale.DESCENDING_SCALE_TYPE)
    assert 'B4,A4,G4,F4,E4,D4,C4' == str(s)
def test_repr():
    """repr(scale) must round-trip through eval(), preserving notes and `strict`."""
    s = eval(repr(Scale.create_from_root(root=Note(name='C'))))
    assert s == Scale.create_from_root(root=Note(name='C'))
    s = eval(repr(Scale.create_from_root(root=Note(name='F'))))
    assert s == Scale.create_from_root(root=Note(name='F'))
    s = eval(repr(Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F#'), Note(name='G#'), Note(name='A#'))))
    assert s == Scale(Note(name='C'), Note(name='D'), Note(name='E'), Note(name='F#'), Note(name='G#'), Note(name='A#'))
    s = eval(repr(Scale.create_from_root(root=Note(name='C'), strict=True)))
    assert s.strict is True
    s = eval(repr(Scale.create_from_root(root=Note(name='C'), strict=False)))
    assert s.strict is False
class NoteListException(Exception):
    """Exception type raised by NoteList operations."""
    pass
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,290 | iskyd/babs | refs/heads/master | /babs/rest.py | from __future__ import division
class Rest:
    """
    Interval of silence.

    A ``Rest`` is a value object characterised solely by its relative
    ``duration``; all six rich comparisons order rests by duration.

    Fixes over the original: comparisons against an object without a
    ``duration`` attribute now return ``NotImplemented`` (so ``==``/``!=``
    degrade gracefully instead of raising ``AttributeError``), and
    ``__hash__`` is defined so Rest stays hashable despite ``__eq__``
    (Python 3 would otherwise set ``__hash__`` to ``None``).
    """

    def __init__(self, duration=4/4):
        """
        :param duration: relative duration of the rest (defaults to a whole rest)
        """
        self.duration = duration

    def __eq__(self, other):
        # Duck-typed: anything exposing .duration is comparable; otherwise
        # let Python try the reflected operation instead of crashing.
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration == other.duration

    def __ne__(self, other):
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration != other.duration

    def __lt__(self, other):
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration < other.duration

    def __le__(self, other):
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration <= other.duration

    def __gt__(self, other):
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration > other.duration

    def __ge__(self, other):
        if not hasattr(other, 'duration'):
            return NotImplemented
        return self.duration >= other.duration

    def __hash__(self):
        # Equal rests must hash equally; hash by the defining attribute.
        return hash(self.duration)

    def __repr__(self):
        # Eval-able round-trip representation.
        return "Rest(duration={})".format(self.duration)
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,291 | iskyd/babs | refs/heads/master | /tests/test_note.py | from __future__ import division
import pytest
from babs import Note
from babs.exceptions import NoteException
def test_create_with_no_arguments():
    """A Note requires either a frequency or a name."""
    with pytest.raises(NoteException) as exc_info:
        Note()
    message = str(exc_info.value)
    assert message == "Can't create a note without frequency or name."
def test_create_with_invalid_note_name():
    """Unknown note names and non-string names are both rejected."""
    for bad_name in ('S', 3):
        with pytest.raises(NoteException) as exc_info:
            Note(name=bad_name)
        assert str(exc_info.value) == 'Invalid note.'
def test_create_with_invalid_freq():
    """A non-numeric frequency raises ValueError (not NoteException)."""
    with pytest.raises(ValueError):
        Note(freq='string')
def test_create_with_name():
    """Creating by name derives the frequency; octave defaults to 4."""
    expectations = [
        (dict(name='A'), ('A', 440, 4)),
        (dict(name='B'), ('B', 493.88, 4)),
        (dict(name='B', octave=3), ('B', 246.94, 3)),
        (dict(name='D#', octave=5), ('D#', 622.25, 5)),
    ]
    for kwargs, (name, freq, octave) in expectations:
        note = Note(**kwargs)
        assert note.name == name
        assert note.freq == freq
        assert note.octave == octave
def test_create_with_freq():
    """Creating by frequency derives the name and octave."""
    n = Note(freq=440)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
    n = Note(freq=246.94)
    assert n.name == 'B'
    assert n.freq == 246.94
    assert n.octave == 3
    # 466.16 Hz is enharmonic: spelled A# by default...
    n = Note(freq=466.16)
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
    # ...Bb when the flat alteration is requested...
    n = Note(freq=466.16, alt=Note.FLAT)
    assert n.name == 'Bb'
    assert n.freq == 466.16
    assert n.octave == 4
    # ...and A# again when sharp is requested explicitly.
    n = Note(freq=466.16, alt=Note.SHARP)
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
    # When freq is given it wins: the explicit name/octave args are ignored.
    n = Note(freq=440, name='B', octave=2)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
def test_eq():
    """Notes compare equal only when both frequency and duration match."""
    assert Note(freq=440) == Note(freq=440)
    assert Note(freq=440, duration=1/8) == Note(freq=440, duration=1/8)
    # Same pitch but different duration is not equal.
    assert not (Note(freq=440) == Note(freq=440, duration=1/8))
    # Different pitch is not equal.
    assert not (Note(freq=440) == Note(freq=880))
def test_neq():
    """!= is the exact complement of ==."""
    assert Note(freq=440) != Note(freq=880)
    # Differing duration alone makes notes unequal.
    assert Note(freq=440) != Note(freq=440, duration=1/8)
    assert not (Note(freq=440) != Note(freq=440))
    assert not (Note(freq=440, duration=1/8) != Note(freq=440, duration=1/8))
def test_change_alt():
    """Reassigning alt re-spells an enharmonic note without changing pitch."""
    n = Note(freq=466.16, alt=Note.SHARP)
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
    n.alt = Note.FLAT
    assert n.name == 'Bb'
    assert n.freq == 466.16
    assert n.octave == 4
    # NOTE(review): assigning an unrecognised alt value does not raise; the
    # note silently falls back to the sharp spelling ('A#', not the previous
    # 'Bb'). Looks intentional per this assertion — confirm against Note.alt.
    n.alt = 'random string'
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
def test_change_freq():
    """Reassigning freq recomputes the derived name and octave."""
    note = Note(freq=440)
    note.freq = 220
    assert (note.name, note.freq, note.octave) == ('A', 220, 3)
    note.freq = 622.25
    assert (note.name, note.freq, note.octave) == ('D#', 622.25, 5)
    # Non-numeric frequencies are rejected.
    with pytest.raises(ValueError):
        note.freq = 'string'
def test_pitch_shift():
    """pitch_shift moves a note by raw Hz, by half steps, or by octaves."""
    # Raw frequency offset in Hz (the default mode).
    n = Note(freq=440)
    n.pitch_shift(value=53.88)
    assert n.name == 'B'
    assert n.freq == 493.88
    assert n.octave == 4
    # Negative values shift downwards.
    n.pitch_shift(value=-53.88)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
    # Zero is a no-op.
    n.pitch_shift(value=0)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
    # Half step #
    n = Note(freq=440)
    n.pitch_shift(value=2, half_step=True)
    assert n.name == 'B'
    assert n.freq == 493.88
    assert n.octave == 4
    n.pitch_shift(value=-2, half_step=True)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
    n.pitch_shift(value=0, half_step=True)
    assert n.name == 'A'
    assert n.freq == 440
    assert n.octave == 4
    # The alt keyword picks the enharmonic spelling of the shifted note.
    n.alt = Note.SHARP
    n.pitch_shift(value=1, half_step=True, alt=Note.FLAT)
    assert n.name == 'Bb'
    assert n.freq == 466.16
    assert n.octave == 4
    # A zero shift can still re-spell via alt.
    n.pitch_shift(value=0, half_step=True, alt=Note.SHARP)
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
    # Octave #
    n = Note(freq=440)
    n.pitch_shift(value=1, octave=True)
    assert n.name == 'A'
    assert n.freq == 880
    assert n.octave == 5
    n.pitch_shift(value=-2, octave=True)
    assert n.name == 'A'
    assert n.freq == 220
    assert n.octave == 3
    n.pitch_shift(value=0, octave=True)
    assert n.name == 'A'
    assert n.freq == 220
    assert n.octave == 3
    n = Note(freq=466.16)
    n.pitch_shift(value=1, octave=True, alt=Note.FLAT)
    assert n.name == 'Bb'
    assert n.freq == 932.32
    assert n.octave == 5
    n.pitch_shift(value=-1, octave=True, alt=Note.SHARP)
    assert n.name == 'A#'
    assert n.freq == 466.16
    assert n.octave == 4
    # value is required and must be numeric in every mode.
    with pytest.raises(TypeError):
        Note(freq=440).pitch_shift()
    with pytest.raises(TypeError):
        Note(freq=440).pitch_shift(value='string')
    with pytest.raises(TypeError):
        Note(freq=440).pitch_shift(value='string', half_step=True)
    with pytest.raises(TypeError):
        Note(freq=440).pitch_shift(value='string', octave=True)
def test_str():
    """str() renders a note as its name followed by the octave."""
    a4 = Note(freq=440)
    assert str(a4) == 'A4'
    bb5 = Note(freq=932.32, alt=Note.FLAT)
    assert str(bb5) == 'Bb5'
def test_repr():
    """repr() is eval-able and round-trips every attribute."""
    n = eval(repr(Note(name='A')))
    assert n.freq == 440
    assert n.name == 'A'
    assert n.octave == 4
    assert n.duration == 4/4
    # Alteration and a non-default duration must survive the round-trip too.
    n = eval(repr(Note(freq=932.32, alt=Note.FLAT, duration=1/8)))
    assert n.freq == 932.32
    assert n.name == 'Bb'
    assert n.octave == 5
    assert n.duration == 1/8
def test_value():
    """duration defaults to a whole note (4/4), is mutable, and is settable
    at construction time."""
    note = Note(name='A')
    assert note.duration == 4/4
    note.duration = 1/8
    assert note.duration == 1/8
    sixteenth = Note(name='A', duration=1/16)
    assert sixteenth.duration == 1/16
def test_get_note_name_by_index():
    """Index 0 is C; indices wrap around the 12-note chromatic scale."""
    cases = [
        ((0,), {}, 'C'),
        ((1,), {}, 'C#'),
        ((1,), {'alt': Note.FLAT}, 'Db'),
        ((12,), {}, 'C'),   # full wrap
        ((26,), {}, 'D'),   # two wraps + 2
    ]
    for args, kwargs, expected in cases:
        assert Note.get_note_name_by_index(*args, **kwargs) == expected
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,292 | iskyd/babs | refs/heads/master | /tests/test_note_list.py | import pytest
from babs import NoteList, Note
from babs.exceptions import NoteListException
class Mock(NoteList):
    """Minimal concrete NoteList used to exercise base-class behaviour."""
    pass
def test_create():
    """NoteList takes Note positional args; strict=False skips validation."""
    m = Mock(Note(name='C'), Note(name='D'), Note(name='E'))
    assert Note(name='C') in m.notes
    assert Note(name='D') in m.notes
    assert Note(name='E') in m.notes
    assert len(m.notes) == 3
    # Duplicate notes are kept, not de-duplicated.
    m = Mock(Note(name='C'), Note(name='C'), Note(name='D'), Note(name='C'))
    assert len(m.notes) == 4
    assert Note(name='C') in m.notes
    assert Note(name='D') in m.notes
    # Non-Note arguments and an empty list are rejected when strict (default).
    # The error message uses the concrete subclass name ('Mock').
    with pytest.raises(NoteListException) as exc:
        Mock('invalid')
    assert 'Invalid Mock.' == str(exc.value)
    with pytest.raises(NoteListException) as exc:
        Mock(Note(name='C'), 'invalid')
    assert 'Invalid Mock.' == str(exc.value)
    with pytest.raises(NoteListException) as exc:
        Mock()
    assert 'Invalid Mock.' == str(exc.value)
    # strict=False accepts anything without raising.
    try:
        Mock(Note(name='C'), 'invalid', strict=False)
        Mock('invalid', strict=False)
        Mock(strict=False)
    except NoteListException:
        pytest.fail('Unexpected NoteListException')
def test_order_notes():
    """NoteList preserves insertion order, even when strict.

    NOTE(review): contrast with the Chord version of this test, where
    strict chords are sorted by pitch — here the given order is kept.
    """
    m = Mock(Note(name='C'), Note(name='D'))
    assert m.notes[0] == Note(name='C')
    assert m.notes[1] == Note(name='D')
    # A lower-pitched note later in the args stays later in the list.
    m = Mock(Note(name='E'), Note(name='D', octave=3), strict=True)
    assert m.notes[0] == Note(name='E')
    assert m.notes[1] == Note(name='D', octave=3)
    m = Mock(Note(name='E'), Note(name='D', octave=3), Note(name='E'), strict=True)
    assert m.notes[0] == Note(name='E')
    assert m.notes[1] == Note(name='D', octave=3)
    assert m.notes[2] == Note(name='E')
def test_eq():
assert \
Mock(Note(name='C'), Note(name='D'), Note(name='C')) == \
Mock(Note(name='C'), Note(name='D'), Note(name='C'))
assert \
Mock(Note(name='C'), Note(name='D'), Note(name='E')) == \
Mock(Note(name='C'), Note(name='D'), Note(name='E'))
assert not \
Mock(Note(name='C'), Note(name='D'), Note(name='C')) == \
Mock(Note(name='C'), Note(name='C'), Note(name='D'))
assert not \
Mock(Note(name='C'), Note(name='D'), Note(name='C')) == \
Mock(Note(name='C', octave=5), Note(name='D'), Note(name='C'))
def test_neq():
assert \
Mock(Note(name='C'), Note(name='D'), Note(name='D')) != \
Mock(Note(name='C'), Note(name='D'), Note(name='C'))
assert \
Mock(Note(name='C'), Note(name='E'), Note(name='G')) != \
Mock(Note(name='C'), Note(name='D'), Note(name='E'))
assert \
Mock(Note(name='C'), Note(name='E'), Note(name='G')) != \
Mock(Note(name='C', octave=5), Note(name='E'), Note(name='G'))
assert not \
Mock(Note(name='C'), Note(name='D'), Note(name='C')) != \
Mock(Note(name='C'), Note(name='D'), Note(name='C'))
def test_str():
    """str() joins the notes' string forms (e.g. 'C4') with commas."""
    m = Mock(Note(name='C'), Note(name='E'), Note(name='G'))
    assert 'C4' in str(m).split(',')
    assert 'E4' in str(m).split(',')
    assert 'G4' in str(m).split(',')
    # A newly added note appears in the string form as well.
    m.add_note(note=Note(name='C', octave=5))
    assert 'C4' in str(m).split(',')
    assert 'E4' in str(m).split(',')
    assert 'G4' in str(m).split(',')
    assert 'C5' in str(m).split(',')
def test_repr():
m = eval(repr(Mock(Note(name='C'), Note(name='D'), Note(name='E'))))
assert m == Mock(Note(name='C'), Note(name='D'), Note(name='E'))
assert m.notes[0] == Note(name='C')
assert m.notes[1] == Note(name='D')
assert m.notes[2] == Note(name='E')
m = eval(repr(Mock(Note(name='A'), Note(name='Bb'), Note(name='C', octave=3))))
assert m == Mock(Note(name='A'), Note(name='Bb'), Note(name='C', octave=3))
assert m.notes[0] == Note(name='A')
assert m.notes[1] == Note(name='Bb')
assert m.notes[2] == Note(name='C', octave=3)
m = eval(repr(Mock(Note(name='C'), Note(name='G'), Note(name='C'), Note(name='C', octave=5))))
assert m == Mock(Note(name='C'), Note(name='G'), Note(name='C'), Note(name='C', octave=5))
assert m.notes[0] == Note(name='C')
assert m.notes[1] == Note(name='G')
assert m.notes[2] == Note(name='C')
assert m.notes[3] == Note(name='C', octave=5)
m = eval(repr(Mock(Note(name='C'), Note(name='D'), strict=True)))
assert m.strict is True
m = eval(repr(Mock(Note(name='C'), Note(name='D'), strict=False)))
assert m.strict is False
def test_is_valid():
    """A NoteList is valid when it holds at least one actual Note."""
    assert Mock(strict=False).is_valid() is False
    # A single Note is enough for a plain NoteList (unlike Chord).
    assert Mock(Note(name='C'), strict=False).is_valid() is True
    assert Mock('a', 'b', strict=False).is_valid() is False
    assert Mock(Note(name='C'), Note(name='E')).is_valid() is True
    assert Mock(Note(name='C'), Note(name='E'), strict=False).is_valid() is True
    # NOTE(review): exact duplicate of the first assertion.
    assert Mock(strict=False).is_valid() is False
def test_add_note():
    """add_note appends notes, allowing duplicates and other octaves."""
    note_list = Mock(Note(name='C'), Note(name='D'), Note(name='E'))
    note_list.add_note(note=Note(name='G'))
    assert len(note_list.notes) == 4
    assert Note(name='G') in note_list.notes
    # Adding an already-present note keeps both copies.
    note_list.add_note(note=Note(name='C'))
    assert len(note_list.notes) == 5
    note_list.add_note(note=Note(name='A', octave=5))
    assert len(note_list.notes) == 6
    assert Note(name='A', octave=5) in note_list.notes
def test_add_invalid_note():
    """add_note rejects non-Note values unless strict=False."""
    m = Mock(Note(name='C'), Note(name='D'), Note(name='E'))
    with pytest.raises(NoteListException) as exc:
        m.add_note(note=1)
    assert 'Invalid note given.' == str(exc.value)
    with pytest.raises(NoteListException) as exc:
        m.add_note(note='invalid')
    assert 'Invalid note given.' == str(exc.value)
    # With strict=False the invalid value is still appended.
    m.add_note(note='invalid', strict=False)
    assert len(m.notes) == 4
    # NOTE(review): this block looks wrong — the membership test itself raises
    # AttributeError (a str has no 'freq' to compare), so the assert inside the
    # with-block never evaluates meaningfully; it was probably meant to check
    # the exception message like test_remove_note_by_note does. TODO confirm.
    with pytest.raises(AttributeError) as exc:
        assert "'str' object has no attribute 'freq'" in m.notes
def test_remove_note_by_note():
m = Mock(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='C', octave=5))
m.remove_note(note=Note(name='C', octave=5))
assert len(m.notes) == 3
assert Note(name='C') in m.notes
assert Note(name='G') in m.notes
assert Note(name='E') in m.notes
assert Note(name='C', octave=5) not in m.notes
m.remove_note(note=Note(name='E'))
assert len(m.notes) == 2
assert Note(name='G') in m.notes
assert Note(name='C') in m.notes
assert Note(name='E') not in m.notes
with pytest.raises(AttributeError) as exc:
m.remove_note(note='a')
assert "'str' object has no attribute 'freq'" == str(exc.value)
m = Mock(Note(name='C'))
with pytest.raises(NoteListException) as exc:
m.remove_note(note=Note(name='C'))
assert 'Invalid Mock.' == str(exc.value)
assert len(m.notes) == 1
assert Note(name='C') in m.notes
def test_remove_note_by_note_not_strict():
    """remove_note(..., strict=False) never raises: missing notes are a no-op
    and the list is allowed to become invalid or empty.

    Review fixes applied to the original:
    - added the missing ``test_`` prefix (pytest silently skipped this test);
    - ``Mock('a', 'b')`` now passes ``strict=False`` (strict construction with
      non-Note values raises NoteListException, see test_create);
    - the final length check used ``len(m)`` instead of ``len(m.notes)``;
    - the bare ``except:`` is narrowed to ``NoteListException``, matching
      test_create's style.
    """
    m = Mock('a', 'b', strict=False)
    m.remove_note(note='a', strict=False)
    assert len(m.notes) == 1
    assert 'b' in m.notes
    assert 'a' not in m.notes

    m = Mock(Note(name='C'), Note(name='E'))
    m.remove_note(note=Note(name='C'), strict=False)
    assert len(m.notes) == 1
    assert Note(name='E') in m.notes
    # Removing a note that is not in the list is a no-op.
    m.remove_note(name='G', strict=False)
    assert len(m.notes) == 1
    assert Note(name='E') in m.notes

    try:
        m = Mock(Note(name='C'))
        m.remove_note(note=Note(name='C'), strict=False)
    except NoteListException:
        pytest.fail('Unexpected NoteListException')
    assert len(m.notes) == 0
def test_remove_note_by_freq():
m = Mock(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='C', octave=5))
m.remove_note(freq=523.25) # C5
assert len(m.notes) == 3
assert Note(name='C') in m.notes
assert Note(name='E') in m.notes
assert Note(name='G') in m.notes
assert Note(name='C', octave=5) not in m.notes
m.remove_note(freq=329.63)
assert len(m.notes) == 2
assert Note(name='C') in m.notes
assert Note(name='G') in m.notes
assert Note(name='E') not in m.notes
assert Note(name='C', octave=5) not in m.notes
m = Mock('a', 'b', strict=False)
with pytest.raises(AttributeError) as exc:
m.remove_note(freq=261.63)
assert len(m.notes) == 2
assert "'str' object has no attribute 'freq'" == str(exc.value)
m = Mock(Note(name='A'))
with pytest.raises(NoteListException) as exc:
m.remove_note(freq=440)
assert 'Invalid Mock.' == str(exc.value)
assert len(m.notes) == 1
assert Note(name='A') in m.notes
def test_remove_note_by_freq_not_strict():
m = Mock(Note(name='C'), Note(name='E'))
m.remove_note(freq=261.63, strict=False)
assert len(m.notes) == 1
assert Note(name='E') in m.notes
m.remove_note(freq=440, strict=False)
assert len(m.notes) == 1
assert Note(name='E') in m.notes
try:
m = Mock(Note(freq=440))
m.remove_note(freq=440, strict=False)
except:
pytest.fail('Unexpected NoteListException')
assert len(m.notes) == 0
def test_remove_note_by_name():
    """remove_note(name=...) removes every note with that name, regardless
    of octave (both C4 and C5 disappear below)."""
    m = Mock(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='B'), Note(name='C', octave=5))
    m.remove_note(name='C')
    assert len(m.notes) == 3
    assert Note(name='E') in m.notes
    assert Note(name='G') in m.notes
    assert Note(name='B') in m.notes
    assert Note(name='C') not in m.notes
    m.remove_note(name='B')
    assert len(m.notes) == 2
    assert Note(name='E') in m.notes
    assert Note(name='G') in m.notes
    assert Note(name='B') not in m.notes
    # Strict removal that would empty the list raises and leaves it intact.
    m = Mock(Note(name='A'))
    with pytest.raises(NoteListException) as exc:
        m.remove_note(name='A')
    assert 'Invalid Mock.' == str(exc.value)
    assert len(m.notes) == 1
    assert Note(name='A') in m.notes
def test_remove_note_by_name_not_strict():
    """remove_note(name=..., strict=False) tolerates misses and empty results."""
    m = Mock(Note(name='C'), Note(name='E'))
    m.remove_note(name='C', strict=False)
    assert len(m.notes) == 1
    assert Note(name='E') in m.notes
    # Removing an absent name is a no-op.
    m.remove_note(name='G', strict=False)
    assert len(m.notes) == 1
    assert Note(name='E') in m.notes
    # NOTE(review): bare except also swallows unrelated errors; consider
    # narrowing to NoteListException as test_create does.
    try:
        m = Mock(Note(name='A'))
        m.remove_note(name='A', strict=False)
    except:
        pytest.fail('Unexpected NoteListException')
    assert len(m.notes) == 0
def test_remove_note_by_octave():
m = Mock(Note(name='B', octave=3), Note(name='C'), Note(name='E'), Note(name='G'), Note(name='C', octave=5))
m.remove_note(octave=5)
assert len(m.notes) == 4
assert Note(name='C') in m.notes
assert Note(name='E') in m.notes
assert Note(name='G') in m.notes
assert Note(name='B', octave=3) in m.notes
assert Note(name='C', octave=5) not in m.notes
m.remove_note(octave=3)
assert len(m.notes) == 3
assert Note(name='C') in m.notes
assert Note(name='E') in m.notes
assert Note(name='G') in m.notes
assert Note(name='B', octave=3) not in m.notes
m = Mock(Note(name='A'), Note(name='B'))
with pytest.raises(NoteListException) as exc:
m.remove_note(octave=4)
assert 'Invalid Mock.' == str(exc.value)
assert len(m.notes) == 2
assert Note(name='A') in m.notes
assert Note(name='B') in m.notes
def test_remove_note_by_octave_not_strict():
m = Mock(Note(name='C'), Note(name='C', octave=5))
m.remove_note(octave=4, strict=False)
assert len(m.notes) == 1
assert Note(name='C', octave=5) in m.notes
m.remove_note(octave=3, strict=False)
assert len(m.notes) == 1
assert Note(name='C', octave=5) in m.notes
try:
m = Mock(Note(name='A'), Note(name='B'))
m.remove_note(octave=4, strict=False)
except:
pytest.fail('Unexpected NoteListException')
assert len(m.notes) == 0
def test_remove_note_none():
    """Calling remove_note with no criteria leaves the list untouched."""
    for notes in ([Note(name='C'), Note(name='C', octave=5)],
                  [Note(name='C'), Note(name='D'), Note(name='E')]):
        note_list = Mock(*notes)
        note_list.remove_note()
        assert len(note_list.notes) == len(notes)
def test_get_notes_from_root():
notes = Mock.get_notes_from_root(root=Note(name='C'), note_list_type=[3, 5, 7, 10])
assert Note(name='C') in notes
assert Note(name='Eb') in notes
assert Note(name='F') in notes
assert Note(name='G') in notes
assert Note(name='Bb') in notes
notes = Mock.get_notes_from_root(root=Note(name='C'), note_list_type=[2, 4, 7, 9])
assert Note(name='C') in notes
assert Note(name='D') in notes
assert Note(name='E') in notes
assert Note(name='G') in notes
assert Note(name='A') in notes
def test_get_notes_from_root_with_none_note_list_type():
    """Omitting note_list_type raises, regardless of other keyword args."""
    for extra_kwargs in ({}, {'alt': Note.FLAT, 'octave': 4}):
        with pytest.raises(NoteListException) as exc_info:
            Mock.get_notes_from_root(root=Note(name='C'), **extra_kwargs)
        assert str(exc_info.value) == 'Invalid note list type'
def test_get_notes_from_root_with_alt():
m = Mock(*Mock.get_notes_from_root(root=Note(name='C'), note_list_type=[3, 5, 7, 10], alt=Note.FLAT))
assert 'Eb4' in str(m).split(',')
assert 'Bb4' in str(m).split(',')
m = Mock(*Mock.get_notes_from_root(root=Note(name='C'), note_list_type=[3, 5, 7, 10], alt=Note.SHARP))
assert 'D#4' in str(m).split(',')
assert 'A#4' in str(m).split(',')
def test_get_notes_from_root_with_from_root_octave():
notes = Mock.get_notes_from_root(root=Note(name='C', octave=4), note_list_type=[3, 5, 7, 10], octave='from_root')
assert Note(name='C', octave=4) in notes
assert Note(name='Eb', octave=4) in notes
assert Note(name='F', octave=4) in notes
assert Note(name='G', octave=4) in notes
assert Note(name='Bb', octave=4) in notes
notes = Mock.get_notes_from_root(root=Note(name='F', octave=3), note_list_type=[3, 5, 7, 10], octave='from_root')
assert Note(name='F', octave=3) in notes
assert Note(name='Ab', octave=3) in notes
assert Note(name='Bb', octave=3) in notes
assert Note(name='C', octave=4) in notes
assert Note(name='Eb', octave=4) in notes
def test_get_notes_from_root_with_int_octave():
notes = Mock.get_notes_from_root(root=Note(name='C', octave=4), note_list_type=[3, 5, 7, 10], octave=4)
assert Note(name='C', octave=4) in notes
assert Note(name='Eb', octave=4) in notes
assert Note(name='F', octave=4) in notes
assert Note(name='G', octave=4) in notes
assert Note(name='Bb', octave=4) in notes
notes = Mock.get_notes_from_root(root=Note(name='C', octave=3), note_list_type=[3, 5, 7, 10], octave=4)
assert Note(name='C', octave=3) in notes
assert Note(name='Eb', octave=4) in notes
assert Note(name='F', octave=4) in notes
assert Note(name='G', octave=4) in notes
assert Note(name='Bb', octave=4) in notes
def test_get_notes_from_root_with_callable_octave():
notes = Mock.get_notes_from_root(root=Note(name='C', octave=3), note_list_type=[3, 7], octave=lambda root_octave, i, distance: root_octave + 1)
assert Note(name='C', octave=3) in notes
assert Note(name='Eb', octave=4) in notes
assert Note(name='G', octave=4) in notes
notes = Mock.get_notes_from_root(root=Note(name='C', octave=3), note_list_type=[3, 7], octave=lambda root_octave, i, distance: i + 1)
assert Note(name='C', octave=3) in notes
assert Note(name='Eb', octave=1) in notes
assert Note(name='G', octave=2) in notes
with pytest.raises(TypeError):
notes = Mock.get_notes_from_root(root=Note(name='C', octave=3), note_list_type=[3, 7], octave=lambda: 'invalid')
def test_get_notes_from_root_with_invalid_octave():
notes = Mock.get_notes_from_root(root=Note(name='C'), note_list_type=[3, 5, 7, 10], octave='invalid')
assert Note(name='C', octave=4) in notes
assert Note(name='Eb', octave=4) in notes
assert Note(name='F', octave=4) in notes
assert Note(name='G', octave=4) in notes
assert Note(name='Bb', octave=4) in notes
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,293 | iskyd/babs | refs/heads/master | /babs/exceptions/scale_exception.py | from babs.exceptions import NoteException
class ScaleException(NoteException):
    """Exception type for Scale-related errors; subclasses NoteException."""
    pass
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,294 | iskyd/babs | refs/heads/master | /tests/test_chord.py | import pytest
from babs import Note, Chord
from babs.exceptions import ChordException
def test_create():
c = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
assert Note(name='C') in c.notes
assert Note(name='E') in c.notes
assert Note(name='G') in c.notes
assert len(c.notes) == 3
c = Chord(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='C'))
assert Note(name='C') in c.notes
assert Note(name='E') in c.notes
assert Note(name='G') in c.notes
assert len(c.notes) == 4
c = Chord(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='C', octave=5))
assert Note(name='C') in c.notes
assert Note(name='E') in c.notes
assert Note(name='G') in c.notes
assert Note(name='C', octave=5) in c.notes
assert len(c.notes) == 4
with pytest.raises(ChordException) as exc:
Chord()
assert 'Invalid Chord.' == str(exc.value)
with pytest.raises(ChordException) as exc:
Chord(Note(name='C'))
assert 'Invalid Chord.' == str(exc.value)
with pytest.raises(ChordException) as exc:
Chord('invalid')
assert 'Invalid Chord.' == str(exc.value)
with pytest.raises(ChordException) as exc:
Chord('invalid', 'invalid')
assert 'Invalid Chord.' == str(exc.value)
def test_order_notes():
c = Chord(Note(name='C'), Note(name='E'))
assert c.notes[0] == Note(name='C')
assert c.notes[1] == Note(name='E')
c = Chord(Note(name='C'), Note(name='E', octave=3), strict=True)
assert c.notes[0] == Note(name='E', octave=3)
assert c.notes[1] == Note(name='C')
c = Chord(Note(name='C'), Note(name='E'), Note(name='G'), strict=True)
assert c.notes[0] == Note(name='C')
assert c.notes[1] == Note(name='E')
assert c.notes[2] == Note(name='G')
c = Chord(Note(name='A', octave=3), Note(name='F'), Note(name='C', octave=3), Note(name='E', octave=2), Note(name='E'))
assert c.notes[0] == Note(name='E', octave=2)
assert c.notes[1] == Note(name='C', octave=3)
assert c.notes[2] == Note(name='A', octave=3)
assert c.notes[3] == Note(name='E')
assert c.notes[4] == Note(name='F')
def test_create_not_strict():
    """strict=False lets otherwise-invalid chords (empty, single-note,
    non-Note members) be constructed without raising."""
    try:
        assert len(Chord(strict=False).notes) == 0
        assert len(Chord(Note(name='C'), strict=False).notes) == 1
        # Arbitrary non-Note values are stored as-is.
        c = Chord('a', 'b', strict=False)
        assert len(c.notes) == 2
        assert 'a' in c.notes
        assert 'b' in c.notes
    except ChordException:
        pytest.fail("Unexpected ChordException")
def test_set_notes_attribute():
    """Chord.notes is a read-only property; direct assignment fails.

    NOTE(review): the exact message "can't set attribute" is CPython
    version-dependent (3.11+ says "property 'notes' ... has no setter");
    this assertion may need loosening on newer interpreters. TODO confirm.
    """
    c = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
    with pytest.raises(AttributeError) as exc:
        c.notes = [Note(name='B')]
    assert "can't set attribute" == str(exc.value)
def test_is_valid():
    """A Chord needs at least two real Notes to be valid."""
    assert Chord(strict=False).is_valid() is False
    # Unlike NoteList.is_valid, a single note is NOT enough for a chord.
    assert Chord(Note(name='C'), strict=False).is_valid() is False
    assert Chord('a', 'b', strict=False).is_valid() is False
    assert Chord(Note(name='C'), Note(name='E')).is_valid() is True
    # NOTE(review): the next three lines duplicate assertions made above.
    c = Chord(Note(name='C'), Note(name='E'))
    assert c.is_valid() is True
    assert Chord(Note(name='C'), strict=False).is_valid() is False
def test_create_from_root():
c = Chord.create_from_root(root=Note(name='C'))
assert len(c.notes) == 3
assert Note(name='C') in c.notes
assert Note(name='E') in c.notes
assert Note(name='G') in c.notes
c = Chord.create_from_root(root=Note(name='G'))
assert len(c.notes) == 3
assert Note(name='G') in c.notes
assert Note(name='B') in c.notes
assert Note(name='D') in c.notes
c = Chord.create_from_root(root=Note(name='G', octave=3))
assert Note(name='G', octave=3) in c.notes
assert Note(name='B', octave=3) in c.notes
assert Note(name='D', octave=3) in c.notes
assert Chord.create_from_root(root=Note(name='C')).strict is True
assert Chord.create_from_root(root=Note(name='C'), strict=True).strict is True
assert Chord.create_from_root(root=Note(name='C'), strict=False).strict is False
def test_create_from_root_with_chord_type():
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MAJOR_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MAJOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='B'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MINOR_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='G'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MINOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='G'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.DOMINANT_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MINOR_MAJOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='G'), Note(name='B'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.HALF_DIMINISHED_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='Gb'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.DIMINISHED_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='Gb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.DIMINISHED_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='Gb'), Note(name='A'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.AUGMENTED_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G#'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.AUGMENTED_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G#'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.AUGMENTED_MAJOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G#'), Note(name='B'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MAJOR_SIXTH_TYPE)
assert c == Chord(Note(name='C'), Note(name='E'), Note(name='G'), Note(name='A'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MINOR_SIXTH_TYPE)
assert c == Chord(Note(name='C'), Note(name='Eb'), Note(name='G'), Note(name='A'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS4_TYPE)
assert c == Chord(Note(name='C'), Note(name='F'), Note(name='G'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS4_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='F'), Note(name='G'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS4_MAJOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='F'), Note(name='G'), Note(name='B'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS2_TYPE)
assert c == Chord(Note(name='C'), Note(name='D'), Note(name='G'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS2_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='D'), Note(name='G'), Note(name='Bb'))
c = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.SUS2_MAJOR_SEVEN_TYPE)
assert c == Chord(Note(name='C'), Note(name='D'), Note(name='G'), Note(name='B'))
def test_create_from_root_with_alt():
    """The alt keyword chooses sharp or flat spellings for derived notes."""
    # Default spelling of the minor third above C is sharp (D#).
    c = Chord.create_from_root(root=Note(name='C', alt=Note.SHARP), chord_type=Chord.MINOR_TYPE)
    assert 'D#4' in str(c).split(',')
    # alt=FLAT re-spells it as Eb.
    c = Chord.create_from_root(root=Note(name='C', alt=Note.FLAT), chord_type=Chord.MINOR_TYPE, alt=Note.FLAT)
    assert 'Eb4' in str(c).split(',')
def test_create_from_root_with_from_root_octave():
c = Chord.create_from_root(root=Note(name='G', octave=3), octave='from_root')
assert Note(name='G', octave=3) in c.notes
assert Note(name='B', octave=3) in c.notes
assert Note(name='D', octave=4) in c.notes
c = Chord.create_from_root(root=Note(name='F', octave=4), octave='from_root')
assert Note(name='F', octave=4) in c.notes
assert Note(name='A', octave=4) in c.notes
assert Note(name='C', octave=5) in c.notes
def test_create_from_root_with_int_octave():
    """An integer octave is applied to every generated note; the root keeps its own."""
    chord = Chord.create_from_root(root=Note(name='G', octave=3), octave=4)
    for wanted in (Note(name='G', octave=3), Note(name='B', octave=4), Note(name='D', octave=4)):
        assert wanted in chord.notes

    chord = Chord.create_from_root(root=Note(name='F', octave=4), octave=3)
    for wanted in (Note(name='F', octave=4), Note(name='A', octave=3), Note(name='C', octave=3)):
        assert wanted in chord.notes
def test_create_from_root_with_callable_octave():
    """A callable octave receives (root_octave, i, distance) and returns each note's octave."""
    chord = Chord.create_from_root(root=Note(name='G', octave=3), octave=lambda root_octave, i, distance: i + 1)
    for wanted in (Note(name='G', octave=3), Note(name='B', octave=1), Note(name='D', octave=2)):
        assert wanted in chord.notes

    chord = Chord.create_from_root(root=Note(name='F', octave=4), octave=lambda root_octave, i, distance: root_octave + i + 1)
    for wanted in (Note(name='F', octave=4), Note(name='A', octave=5), Note(name='C', octave=6)):
        assert wanted in chord.notes

    # a callable with the wrong arity must surface as a TypeError
    with pytest.raises(TypeError):
        Chord.create_from_root(root=Note(name='F', octave=4), octave=lambda: 'invalid')
def test_create_from_root_with_invalid_octave():
    """An unrecognized octave spec still yields a chord with octave-4 notes."""
    chord = Chord.create_from_root(root=Note(name='C'), octave='invalid')
    for wanted in (Note(name='C', octave=4), Note(name='E', octave=4), Note(name='G', octave=4)):
        assert wanted in chord.notes
def test_add_note_order():
    """Added notes must land in ascending-frequency position within the chord."""
    # Bb4 is above G4, so it goes last
    chord = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
    chord.add_note(note=Note(name='Bb'))
    wanted = [Note(name='C'), Note(name='E'), Note(name='G'), Note(name='Bb')]
    assert len(chord.notes) == len(wanted)
    for got, exp in zip(chord.notes, wanted):
        assert got == exp

    # C3 is below everything, so it goes first
    chord = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
    chord.add_note(note=Note(name='C', octave=3))
    wanted = [Note(name='C', octave=3), Note(name='C'), Note(name='E'), Note(name='G')]
    assert len(chord.notes) == len(wanted)
    for got, exp in zip(chord.notes, wanted):
        assert got == exp

    # successive low additions stay sorted among themselves
    chord = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
    chord.add_note(note=Note(name='C', octave=3))
    chord.add_note(note=Note(name='G', octave=3))
    wanted = [Note(name='C', octave=3), Note(name='G', octave=3), Note(name='C'), Note(name='E'), Note(name='G')]
    assert len(chord.notes) == len(wanted)
    for got, exp in zip(chord.notes, wanted):
        assert got == exp
def test_add_note_strict():
    """With strict on (the default), adding a non-note raises ChordException."""
    with pytest.raises(ChordException) as exc:
        Chord(Note(name='C'), Note(name='E'), Note(name='G')).add_note(note='invalid')
    assert str(exc.value) == 'Invalid note given.'

    # even an already-invalid (empty, non-strict) chord rejects a non-note
    with pytest.raises(ChordException) as exc:
        Chord(strict=False).add_note(note='invalid')
    assert str(exc.value) == 'Invalid note given.'
def test_add_note_strict_false():
    """With strict=False, adding an invalid note is silently ignored."""
    for chord in (Chord(Note(name='C'), Note(name='E'), Note(name='G')), Chord(strict=False)):
        try:
            chord.add_note(note='invalid', strict=False)
        except ChordException:
            pytest.fail('Unexpected ChordException')
def test_remove_note():
    """Removing the third from a triad leaves root and fifth, still ordered."""
    wanted = [Note(name='C'), Note(name='G')]

    major = Chord.create_from_root(root=Note(name='C'))
    major.remove_note(note=Note(name='E'))
    assert len(major.notes) == len(wanted)
    for got, exp in zip(major.notes, wanted):
        assert got == exp

    minor = Chord.create_from_root(root=Note(name='C'), chord_type=Chord.MINOR_TYPE)
    minor.remove_note(note=Note(name='Eb'))
    assert len(minor.notes) == len(wanted)
    for got, exp in zip(minor.notes, wanted):
        assert got == exp
def test_remove_note_strict():
    """Strict removal that would leave fewer than two notes raises and keeps the chord intact."""
    cases = (
        (Note(name='C'), Note(name='D'), Note(name='D')),
        (Note(name='C'), Note(name='E'), Note(name='C')),
    )
    for low, high, target in cases:
        chord = Chord(low, high)
        with pytest.raises(ChordException) as exc:
            chord.remove_note(note=target)
        # the chord must be unchanged after the failed removal
        assert len(chord.notes) == 2
        assert chord.notes[0] == low
        assert chord.notes[1] == high
        assert str(exc.value) == 'Invalid Chord.'

    # a non-note argument blows up while its freq attribute is read
    with pytest.raises(AttributeError) as exc:
        Chord(Note(name='C'), Note(name='E'), Note(name='G')).remove_note(note='invalid')
    assert str(exc.value) == "'str' object has no attribute 'freq'"

    # an empty (non-strict) chord stays invalid, so strict removal raises
    with pytest.raises(ChordException) as exc:
        Chord(strict=False).remove_note(note='invalid')
    assert str(exc.value) == 'Invalid Chord.'
def test_remove_note_strict_false():
    """With strict=False a removal may leave an invalid chord, and misses are ignored."""
    chord = Chord(Note(name='C'), Note(name='D'))
    chord.remove_note(note=Note(name='D'), strict=False)
    assert len(chord.notes) == 1
    assert chord.notes[0] == Note(name='C')

    chord = Chord(Note(name='C'), Note(name='E'))
    chord.remove_note(note=Note(name='C'), strict=False)
    assert len(chord.notes) == 1
    assert chord.notes[0] == Note(name='E')

    # no-match (or chord-invalidating) selectors must not raise when strict is off
    selectors = ({'note': Note(name='A')}, {'octave': 3}, {'octave': 4}, {'freq': 12})
    try:
        for selector in selectors:
            Chord(Note(name='C'), Note(name='E'), Note(name='G')).remove_note(strict=False, **selector)
    except ChordException:
        pytest.fail('Unexpected ChordException')

    # a non-note argument still fails with AttributeError, even when strict is off
    with pytest.raises(AttributeError) as exc:
        Chord(Note(name='C'), Note(name='E'), Note(name='G')).remove_note(note='invalid', strict=False)
    assert str(exc.value) == "'str' object has no attribute 'freq'"
def test_str():
    """str() renders the notes as a comma-separated, frequency-ordered list."""
    chord = Chord(Note(name='C'), Note(name='E'), Note(name='G'))
    assert str(chord) == 'C4,E4,G4'
    chord.add_note(note=Note(name='C', octave=5))
    assert str(chord) == 'C4,E4,G4,C5'
def test_repr():
    """repr() must round-trip through eval into an equal chord, preserving strict."""
    originals = (
        Chord(Note(name='C'), Note(name='E'), Note(name='G')),
        Chord(Note(name='A', octave=3), Note(name='C'), Note(name='E')),
        Chord(Note(name='C'), Note(name='G'), Note(name='C'), Note(name='C', octave=5)),
    )
    for original in originals:
        assert eval(repr(original)) == original

    assert eval(repr(Chord(Note(name='C'), Note(name='E'), strict=True))).strict is True
    assert eval(repr(Chord(Note(name='C'), Note(name='E'), strict=False))).strict is False
76,295 | iskyd/babs | refs/heads/master | /babs/note.py | from __future__ import division
import math
from babs.exceptions import NoteException
class Note(object):
    """
    Musical note: the pitch and the duration of a sound.

    A note may be built either from a frequency (name/octave are derived)
    or from a name plus octave (frequency is derived). Tuning is equal
    temperament with A4 = 440 Hz.
    """

    A_FREQUENCY = 440
    A_DEFAULT_OCTAVE = 4
    NOTES = ['C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab', 'A', 'A#/Bb', 'B']
    # frequency ratio of one semitone in equal temperament
    HALF_STEP_INTERVAL = 2 ** (1 / 12)

    FLAT = 'flat'
    SHARP = 'sharp'

    def __init__(self, freq=None, name=None, octave=4, alt=None, duration=4/4):
        """
        :param freq: frequency in Hz; if None it is derived from name/octave
        :param name: name of the note (e.g. 'C', 'D#', 'Eb'); if None it is derived from freq
        :param octave: note's position on a standard 88-key piano keyboard
        :param alt: note's alteration, could be sharp or flat. used to choose name (e.g D# or Eb)
        :param duration: relative duration of the note
        :raises NoteException: if neither freq nor name is given, or name is not a valid note name
        """
        self._freq = freq
        self._name = name
        self._octave = octave
        self._alt = alt
        self.duration = duration

        if self._freq is None and self._name is None:
            raise NoteException("Can't create a note without frequency or name.")

        # get_note_index() returns 0 for 'C', so the identity check against
        # False (not truthiness) is required here
        if self._name is not None and self.get_note_index() is False:
            raise NoteException('Invalid note.')

        if self._freq is None:
            self._set_freq()
        else:
            self._set_name_and_octave()

        # keep a stable two-decimal frequency so equality and hashing are predictable
        self._freq = round(self._freq, 2)

    def __eq__(self, other):
        # notes are equal when both pitch (rounded freq) and duration match
        return self._freq == other.freq and self.duration == other.duration

    def __ne__(self, other):
        return self._freq != other.freq or self.duration != other.duration

    def __hash__(self):
        # hash only on freq: equal notes share freq, so the hash contract holds
        return hash(self._freq)

    def __str__(self):
        return "{}{}".format(self._name, self._octave)

    def __repr__(self):
        # use !r so eval(repr(note)) restores alt=None as None rather than
        # the string 'None' (and quotes 'flat'/'sharp' correctly)
        return "Note(freq={}, alt={!r}, duration={})".format(self._freq, self.alt, self.duration)

    @property
    def freq(self):
        return self._freq

    @freq.setter
    def freq(self, freq):
        # changing the pitch recomputes name and octave
        self._freq = freq
        self._set_name_and_octave()

    @property
    def name(self):
        return self._name

    @property
    def octave(self):
        return self._octave

    @property
    def alt(self):
        return self._alt

    @alt.setter
    def alt(self, alt):
        # changing the alteration may change the enharmonic spelling
        self._alt = alt
        self._set_name_and_octave()

    def pitch_shift(self, value, half_step=False, octave=False, alt=None):
        """
        change the freq and calculate name and octave
        :param value: value to add or sub from freq
        :param half_step: if True val is based as half tone
        :param octave: if True val is based on octave
        :param alt: note's alteration, could be sharp or flat. used to choose name (e.g D# or Eb)
        """
        if alt is not None:
            self._alt = alt

        if half_step is True:
            self._freq = round(self._freq * (self.HALF_STEP_INTERVAL ** value), 2)
        elif octave is True:
            self._freq = round(self._freq * (2 ** value), 2)
        else:
            # plain mode: value is a raw Hz delta
            self._freq = round(self._freq + value, 2)

        self._set_name_and_octave()

    def get_note_index(self):
        """
        :return: position of the note in self.NOTES or False if not found
            (callers must compare with ``is False`` since 'C' is index 0)
        """
        idx = 0
        for note in self.NOTES:
            # each entry may hold two enharmonic spellings, e.g. 'C#/Db'
            names = note.split('/')
            for name in names:
                if self._name == name:
                    return idx
            idx += 1

        return False

    def _set_name_and_octave(self):
        """
        calculate and set _name and _octave based on freq and alt
        """
        # semitone distance from A4, via log2 of the frequency ratio
        distance = int(round(len(self.NOTES) * (math.log(float(self._freq), 2) - math.log(self.A_FREQUENCY, 2))))
        idx = self.NOTES.index('A') + distance
        octave = int(self.A_DEFAULT_OCTAVE + (idx / len(self.NOTES)))
        idx = idx % len(self.NOTES)
        self._name = self.get_note_name_by_index(idx, self._alt)
        self._octave = octave

    def _set_freq(self):
        """
        calculate and set freq based on name and octave
        """
        note_distance = self.get_note_index() - self.NOTES.index('A')
        oct_distance = self._octave - 4
        distance = note_distance + (len(self.NOTES) * oct_distance)
        self._freq = self.A_FREQUENCY * (self.HALF_STEP_INTERVAL ** distance)

    @classmethod
    def get_note_name_by_index(cls, idx, alt=None):
        """
        :param idx: index into NOTES (values >= len(NOTES) wrap around)
        :param alt: 'flat' picks the flat spelling when one exists; any other
            value (including None) picks the sharp/plain spelling
        :return: the note name as a string
        """
        while idx >= len(cls.NOTES):
            idx = idx - len(cls.NOTES)

        names = cls.NOTES[idx].split('/')
        if len(names) > 1 and alt == 'flat':
            return names[1]

        return names[0]
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,296 | iskyd/babs | refs/heads/master | /babs/chord.py | from babs import Note, NoteList
from babs.exceptions import ChordException
class Chord(NoteList):
    """
    A harmonic set of pitches: two or more notes sounded together.

    Notes are kept sorted by ascending frequency.
    """

    # interval patterns: semitone distances from the root note
    MAJOR_TYPE = [4, 7]
    MAJOR_SEVEN_TYPE = [4, 7, 11]
    MINOR_TYPE = [3, 7]
    MINOR_SEVEN_TYPE = [3, 7, 10]
    DOMINANT_TYPE = [4, 7, 10]
    MINOR_MAJOR_SEVEN_TYPE = [3, 7, 11]
    HALF_DIMINISHED_SEVEN_TYPE = [3, 6, 10]
    DIMINISHED_TYPE = [3, 6]
    DIMINISHED_SEVEN_TYPE = [3, 6, 9]
    AUGMENTED_TYPE = [4, 8]
    AUGMENTED_SEVEN_TYPE = [4, 8, 10]
    AUGMENTED_MAJOR_SEVEN_TYPE = [4, 8, 11]
    MAJOR_SIXTH_TYPE = [4, 7, 9]
    MINOR_SIXTH_TYPE = [3, 7, 9]
    SUS4_TYPE = [5, 7]
    SUS4_SEVEN_TYPE = [5, 7, 10]
    SUS4_MAJOR_SEVEN_TYPE = [5, 7, 11]
    SUS2_TYPE = [2, 7]
    SUS2_SEVEN_TYPE = [2, 7, 10]
    SUS2_MAJOR_SEVEN_TYPE = [2, 7, 11]

    def __init__(self, *notes, **kwargs):
        """
        :param notes: notes making up the chord
        :param kwargs: options [strict]
        """
        strict = kwargs.pop('strict', True)
        super().__init__(*notes, strict=strict, invalid_exception=ChordException)
        self._order()

    def _order(self):
        """Sort the notes by ascending frequency; invalid chords are left as-is."""
        if self.is_valid():
            self._notes.sort(key=lambda n: n.freq)

    def is_valid(self):
        """
        Check if chord is valid: base-list validity plus at least two notes.

        :return: bool
        """
        return super().is_valid() and len(self._notes) >= 2

    def add_note(self, note, strict=True):
        """
        Add a note to the chord, keeping the notes ordered.

        :param note: note to be added in list
        :param strict: raise ChordException if note is not valid or if chord will be invalid
        :return: None
        """
        super().add_note(note=note, strict=strict)
        self._order()

    def remove_note(self, note=None, freq=None, name=None, octave=None, strict=True):
        """
        Remove note by note, freq, name or octave, keeping the notes ordered.

        :param note: note to remove
        :param freq: frequency to remove
        :param name: name to remove
        :param octave: octave to remove
        :param strict: raise ChordException if note is not valid, not found or if chord will be invalid after remove
        :return: None
        """
        super().remove_note(note=note, freq=freq, name=name, octave=octave, strict=strict)
        self._order()

    @classmethod
    def create_from_root(cls, root, chord_type=None, octave=NoteList.OCTAVE_TYPE_ROOT, alt=Note.SHARP, strict=True):
        """
        Build a chord from a root note and an interval pattern.

        :param root: root note
        :param chord_type: a list of notes distance from root note (defaults to MAJOR_TYPE)
        :param octave: octave of notes in chord
        :param alt: note alteration 'sharp' or 'flat'
        :param strict: forwarded to the constructor
        :type root: Note
        :type chord_type: list
        :type octave: Union[int, string, callable]
        :type alt: string
        :return: Chord
        """
        pattern = cls.MAJOR_TYPE if chord_type is None else chord_type
        notes = NoteList.get_notes_from_root(root=root, note_list_type=pattern, octave=octave, alt=alt)
        return cls(*notes, strict=strict)
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.