code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
title = 'methoxy decomposition to H + CH2O'
description = ''
frequencyScaleFactor = 1.0
"""
This example illustrates how to manually set up an Arkane input file for a small P-dep reaction system [using only the
RRHO assumption, and without tunneling, although this can be easily implemented]. Such a calculation is desirable if
the user wishes to supply experimentally determined frequencies, for example. Although some commented notes below may be
useful, see http://reactionmechanismgenerator.github.io/RMG-Py/users/arkane/index.html for more documented information
about Arkane and creating input files.
(information pertaining this file is adopted by <NAME>, 2013, JPCA 117 (33) 7686-96.)
"""
transitionState(
label='TS3',
E0=(34.1, 'kcal/mol'), # this INCLUDES the ZPE. Note that other energy units are also possible (e.g., kJ/mol)
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-967, 'cm^-1'),
modes=[ # these modes are used to compute the partition functions
HarmonicOscillator(frequencies=([466, 581, 1169, 1242, 1499, 1659, 2933, 3000], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")) # this must be included for every species/ts
],
)
transitionState(
label='TS2',
E0=(38.9, 'kcal/mol'),
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-1934, 'cm^-1'),
modes=[
HarmonicOscillator(frequencies=([792, 987, 1136, 1142, 1482, 2441, 3096, 3183], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.928, 0.962, 5.807], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol"))
],
)
transitionState(
label='TS1',
E0=(39.95, 'kcal/mol'),
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-1756, 'cm^-1'),
modes=[
HarmonicOscillator(frequencies=([186, 626, 1068, 1234, 1474, 1617, 2994, 3087], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.966, 0.986, 5.253], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol"))
],
)
species(
label='methoxy',
structure=SMILES('C[O]'),
E0=(9.44, 'kcal/mol'),
modes=[
HarmonicOscillator(frequencies=([758, 960, 1106, 1393, 1403, 1518, 2940, 3019, 3065], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251], "cm^-1"), symmetry=3, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")),
],
spinMultiplicity=3.88, # 3+exp(-89/T)
opticalIsomers=1,
molecularWeight=(31.01843, 'amu'),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
)
species(
label='CH2O',
E0=(28.69, 'kcal/mol'),
molecularWeight=(30.0106, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
spinMultiplicity=1,
opticalIsomers=1,
modes=[
HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')),
NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2,
quantum=False),
IdealGasTranslation(mass=(30.0106, "g/mol")),
],
)
species(
label='H',
E0=(0.000, 'kcal/mol'),
molecularWeight=(1.00783, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
modes=[
IdealGasTranslation(mass=(1.00783, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=1,
)
species(
label='CH2Ob',
# this is a special system with two chemically equivalent product channels. Thus, different labels are used.
E0=(28.69, 'kcal/mol'),
molecularWeight=(30.0106, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
spinMultiplicity=1,
opticalIsomers=1,
modes=[
HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')),
NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2,
quantum=False),
IdealGasTranslation(mass=(30.0106, "g/mol")),
],
)
species(
label='Hb',
E0=(0.0001, 'kcal/mol'),
molecularWeight=(1.00783, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
modes=[
IdealGasTranslation(mass=(1.00783, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=1,
)
species(
label='CH2OH',
E0=(0.00, 'kcal/mol'),
molecularWeight=(31.01843, "g/mol"),
modes=[
HarmonicOscillator(frequencies=([418, 595, 1055, 1198, 1368, 1488, 3138, 3279, 3840], 'cm^-1')),
# below is an example of how to include hindered rotors
# HinderedRotor(inertia=(5.75522e-47,'kg*m^2'), symmetry=1, barrier=(22427.8,'J/mol'), semiclassical=False),
NonlinearRotor(rotationalConstant=([0.868, 0.993, 6.419], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=2,
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
)
species(
label='He',
# freqScaleFactor = 1, # TypeError: species() got an unexpected keyword argument 'freqScaleFactor'.
structure=SMILES('[He]'),
molecularWeight=(4.003, 'amu'),
collisionModel=TransportData(sigma=(2.55e-10, 'm'), epsilon=(0.0831, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
thermo=NASA(
polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(200, 'K'), Tmax=(1000, 'K')),
NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(1000, 'K'), Tmax=(6000, 'K'))],
Tmin=(200, 'K'), Tmax=(6000, 'K'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862, 'J/(mol*K)'), label="""He""",
comment="""Thermo library: primaryThermoLibrary"""),
)
reaction(
label='CH2O+H=Methoxy',
# label = 'Methoxy = CH2O+H',
reactants=['CH2O', 'H'],
products=['methoxy'],
# reactants = ['methoxy'],
# products = ['CH2O', 'H'],
transitionState='TS3',
# tunneling='Eckart',
)
reaction(
# label = 'CH2Ob+Hb=CH2OH',
label='CH2OH = CH2Ob+Hb',
# products = ['CH2OH'],
reactants=['CH2OH'],
# reactants = ['CH2Ob','Hb'],
products=['CH2Ob', 'Hb'],
transitionState='TS1',
# tunneling='Eckart',
)
reaction(
label='CH2OH = Methoxy',
# reactants = ['methoxy'],
# products = ['CH2OH'],
# label = 'Methoxy = CH2OH',
products=['methoxy'],
reactants=['CH2OH'],
transitionState='TS2',
# tunneling='Eckart',
)
kinetics('CH2O+H=Methoxy')
# kinetics('Methoxy = CH2O+H' )
# kinetics('Methoxy = CH2OH' )
kinetics('CH2OH = Methoxy')
kinetics('CH2OH = CH2Ob+Hb')
# kinetics('CH2Ob+Hb=CH2OH')
network(
label='methoxy',
isomers=[
'methoxy',
'CH2OH',
],
reactants=[
('CH2O', 'H'),
# ('CH2Ob','Hb'),
],
bathGas={
'He': 1,
},
)
pressureDependence(
label='methoxy',
Tmin=(450, 'K'), Tmax=(1200, 'K'), Tcount=4,
Tlist=([450, 500, 678, 700], 'K'),
Pmin=(0.01, 'atm'), Pmax=(1000, 'atm'), Pcount=7,
Plist=([0.01, 0.1, 1, 3, 10, 100, 1000], 'atm'),
maximumGrainSize=(0.5, 'kcal/mol'),
minimumGrainCount=500,
method='modified strong collision',
# Other methods include: 'reservoir state', 'chemically-significant eigenvalues',
interpolationModel='pdeparrhenius',
activeKRotor=True,
# active_j_rotor = False, # causes Arkane to crash
rmgmode=False,
) | arkane/data/methoxy.py | title = 'methoxy decomposition to H + CH2O'
description = ''
frequencyScaleFactor = 1.0
"""
This example illustrates how to manually set up an Arkane input file for a small P-dep reaction system [using only the
RRHO assumption, and without tunneling, although this can be easily implemented]. Such a calculation is desirable if
the user wishes to supply experimentally determined frequencies, for example. Although some commented notes below may be
useful, see http://reactionmechanismgenerator.github.io/RMG-Py/users/arkane/index.html for more documented information
about Arkane and creating input files.
(information pertaining this file is adopted by <NAME>, 2013, JPCA 117 (33) 7686-96.)
"""
transitionState(
label='TS3',
E0=(34.1, 'kcal/mol'), # this INCLUDES the ZPE. Note that other energy units are also possible (e.g., kJ/mol)
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-967, 'cm^-1'),
modes=[ # these modes are used to compute the partition functions
HarmonicOscillator(frequencies=([466, 581, 1169, 1242, 1499, 1659, 2933, 3000], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")) # this must be included for every species/ts
],
)
transitionState(
label='TS2',
E0=(38.9, 'kcal/mol'),
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-1934, 'cm^-1'),
modes=[
HarmonicOscillator(frequencies=([792, 987, 1136, 1142, 1482, 2441, 3096, 3183], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.928, 0.962, 5.807], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol"))
],
)
transitionState(
label='TS1',
E0=(39.95, 'kcal/mol'),
spinMultiplicity=2,
opticalIsomers=1,
frequency=(-1756, 'cm^-1'),
modes=[
HarmonicOscillator(frequencies=([186, 626, 1068, 1234, 1474, 1617, 2994, 3087], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.966, 0.986, 5.253], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol"))
],
)
species(
label='methoxy',
structure=SMILES('C[O]'),
E0=(9.44, 'kcal/mol'),
modes=[
HarmonicOscillator(frequencies=([758, 960, 1106, 1393, 1403, 1518, 2940, 3019, 3065], 'cm^-1')),
NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251], "cm^-1"), symmetry=3, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")),
],
spinMultiplicity=3.88, # 3+exp(-89/T)
opticalIsomers=1,
molecularWeight=(31.01843, 'amu'),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
)
species(
label='CH2O',
E0=(28.69, 'kcal/mol'),
molecularWeight=(30.0106, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
spinMultiplicity=1,
opticalIsomers=1,
modes=[
HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')),
NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2,
quantum=False),
IdealGasTranslation(mass=(30.0106, "g/mol")),
],
)
species(
label='H',
E0=(0.000, 'kcal/mol'),
molecularWeight=(1.00783, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
modes=[
IdealGasTranslation(mass=(1.00783, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=1,
)
species(
label='CH2Ob',
# this is a special system with two chemically equivalent product channels. Thus, different labels are used.
E0=(28.69, 'kcal/mol'),
molecularWeight=(30.0106, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
spinMultiplicity=1,
opticalIsomers=1,
modes=[
HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')),
NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2,
quantum=False),
IdealGasTranslation(mass=(30.0106, "g/mol")),
],
)
species(
label='Hb',
E0=(0.0001, 'kcal/mol'),
molecularWeight=(1.00783, "g/mol"),
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
modes=[
IdealGasTranslation(mass=(1.00783, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=1,
)
species(
label='CH2OH',
E0=(0.00, 'kcal/mol'),
molecularWeight=(31.01843, "g/mol"),
modes=[
HarmonicOscillator(frequencies=([418, 595, 1055, 1198, 1368, 1488, 3138, 3279, 3840], 'cm^-1')),
# below is an example of how to include hindered rotors
# HinderedRotor(inertia=(5.75522e-47,'kg*m^2'), symmetry=1, barrier=(22427.8,'J/mol'), semiclassical=False),
NonlinearRotor(rotationalConstant=([0.868, 0.993, 6.419], "cm^-1"), symmetry=1, quantum=False),
IdealGasTranslation(mass=(31.01843, "g/mol")),
],
spinMultiplicity=2,
opticalIsomers=2,
collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
)
species(
label='He',
# freqScaleFactor = 1, # TypeError: species() got an unexpected keyword argument 'freqScaleFactor'.
structure=SMILES('[He]'),
molecularWeight=(4.003, 'amu'),
collisionModel=TransportData(sigma=(2.55e-10, 'm'), epsilon=(0.0831, 'kJ/mol')),
energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95),
thermo=NASA(
polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(200, 'K'), Tmax=(1000, 'K')),
NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(1000, 'K'), Tmax=(6000, 'K'))],
Tmin=(200, 'K'), Tmax=(6000, 'K'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862, 'J/(mol*K)'), label="""He""",
comment="""Thermo library: primaryThermoLibrary"""),
)
reaction(
label='CH2O+H=Methoxy',
# label = 'Methoxy = CH2O+H',
reactants=['CH2O', 'H'],
products=['methoxy'],
# reactants = ['methoxy'],
# products = ['CH2O', 'H'],
transitionState='TS3',
# tunneling='Eckart',
)
reaction(
# label = 'CH2Ob+Hb=CH2OH',
label='CH2OH = CH2Ob+Hb',
# products = ['CH2OH'],
reactants=['CH2OH'],
# reactants = ['CH2Ob','Hb'],
products=['CH2Ob', 'Hb'],
transitionState='TS1',
# tunneling='Eckart',
)
reaction(
label='CH2OH = Methoxy',
# reactants = ['methoxy'],
# products = ['CH2OH'],
# label = 'Methoxy = CH2OH',
products=['methoxy'],
reactants=['CH2OH'],
transitionState='TS2',
# tunneling='Eckart',
)
kinetics('CH2O+H=Methoxy')
# kinetics('Methoxy = CH2O+H' )
# kinetics('Methoxy = CH2OH' )
kinetics('CH2OH = Methoxy')
kinetics('CH2OH = CH2Ob+Hb')
# kinetics('CH2Ob+Hb=CH2OH')
network(
label='methoxy',
isomers=[
'methoxy',
'CH2OH',
],
reactants=[
('CH2O', 'H'),
# ('CH2Ob','Hb'),
],
bathGas={
'He': 1,
},
)
pressureDependence(
label='methoxy',
Tmin=(450, 'K'), Tmax=(1200, 'K'), Tcount=4,
Tlist=([450, 500, 678, 700], 'K'),
Pmin=(0.01, 'atm'), Pmax=(1000, 'atm'), Pcount=7,
Plist=([0.01, 0.1, 1, 3, 10, 100, 1000], 'atm'),
maximumGrainSize=(0.5, 'kcal/mol'),
minimumGrainCount=500,
method='modified strong collision',
# Other methods include: 'reservoir state', 'chemically-significant eigenvalues',
interpolationModel='pdeparrhenius',
activeKRotor=True,
# active_j_rotor = False, # causes Arkane to crash
rmgmode=False,
) | 0.828627 | 0.529081 |
__author__ = '<NAME>'
import tweepy
import pymongo
from pymongo import MongoClient
import json
import logging
logging.basicConfig(
filename='emovix_twitter_hashtags.log',
level=logging.WARNING,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d-%m-%y %H:%M')
# Configuration parameters
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
database_address = ""
database_name = ""
source_box = ""
twitterStatusCol = ""
twitterUserCol = ""
ignored_tweet_fields = ["contributors", "truncated", "is_quote_status", "in_reply_to_status_id", "in_reply_to_screen_name", "geo",
"in_reply_to_user_id", "favorited", "in_reply_to_user_id_str", "filter_level", "in_reply_to_status_id_str"]
ignored_user_fields = ["follow_request_sent", "profile_use_background_image", "default_profile_image", "verified", "profile_image_url_https",
"profile_sidebar_fill_color", "profile_text_color", "profile_sidebar_border_color", "id_str", "profile_background_color",
"profile_background_image_url_https", "utc_offset", "profile_link_color", "profile_image_url", "following",
"profile_background_image_url", "profile_background_tile", "notifications", "created_at", "contributors_enabled",
"protected", "default_profile", "is_translator"]
hashtags = [
# Global hashtags
"20D", "EleccionesGenerales2015", "Elecciones2015", "Elecciones20D", "#ElBipartidismoDebate", "#CaraACaraL6",
"#RescataMiVoto", "#NOalVotoRogado", "#ValoraTuVoto", "#VotoRogadoVotoRobado",
# Partido Popular
"Partido Popular", "PartidoPopular", "ppopular", "marianorajoy", u"#EspañaEnSerio", "#VotaPP", "@Sorayapp", "#PP",
"@mdcospedal", "pablocasado_", "#YoVotoPP", "#EmpleoEnSerio", "@NNGG_Es", "pablocasado_", "@AlfonsoAlonsoPP",
# PSOE
"PSOE", "PSC", "@socialistes_cat", "#FemForaRajoy", "#SomLaSolucio", "@carmechacon", "sanchezcastejon",
"#OrgulloSocialista", "#VOTAPSOE", "#PedroPresidente", u"#UnFuturoParaLaMayoría", "ElCambioqueUne",
# Ciudadanos-Partido de la Ciudadanía
"@GirautaOficial", "#AlbertRivera", "Albert_Rivera", "CiudadanosCs", "#RutaCiudadana", "#ConIlusion",
"@sdelcampocs", u"#Ilusión", "Ciudadanos", "@InesArrimadas", "#AlbertPresidente", "IlusionNaranja", u"IlusiónNaranja",
# Podemos
"#UNPAISCONTIGO", "ahorapodemos", "Pablo_Iglesias_", "@AdaColau", "@VickyRosell", "#LeyDeImpunidad", "#Podemos",
"Unpaiscontigo", u"Unpaíscontigo"
# Democràcia i llibertat
"ConvergenciaCAT", "@DemocratesCAT", "@reagrupament", "#possible", "@20dl_cat", "@joseprull", "@joanbague",
"@peresalo68", "@Ferran_Bel", "@franceschoms", "<NAME>",
# ERC
"ERC", u"#SomRepública", "Esquerra_ERC", "@GabrielRufian", "@JoanTarda", "@junqueras", "@MartaRovira",
"catalunyasi", "RTmetropolitanTour",
# Euskal <NAME>
"ehbildu", "BilduErabakira", "@ehbildu_legebil",
# Unió
"unio_cat", "@DuranLleida", u"#Solucions!", "@Marti_Barbera", "@Ramon_Espadaler", "Duran", "DuranLleida",
# UPyD
"UPyD", "#VotaUPYD", u"#MásEspaña", "@Herzogoff", "@sryuriaguilar",
# Unidad Popular
"Unidad Popular", "Unidadpopular__", "IUnida", "agarzon", "IzquierdaUnida", "UnidadPopular20D",
# Partido Nacionalista Vasco
"eajpnv", "PNV", "Egibar",
# En Comú Podem
"EnComu_Podem",
# Nós-Candidatura Galega
"noscgalega", "coalicion",
# Coalición Canaria-Partido Nacionalista Canario
"TDCanarias",
# Compromís-Podemos-És el moment
"EsElMoment", u"#ÉsElMoment",
# Geroa Bai
"geroabai",
# En Marea
"En_Marea", "GZtenquestar",
]
client = None
db = None
class CustomStreamListener(tweepy.StreamListener):
def __init__(self, api):
self.api = api
super(tweepy.StreamListener, self).__init__()
#self.db = pymongo.MongoClient().emovix
self.db = db
def on_data(self, data):
tweet = json.loads(data)
# This code ignores limit notices
# https://dev.twitter.com/streaming/overview/messages-types#limit_notices
if tweet.get('limit'):
logging.debug('Limit notice received: ' + str(tweet['limit']['track']))
return True
user = tweet['user']
for field in ignored_tweet_fields:
del tweet[field]
for field in ignored_user_fields:
del tweet['user'][field]
self.db[twitterStatusCol].update(tweet, tweet, upsert=True)
self.db[twitterUserCol].update({"screen_name": tweet['user']['screen_name']}, user, upsert=True)
return True
def on_error(self, status):
logging.error('CustomStreamListener on_error')
logging.error(status)
return True
def on_timeout(self):
logging.error('CustomStreamListener on_timeout')
return True # Don't kill the stream
if __name__ == '__main__':
logging.debug('emovix_twitter_streaming.py starting ...')
# Load configuration
with open('config.json', 'r') as f:
config = json.load(f)
access_token = config['access_token']
access_token_secret = config['access_token_secret']
consumer_key = config['consumer_key']
consumer_secret = config['consumer_secret']
database_address = config['database_address']
database_name = config['database_name']
source_box = config['source_box']
twitterStatusCol = source_box + "_twitterStatus"
twitterUserCol = source_box + "_twitterUser"
client = MongoClient('mongodb://' + database_address + ':27017/')
db = client[database_name]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
while True:
try:
logging.debug('Connecting to Twitter stream ...')
stream = tweepy.streaming.Stream(auth, CustomStreamListener(api))
stream.filter( track = hashtags )
except Exception as e:
# Oh well, reconnect and keep trucking
logging.error(e.__class__)
logging.error(e)
continue
except KeyboardInterrupt:
stream.disconnect()
break | emovix_twitter_hashtags.py |
__author__ = '<NAME>'
import tweepy
import pymongo
from pymongo import MongoClient
import json
import logging
logging.basicConfig(
filename='emovix_twitter_hashtags.log',
level=logging.WARNING,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d-%m-%y %H:%M')
# Configuration parameters
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
database_address = ""
database_name = ""
source_box = ""
twitterStatusCol = ""
twitterUserCol = ""
ignored_tweet_fields = ["contributors", "truncated", "is_quote_status", "in_reply_to_status_id", "in_reply_to_screen_name", "geo",
"in_reply_to_user_id", "favorited", "in_reply_to_user_id_str", "filter_level", "in_reply_to_status_id_str"]
ignored_user_fields = ["follow_request_sent", "profile_use_background_image", "default_profile_image", "verified", "profile_image_url_https",
"profile_sidebar_fill_color", "profile_text_color", "profile_sidebar_border_color", "id_str", "profile_background_color",
"profile_background_image_url_https", "utc_offset", "profile_link_color", "profile_image_url", "following",
"profile_background_image_url", "profile_background_tile", "notifications", "created_at", "contributors_enabled",
"protected", "default_profile", "is_translator"]
hashtags = [
# Global hashtags
"20D", "EleccionesGenerales2015", "Elecciones2015", "Elecciones20D", "#ElBipartidismoDebate", "#CaraACaraL6",
"#RescataMiVoto", "#NOalVotoRogado", "#ValoraTuVoto", "#VotoRogadoVotoRobado",
# Partido Popular
"Partido Popular", "PartidoPopular", "ppopular", "marianorajoy", u"#EspañaEnSerio", "#VotaPP", "@Sorayapp", "#PP",
"@mdcospedal", "pablocasado_", "#YoVotoPP", "#EmpleoEnSerio", "@NNGG_Es", "pablocasado_", "@AlfonsoAlonsoPP",
# PSOE
"PSOE", "PSC", "@socialistes_cat", "#FemForaRajoy", "#SomLaSolucio", "@carmechacon", "sanchezcastejon",
"#OrgulloSocialista", "#VOTAPSOE", "#PedroPresidente", u"#UnFuturoParaLaMayoría", "ElCambioqueUne",
# Ciudadanos-Partido de la Ciudadanía
"@GirautaOficial", "#AlbertRivera", "Albert_Rivera", "CiudadanosCs", "#RutaCiudadana", "#ConIlusion",
"@sdelcampocs", u"#Ilusión", "Ciudadanos", "@InesArrimadas", "#AlbertPresidente", "IlusionNaranja", u"IlusiónNaranja",
# Podemos
"#UNPAISCONTIGO", "ahorapodemos", "Pablo_Iglesias_", "@AdaColau", "@VickyRosell", "#LeyDeImpunidad", "#Podemos",
"Unpaiscontigo", u"Unpaíscontigo"
# Democràcia i llibertat
"ConvergenciaCAT", "@DemocratesCAT", "@reagrupament", "#possible", "@20dl_cat", "@joseprull", "@joanbague",
"@peresalo68", "@Ferran_Bel", "@franceschoms", "<NAME>",
# ERC
"ERC", u"#SomRepública", "Esquerra_ERC", "@GabrielRufian", "@JoanTarda", "@junqueras", "@MartaRovira",
"catalunyasi", "RTmetropolitanTour",
# Euskal <NAME>
"ehbildu", "BilduErabakira", "@ehbildu_legebil",
# Unió
"unio_cat", "@DuranLleida", u"#Solucions!", "@Marti_Barbera", "@Ramon_Espadaler", "Duran", "DuranLleida",
# UPyD
"UPyD", "#VotaUPYD", u"#MásEspaña", "@Herzogoff", "@sryuriaguilar",
# Unidad Popular
"Unidad Popular", "Unidadpopular__", "IUnida", "agarzon", "IzquierdaUnida", "UnidadPopular20D",
# Partido Nacionalista Vasco
"eajpnv", "PNV", "Egibar",
# En Comú Podem
"EnComu_Podem",
# Nós-Candidatura Galega
"noscgalega", "coalicion",
# Coalición Canaria-Partido Nacionalista Canario
"TDCanarias",
# Compromís-Podemos-És el moment
"EsElMoment", u"#ÉsElMoment",
# Geroa Bai
"geroabai",
# En Marea
"En_Marea", "GZtenquestar",
]
client = None
db = None
class CustomStreamListener(tweepy.StreamListener):
def __init__(self, api):
self.api = api
super(tweepy.StreamListener, self).__init__()
#self.db = pymongo.MongoClient().emovix
self.db = db
def on_data(self, data):
tweet = json.loads(data)
# This code ignores limit notices
# https://dev.twitter.com/streaming/overview/messages-types#limit_notices
if tweet.get('limit'):
logging.debug('Limit notice received: ' + str(tweet['limit']['track']))
return True
user = tweet['user']
for field in ignored_tweet_fields:
del tweet[field]
for field in ignored_user_fields:
del tweet['user'][field]
self.db[twitterStatusCol].update(tweet, tweet, upsert=True)
self.db[twitterUserCol].update({"screen_name": tweet['user']['screen_name']}, user, upsert=True)
return True
def on_error(self, status):
logging.error('CustomStreamListener on_error')
logging.error(status)
return True
def on_timeout(self):
logging.error('CustomStreamListener on_timeout')
return True # Don't kill the stream
if __name__ == '__main__':
logging.debug('emovix_twitter_streaming.py starting ...')
# Load configuration
with open('config.json', 'r') as f:
config = json.load(f)
access_token = config['access_token']
access_token_secret = config['access_token_secret']
consumer_key = config['consumer_key']
consumer_secret = config['consumer_secret']
database_address = config['database_address']
database_name = config['database_name']
source_box = config['source_box']
twitterStatusCol = source_box + "_twitterStatus"
twitterUserCol = source_box + "_twitterUser"
client = MongoClient('mongodb://' + database_address + ':27017/')
db = client[database_name]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
while True:
try:
logging.debug('Connecting to Twitter stream ...')
stream = tweepy.streaming.Stream(auth, CustomStreamListener(api))
stream.filter( track = hashtags )
except Exception as e:
# Oh well, reconnect and keep trucking
logging.error(e.__class__)
logging.error(e)
continue
except KeyboardInterrupt:
stream.disconnect()
break | 0.398875 | 0.154855 |
load("//ros:utils.bzl", "get_stem")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@rules_cc//cc:defs.bzl", "cc_library")
load("@rules_python//python:defs.bzl", "py_library")
RosInterfaceInfo = provider(
"Provides info for interface code generation.",
fields = [
"info",
"deps",
],
)
_ACTION_OUTPUT_MAPPING = [
"{}Goal.msg",
"{}ActionGoal.msg",
"{}Action.msg",
"{}Result.msg",
"{}ActionResult.msg",
"{}Feedback.msg",
"{}ActionFeedback.msg",
]
def _ros_interface_library_impl(ctx):
ros_package_name = ctx.label.name
output_srcs = [] # Messages and services.
for src in ctx.files.srcs:
if src.extension == "action":
stem = get_stem(src)
action_msgs = [
ctx.actions.declare_file(
"{}/{}".format(ros_package_name, t.format(stem)),
)
for t in _ACTION_OUTPUT_MAPPING
]
genaction_args = ctx.actions.args()
genaction_args.add(src)
genaction_args.add("-o", action_msgs[0].dirname)
ctx.actions.run(
inputs = [src],
outputs = action_msgs,
executable = ctx.executable._genaction,
arguments = [genaction_args],
)
output_srcs.extend(action_msgs)
else:
src_symlink = ctx.actions.declare_file(
"{}/{}".format(ros_package_name, src.basename),
)
ctx.actions.symlink(output = src_symlink, target_file = src)
output_srcs.append(src_symlink)
return [
DefaultInfo(files = depset(output_srcs)),
RosInterfaceInfo(
info = struct(
ros_package_name = ros_package_name,
srcs = output_srcs,
),
deps = depset(
direct = [dep[RosInterfaceInfo].info for dep in ctx.attr.deps],
transitive = [
dep[RosInterfaceInfo].deps
for dep in ctx.attr.deps
],
),
),
]
ros_interface_library = rule(
attrs = {
"srcs": attr.label_list(
allow_files = [".action", ".msg", ".srv"],
mandatory = True,
),
"deps": attr.label_list(providers = [RosInterfaceInfo]),
"_genaction": attr.label(
default = Label("@ros_common_msgs//:genaction"),
executable = True,
cfg = "exec",
),
},
implementation = _ros_interface_library_impl,
)
def _get_include_flags(target, ctx):
ros_package_name = target.label.name
srcs = target[RosInterfaceInfo].info.srcs
deps = target[RosInterfaceInfo].deps
include_flags = ["-I", "{}:{}".format(ros_package_name, srcs[0].dirname)]
for dep in deps.to_list():
include_flags += ["-I", "{}:{}".format(
dep.ros_package_name,
dep.srcs[0].dirname,
)]
return include_flags
def _get_all_srcs(target, ctx):
srcs = target[RosInterfaceInfo].info.srcs
deps = target[RosInterfaceInfo].deps
return depset(
direct = srcs,
transitive = [depset(dep.srcs) for dep in deps.to_list()],
)
def _cc_ros_generator_aspect_impl(target, ctx):
include_flags = _get_include_flags(target, ctx)
all_srcs = _get_all_srcs(target, ctx)
ros_package_name = target.label.name
srcs = target[RosInterfaceInfo].info.srcs
all_headers = []
for src in srcs:
src_stem = get_stem(src)
msg_header = ctx.actions.declare_file(
"{}/{}.h".format(ros_package_name, src_stem),
)
msg_headers = [msg_header]
if src.extension == "srv":
msg_headers.append(ctx.actions.declare_file(
"{}/{}Request.h".format(ros_package_name, src_stem),
))
msg_headers.append(ctx.actions.declare_file(
"{}/{}Response.h".format(ros_package_name, src_stem),
))
all_headers.extend(msg_headers)
args = ctx.actions.args()
args.add("-o", msg_header.dirname)
args.add("-p", ros_package_name)
args.add_all(include_flags)
args.add(src)
ctx.actions.run(
inputs = all_srcs,
outputs = msg_headers,
executable = ctx.executable._gencpp,
arguments = [args],
)
cc_include_dir = "/".join(srcs[0].dirname.split("/")[:-1])
compilation_context = cc_common.create_compilation_context(
headers = depset(all_headers),
system_includes = depset([cc_include_dir]),
)
cc_info = cc_common.merge_cc_infos(
direct_cc_infos = [
CcInfo(compilation_context = compilation_context),
] + [
dep[CcInfo]
for dep in ctx.rule.attr.deps
],
)
return [cc_info]
cc_ros_generator_aspect = aspect(
implementation = _cc_ros_generator_aspect_impl,
attr_aspects = ["deps"],
attrs = {
"_gencpp": attr.label(
default = Label("@ros_gencpp//:gencpp"),
executable = True,
cfg = "exec",
),
},
provides = [CcInfo],
)
def _cc_ros_generator_impl(ctx):
cc_info = cc_common.merge_cc_infos(
direct_cc_infos = [dep[CcInfo] for dep in ctx.attr.deps],
)
return [cc_info]
cc_ros_generator = rule(
implementation = _cc_ros_generator_impl,
output_to_genfiles = True,
attrs = {
"deps": attr.label_list(
mandatory = True,
aspects = [cc_ros_generator_aspect],
providers = [RosInterfaceInfo],
),
},
)
def cc_ros_interface_library(name, deps, visibility = None):
name_gencpp = "{}_gencpp".format(name)
cc_ros_generator(
name = name_gencpp,
deps = deps,
)
cc_library(
name = name,
deps = [
name_gencpp,
"@roscpp_core//:roscpp_core",
"@ros_std_msgs//:cc_std_msgs_headers",
],
visibility = visibility,
)
def _py_generate(
ctx,
include_flags,
all_srcs,
ros_package_name,
rel_output_dir,
msgs):
if not msgs:
return []
extension = msgs[0].extension
if extension == "msg":
generator = ctx.executable._genmsg_py
else:
generator = ctx.executable._gensrv_py
py_msg_files = []
for msg in msgs:
msg_stem = get_stem(msg)
py_file = ctx.actions.declare_file(
"{}/{}/_{}.py".format(rel_output_dir, extension, msg_stem),
)
py_msg_files.append(py_file)
args = ctx.actions.args()
args.add("-o", py_msg_files[0].dirname)
args.add("-p", ros_package_name)
args.add_all(include_flags)
args.add_all(msgs)
ctx.actions.run(
inputs = all_srcs,
outputs = py_msg_files,
executable = generator,
arguments = [args],
)
init_py = ctx.actions.declare_file(
"{}/{}/__init__.py".format(rel_output_dir, extension),
)
args = ctx.actions.args()
args.add("--initpy")
args.add("-o", py_msg_files[0].dirname)
args.add("-p", ros_package_name)
ctx.actions.run(
inputs = py_msg_files,
outputs = [init_py],
executable = generator,
arguments = [args],
)
return py_msg_files + [init_py]
PyRosGeneratorAspectInfo = provider(
"Accumulates Python ROS interfaces.",
fields = [
"transitive_sources",
"imports",
],
)
def _get_list_attr(rule_attr, attr_name):
if not hasattr(rule_attr, attr_name):
return []
candidate = getattr(rule_attr, attr_name)
if type(candidate) != "list":
fail("Expected a list for attribute `{}`!".format(attr_name))
return candidate
def _collect_py_ros_generator_deps(rule_attr, attr_name):
    """Returns deps under `attr_name` carrying PyRosGeneratorAspectInfo."""
    matching = []
    for dep in _get_list_attr(rule_attr, attr_name):
        if type(dep) == "Target" and PyRosGeneratorAspectInfo in dep:
            matching.append(dep)
    return matching
def _merge_py_ros_generator_aspect_infos(py_infos):
    """Merges several PyRosGeneratorAspectInfo providers into one."""
    return PyRosGeneratorAspectInfo(
        transitive_sources = depset(
            transitive = [info.transitive_sources for info in py_infos],
        ),
        imports = depset(transitive = [info.imports for info in py_infos]),
    )
# Dependency attributes the Python aspect traverses.
_PY_ROS_GENERATOR_ATTR_ASPECTS = ["data", "deps"]
def _py_ros_generator_aspect_impl(target, ctx):
    """Aspect impl: generates Python bindings for ros_interface_library.

    For any other rule kind, it only forwards providers collected from the
    attributes listed in _PY_ROS_GENERATOR_ATTR_ASPECTS.
    """
    py_infos = []
    if ctx.rule.kind == "ros_interface_library":
        include_flags = _get_include_flags(target, ctx)
        all_srcs = _get_all_srcs(target, ctx)
        ros_package_name = target.label.name
        srcs = target[RosInterfaceInfo].info.srcs
        rel_output_dir = ros_package_name
        all_py_files = []
        # Messages and services are handled by different generator binaries,
        # so they run as two separate batches.
        msgs = [src for src in srcs if src.extension == "msg"]
        py_msg_files = _py_generate(
            ctx,
            include_flags,
            all_srcs,
            ros_package_name,
            rel_output_dir,
            msgs,
        )
        all_py_files.extend(py_msg_files)
        srvs = [src for src in srcs if src.extension == "srv"]
        py_srv_files = _py_generate(
            ctx,
            include_flags,
            all_srcs,
            ros_package_name,
            rel_output_dir,
            srvs,
        )
        all_py_files.extend(py_srv_files)
        # Derive the Python import root from one generated file's path.
        # NOTE(review): assumes at least one file was generated (i.e. srcs
        # contains a .msg or .srv) — confirm against the rule's allow_files.
        the_file = all_py_files[0]
        relative_path_parts = paths.relativize(
            the_file.dirname,
            the_file.root.path,
        ).split("/")
        if relative_path_parts[0] == "external":
            # Files from an external repository: drop the leading
            # "external" segment (and the trailing <pkg>/<ext> pair).
            py_import_path = paths.join(*relative_path_parts[1:-2])
        else:
            py_import_path = paths.join(
                ctx.workspace_name,
                *relative_path_parts[0:-2]
            )
        py_infos = [PyRosGeneratorAspectInfo(
            transitive_sources = depset(all_py_files),
            imports = depset([py_import_path]),
        )]
    # Merge in providers from dependencies reached through aspect attrs.
    for attr_name in _PY_ROS_GENERATOR_ATTR_ASPECTS:
        for dep in _collect_py_ros_generator_deps(ctx.rule.attr, attr_name):
            py_infos.append(dep[PyRosGeneratorAspectInfo])
    merged_py_info = _merge_py_ros_generator_aspect_infos(py_infos)
    return [merged_py_info]
# Aspect that walks `data`/`deps` edges and generates Python bindings for
# every ros_interface_library it encounters.
py_ros_generator_aspect = aspect(
    implementation = _py_ros_generator_aspect_impl,
    attr_aspects = _PY_ROS_GENERATOR_ATTR_ASPECTS,
    attrs = {
        "_genmsg_py": attr.label(
            default = Label("@ros_genpy//:genmsg_py"),
            executable = True,
            cfg = "exec",
        ),
        "_gensrv_py": attr.label(
            default = Label("@ros_genpy//:gensrv_py"),
            executable = True,
            cfg = "exec",
        ),
    },
    provides = [PyRosGeneratorAspectInfo],
)
def _py_ros_generator_impl(ctx):
    """Exposes aspect-collected Python sources as DefaultInfo + PyInfo."""
    py_info = _merge_py_ros_generator_aspect_infos([
        dep[PyRosGeneratorAspectInfo]
        for dep in ctx.attr.deps
    ])
    return [
        # Runfiles so the generated modules are present at execution time.
        DefaultInfo(runfiles = ctx.runfiles(
            transitive_files = py_info.transitive_sources,
        )),
        PyInfo(
            transitive_sources = py_info.transitive_sources,
            imports = py_info.imports,
        ),
    ]
# Rule that triggers Python code generation (via py_ros_generator_aspect) on
# all `deps` and re-exports the generated sources as PyInfo.
py_ros_generator = rule(
    implementation = _py_ros_generator_impl,
    output_to_genfiles = True,
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            aspects = [py_ros_generator_aspect],
            providers = [RosInterfaceInfo],
        ),
    },
)
def py_ros_interface_library(name, deps, **kwargs):
    """Macro: Python library exposing generated ROS interface modules.

    Creates a `py_ros_generator` target named `<name>_genpy` and wraps it in
    a `py_library` alongside the genpy runtime.
    """
    generated_target = name + "_genpy"
    py_ros_generator(
        name = generated_target,
        deps = deps,
    )
    py_library(
        name = name,
        deps = [generated_target, "@ros_genpy//:genpy"],
        **kwargs
    )
# Like py_ros_generator, but without the RosInterfaceInfo requirement on
# `deps`: it simply collects Python interfaces found anywhere via the aspect.
# (Fix: dataset-extraction residue fused onto the closing line was removed.)
py_ros_interface_collector = rule(
    implementation = _py_ros_generator_impl,
    output_to_genfiles = True,
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            aspects = [py_ros_generator_aspect],
        ),
    },
)
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@rules_cc//cc:defs.bzl", "cc_library")
load("@rules_python//python:defs.bzl", "py_library")
# Carries the interface sources and package identity that the C++/Python
# generator aspects consume.
RosInterfaceInfo = provider(
    "Provides info for interface code generation.",
    fields = [
        "info",  # struct(ros_package_name, srcs)
        "deps",  # depset of transitive deps' `info` structs
    ],
)
# File-name templates for the seven .msg files genaction derives from a
# single .action file; "{}" is replaced with the action's stem.
_ACTION_OUTPUT_MAPPING = [
    "{}Goal.msg",
    "{}ActionGoal.msg",
    "{}Action.msg",
    "{}Result.msg",
    "{}ActionResult.msg",
    "{}Feedback.msg",
    "{}ActionFeedback.msg",
]
def _ros_interface_library_impl(ctx):
    """Collects .msg/.srv files (expanding .action files) into one package.

    .action srcs are expanded into their constituent messages with the
    genaction tool; plain .msg/.srv files are symlinked into a directory
    named after the target (which doubles as the ROS package name).
    """
    ros_package_name = ctx.label.name
    output_srcs = []  # Messages and services.
    for src in ctx.files.srcs:
        if src.extension == "action":
            stem = get_stem(src)
            action_msgs = [
                ctx.actions.declare_file(
                    "{}/{}".format(ros_package_name, t.format(stem)),
                )
                for t in _ACTION_OUTPUT_MAPPING
            ]
            genaction_args = ctx.actions.args()
            genaction_args.add(src)
            genaction_args.add("-o", action_msgs[0].dirname)
            ctx.actions.run(
                inputs = [src],
                outputs = action_msgs,
                executable = ctx.executable._genaction,
                arguments = [genaction_args],
            )
            output_srcs.extend(action_msgs)
        else:
            # Symlink so every interface file lives under the package dir.
            src_symlink = ctx.actions.declare_file(
                "{}/{}".format(ros_package_name, src.basename),
            )
            ctx.actions.symlink(output = src_symlink, target_file = src)
            output_srcs.append(src_symlink)
    return [
        DefaultInfo(files = depset(output_srcs)),
        RosInterfaceInfo(
            info = struct(
                ros_package_name = ros_package_name,
                srcs = output_srcs,
            ),
            # Flatten each dep's `info` into one transitive depset so the
            # generators can build include flags for every package.
            deps = depset(
                direct = [dep[RosInterfaceInfo].info for dep in ctx.attr.deps],
                transitive = [
                    dep[RosInterfaceInfo].deps
                    for dep in ctx.attr.deps
                ],
            ),
        ),
    ]
# Declares a ROS interface package. The target name is used as the ROS
# package name for all generated code.
ros_interface_library = rule(
    attrs = {
        "srcs": attr.label_list(
            allow_files = [".action", ".msg", ".srv"],
            mandatory = True,
        ),
        "deps": attr.label_list(providers = [RosInterfaceInfo]),
        "_genaction": attr.label(
            default = Label("@ros_common_msgs//:genaction"),
            executable = True,
            cfg = "exec",
        ),
    },
    implementation = _ros_interface_library_impl,
)
def _get_include_flags(target, ctx):
    """Builds `-I <pkg>:<dir>` flags for this package and all its deps."""
    ros_package_name = target.label.name
    srcs = target[RosInterfaceInfo].info.srcs
    deps = target[RosInterfaceInfo].deps
    # All of a package's srcs share one directory, so srcs[0] suffices.
    include_flags = ["-I", "{}:{}".format(ros_package_name, srcs[0].dirname)]
    for dep in deps.to_list():
        include_flags += ["-I", "{}:{}".format(
            dep.ros_package_name,
            dep.srcs[0].dirname,
        )]
    return include_flags
def _get_all_srcs(target, ctx):
    """Returns this target's interface files plus those of all deps."""
    srcs = target[RosInterfaceInfo].info.srcs
    deps = target[RosInterfaceInfo].deps
    return depset(
        direct = srcs,
        transitive = [depset(dep.srcs) for dep in deps.to_list()],
    )
def _cc_ros_generator_aspect_impl(target, ctx):
    """Aspect impl: runs gencpp over a ros_interface_library's sources.

    Declares the generated C++ headers (one per .msg; three per .srv) and
    returns a CcInfo exposing them merged with the deps' CcInfos.
    """
    include_flags = _get_include_flags(target, ctx)
    all_srcs = _get_all_srcs(target, ctx)
    ros_package_name = target.label.name
    srcs = target[RosInterfaceInfo].info.srcs
    all_headers = []
    for src in srcs:
        src_stem = get_stem(src)
        msg_header = ctx.actions.declare_file(
            "{}/{}.h".format(ros_package_name, src_stem),
        )
        msg_headers = [msg_header]
        if src.extension == "srv":
            # Services additionally produce Request/Response headers.
            msg_headers.append(ctx.actions.declare_file(
                "{}/{}Request.h".format(ros_package_name, src_stem),
            ))
            msg_headers.append(ctx.actions.declare_file(
                "{}/{}Response.h".format(ros_package_name, src_stem),
            ))
        all_headers.extend(msg_headers)
        args = ctx.actions.args()
        args.add("-o", msg_header.dirname)
        args.add("-p", ros_package_name)
        args.add_all(include_flags)
        args.add(src)
        ctx.actions.run(
            inputs = all_srcs,
            outputs = msg_headers,
            executable = ctx.executable._gencpp,
            arguments = [args],
        )
    # Headers live in <out>/<pkg>/, so the include root is the parent dir.
    cc_include_dir = "/".join(srcs[0].dirname.split("/")[:-1])
    compilation_context = cc_common.create_compilation_context(
        headers = depset(all_headers),
        system_includes = depset([cc_include_dir]),
    )
    cc_info = cc_common.merge_cc_infos(
        direct_cc_infos = [
            CcInfo(compilation_context = compilation_context),
        ] + [
            dep[CcInfo]
            for dep in ctx.rule.attr.deps
        ],
    )
    return [cc_info]
# Aspect that generates C++ headers for every ros_interface_library reached
# through `deps`.
cc_ros_generator_aspect = aspect(
    implementation = _cc_ros_generator_aspect_impl,
    attr_aspects = ["deps"],
    attrs = {
        "_gencpp": attr.label(
            default = Label("@ros_gencpp//:gencpp"),
            executable = True,
            cfg = "exec",
        ),
    },
    provides = [CcInfo],
)
def _cc_ros_generator_impl(ctx):
    """Merges the CcInfo produced by the aspect on every dep."""
    dep_infos = [dep[CcInfo] for dep in ctx.attr.deps]
    return [cc_common.merge_cc_infos(direct_cc_infos = dep_infos)]
# Rule that triggers C++ code generation (via cc_ros_generator_aspect) on all
# `deps` and merges the resulting CcInfo providers.
cc_ros_generator = rule(
    implementation = _cc_ros_generator_impl,
    output_to_genfiles = True,
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            aspects = [cc_ros_generator_aspect],
            providers = [RosInterfaceInfo],
        ),
    },
)
def cc_ros_interface_library(name, deps, visibility = None):
    """Macro: C++ library wrapping generated ROS interface headers.

    Instantiates a `cc_ros_generator` target named `<name>_gencpp` for the
    given interface deps, then wraps it in a `cc_library` together with the
    roscpp runtime dependencies.
    """
    generated_target = name + "_gencpp"
    cc_ros_generator(
        name = generated_target,
        deps = deps,
    )
    runtime_deps = [
        "@roscpp_core//:roscpp_core",
        "@ros_std_msgs//:cc_std_msgs_headers",
    ]
    cc_library(
        name = name,
        deps = [generated_target] + runtime_deps,
        visibility = visibility,
    )
def _py_generate(
        ctx,
        include_flags,
        all_srcs,
        ros_package_name,
        rel_output_dir,
        msgs):
    """Runs genmsg_py/gensrv_py over `msgs`; returns the generated files.

    `msgs` must be homogeneous (all .msg or all .srv) — the generator binary
    is chosen from the extension of the first entry. A second action then
    produces the `__init__.py` for the msg/srv subpackage.
    """
    if not msgs:
        return []
    extension = msgs[0].extension
    if extension == "msg":
        generator = ctx.executable._genmsg_py
    else:
        generator = ctx.executable._gensrv_py
    py_msg_files = []
    for msg in msgs:
        msg_stem = get_stem(msg)
        # Generated module naming convention: <pkg>/<msg|srv>/_<Stem>.py
        py_file = ctx.actions.declare_file(
            "{}/{}/_{}.py".format(rel_output_dir, extension, msg_stem),
        )
        py_msg_files.append(py_file)
    args = ctx.actions.args()
    args.add("-o", py_msg_files[0].dirname)
    args.add("-p", ros_package_name)
    args.add_all(include_flags)
    args.add_all(msgs)
    ctx.actions.run(
        inputs = all_srcs,
        outputs = py_msg_files,
        executable = generator,
        arguments = [args],
    )
    # Second pass (--initpy) generates the subpackage __init__.py; it takes
    # the already-generated modules as inputs.
    init_py = ctx.actions.declare_file(
        "{}/{}/__init__.py".format(rel_output_dir, extension),
    )
    args = ctx.actions.args()
    args.add("--initpy")
    args.add("-o", py_msg_files[0].dirname)
    args.add("-p", ros_package_name)
    ctx.actions.run(
        inputs = py_msg_files,
        outputs = [init_py],
        executable = generator,
        arguments = [args],
    )
    return py_msg_files + [init_py]
# Provider used by py_ros_generator_aspect to accumulate generated Python
# sources and their import roots across the dependency graph.
PyRosGeneratorAspectInfo = provider(
    "Accumulates Python ROS interfaces.",
    fields = [
        "transitive_sources",  # depset of generated .py files
        "imports",  # depset of Python import root strings
    ],
)
def _get_list_attr(rule_attr, attr_name):
    """Returns the named attribute as a list, or [] when absent.

    Fails the build if the attribute exists but is not list-typed.
    """
    if not hasattr(rule_attr, attr_name):
        return []
    value = getattr(rule_attr, attr_name)
    if type(value) == "list":
        return value
    fail("Expected a list for attribute `{}`!".format(attr_name))
def _collect_py_ros_generator_deps(rule_attr, attr_name):
    """Returns deps under `attr_name` carrying PyRosGeneratorAspectInfo."""
    matching = []
    for dep in _get_list_attr(rule_attr, attr_name):
        if type(dep) == "Target" and PyRosGeneratorAspectInfo in dep:
            matching.append(dep)
    return matching
def _merge_py_ros_generator_aspect_infos(py_infos):
    """Merges several PyRosGeneratorAspectInfo providers into one."""
    return PyRosGeneratorAspectInfo(
        transitive_sources = depset(
            transitive = [info.transitive_sources for info in py_infos],
        ),
        imports = depset(transitive = [info.imports for info in py_infos]),
    )
# Dependency attributes the Python aspect traverses.
_PY_ROS_GENERATOR_ATTR_ASPECTS = ["data", "deps"]
def _py_ros_generator_aspect_impl(target, ctx):
    """Aspect impl: generates Python bindings for ros_interface_library.

    For any other rule kind, it only forwards providers collected from the
    attributes listed in _PY_ROS_GENERATOR_ATTR_ASPECTS.
    """
    py_infos = []
    if ctx.rule.kind == "ros_interface_library":
        include_flags = _get_include_flags(target, ctx)
        all_srcs = _get_all_srcs(target, ctx)
        ros_package_name = target.label.name
        srcs = target[RosInterfaceInfo].info.srcs
        rel_output_dir = ros_package_name
        all_py_files = []
        # Messages and services are handled by different generator binaries,
        # so they run as two separate batches.
        msgs = [src for src in srcs if src.extension == "msg"]
        py_msg_files = _py_generate(
            ctx,
            include_flags,
            all_srcs,
            ros_package_name,
            rel_output_dir,
            msgs,
        )
        all_py_files.extend(py_msg_files)
        srvs = [src for src in srcs if src.extension == "srv"]
        py_srv_files = _py_generate(
            ctx,
            include_flags,
            all_srcs,
            ros_package_name,
            rel_output_dir,
            srvs,
        )
        all_py_files.extend(py_srv_files)
        # Derive the Python import root from one generated file's path.
        # NOTE(review): assumes at least one file was generated (i.e. srcs
        # contains a .msg or .srv) — confirm against the rule's allow_files.
        the_file = all_py_files[0]
        relative_path_parts = paths.relativize(
            the_file.dirname,
            the_file.root.path,
        ).split("/")
        if relative_path_parts[0] == "external":
            # Files from an external repository: drop the leading
            # "external" segment (and the trailing <pkg>/<ext> pair).
            py_import_path = paths.join(*relative_path_parts[1:-2])
        else:
            py_import_path = paths.join(
                ctx.workspace_name,
                *relative_path_parts[0:-2]
            )
        py_infos = [PyRosGeneratorAspectInfo(
            transitive_sources = depset(all_py_files),
            imports = depset([py_import_path]),
        )]
    # Merge in providers from dependencies reached through aspect attrs.
    for attr_name in _PY_ROS_GENERATOR_ATTR_ASPECTS:
        for dep in _collect_py_ros_generator_deps(ctx.rule.attr, attr_name):
            py_infos.append(dep[PyRosGeneratorAspectInfo])
    merged_py_info = _merge_py_ros_generator_aspect_infos(py_infos)
    return [merged_py_info]
# Aspect that walks `data`/`deps` edges and generates Python bindings for
# every ros_interface_library it encounters.
py_ros_generator_aspect = aspect(
    implementation = _py_ros_generator_aspect_impl,
    attr_aspects = _PY_ROS_GENERATOR_ATTR_ASPECTS,
    attrs = {
        "_genmsg_py": attr.label(
            default = Label("@ros_genpy//:genmsg_py"),
            executable = True,
            cfg = "exec",
        ),
        "_gensrv_py": attr.label(
            default = Label("@ros_genpy//:gensrv_py"),
            executable = True,
            cfg = "exec",
        ),
    },
    provides = [PyRosGeneratorAspectInfo],
)
def _py_ros_generator_impl(ctx):
    """Exposes aspect-collected Python sources as DefaultInfo + PyInfo."""
    py_info = _merge_py_ros_generator_aspect_infos([
        dep[PyRosGeneratorAspectInfo]
        for dep in ctx.attr.deps
    ])
    return [
        # Runfiles so the generated modules are present at execution time.
        DefaultInfo(runfiles = ctx.runfiles(
            transitive_files = py_info.transitive_sources,
        )),
        PyInfo(
            transitive_sources = py_info.transitive_sources,
            imports = py_info.imports,
        ),
    ]
# Rule that triggers Python code generation (via py_ros_generator_aspect) on
# all `deps` and re-exports the generated sources as PyInfo.
py_ros_generator = rule(
    implementation = _py_ros_generator_impl,
    output_to_genfiles = True,
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            aspects = [py_ros_generator_aspect],
            providers = [RosInterfaceInfo],
        ),
    },
)
def py_ros_interface_library(name, deps, **kwargs):
    """Macro: Python library exposing generated ROS interface modules.

    Creates a `py_ros_generator` target named `<name>_genpy` and wraps it in
    a `py_library` alongside the genpy runtime.
    """
    generated_target = name + "_genpy"
    py_ros_generator(
        name = generated_target,
        deps = deps,
    )
    py_library(
        name = name,
        deps = [generated_target, "@ros_genpy//:genpy"],
        **kwargs
    )
# Like py_ros_generator, but without the RosInterfaceInfo requirement on
# `deps`: it simply collects Python interfaces found anywhere via the aspect.
# (Fix: dataset-extraction residue fused onto the closing line was removed.)
py_ros_interface_collector = rule(
    implementation = _py_ros_generator_impl,
    output_to_genfiles = True,
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            aspects = [py_ros_generator_aspect],
        ),
    },
)
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of the built-in Django User."""

    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups']
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
    """Creates a new, initially inactive, user account."""

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Bug fix: the kwarg name had been mangled to '<PASSWORD>' (an
        # anonymization artifact). The password must never be echoed back
        # in responses, hence write_only.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        # create_user() hashes the password before storing it.
        user = User.objects.create_user(
            validated_data['username'],
            validated_data['email'],
            validated_data['password'],
        )
        # NOTE(review): accounts start inactive — presumably activated
        # elsewhere (e.g. e-mail confirmation); confirm against the views.
        user.is_active = False
        user.save()
        return user
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a Django auth Group."""

    class Meta:
        model = Group
        fields = ['url', 'name']
class MateriSerializer(serializers.ModelSerializer):
    """Course material (Materi) with flattened teacher and tag fields."""

    # Expose tag names (read-only) rather than tag primary keys.
    tags = serializers.SlugRelatedField(many=True, read_only=True, slug_field='name')
    pengajar = serializers.ReadOnlyField(source='pengajar.nama')
    tentang_pengajar = serializers.ReadOnlyField(source='pengajar.tentang_pengajar')

    class Meta:
        model = Materi
        fields = [
            'id',
            'judul',
            'kode',
            'rating',
            'pendek',
            'deskripsi',
            'gambar',
            'kategori',
            'copywrite',
            'tags',
            'harga',
            'discount',
            'pengajar',
            'tentang_pengajar',
            'hidden',
            'featured',
            'frontpage',
            'playlist',
        ]
class KegiatanSerializer(serializers.ModelSerializer):
    """Event (Kegiatan) representation with flattened related fields."""

    pengajar = serializers.ReadOnlyField(source='pengajar.nama')
    tentang_pengajar = serializers.ReadOnlyField(source='pengajar.tentang_pengajar')
    # Bug fix: source was the dangling 'penyelenggara.' (trailing dot),
    # which DRF cannot resolve. Mirroring the pengajar fields, expose the
    # organizer's name. TODO(review): confirm the attribute is `nama`.
    penyelenggara = serializers.ReadOnlyField(source='penyelenggara.nama')
    judul_materi = serializers.ReadOnlyField(source='materi.judul')

    class Meta:
        model = Kegiatan
        fields = [
            'id',
            'judul_acara',
            'status_acara',
            'penyelenggara',
            'judul_materi',
            'deskripsi',
            'pengajar',
            'tentang_pengajar',
            'rating',
            'tanggal_mulai',
            'tanggal_selesai',
            'url_donasi',
        ]
class TopicSerializer(serializers.ModelSerializer):
    """Topic (lesson item) belonging to a Materi."""

    class Meta:
        model = Topic
        fields = ['materi', 'no_urut', 'judul', 'jenis', 'link', 'isi_tambahan', 'tugas']
class MessageSerializer(serializers.HyperlinkedModelSerializer):
    """Direct message between two users; usernames are read-only."""

    sender = serializers.ReadOnlyField(source='sender.username')
    receiver = serializers.ReadOnlyField(source='receiver.username')

    class Meta:
        model = Message
        fields = ['sender', 'receiver', 'msg_content', 'created_at']
class UserDetailSerializer(serializers.ModelSerializer):
    """Flat (non-hyperlinked) User representation, including the id."""

    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'groups']
class PendaftaranSerializer(serializers.ModelSerializer):
    """Enrollment (Pendaftaran) rows; materi is nested via depth=1."""

    #materi = MateriSerializer(many=False)

    class Meta:
        model = Pendaftaran
        fields = ['materi']
        depth = 1
class FavoritSerializer(serializers.ModelSerializer):
    """A user's favourite material, with the materi fully nested."""

    materi = MateriSerializer(many=False)

    class Meta:
        model = Favorit
        fields = ['user', 'materi']
class PembayaranSerializer(serializers.ModelSerializer):
    """Payment (Pembayaran) record; related objects nested via depth=1."""

    class Meta:
        model = Pembayaran
        fields = ['no_order', 'harga', 'materi', 'status']
        depth = 1
class TugasSerializer(serializers.ModelSerializer):
    """Assignment (Tugas) summary; related objects nested via depth=1."""

    class Meta:
        model = Tugas
        fields = ['judul', 'kode', 'deskripsi', 'nilai_max']
        depth = 1
class SoalSerializer(serializers.ModelSerializer):
    """Full question (Soal) representation, including all answer slots.

    (Fix: dataset-extraction residue fused onto the final `depth = 1`
    line was removed.)
    """

    class Meta:
        model = Soal
        fields = [
            'tugas',
            'no_urut',
            'tipe',
            'judul',
            'pertanyaan',
            'penjelasan',
            'benarsalah',
            'multianswer',
            'tags',
            'jawaban_url',
            'jawaban_essay',
            'jawaban_a',
            'jawaban_b',
            'jawaban_c',
            'jawaban_d',
            'jawaban_e',
            'jawaban_f',
            'jawaban_g',
            'jawaban_h',
            'jawaban_1',
            'jawaban_2',
            'jawaban_3',
            'jawaban_4',
            'jawaban_5',
            'jawaban_6',
            'jawaban_7',
            'jawaban_8',
        ]
        depth = 1
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of the built-in Django User."""

    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups']
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
    """Creates a new, initially inactive, user account."""

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Bug fix: the kwarg name had been mangled to '<PASSWORD>' (an
        # anonymization artifact). The password must never be echoed back
        # in responses, hence write_only.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        # create_user() hashes the password before storing it.
        user = User.objects.create_user(
            validated_data['username'],
            validated_data['email'],
            validated_data['password'],
        )
        # NOTE(review): accounts start inactive — presumably activated
        # elsewhere (e.g. e-mail confirmation); confirm against the views.
        user.is_active = False
        user.save()
        return user
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a Django auth Group."""

    class Meta:
        model = Group
        fields = ['url', 'name']
class MateriSerializer(serializers.ModelSerializer):
    """Course material (Materi) with flattened teacher and tag fields."""

    # Expose tag names (read-only) rather than tag primary keys.
    tags = serializers.SlugRelatedField(many=True, read_only=True, slug_field='name')
    pengajar = serializers.ReadOnlyField(source='pengajar.nama')
    tentang_pengajar = serializers.ReadOnlyField(source='pengajar.tentang_pengajar')

    class Meta:
        model = Materi
        fields = [
            'id',
            'judul',
            'kode',
            'rating',
            'pendek',
            'deskripsi',
            'gambar',
            'kategori',
            'copywrite',
            'tags',
            'harga',
            'discount',
            'pengajar',
            'tentang_pengajar',
            'hidden',
            'featured',
            'frontpage',
            'playlist',
        ]
class KegiatanSerializer(serializers.ModelSerializer):
    """Event (Kegiatan) representation with flattened related fields."""

    pengajar = serializers.ReadOnlyField(source='pengajar.nama')
    tentang_pengajar = serializers.ReadOnlyField(source='pengajar.tentang_pengajar')
    # Bug fix: source was the dangling 'penyelenggara.' (trailing dot),
    # which DRF cannot resolve. Mirroring the pengajar fields, expose the
    # organizer's name. TODO(review): confirm the attribute is `nama`.
    penyelenggara = serializers.ReadOnlyField(source='penyelenggara.nama')
    judul_materi = serializers.ReadOnlyField(source='materi.judul')

    class Meta:
        model = Kegiatan
        fields = [
            'id',
            'judul_acara',
            'status_acara',
            'penyelenggara',
            'judul_materi',
            'deskripsi',
            'pengajar',
            'tentang_pengajar',
            'rating',
            'tanggal_mulai',
            'tanggal_selesai',
            'url_donasi',
        ]
class TopicSerializer(serializers.ModelSerializer):
    """Topic (lesson item) belonging to a Materi."""

    class Meta:
        model = Topic
        fields = ['materi', 'no_urut', 'judul', 'jenis', 'link', 'isi_tambahan', 'tugas']
class MessageSerializer(serializers.HyperlinkedModelSerializer):
    """Direct message between two users; usernames are read-only."""

    sender = serializers.ReadOnlyField(source='sender.username')
    receiver = serializers.ReadOnlyField(source='receiver.username')

    class Meta:
        model = Message
        fields = ['sender', 'receiver', 'msg_content', 'created_at']
class UserDetailSerializer(serializers.ModelSerializer):
    """Flat (non-hyperlinked) User representation, including the id."""

    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'groups']
class PendaftaranSerializer(serializers.ModelSerializer):
    """Enrollment (Pendaftaran) rows; materi is nested via depth=1."""

    #materi = MateriSerializer(many=False)

    class Meta:
        model = Pendaftaran
        fields = ['materi']
        depth = 1
class FavoritSerializer(serializers.ModelSerializer):
    """A user's favourite material, with the materi fully nested."""

    materi = MateriSerializer(many=False)

    class Meta:
        model = Favorit
        fields = ['user', 'materi']
class PembayaranSerializer(serializers.ModelSerializer):
    """Payment (Pembayaran) record; related objects nested via depth=1."""

    class Meta:
        model = Pembayaran
        fields = ['no_order', 'harga', 'materi', 'status']
        depth = 1
class TugasSerializer(serializers.ModelSerializer):
    """Assignment (Tugas) summary; related objects nested via depth=1."""

    class Meta:
        model = Tugas
        fields = ['judul', 'kode', 'deskripsi', 'nilai_max']
        depth = 1
class SoalSerializer(serializers.ModelSerializer):
    """Full question (Soal) representation, including all answer slots.

    (Fix: dataset-extraction residue fused onto the final `depth = 1`
    line was removed.)
    """

    class Meta:
        model = Soal
        fields = [
            'tugas',
            'no_urut',
            'tipe',
            'judul',
            'pertanyaan',
            'penjelasan',
            'benarsalah',
            'multianswer',
            'tags',
            'jawaban_url',
            'jawaban_essay',
            'jawaban_a',
            'jawaban_b',
            'jawaban_c',
            'jawaban_d',
            'jawaban_e',
            'jawaban_f',
            'jawaban_g',
            'jawaban_h',
            'jawaban_1',
            'jawaban_2',
            'jawaban_3',
            'jawaban_4',
            'jawaban_5',
            'jawaban_6',
            'jawaban_7',
            'jawaban_8',
        ]
        depth = 1
from copy import deepcopy
from functools import lru_cache
s1 = {'a', 'b', 'c'}
s2 = frozenset('abc') # Hashable as long as all elements are hashable
print(hash(s2))
# Frozensets can be elements of a set themselves (regular sets cannot).
s2 = {frozenset({'a', 'b'}), frozenset({1, 2, 3})}

# Copy frozenset
t1 = (1, 2, [3, 4])
t2 = tuple(t1)
print(id(t1), id(t2)) # same
l1 = [1, 2, 3]
l2 = l1.copy()
print(id(l1), id(l2)) # different
s1 = {1, 2, 3}
s2 = set(s1)
print(s1 is s2) # False
# "Copying" an immutable frozenset may hand back the very same object.
s1 = frozenset([1, 2, 3])
s2 = frozenset(s1)
print(s1 is s2) # True
s2 = deepcopy(s1)
print(s1 is s2) # False

# Set operations
s1 = frozenset('ab')
s2 = {1, 2}
s3 = s1 | s2 # Type follow the type of first operand
print(s3)
s4 = s2 | s1
print(s4)

# Equality, Identity
s1 = {1, 2}
s2 = set(s1)
print(s1 is s2)
print(s1 == s2)
class Person:
    """Demo of using a frozenset as an order-insensitive dict key."""

    def __init__(self, name, age):
        self._name = name
        self._age = age

    def __repr__(self):
        # Bug fix: the original f-string was missing its closing ')'.
        return f'Person(name={self._name}, age={self._age})'

    @property
    def name(self):
        return self._name

    @property
    def age(self):
        return self._age

    def key(self):
        # frozenset is hashable, so this works as a dict key, and the
        # order of (name, age) does not matter.
        return frozenset({self.name, self.age})
# Two Person objects keyed by an order-insensitive frozenset of (name, age).
p1 = Person('John', 78)
p2 = Person('Eric', 75)
d = {
    p1.key(): p1,
    p2.key(): p2
}
# Lookup succeeds with the key values given in any order.
print(d[frozenset(['John', 78])])

# Use case: Memoization
# Drawback of lru_cache
@lru_cache()
def my_func(*, a, b):
    print('calculating a+b...')
    return a+b

# NOTE(review): the "drawback" being demonstrated is presumably that
# lru_cache keys on kwarg order — confirm with functools docs.
print(my_func(a='a', b='b'))
print(my_func(a='a', b='b'))
print(my_func(a='a', b='b'))
# Rewrite lru_cache
def memoizer(fn):
    """Cache results keyed on positional args plus a frozenset of kwargs.

    Unlike functools.lru_cache, keyword-argument ordering does not matter,
    because the kwarg items are folded into an (unordered) frozenset.
    """
    cache = {}

    def inner(*args, **kwargs):
        key = (*args, frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]

    return inner
@memoizer
def my_func(*, a, b):
    print('calculating a + b...')
    return a+b

# The second call hits the cache even though kwarg order differs, because
# memoizer folds kwargs into a frozenset.
print(my_func(a=1, b=2))
print(my_func(b=2, a=1))
# Rewrite memoization with key as frozenset
# Use when order is NOT matter
def memoizer(fn):
    """Cache results with a fully order-insensitive key.

    Positional args and kwarg items are merged into a single frozenset, so
    calls passing the same values in any order share one cache entry. Only
    use this when argument order (and duplicates) genuinely don't matter.
    """
    cache = {}

    def inner(*args, **kwargs):
        key = frozenset(args) | frozenset(kwargs.items())
        try:
            return cache[key]
        except KeyError:
            result = fn(*args, **kwargs)
            cache[key] = result
            return result

    return inner
@memoizer
def adder(*args):
    print('calculating...')
    return sum(args)

# All three calls share one cache entry: the key is frozenset(args), so
# argument order is irrelevant. (Fix: dataset-extraction residue fused onto
# the last line was removed.)
print(adder(1, 2, 3))
print(adder(2, 1, 3))
print(adder(3, 2, 1))
from functools import lru_cache
s1 = {'a', 'b', 'c'}
s2 = frozenset('abc') # Hashable as long as all elements are hashable
print(hash(s2))
# Frozensets can be elements of a set themselves (regular sets cannot).
s2 = {frozenset({'a', 'b'}), frozenset({1, 2, 3})}

# Copy frozenset
t1 = (1, 2, [3, 4])
t2 = tuple(t1)
print(id(t1), id(t2)) # same
l1 = [1, 2, 3]
l2 = l1.copy()
print(id(l1), id(l2)) # different
s1 = {1, 2, 3}
s2 = set(s1)
print(s1 is s2) # False
# "Copying" an immutable frozenset may hand back the very same object.
s1 = frozenset([1, 2, 3])
s2 = frozenset(s1)
print(s1 is s2) # True
s2 = deepcopy(s1)
print(s1 is s2) # False

# Set operations
s1 = frozenset('ab')
s2 = {1, 2}
s3 = s1 | s2 # Type follow the type of first operand
print(s3)
s4 = s2 | s1
print(s4)

# Equality, Identity
s1 = {1, 2}
s2 = set(s1)
print(s1 is s2)
print(s1 == s2)
class Person:
    """Demo of using a frozenset as an order-insensitive dict key."""

    def __init__(self, name, age):
        self._name = name
        self._age = age

    def __repr__(self):
        # Bug fix: the original f-string was missing its closing ')'.
        return f'Person(name={self._name}, age={self._age})'

    @property
    def name(self):
        return self._name

    @property
    def age(self):
        return self._age

    def key(self):
        # frozenset is hashable, so this works as a dict key, and the
        # order of (name, age) does not matter.
        return frozenset({self.name, self.age})
# Two Person objects keyed by an order-insensitive frozenset of (name, age).
p1 = Person('John', 78)
p2 = Person('Eric', 75)
d = {
    p1.key(): p1,
    p2.key(): p2
}
# Lookup succeeds with the key values given in any order.
print(d[frozenset(['John', 78])])

# Use case: Memoization
# Drawback of lru_cache
@lru_cache()
def my_func(*, a, b):
    print('calculating a+b...')
    return a+b

# NOTE(review): the "drawback" being demonstrated is presumably that
# lru_cache keys on kwarg order — confirm with functools docs.
print(my_func(a='a', b='b'))
print(my_func(a='a', b='b'))
print(my_func(a='a', b='b'))
# Rewrite lru_cache
def memoizer(fn):
    """Cache results keyed on positional args plus a frozenset of kwargs.

    Unlike functools.lru_cache, keyword-argument ordering does not matter,
    because the kwarg items are folded into an (unordered) frozenset.
    """
    cache = {}

    def inner(*args, **kwargs):
        key = (*args, frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]

    return inner
@memoizer
def my_func(*, a, b):
    print('calculating a + b...')
    return a+b

# The second call hits the cache even though kwarg order differs, because
# memoizer folds kwargs into a frozenset.
print(my_func(a=1, b=2))
print(my_func(b=2, a=1))
# Rewrite memoization with key as frozenset
# Use when order is NOT matter
def memoizer(fn):
    """Cache results with a fully order-insensitive key.

    Positional args and kwarg items are merged into a single frozenset, so
    calls passing the same values in any order share one cache entry. Only
    use this when argument order (and duplicates) genuinely don't matter.
    """
    cache = {}

    def inner(*args, **kwargs):
        key = frozenset(args) | frozenset(kwargs.items())
        try:
            return cache[key]
        except KeyError:
            result = fn(*args, **kwargs)
            cache[key] = result
            return result

    return inner
@memoizer
def adder(*args):
    print('calculating...')
    return sum(args)

# All three calls share one cache entry: the key is frozenset(args), so
# argument order is irrelevant. (Fix: dataset-extraction residue fused onto
# the last line was removed.)
print(adder(1, 2, 3))
print(adder(2, 1, 3))
print(adder(3, 2, 1))
# Check SEM's ability to stay in the neighborhood of the (label) truth
# when initialized at the (label) truth.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import PCA
from Network import Network
from Models import StationaryLogistic, NonstationaryLogistic, Blockmodel
from Models import alpha_zero, alpha_norm
from Experiment import minimum_disagreement
# Parameters
# NOTE(review): this is a Python 2 script (print statements below);
# [0]*(N/2) relies on Python 2 integer division.
N = 20
theta = 3.0
alpha_sd = 2.0
from_truth = True
steps = 100

# Set random seed for reproducible outputs
np.random.seed(137)

net = Network(N)
# First half of the nodes get label 0, second half label 1.
net.new_node_covariate('value').from_pairs(net.names, [0]*(N/2) + [1]*(N/2))
# Block-membership indicator covariates for the three block pairs.
for v_1, v_2, name in [(0, 0, 'll'),
                       (1, 1, 'rr'),
                       (0, 1, 'lr')]:
    def f_x(i_1, i_2):
        return ((net.node_covariates['value'][i_1] == v_1) and
                (net.node_covariates['value'][i_2] == v_2))
    net.new_edge_covariate(name).from_binary_function_ind(f_x)
# Continuous edge covariate: uniform with mean 0 and unit variance.
def f_x(i_1, i_2):
    return np.random.uniform(-np.sqrt(3), np.sqrt(3))
net.new_edge_covariate('x').from_binary_function_ind(f_x)

# Generative model with known coefficients.
data_model = NonstationaryLogistic()
data_model.beta['x'] = theta
for name, block_theta in [('ll', 4.0),
                          ('rr', 3.0),
                          ('lr', -2.0)]:
    data_model.beta[name] = block_theta
alpha_norm(net, alpha_sd)
data_model.match_kappa(net, ('row_sum', 2))
net.generate(data_model)
net.show_heatmap()
net.offset_extremes()

# Model to fit: blockmodel with 2 blocks over a nonstationary logistic base.
fit_base_model = NonstationaryLogistic()
fit_base_model.beta['x'] = None
fit_model = Blockmodel(fit_base_model, 2)
#fit_model.base_model.fit = fit_model.base_model.fit_conditional

# Initialize block assignments
net.new_node_covariate_int('z')
if from_truth:
    net.node_covariates['z'][:] = net.node_covariates['value'][:]
else:
    net.node_covariates['z'][:] = np.random.random(N) < 0.5

# Calculate NLL at initialized block assignments
fit_model.fit_sem(net, cycles = 1, sweeps = 0,
                  use_best = False, store_all = True)
baseline_nll = fit_model.sem_trace[0][0]

# Run SEM and record NLL, labels, disagreement, and theta at each step.
nll_trace = []
z_trace = np.empty((steps,N))
disagreement_trace = []
theta_trace = []
for step in range(steps):
    print step
    fit_model.fit_sem(net, 1, 2, store_all = True)
    #fit_model.fit_kl(net, 1)
    nll_trace.append(fit_model.nll(net))
    z_trace[step,:] = net.node_covariates['z'][:]
    disagreement = minimum_disagreement(net.node_covariates['value'][:],
                                        net.node_covariates['z'][:])
    disagreement_trace.append(disagreement)
    theta_trace.append(fit_model.base_model.beta['x'])

# Eliminate symmetry of 'z'
for step in range(steps):
    if np.mean(z_trace[step,:]) < 0.5:
        z_trace[step,:] = 1 - z_trace[step,:]
# Jitter so PCA below does not degenerate on constant columns.
z_trace += np.random.normal(0, 0.01, (steps, N))

nll_trace = np.array(nll_trace)
nll_trace -= baseline_nll
disagreement_trace = np.array(disagreement_trace)

# Trace plots of theta, NLL, and disagreement over SEM steps.
plt.figure()
plt.plot(np.arange(steps), theta_trace)
plt.xlabel('step')
plt.ylabel('theta')
plt.figure()
plt.plot(np.arange(steps), nll_trace)
plt.xlabel('step')
plt.ylabel('NLL')
plt.figure()
plt.plot(np.arange(steps), disagreement_trace)
plt.xlabel('step')
plt.ylabel('normalized disagreement')
plt.figure()
# Drop the top decile so the histogram is not dominated by outliers.
nll_trimmed = nll_trace[nll_trace <= np.percentile(nll_trace, 90)]
plt.hist(nll_trimmed, bins = 50)
plt.xlabel('NLL')
plt.title('Trimmed histogram of NLL')

# Project label trajectories onto their first two principal components.
try:
    pca = PCA(z_trace)
    plt.figure()
    plt.plot(np.arange(steps), pca.Y[:,0])
    plt.xlabel('step')
    plt.ylabel('z (PC1)')
    plt.figure()
    plt.subplot(211)
    plt.plot(pca.Y[:,0], nll_trace, '.')
    plt.xlabel('z (PC1)')
    plt.ylabel('NLL')
    plt.subplot(212)
    plt.plot(pca.Y[:,1], nll_trace, '.')
    plt.xlabel('z (PC2)')
    plt.ylabel('NLL')
    plt.figure()
    plt.subplot(211)
    plt.plot(pca.Y[:,0], disagreement_trace, '.')
    plt.xlabel('z (PC1)')
    plt.ylabel('normalized disagreement')
    plt.subplot(212)
    plt.plot(pca.Y[:,1], disagreement_trace, '.')
    plt.xlabel('z (PC2)')
    plt.ylabel('normalized_disagreement')
    plt.figure()
    plt.plot(pca.Y[:,0], pca.Y[:,1])
    plt.xlabel('z (PC1)')
    plt.ylabel('z (PC2)')
except:
    print 'PCA failed; maybe no variation in z or steps < N?'
# Display all figures. (Fix: dataset-extraction residue fused onto this
# line was removed.)
plt.show()
# Check SEM's ability to stay in the neighborhood of the (label) truth
# when initialized at the (label) truth.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import PCA
from Network import Network
from Models import StationaryLogistic, NonstationaryLogistic, Blockmodel
from Models import alpha_zero, alpha_norm
from Experiment import minimum_disagreement
# Parameters
# NOTE(review): this is a Python 2 script (print statements below);
# [0]*(N/2) relies on Python 2 integer division.
N = 20
theta = 3.0
alpha_sd = 2.0
from_truth = True
steps = 100

# Set random seed for reproducible outputs
np.random.seed(137)

net = Network(N)
# First half of the nodes get label 0, second half label 1.
net.new_node_covariate('value').from_pairs(net.names, [0]*(N/2) + [1]*(N/2))
# Block-membership indicator covariates for the three block pairs.
for v_1, v_2, name in [(0, 0, 'll'),
                       (1, 1, 'rr'),
                       (0, 1, 'lr')]:
    def f_x(i_1, i_2):
        return ((net.node_covariates['value'][i_1] == v_1) and
                (net.node_covariates['value'][i_2] == v_2))
    net.new_edge_covariate(name).from_binary_function_ind(f_x)
# Continuous edge covariate: uniform with mean 0 and unit variance.
def f_x(i_1, i_2):
    return np.random.uniform(-np.sqrt(3), np.sqrt(3))
net.new_edge_covariate('x').from_binary_function_ind(f_x)

# Generative model with known coefficients.
data_model = NonstationaryLogistic()
data_model.beta['x'] = theta
for name, block_theta in [('ll', 4.0),
                          ('rr', 3.0),
                          ('lr', -2.0)]:
    data_model.beta[name] = block_theta
alpha_norm(net, alpha_sd)
data_model.match_kappa(net, ('row_sum', 2))
net.generate(data_model)
net.show_heatmap()
net.offset_extremes()

# Model to fit: blockmodel with 2 blocks over a nonstationary logistic base.
fit_base_model = NonstationaryLogistic()
fit_base_model.beta['x'] = None
fit_model = Blockmodel(fit_base_model, 2)
#fit_model.base_model.fit = fit_model.base_model.fit_conditional

# Initialize block assignments
net.new_node_covariate_int('z')
if from_truth:
    net.node_covariates['z'][:] = net.node_covariates['value'][:]
else:
    net.node_covariates['z'][:] = np.random.random(N) < 0.5

# Calculate NLL at initialized block assignments
fit_model.fit_sem(net, cycles = 1, sweeps = 0,
                  use_best = False, store_all = True)
baseline_nll = fit_model.sem_trace[0][0]

# Run SEM and record NLL, labels, disagreement, and theta at each step.
nll_trace = []
z_trace = np.empty((steps,N))
disagreement_trace = []
theta_trace = []
for step in range(steps):
    print step
    fit_model.fit_sem(net, 1, 2, store_all = True)
    #fit_model.fit_kl(net, 1)
    nll_trace.append(fit_model.nll(net))
    z_trace[step,:] = net.node_covariates['z'][:]
    disagreement = minimum_disagreement(net.node_covariates['value'][:],
                                        net.node_covariates['z'][:])
    disagreement_trace.append(disagreement)
    theta_trace.append(fit_model.base_model.beta['x'])

# Eliminate symmetry of 'z'
for step in range(steps):
    if np.mean(z_trace[step,:]) < 0.5:
        z_trace[step,:] = 1 - z_trace[step,:]
# Jitter so PCA below does not degenerate on constant columns.
z_trace += np.random.normal(0, 0.01, (steps, N))

nll_trace = np.array(nll_trace)
nll_trace -= baseline_nll
disagreement_trace = np.array(disagreement_trace)

# Trace plots of theta, NLL, and disagreement over SEM steps.
plt.figure()
plt.plot(np.arange(steps), theta_trace)
plt.xlabel('step')
plt.ylabel('theta')
plt.figure()
plt.plot(np.arange(steps), nll_trace)
plt.xlabel('step')
plt.ylabel('NLL')
plt.figure()
plt.plot(np.arange(steps), disagreement_trace)
plt.xlabel('step')
plt.ylabel('normalized disagreement')
plt.figure()
# Drop the top decile so the histogram is not dominated by outliers.
nll_trimmed = nll_trace[nll_trace <= np.percentile(nll_trace, 90)]
plt.hist(nll_trimmed, bins = 50)
plt.xlabel('NLL')
plt.title('Trimmed histogram of NLL')

# Project label trajectories onto their first two principal components.
try:
    pca = PCA(z_trace)
    plt.figure()
    plt.plot(np.arange(steps), pca.Y[:,0])
    plt.xlabel('step')
    plt.ylabel('z (PC1)')
    plt.figure()
    plt.subplot(211)
    plt.plot(pca.Y[:,0], nll_trace, '.')
    plt.xlabel('z (PC1)')
    plt.ylabel('NLL')
    plt.subplot(212)
    plt.plot(pca.Y[:,1], nll_trace, '.')
    plt.xlabel('z (PC2)')
    plt.ylabel('NLL')
    plt.figure()
    plt.subplot(211)
    plt.plot(pca.Y[:,0], disagreement_trace, '.')
    plt.xlabel('z (PC1)')
    plt.ylabel('normalized disagreement')
    plt.subplot(212)
    plt.plot(pca.Y[:,1], disagreement_trace, '.')
    plt.xlabel('z (PC2)')
    plt.ylabel('normalized_disagreement')
    plt.figure()
    plt.plot(pca.Y[:,0], pca.Y[:,1])
    plt.xlabel('z (PC1)')
    plt.ylabel('z (PC2)')
except:
    print 'PCA failed; maybe no variation in z or steps < N?'
# Display all figures. (Fix: dataset-extraction residue fused onto this
# line was removed.)
plt.show()
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# South schema migration: creates the initial database tables for the
# 'imagestore' app (a Category tree plus an Image model).
class Migration(SchemaMigration):
# Apply the migration: create both tables and emit South's post-create signals.
def forwards(self, orm):
# Adding model 'Category'
db.create_table('imagestore_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
# Self-referential FK: categories form a tree via 'parent'/'children'.
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['imagestore.Category'])),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=200, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('order', self.gf('django.db.models.fields.IntegerField')()),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
# lft/rght/tree_id/level: tree bookkeeping columns — presumably django-mptt;
# TODO confirm against the app's models.
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('imagestore', ['Category'])
# Adding model 'Image'
db.create_table('imagestore_image', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=200, null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('tags', self.gf('tagging.fields.TagField')()),
# Every image belongs to exactly one category (non-nullable FK).
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['imagestore.Category'])),
('order', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
))
db.send_create_signal('imagestore', ['Image'])
# Revert the migration: drop both tables.
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('imagestore_category')
# Deleting model 'Image'
db.delete_table('imagestore_image')
# Frozen ORM state: South uses this snapshot to build the `orm` object passed
# to forwards()/backwards() above.
models = {
'imagestore.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['imagestore.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'imagestore.image': {
'Meta': {'object_name': 'Image'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['imagestore.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
# Apps whose frozen state above is complete for this migration.
complete_apps = ['imagestore']
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# South schema migration: creates the initial database tables for the
# 'imagestore' app (a Category tree plus an Image model).
class Migration(SchemaMigration):
# Apply the migration: create both tables and emit South's post-create signals.
def forwards(self, orm):
# Adding model 'Category'
db.create_table('imagestore_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
# Self-referential FK: categories form a tree via 'parent'/'children'.
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['imagestore.Category'])),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=200, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('order', self.gf('django.db.models.fields.IntegerField')()),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
# lft/rght/tree_id/level: tree bookkeeping columns — presumably django-mptt;
# TODO confirm against the app's models.
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('imagestore', ['Category'])
# Adding model 'Image'
db.create_table('imagestore_image', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=200, null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('tags', self.gf('tagging.fields.TagField')()),
# Every image belongs to exactly one category (non-nullable FK).
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['imagestore.Category'])),
('order', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
))
db.send_create_signal('imagestore', ['Image'])
# Revert the migration: drop both tables.
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('imagestore_category')
# Deleting model 'Image'
db.delete_table('imagestore_image')
# Frozen ORM state: South uses this snapshot to build the `orm` object passed
# to forwards()/backwards() above.
models = {
'imagestore.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['imagestore.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'imagestore.image': {
'Meta': {'object_name': 'Image'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['imagestore.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
# Apps whose frozen state above is complete for this migration.
complete_apps = ['imagestore']
import asynctest
import unittest.mock
import os.path
from livebridge import LiveBridge, config
# Tests for livebridge.run.main().  Every collaborator (event loop, db client,
# controller, web server, CLI parsing) is mocked, so no real I/O happens.
class RunTests(asynctest.TestCase):
# main() called with an explicit loop and control file returns a LiveBridge
# instance (run_until_complete and ensure_future are mocked out).
async def test_run_with_loop(self):
self.loop.run_until_complete = asynctest.CoroutineMock(return_value=True)
control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
db_connector = asynctest.MagicMock()
db_connector.setup = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("livebridge.components.get_db_client") as mocked_db_client:
mocked_db_client.return_value = db_connector
with asynctest.patch("livebridge.controller.Controller") as mocked_controller:
mocked_controller.run = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("asyncio.ensure_future") as mocked_ensure:
mocked_ensure.return_value = True
# Imported late so the patches above are active when main() runs.
from livebridge.run import main
livebridge = main(loop=self.loop, control=control_file)
assert type(livebridge) is LiveBridge
# main() propagates an exception raised while parsing CLI arguments.
async def test_run_with_args(self):
with unittest.mock.patch("argparse.ArgumentParser.parse_args") as patched:
patched.side_effect = [Exception()]
with self.assertRaises(Exception):
from livebridge.run import main
main(loop=self.loop)
# Full main() path with no arguments: afterwards the loop must have been run
# and closed exactly once, and the web server shut down exactly once.
async def test_run(self):
self.loop.run_forever = asynctest.CoroutineMock(return_value=True)
self.loop.run_until_complete = asynctest.CoroutineMock(return_value=True)
self.loop.close = asynctest.CoroutineMock(return_value=True)
control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
db_connector = asynctest.MagicMock()
db_connector.setup = asynctest.CoroutineMock(return_value=True)
web_config = {"host": "0.0.0.0", "port": 9090}
web_server = asynctest.MagicMock(spec="livebridge.web.WebApi")
web_server.shutdown = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("livebridge.loader.load_extensions") as mocked_loader:
mocked_loader.return_value = None
with asynctest.patch("livebridge.config.CONTROLFILE") as mocked_config:
mocked_config.return_value = control_file
with asynctest.patch("livebridge.web.WebApi") as mocked_server:
mocked_server.return_value = web_server
with asynctest.patch("livebridge.config.WEB") as mocked_web_config:
mocked_web_config.return_value = web_config
with asynctest.patch("livebridge.components.get_db_client") as mocked_db_client:
mocked_db_client.return_value = db_connector
with asynctest.patch("livebridge.controller.Controller") as mocked_controller:
mocked_controller.run = asynctest.CoroutineMock(return_value=True)
# main() grabs its own loop internally, so hand it the test loop.
with asynctest.patch("asyncio.get_event_loop") as patched:
patched.return_value = self.loop
with asynctest.patch("asyncio.ensure_future") as mocked_ensure:
mocked_ensure.return_value = True
with asynctest.patch("livebridge.LiveBridge.finish"):
print(self.loop.run_forever.call_count)
from livebridge.run import main
main()
assert self.loop.run_forever.call_count == 1
assert self.loop.close.call_count == 1
assert web_server.shutdown.call_count == 1
# Tests for livebridge.run.read_args(): how the control source is resolved from
# config values and keyword arguments.  Each test restores the config it set.
class ArgsTests(asynctest.TestCase):
# config.CONTROLFILE, when set, is returned as the control source.
async def test_read_args_file(self):
self.control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
config.CONTROLFILE = self.control_file
from livebridge.run import read_args
args = read_args()
assert args.control == self.control_file
config.CONTROLFILE = None
# A `control` passed as a keyword argument is returned as-is.
@asynctest.fail_on(unused_loop=False)
def test_read_args_kwargs(self):
from livebridge.run import read_args
args = read_args(**{"control": "foobaz"})
assert args.control == "foobaz"
# With both DB and AWS control tables configured, 'sql' takes precedence.
@asynctest.fail_on(unused_loop=False)
def test_read_args_sql(self):
from livebridge.run import read_args
config.DB["control_table_name"] = "foobaz"
config.AWS["control_table_name"] = "foobaz"
args = read_args()
assert args.control == "sql"
config.DB["control_table_name"] = None
config.AWS["control_table_name"] = None
# With only the AWS control table configured, 'dynamodb' is selected.
@asynctest.fail_on(unused_loop=False)
def test_read_args_dynamo(self):
from livebridge.run import read_args
config.AWS["control_table_name"] = "foobaz"
args = read_args()
assert args.control == "dynamodb"
config.AWS["control_table_name"] = None
import unittest.mock
import os.path
from livebridge import LiveBridge, config
# Tests for livebridge.run.main().  Every collaborator (event loop, db client,
# controller, web server, CLI parsing) is mocked, so no real I/O happens.
class RunTests(asynctest.TestCase):
# main() called with an explicit loop and control file returns a LiveBridge
# instance (run_until_complete and ensure_future are mocked out).
async def test_run_with_loop(self):
self.loop.run_until_complete = asynctest.CoroutineMock(return_value=True)
control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
db_connector = asynctest.MagicMock()
db_connector.setup = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("livebridge.components.get_db_client") as mocked_db_client:
mocked_db_client.return_value = db_connector
with asynctest.patch("livebridge.controller.Controller") as mocked_controller:
mocked_controller.run = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("asyncio.ensure_future") as mocked_ensure:
mocked_ensure.return_value = True
# Imported late so the patches above are active when main() runs.
from livebridge.run import main
livebridge = main(loop=self.loop, control=control_file)
assert type(livebridge) is LiveBridge
# main() propagates an exception raised while parsing CLI arguments.
async def test_run_with_args(self):
with unittest.mock.patch("argparse.ArgumentParser.parse_args") as patched:
patched.side_effect = [Exception()]
with self.assertRaises(Exception):
from livebridge.run import main
main(loop=self.loop)
# Full main() path with no arguments: afterwards the loop must have been run
# and closed exactly once, and the web server shut down exactly once.
async def test_run(self):
self.loop.run_forever = asynctest.CoroutineMock(return_value=True)
self.loop.run_until_complete = asynctest.CoroutineMock(return_value=True)
self.loop.close = asynctest.CoroutineMock(return_value=True)
control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
db_connector = asynctest.MagicMock()
db_connector.setup = asynctest.CoroutineMock(return_value=True)
web_config = {"host": "0.0.0.0", "port": 9090}
web_server = asynctest.MagicMock(spec="livebridge.web.WebApi")
web_server.shutdown = asynctest.CoroutineMock(return_value=True)
with asynctest.patch("livebridge.loader.load_extensions") as mocked_loader:
mocked_loader.return_value = None
with asynctest.patch("livebridge.config.CONTROLFILE") as mocked_config:
mocked_config.return_value = control_file
with asynctest.patch("livebridge.web.WebApi") as mocked_server:
mocked_server.return_value = web_server
with asynctest.patch("livebridge.config.WEB") as mocked_web_config:
mocked_web_config.return_value = web_config
with asynctest.patch("livebridge.components.get_db_client") as mocked_db_client:
mocked_db_client.return_value = db_connector
with asynctest.patch("livebridge.controller.Controller") as mocked_controller:
mocked_controller.run = asynctest.CoroutineMock(return_value=True)
# main() grabs its own loop internally, so hand it the test loop.
with asynctest.patch("asyncio.get_event_loop") as patched:
patched.return_value = self.loop
with asynctest.patch("asyncio.ensure_future") as mocked_ensure:
mocked_ensure.return_value = True
with asynctest.patch("livebridge.LiveBridge.finish"):
print(self.loop.run_forever.call_count)
from livebridge.run import main
main()
assert self.loop.run_forever.call_count == 1
assert self.loop.close.call_count == 1
assert web_server.shutdown.call_count == 1
# Tests for livebridge.run.read_args(): how the control source is resolved from
# config values and keyword arguments.  Each test restores the config it set.
class ArgsTests(asynctest.TestCase):
# config.CONTROLFILE, when set, is returned as the control source.
async def test_read_args_file(self):
self.control_file = os.path.join(os.path.dirname(__file__), "files", "control.yaml")
config.CONTROLFILE = self.control_file
from livebridge.run import read_args
args = read_args()
assert args.control == self.control_file
config.CONTROLFILE = None
# A `control` passed as a keyword argument is returned as-is.
@asynctest.fail_on(unused_loop=False)
def test_read_args_kwargs(self):
from livebridge.run import read_args
args = read_args(**{"control": "foobaz"})
assert args.control == "foobaz"
# With both DB and AWS control tables configured, 'sql' takes precedence.
@asynctest.fail_on(unused_loop=False)
def test_read_args_sql(self):
from livebridge.run import read_args
config.DB["control_table_name"] = "foobaz"
config.AWS["control_table_name"] = "foobaz"
args = read_args()
assert args.control == "sql"
config.DB["control_table_name"] = None
config.AWS["control_table_name"] = None
# With only the AWS control table configured, 'dynamodb' is selected.
@asynctest.fail_on(unused_loop=False)
def test_read_args_dynamo(self):
from livebridge.run import read_args
config.AWS["control_table_name"] = "foobaz"
args = read_args()
assert args.control == "dynamodb"
config.AWS["control_table_name"] = None
import os
import copy
import thornpy
from . import TMPLT_ENV
from .utilities import read_TO_file, get_cdb_path, get_full_path
class DrillSolverSettings():
    """Creates an object with all data necessary to write an Adams Drill solver settings (.ssf) file.

    Note
    ----
    The static funnel is stored as a :obj:`list` of :obj:`list`s in the 'Funnel' entry of the
    :attr:`parameters` attribute: one row per funnel quantity (maxit, stability, error,
    imbalance, tlimit, alimit) and one column per funnel step.

    Examples
    --------
    This example reads a :class:`DrillSolverSettings` object from a file and prints `Maxit`
    from all the steps in the static funnel.

    >>> ssf = DrillSolverSettings.read_from_file('example.ssf')
    >>> maxit = ssf.parameters['Funnel'][0]
    >>> print(maxit)
    [500, 500, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 100]

    Attributes
    ----------
    name : str
        Name of the solver settings object.
    parameters : dict
        Dictionary of parameters that make up an Adams Drill solver settings and would be
        found in an Adams Drill solver settings file (.ssf).  Keys are the parameter names
        seen in the file; values are the corresponding values.
    filename : str
        Name of the solver settings file (.ssf) in which these solver settings are stored.
        Initially empty; populated by :meth:`write_to_file` and :meth:`read_from_file`.

    """
    # Single-valued parameters that must all be present for validate() to pass.
    _SCALAR_PARAMETERS = [
        'Integrator',
        'Formulation',
        'Corrector',
        'Error',
        'HMax',
        'Alpha',
        'Thread_Count'
    ]
    _DEFAULT_PARAMETER_SCALARS = {
        'Integrator': 'HHT',
        'Formulation': 'I3',
        'Corrector': 'Modified',
        'Error': 0.00001,
        'HMax': 0.005,
        'Alpha': -0.25,
        'Thread_Count': 4
    }
    # Tabular parameters stored as parallel rows (see class docstring).
    _TABLE_PARAMETERS = [
        'Funnel'
    ]
    _DEFAULT_PARAMETER_TABLES = {
        'Funnel': [
            [500, 500, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 100],
            [0.1, 5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.1, 1, 0.3, 0.3, 0.2, 0.2, 0.1, 0.1, 0.05, 0.05, 0.01, 0.01, 0.005, 0.005, 0.005, 0.005],
            [0.1, 1, 0.3, 0.2, 0.2, 0.1, 0.1, 0.05, 0.05, 0.01, 0.01, 0.005, 0.005, 0.001, 0.0005, 0.005],
            [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        ]
    }
    # Subdirectory of a cdb holding solver settings files, and the file extension.
    _CDB_TABLE = 'solver_settings.tbl'
    _EXT = 'ssf'

    def __init__(self, name, **kwargs):
        """Initializes the :class:`DrillSolverSettings` object.

        Parameters
        ----------
        name : str
            Name of the solver settings.
        **kwargs
            Any solver settings parameter (scalar or table) overriding the class defaults.

        """
        self.name = name
        self.parameters = kwargs
        # Apply default parameters from class variables for anything not in kwargs
        self._apply_defaults()
        # Initialize filename instance variable (set by write_to_file/read_from_file)
        self.filename = ''

    def add_funnel_step(self, maxit, stab, error, imbal, tlim, alim, clear_existing=False):
        """Appends a step to the static equilibrium funnel.

        Parameters
        ----------
        maxit : int
            Specifies the maximum number of iterations allowed for finding static equilibrium.
        stab : float
            Specifies the fraction of the mass and damping matrices (subsets of the equilibrium
            Jacobian matrix) Adams Solver (C++) adds to the stiffness matrix during static
            simulations performed using static analyses.
        error : float
            Specifies the relative correction convergence threshold.
        imbal : float
            Specifies the equation imbalance convergence threshold.
        tlim : float
            Specifies the maximum translational increment allowed per iteration.
        alim : float
            Specifies the maximum angular increment allowed per iteration during a static or
            quasi-static equilibrium analysis.
        clear_existing : bool, optional
            If True, discard the current funnel before appending this step (default False).

        """
        if clear_existing:
            self.parameters['Funnel'] = [[], [], [], [], [], []]
        for i, param in enumerate([int(maxit), stab, error, imbal, tlim, alim]):
            self.parameters['Funnel'][i].append(param)
        # Store the transposed (per-step) view as a list: a bare zip object is a
        # single-use iterator and would be silently empty on a second render.
        self.parameters['_Funnel'] = list(zip(*self.parameters['Funnel']))

    def write_to_file(self, filename, directory=None, cdb=None):
        """Creates a solver settings file from the DrillSolverSettings object.

        Parameters
        ----------
        filename : str
            Name of the file to write.
        directory : str
            Directory in which to write the file.  (default is None, which means it is
            written to the current working directory.)
        cdb : str
            Name of the cdb in which to write the file.  This argument overrides `directory`.

        Returns
        -------
        str
            cdb-style path of the file that was written.

        Raises
        ------
        ValueError
            Raised if not all parameters have been defined, or if no destination is given.

        """
        # Raise an error if the parameters can't be validated
        if not self.validate():
            raise ValueError('The parameters could not be validated.')
        if directory is not None:
            # Strip the filename of its path and extension, then place it in `directory`
            filename = os.path.split(filename)[-1].replace(f'.{self._EXT}', '')
            filepath = os.path.join(directory, filename + f'.{self._EXT}')
        elif cdb is not None:
            # Strip the filename of its path and extension, then place it in the cdb table
            filename = os.path.split(filename)[-1].replace(f'.{self._EXT}', '')
            filepath = get_full_path(os.path.join(cdb, self._CDB_TABLE, filename + f'.{self._EXT}'))
        elif filename is not None:
            # Only a filename given: use it as the full path
            # NOTE(review): this branch does not re-append the extension -- confirm intended.
            filepath = thornpy.utilities.convert_path(filename.replace(f'.{self._EXT}', ''))
        else:
            # Fixed garbled message (was "must key work arguments" / "write_directory")
            raise ValueError('One of the following keyword arguments must be defined: filename, directory, cdb')
        # Load the jinja2 template, closing the file handle promptly (was leaked)
        template_path = os.path.join(os.path.dirname(__file__), 'templates', f'template.{self._EXT}')
        with open(template_path) as template_fid:
            ssf_template = TMPLT_ENV.from_string(template_fid.read())
        # Write the solver settings file
        with open(filepath, 'w') as fid:
            fid.write(ssf_template.render(self.parameters))
        # Update the instance's filename attribute
        self.filename = get_cdb_path(filepath)
        # Return the name of the file that was written
        return self.filename

    def validate(self):
        """Determines whether all required parameters have been set.

        Returns
        -------
        bool
            True if all parameters have been set.  Otherwise False.

        """
        validated = True
        # Every scalar parameter must be present
        for param_name in self._SCALAR_PARAMETERS:
            if param_name not in self.parameters:
                validated = False
        # Every table parameter must be present with no empty rows
        # (missing key previously raised KeyError instead of returning False)
        for param_name in self._TABLE_PARAMETERS:
            if param_name not in self.parameters or not all(self.parameters[param_name]):
                validated = False
        return validated

    @classmethod
    def read_from_file(cls, filename):
        """Reads a solver settings file and returns a :class:`DrillSolverSettings` object
        populated from it.

        Parameters
        ----------
        filename : str
            Filename of a solver settings (.ssf) file.

        Returns
        -------
        DrillSolverSettings
            :class:`DrillSolverSettings` object with parameters from the passed solver
            settings file.

        """
        # Read the TO data into a dictionary
        tiem_orbit_data = read_TO_file(get_full_path(filename))
        drill_solver_settings = cls('')
        # Overwrite the defaults with the parameters found in the file
        drill_solver_settings._get_params_from_TO_data(tiem_orbit_data)  # pylint: disable=protected-access
        # Set the filename attribute
        drill_solver_settings.filename = filename
        return drill_solver_settings

    def _apply_defaults(self):
        """Applies defaults from class variables for any parameter the caller
        did not supply."""
        # Applies scalar parameter defaults only where missing
        for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():
            if scalar_parameter not in self.parameters:
                self.parameters[scalar_parameter] = copy.copy(value)
        # Applies table parameter defaults only where missing (previously these
        # unconditionally clobbered caller-supplied tables, unlike the scalars)
        for table_parameter, table in self._DEFAULT_PARAMETER_TABLES.items():
            if table_parameter not in self.parameters:
                # Deep-copy each row: a shallow list(table) aliased the class-level
                # default rows, so add_funnel_step mutated the defaults shared by
                # every instance.
                self.parameters[table_parameter] = [list(row) for row in table]
            # Transposed per-step view as a list (not a single-use zip iterator)
            self.parameters['_' + table_parameter] = list(zip(*self.parameters[table_parameter]))

    def _get_params_from_TO_data(self, tiem_orbit_data):  # pylint: disable=invalid-name
        """Reads the solver settings parameters out of a dictionary of Tiem Orbit data
        generated by :meth:`adamspy.adripy.utilities.read_TO_file`.

        Parameters
        ----------
        tiem_orbit_data : dict
            :obj:`dict` of Tiem Orbit data

        Raises
        ------
        ValueError
            A solver settings parameter could not be found.

        """
        for param in self._TABLE_PARAMETERS:
            # For each parameter initialize a found flag
            found = False
            if param.lower() == 'funnel':
                # The funnel rows live under STATICS/FUNNEL, one list per quantity
                for i, par in enumerate(['maxit', 'stability', 'error', 'imbalance', 'tlimit', 'alimit']):
                    self.parameters[param][i] = tiem_orbit_data['STATICS']['FUNNEL'][par]
                self.parameters['_' + param] = list(zip(*self.parameters[param]))
                found = True
            # Raise a value error if the parameter isn't found.
            if not found:
                raise ValueError(f'{param} not found!')
        for param in self._SCALAR_PARAMETERS:
            # For each parameter initialize a found flag
            found = False
            for block in tiem_orbit_data:
                # For each block in the TO file
                if block != 'STATICS' and param.lower() in tiem_orbit_data[block]:
                    # If the parameter is in this block, set the parameter and break the loop
                    self.parameters[param] = tiem_orbit_data[block][param.lower()]
                    found = True
                    break
                elif block != 'STATICS':
                    # If the parameter is not in this block, find all the sub blocks
                    # and look for the parameter inside each sub block
                    sub_blocks = [header for header in tiem_orbit_data[block] if isinstance(tiem_orbit_data[block][header], dict)]
                    for sub_block in sub_blocks:
                        # For each sub_block in the block
                        if param.lower() in [p.lower() for p in tiem_orbit_data[block][sub_block]]:
                            # If the parameter is in the sub block, set the parameter and break the loop
                            self.parameters[param] = tiem_orbit_data[block][sub_block][param.lower()]
                            found = True
                            break
                    if found:
                        break
            # Raise a value error if the parameter isn't found.
            if not found:
                raise ValueError(f'{param} not found!')
import copy
import thornpy
from . import TMPLT_ENV
from .utilities import read_TO_file, get_cdb_path, get_full_path
class DrillSolverSettings():
"""Creates an object with all data necessary to write an Adams Drill solver settings (.ssf) file.
Note
----
The static funnel is stored as a :obj:`list` of :obj:`list`s in the 'Funnel' entry of the :attr:`parameters` attribute.
Examples
--------
This example reads a :class:`DrillSolverSettings` object from a file and prints `Maxit` from all the steps in the static funnel.
>>> ssf = DrillSolverSettings.read_from_file('example.ssf')
>>> maxit = ssf.parameters['Funnel'][0]
>>> print(maxit)
[500, 500, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 100]
Attributes
----------
name : str
Name of the solver settings object
parameters : dict
Dictionary of parameters that make up an Adams Drill solver settings and would be found in an Adams Drill solver settings file (.ssf). The keys of the dictionary are the parameter names that would be seen in the string file and the values of the dictionary are the values that would be seen in the string file.
filename : str
Name of the solver settings file (.ssf) in which these solver settings are stored. This attribute is initially empty and is populated by the `write_to_file()` method.
"""
_SCALAR_PARAMETERS = [
'Integrator',
'Formulation',
'Corrector',
'Error',
'HMax',
'Alpha',
'Thread_Count'
]
_DEFAULT_PARAMETER_SCALARS = {
'Integrator': 'HHT',
'Formulation': 'I3',
'Corrector': 'Modified',
'Error': 0.00001,
'HMax': 0.005,
'Alpha': -0.25,
'Thread_Count': 4
}
_TABLE_PARAMETERS = [
'Funnel'
]
_DEFAULT_PARAMETER_TABLES = {
'Funnel': [
[500, 500, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 100],
[0.1, 5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.1, 1, 0.3, 0.3, 0.2, 0.2, 0.1, 0.1, 0.05, 0.05, 0.01, 0.01, 0.005, 0.005, 0.005, 0.005],
[0.1, 1, 0.3, 0.2, 0.2, 0.1, 0.1, 0.05, 0.05, 0.01, 0.01, 0.005, 0.005, 0.001, 0.0005, 0.005],
[1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
_CDB_TABLE = 'solver_settings.tbl'
_EXT = 'ssf'
def __init__(self, name, **kwargs):
"""Initializes the :class:`DrillSolverSettings` object.
Parameters
----------
name : str
Name of the solver settings.
"""
self.name = name
self.parameters = kwargs
# Apply default parameters from Class Variable
self._apply_defaults()
# Initialize filename instance variable
self.filename = ''
def add_funnel_step(self, maxit, stab, error, imbal, tlim, alim, clear_existing=False):
"""Adds a ramp to the specified ramp parameter.
Parameters
----------
maxit : int
Specifies the maximum number of iterations allowed for finding static equilibrium
stab : float
Specifies the fraction of the mass and damping matrices (subsets of the equilibrium Jacobian matrix) Adams Solver (C++) adds to the stiffness matrix (a subset of the equilibrium Jacobian matrix) during static simulations performed using static analyses.
error : float
Specifies the relative correction convergence threshold.
imbal : float
Specifies the equation imbalance convergence threshold.
tlim : float
Specifies the maximum translational increment allowed per iteration.
alim : float
Specifies the maximum angular increment allowed per iteration during a static or quasi-static equilibrium analysis.
"""
if clear_existing:
self.parameters['Funnel'] = [[], [], [], [], [], []]
for i, param in enumerate([int(maxit), stab, error, imbal, tlim, alim]):
self.parameters['Funnel'][i].append(param)
self.parameters['_Funnel'] = zip(*self.parameters['Funnel'])
def write_to_file(self, filename, directory=None, cdb=None):
"""Creates a solver settings file from the DrillSolverSettings object.
Parameters
----------
filename : str
Name of the file to write.
directory : str
Directory in which to write the file. (default is None which means it is written to the current working directory.
cdb : str
Name of the cdb in which to write the file. This argument overrides `directory`.
Raises
------
ValueError
Raised if not all parameters have been defined.
"""
# Raise an error if the parameters can't be validated
if not self.validate():
raise ValueError('The parameters could not be validated.')
if directory is not None:
# If the write_directory argument is passed, strip the filename of
# it's path and extension
filename = os.path.split(filename)[-1].replace(f'.{self._EXT}','')
# Set the filepath to the filename in the given directory
filepath = os.path.join(directory, filename + f'.{self._EXT}')
elif cdb is not None:
# If the write_directory argument is not passed, but the cdb
# argument is, strip the filename of it's path and extension
filename = os.path.split(filename)[-1].replace(f'.{self._EXT}','')
# Set the filepath to the file in the cdb
filepath = get_full_path(os.path.join(cdb, self._CDB_TABLE, filename + f'.{self._EXT}'))
elif filename is not None:
# If Nothing but a filename is given, set that as the full path
filepath = thornpy.utilities.convert_path(filename.replace(f'.{self._EXT}',''))
else:
# If nothing is given, raise an error
raise ValueError('One of the following must key work arguments must be defined: write_directory, filename, cdb')
# Get the jinja2 template for a solver settings file
ssf_template = TMPLT_ENV.from_string(open(os.path.join(os.path.dirname(__file__), 'templates', f'template.{self._EXT}')).read())
# Write the solver settings file
with open(filepath, 'w') as fid:
fid.write(ssf_template.render(self.parameters))
# Update the instance's filename attribute
self.filename = get_cdb_path(filepath)
# Return the name of the file that was written
return self.filename
def validate(self):
"""
Determines if all parameters have been set
Returns
-------
bool
True if all parameters have been set. Otherwise False.
"""
validated = True
# Check that all parameters exist in the self.parameters dictionary
for param_name in self._SCALAR_PARAMETERS:
if param_name not in self.parameters:
validated = False
for param_name in self._TABLE_PARAMETERS:
if not all([elem for elem in self.parameters[param_name]]):
validated = False
return validated
@classmethod
def read_from_file(cls, filename):
"""Reads a string file and returns a DrillString object with DrillString.parameters based on data in the string file.
Parameters
----------
filename : str
Filename of a drill string (.str) file.
Returns
-------
DrillSolverSettings
:class:`DrillSolverSettings` object with parameters from the passed solver settings file.
"""
# Read the TO data into a dictionary
tiem_orbit_data = read_TO_file(get_full_path(filename))
drill_solver_settings = cls('')
# Extract the DrillString parameters from the TO dictionary
drill_solver_settings._get_params_from_TO_data(tiem_orbit_data) #pylint: disable=protected-access
# Set the filename attribute
drill_solver_settings.filename = filename
return drill_solver_settings
def _apply_defaults(self):
"""
Applies defaults from class variables
"""
# Applies normal parameter defaults
for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():
if scalar_parameter not in self.parameters:
self.parameters[scalar_parameter] = copy.copy(value)
# Applies defaults to all ramp parameters
for table_parameter, table in self._DEFAULT_PARAMETER_TABLES.items():
self.parameters[table_parameter] = {}
self.parameters[table_parameter] = list(table)
self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])
    def _get_params_from_TO_data(self, tiem_orbit_data): #pylint: disable=invalid-name
        """Reads the solver settings parameters out of a dictionary of Tiem Orbit data generated by :meth:`adamspy.adripy.utilities.read_TO_file`.

        Parameters
        ----------
        tiem_orbit_data : dict
            :obj:`dict` of Tiem Orbit data

        Raises
        ------
        ValueError
            A solver settings parameter could not be found
        """
        # --- Table parameters -------------------------------------------
        for param in self._TABLE_PARAMETERS:
            # For each parameter initialize a found flag
            found = False
            if param.lower() == 'funnel':
                # The funnel table lives under STATICS/FUNNEL; copy each
                # column, in this fixed order, into the parameter table.
                for i, par in enumerate(['maxit', 'stability', 'error', 'imbalance', 'tlimit', 'alimit']):
                    self.parameters[param][i] = tiem_orbit_data['STATICS']['FUNNEL'][par]
                # Transposed (row-wise) view for the template.
                # NOTE(review): zip() returns a single-use iterator; this
                # value can only be traversed once -- confirm the template
                # renders it exactly once.
                self.parameters['_' + param] = zip(*self.parameters[param])
                found = True
            # Raise a value error if the parameter isn't found.
            # NOTE(review): any table parameter other than 'funnel' always
            # falls through to this error -- presumably 'funnel' is the only
            # table parameter; verify against _TABLE_PARAMETERS.
            if not found:
                raise ValueError(f'{param} not found!')
        # --- Scalar parameters ------------------------------------------
        # Search every non-STATICS block, then every dict-valued sub block,
        # for the (lowercased) parameter name; first hit wins.
        for param in self._SCALAR_PARAMETERS:
            # For each parameter initialize a found flag
            found = False
            for block in tiem_orbit_data:
                # For each block in the TO file
                if block !='STATICS' and param.lower() in tiem_orbit_data[block]:
                    # If the parameter is in this block, set the parameter and break the loop
                    self.parameters[param] = tiem_orbit_data[block][param.lower()]
                    found = True
                    break
                elif block != 'STATICS':
                    # If the parameter is not in this block, find all the sub blocks
                    # and look for the parameter inside each sub block
                    sub_blocks = [header for header in tiem_orbit_data[block] if isinstance(tiem_orbit_data[block][header], dict)]
                    for sub_block in sub_blocks:
                        # For each sub_block in the block
                        if param.lower() in [p.lower() for p in tiem_orbit_data[block][sub_block]]:
                            # If the parameter is in the sub block, set the parameter and break the loop
                            self.parameters[param] = tiem_orbit_data[block][sub_block][param.lower()]
                            found = True
                            break
                    if found:
                        break
            # Raise a value error if the parameter isn't found.
            if not found:
                raise ValueError(f'{param} not found!')
from ...jvm.lib.compat import *
from ...jvm.lib import annotate, Optional
from ...jvm.lib import public
from ...jvm.lib import classproperty
from ... import jni
from ...jvm import JVM as _JVM
@public
class JVM(_JVM):
    """Represents the Java virtual machine"""
    # Class-wide (singleton) access to the running JVM wrapper and its JNI
    # environment; both are set by start() and cleared by shutdown().
    jvm = classproperty(lambda cls: JVM._jvm)
    jenv = classproperty(lambda cls: JVM._jenv)
    _jvm = None # Optional[jt.jvm.JVM]
    _jenv = None # Optional[jni.JNIEnv]
    def __init__(self, dll_path=None):
        # Locate and load the JVM shared library, create the JNI helper
        # wrappers, and prepare the type manager (started later in start()).
        from ._typemanager import TypeManager
        self._dll_path = None
        self._load(dll_path)
        self._create()
        self.type_manager = TypeManager()
    def __enter__(self):
        # Context-manager entry yields the (jvm, jenv) pair.
        # NOTE(review): no __exit__ is defined here; shutdown() appears to
        # be the explicit teardown -- confirm the base class provides
        # __exit__ if `with JVM()` is used.
        return self._jvm, JVM._jenv
    def start(self, *jvmoptions, **jvmargs):
        # Start the JVM via the base class, publish the singletons, then
        # bind the JNI helpers and the type manager to the live environment.
        _, jenv = result = super(JVM, self).start(*jvmoptions, **jvmargs)
        JVM._jvm, JVM._jenv = self, jenv
        self._initialize(jenv)
        self.type_manager.start()
        return result
    def shutdown(self):
        # Tear down in reverse order of start(), then clear the singletons.
        self.type_manager.stop()
        # NOTE(review): tuple-unpacking self relies on the base class being
        # iterable as (jvm, jenv) -- confirm in _JVM.
        _, jenv = self
        self._dispose(jenv)
        super(JVM, self).shutdown()
        JVM._jvm = JVM._jenv = None
    def _load(self, dll_path=None):
        # Resolve the JVM shared-library path (explicit argument wins,
        # otherwise discover one) and initialize the base class with it.
        from ...jvm.platform import JVMFinder
        from ...jvm import EStatusCode  # NOTE(review): imported but unused here
        if dll_path is not None:
            self._dll_path = dll_path
        elif self._dll_path is None:
            finder = JVMFinder()
            self._dll_path = finder.get_jvm_path()
        super(JVM, self).__init__(self._dll_path)
    def _create(self):
        # Instantiate the JNI helper wrappers (not yet bound to a JNIEnv).
        from .._java import jnirubicon
        self.ProxyHandler = jnirubicon.rubicon_reflect_ProxyHandler()
        self.Python = jnirubicon.rubicon_Python()
    @annotate(jenv=jni.JNIEnv)
    def _initialize(self, jenv):
        # Bind the helper wrappers to the live JNI environment.
        self.ProxyHandler.initialize(jenv)
        self.Python.initialize(jenv)
    @annotate(jenv=jni.JNIEnv)
    def _dispose(self, jenv):
        # Release JNI resources held by the helper wrappers.
        self.ProxyHandler.dispose(jenv)
        self.Python.dispose(jenv)
    def handleException(self, exc):
        # Propagate Java-side exceptions unchanged to the caller.
        raise exc
from ...jvm.lib.compat import *
from ...jvm.lib import annotate, Optional
from ...jvm.lib import public
from ...jvm.lib import classproperty
from ... import jni
from ...jvm import JVM as _JVM
@public
class JVM(_JVM):
"""Represents the Java virtual machine"""
jvm = classproperty(lambda cls: JVM._jvm)
jenv = classproperty(lambda cls: JVM._jenv)
_jvm = None # Optional[jt.jvm.JVM]
_jenv = None # Optional[jni.JNIEnv]
def __init__(self, dll_path=None):
from ._typemanager import TypeManager
self._dll_path = None
self._load(dll_path)
self._create()
self.type_manager = TypeManager()
def __enter__(self):
return self._jvm, JVM._jenv
def start(self, *jvmoptions, **jvmargs):
_, jenv = result = super(JVM, self).start(*jvmoptions, **jvmargs)
JVM._jvm, JVM._jenv = self, jenv
self._initialize(jenv)
self.type_manager.start()
return result
def shutdown(self):
self.type_manager.stop()
_, jenv = self
self._dispose(jenv)
super(JVM, self).shutdown()
JVM._jvm = JVM._jenv = None
def _load(self, dll_path=None):
from ...jvm.platform import JVMFinder
from ...jvm import EStatusCode
if dll_path is not None:
self._dll_path = dll_path
elif self._dll_path is None:
finder = JVMFinder()
self._dll_path = finder.get_jvm_path()
super(JVM, self).__init__(self._dll_path)
def _create(self):
from .._java import jnirubicon
self.ProxyHandler = jnirubicon.rubicon_reflect_ProxyHandler()
self.Python = jnirubicon.rubicon_Python()
@annotate(jenv=jni.JNIEnv)
def _initialize(self, jenv):
self.ProxyHandler.initialize(jenv)
self.Python.initialize(jenv)
@annotate(jenv=jni.JNIEnv)
def _dispose(self, jenv):
self.ProxyHandler.dispose(jenv)
self.Python.dispose(jenv)
def handleException(self, exc):
raise exc | 0.751375 | 0.107204 |
import pathlib
import sys
import numpy as np
from matplotlib import pyplot as plt
from gromacs import (
read_gromacs_file,
write_gromacs_gro_file,
)
plt.style.use('seaborn-talk')
def get_positions(frame):
    """Return the atom positions of ``frame`` as a (natoms, 3) array.

    Parameters
    ----------
    frame : dict
        A snapshot as produced by :func:`read_gromacs_file`; coordinate
        components are stored under the ``'x'``, ``'y'`` and ``'z'`` keys.

    Returns
    -------
    numpy.ndarray
        Array with one row per atom and columns (x, y, z).
    """
    # np.asarray replaces the original element-by-element list
    # comprehensions; the result is identical for sequence or array input.
    xpos = np.asarray(frame['x'])
    ypos = np.asarray(frame['y'])
    zpos = np.asarray(frame['z'])
    return np.column_stack((xpos, ypos, zpos))
def merge_snapshots(frames):
    """Merge the bookkeeping fields of several snapshots into one.

    The box is taken from the first frame; the per-atom fields are
    concatenated in frame order.
    """
    merged = {
        'header': 'Merged.',
        'box': frames[0]['box'],
    }
    for field in ('residunr', 'residuname', 'atomname', 'atomnr'):
        # Concatenate this field across all frames, preserving order.
        merged[field] = [entry for frame in frames for entry in frame[field]]
    return merged
def main(upper_file, lower_file, delta_z):
    """Read frames and extract upper/lower bilayer.

    Reads one frame from each input file, shifts the upper leaflet up and
    the lower leaflet down by ``delta_z``, writes the translated leaflets
    and a merged snapshot to .gro files, and shows before/after scatter
    plots (x vs z).
    """
    # Take the first frame from each input file.
    upper = [i for i in read_gromacs_file(upper_file)][0]
    upper_xyz = get_positions(upper)
    lower = [i for i in read_gromacs_file(lower_file)][0]
    lower_xyz = get_positions(lower)
    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    # Before translation: x vs z of both leaflets.
    ax1.scatter(upper_xyz[:, 0], upper_xyz[:, 2], marker='o')
    ax1.scatter(lower_xyz[:, 0], lower_xyz[:, 2], marker='s')
    # Separate the leaflets along z.
    upper_xyz[:, 2] += delta_z
    lower_xyz[:, 2] -= delta_z
    # After translation.
    ax2.scatter(upper_xyz[:, 0], upper_xyz[:, 2], marker='o')
    ax2.scatter(lower_xyz[:, 0], lower_xyz[:, 2], marker='s')
    # Write each translated leaflet (with zero velocities).
    write_gromacs_gro_file(
        'translated_{}.gro'.format(pathlib.Path(upper_file).stem),
        upper,
        upper_xyz,
        np.zeros_like(upper_xyz)
    )
    write_gromacs_gro_file(
        'translated_{}.gro'.format(pathlib.Path(lower_file).stem),
        lower,
        lower_xyz,
        np.zeros_like(lower_xyz)
    )
    # Combine both leaflets into a single snapshot and write it out.
    merged = merge_snapshots((upper, lower))
    merged_xyz = np.vstack((upper_xyz, lower_xyz))
    write_gromacs_gro_file(
        'merged.gro',
        merged,
        merged_xyz,
        np.zeros_like(merged_xyz)
    )
    fig.tight_layout()
    plt.show()
if __name__ == '__main__':
    # CLI usage: translate.py <upper.gro> <lower.gro> <delta_z>
    main(sys.argv[1], sys.argv[2], float(sys.argv[3]))
import sys
import numpy as np
from matplotlib import pyplot as plt
from gromacs import (
read_gromacs_file,
write_gromacs_gro_file,
)
plt.style.use('seaborn-talk')
def get_positions(frame):
"""Get positions given indices."""
xpos = np.array([i for i in frame['x']])
ypos = np.array([i for i in frame['y']])
zpos = np.array([i for i in frame['z']])
xyz = np.column_stack((xpos, ypos, zpos))
return xyz
def merge_snapshots(frames):
"""Extract a subset of atoms from a given frame."""
snapshot = {
'header': 'Merged.',
'box': frames[0]['box'],
'residunr': [],
'residuname': [],
'atomname': [],
'atomnr': [],
}
for key in ('residunr', 'residuname', 'atomname', 'atomnr'):
for frame in frames:
for item in frame[key]:
snapshot[key].append(item)
return snapshot
def main(upper_file, lower_file, delta_z):
"""Read frames and extract upper/lower bilayer."""
upper = [i for i in read_gromacs_file(upper_file)][0]
upper_xyz = get_positions(upper)
lower = [i for i in read_gromacs_file(lower_file)][0]
lower_xyz = get_positions(lower)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.scatter(upper_xyz[:, 0], upper_xyz[:, 2], marker='o')
ax1.scatter(lower_xyz[:, 0], lower_xyz[:, 2], marker='s')
upper_xyz[:, 2] += delta_z
lower_xyz[:, 2] -= delta_z
ax2.scatter(upper_xyz[:, 0], upper_xyz[:, 2], marker='o')
ax2.scatter(lower_xyz[:, 0], lower_xyz[:, 2], marker='s')
write_gromacs_gro_file(
'translated_{}.gro'.format(pathlib.Path(upper_file).stem),
upper,
upper_xyz,
np.zeros_like(upper_xyz)
)
write_gromacs_gro_file(
'translated_{}.gro'.format(pathlib.Path(lower_file).stem),
lower,
lower_xyz,
np.zeros_like(lower_xyz)
)
merged = merge_snapshots((upper, lower))
merged_xyz = np.vstack((upper_xyz, lower_xyz))
write_gromacs_gro_file(
'merged.gro',
merged,
merged_xyz,
np.zeros_like(merged_xyz)
)
fig.tight_layout()
plt.show()
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], float(sys.argv[3])) | 0.501465 | 0.527134 |
import json
import os
import time
from pathlib import Path
import uuid
import paho.mqtt.publish as publish
def safe_publish(topic, msg, broker, timeout=5):
    """Best-effort publish of ``msg`` (JSON-encoded) to ``topic``.

    ``broker`` is a ``host:port`` string.  A missing broker or any publish
    failure is reported on stdout instead of raising.
    """
    if not broker:
        print("No MQTT broker configured")
        return None
    try:
        hostname, port = broker.split(':')
        return publish.single(topic,
                              json.dumps(msg),
                              hostname=hostname,
                              port=int(port),
                              keepalive=timeout)
    except Exception as e:
        # Deliberately swallow: status reporting must never break the caller.
        print("Could not send MQTT message:", e)
def mqtt_status(helper=None):
    """Decorator factory that publishes pipeline status events over MQTT.

    The wrapped action receives a single ``args`` dict.  A ``start`` event is
    published before the action runs, an ``end`` event (with duration) after
    it returns, and an ``error`` event if it raises (the exception is then
    re-raised).  ``helper``, if given, is called with ``args`` and its dict
    result is merged into every message.
    """
    helper_ = helper
    def wrap(method):
        def wrapped_f(args):
            # Get the broker to use
            mqtt_broker = args.get('mqtt_broker')
            # Derive the stage name from the OpenWhisk action name,
            # e.g. '/namespace/package/action' -> 'action'.
            stage = os.environ.get('__OW_ACTION_NAME')
            try:
                stage = stage.split('/')[-1]
            except (AttributeError, IndexError):
                # BUGFIX: if the env var is unset, stage is None and
                # .split raises AttributeError.  The original caught only
                # IndexError -- which str.split()[-1] can never raise --
                # so a missing env var crashed instead of falling back.
                stage = 'unknown'
            # Work out choir/song (and optionally part) ids from the object
            # key, formatted 'choir+song[+part...]'.
            notification = args.get('notification', {})
            key = args.get('key', notification.get('object_name', ''))
            key_parts = Path(key).stem.split('+')
            choir_id, song_id = key_parts[:2]
            msg = {'choir_id': choir_id,
                   'song_id': song_id,
                   'stage': stage,
                   'status_id': str(uuid.uuid4())
                   }
            # These stages operate on a single part, so include its id.
            if stage in ['convert_format', 'calculate_alignment', 'trim_clip']:
                msg['part_id'] = key_parts[2]
            t1 = time.time()
            msg['event'] = 'start'
            msg['start'] = int(t1)
            if helper_ is not None:
                msg.update(helper_(args))
            safe_publish(
                f"choirless/{choir_id}/{song_id}/renderer/{stage}",
                msg,
                mqtt_broker
            )
            try:
                result = method(args)
                t2 = time.time()
                msg['event'] = 'end'
                msg['end'] = int(t2)
                msg['duration'] = int(t2-t1)
                safe_publish(
                    f"choirless/{choir_id}/{song_id}/renderer/{stage}",
                    msg,
                    mqtt_broker,
                )
            except Exception as e:
                # Report the failure, then let it propagate to the caller.
                t2 = time.time()
                msg['event'] = 'error'
                msg['error'] = str(e)
                msg['end'] = int(t2)
                msg['duration'] = int(t2-t1)
                safe_publish(
                    f"choirless/{choir_id}/{song_id}/renderer/{stage}",
                    msg,
                    mqtt_broker,
                )
                raise
            return result
        return wrapped_f
    return wrap
import os
import time
from pathlib import Path
import uuid
import paho.mqtt.publish as publish
def safe_publish(topic, msg, broker, timeout=5):
if not broker:
print("No MQTT broker configured")
else:
try:
hostname, port = broker.split(':')
return publish.single(topic,
json.dumps(msg),
hostname=hostname,
port=int(port),
keepalive=timeout)
except Exception as e:
print("Could not send MQTT message:", e)
def mqtt_status(helper=None):
helper_ = helper
def wrap(method):
def wrapped_f(args):
# Get the broker to use
mqtt_broker = args.get('mqtt_broker')
# Get the stage from the current env
stage = os.environ.get('__OW_ACTION_NAME')
try:
stage = stage.split('/')[-1]
except IndexError:
stage = 'unknown'
notification = args.get('notification', {})
key = args.get('key', notification.get('object_name', ''))
key_parts = Path(key).stem.split('+')
choir_id, song_id = key_parts[:2]
msg = {'choir_id': choir_id,
'song_id': song_id,
'stage': stage,
'status_id': str(uuid.uuid4())
}
if stage in ['convert_format', 'calculate_alignment', 'trim_clip']:
msg['part_id'] = key_parts[2]
t1 = time.time()
msg['event'] = 'start'
msg['start'] = int(t1)
if helper_ is not None:
msg.update(helper_(args))
safe_publish(
f"choirless/{choir_id}/{song_id}/renderer/{stage}",
msg,
mqtt_broker
)
try:
result = method(args)
t2 = time.time()
msg['event'] = 'end'
msg['end'] = int(t2)
msg['duration'] = int(t2-t1)
safe_publish(
f"choirless/{choir_id}/{song_id}/renderer/{stage}",
msg,
mqtt_broker,
)
except Exception as e:
t2 = time.time()
msg['event'] = 'error'
msg['error'] = str(e)
msg['end'] = int(t2)
msg['duration'] = int(t2-t1)
safe_publish(
f"choirless/{choir_id}/{song_id}/renderer/{stage}",
msg,
mqtt_broker,
)
raise
return result
return wrapped_f
return wrap | 0.223971 | 0.057467 |
import FWCore.ParameterSet.Config as cms
import DQM.TrackingMonitor.LogMessageMonitor_cfi
LocalRecoLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
LocalRecoLogMessageMon.pluginsMonName = cms.string ( 'LocalReco' )
LocalRecoLogMessageMon.modules = cms.vstring( 'siPixelDigis', 'siStripDigis', 'siPixelClusters', 'siStripClusters' ) # siPixelDigis : SiPixelRawToDigi, siStripDigis : SiStripRawToDigi (SiStripRawToDigiUnpacker), siPixelClusters : SiPixelClusterProducer, siStripClusters : SiStripClusterizer
LocalRecoLogMessageMon.categories = cms.vstring( 'SiPixelRawToDigi', 'TooManyErrors', 'TooManyClusters' )
# apparently there are no LogError messages in RecoLocalTracker/SubCollectionProducers/src/TrackClusterRemover.cc
ClusterizerLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ClusterizerLogMessageMon.pluginsMonName = cms.string ( 'TrackClusterRemover' )
ClusterizerLogMessageMon.modules = cms.vstring( 'detachedTripletStepClusters', 'lowPtTripletStepClusters', 'pixelPairStepClusters', 'mixedTripletStepClusters', 'pixelLessStepClusters', 'tobTecStepClusters' ) # TrackClusterRemover
ClusterizerLogMessageMon.categories = cms.vstring( )
# initialStepSeeds,lowPtTripletStepSeeds, pixelPairStepSeeds, detachedTripletStepSeeds, : TooManyClusters (SeedGeneratorFromRegionHitsEDProducer),
# photonConvTrajSeedFromSingleLeg : (PhotonConversionTrajectorySeedProducerFromSingleLeg)
SeedingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
SeedingLogMessageMon.pluginsMonName = cms.string ( 'Seeding' )
SeedingLogMessageMon.modules = cms.vstring( 'initialStepSeedsPreSplitting', 'initialStepSeeds', 'detachedTripletStepSeeds', 'lowPtTripletStepSeeds', 'pixelPairStepSeeds', 'mixedTripletStepSeedsA', 'mixedTripletStepSeedsB', 'pixelLessStepSeeds', 'tobTecStepSeeds', 'jetCoreRegionalStepSeeds', 'muonSeededSeedsOutIn', 'muonSeededSeedsInOut', 'photonConvTrajSeedFromSingleLeg')
SeedingLogMessageMon.categories = cms.vstring( 'TooManyClusters', 'TooManyPairs', 'TooManyTriplets', 'TooManySeeds' )
# RecoTracker/CkfPattern/src/CkfTrackCandidateMakerBase.cc
TrackCandidateLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackCandidateLogMessageMon.pluginsMonName = cms.string ( 'TrackCandidate' )
TrackCandidateLogMessageMon.modules = cms.vstring( 'initialStepTrackCandidatesPreSplitting', 'initialStepTrackCandidates', 'detachedTripletStepTrackCandidates', 'lowPtTripletStepTrackCandidates', 'pixelPairStepTrackCandidates', 'mixedTripletStepTrackCandidates', 'pixelLessStepTrackCandidates', 'tobTecStepTrackCandidates', 'jetCoreRegionalStepTrackCandidates', 'muonSeededTrackCandidatesInOut', 'muonSeededTrackCandidatesOutIn', 'convTrackCandidates' )
TrackCandidateLogMessageMon.categories = cms.vstring( 'TooManySeeds' )
# TrackProducer:FailedPropagation
TrackFinderLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackFinderLogMessageMon.pluginsMonName = cms.string ( 'TrackFinder' )
TrackFinderLogMessageMon.modules = cms.vstring( 'pixelTracks', 'initialStepTracks', 'lowPtTripletStepTracks', 'pixelPairStepTracks', 'detachedTripletStepTracks', 'mixedTripletStepTracks', 'pixelLessStepTracks', 'tobTecStepTracks', 'jetCoreRegionalStepTracks', 'muonSeededTracksOutIn', 'muonSeededTracksInOut', 'convStepTracks', 'generalTracks' )
TrackFinderLogMessageMon.categories = cms.vstring(
'FailedPropagation', 'RKPropagatorInS'
)
FullIterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
FullIterTrackingLogMessageMon.pluginsMonName = cms.string ( 'FullIterTracking' )
FullIterTrackingLogMessageMon.modules = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
'photonConvTrajSeedFromSingleLeg',
'convTrackCandidates',
'convStepTracks',
)
FullIterTrackingLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
IterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
IterTrackingLogMessageMon.pluginsMonName = cms.string ( 'IterTracking' )
IterTrackingLogMessageMon.modules = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
)
IterTrackingLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
ConversionLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ConversionLogMessageMon.pluginsMonName = cms.string ( 'Conversion' )
ConversionLogMessageMon.modules = cms.vstring(
'photonConvTrajSeedFromSingleLeg',
'convTrackCandidates',
'convStepTracks',
)
ConversionLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
) | DQM/TrackingMonitor/python/LogMessageMonitor_cff.py | import FWCore.ParameterSet.Config as cms
import DQM.TrackingMonitor.LogMessageMonitor_cfi
LocalRecoLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
LocalRecoLogMessageMon.pluginsMonName = cms.string ( 'LocalReco' )
LocalRecoLogMessageMon.modules = cms.vstring( 'siPixelDigis', 'siStripDigis', 'siPixelClusters', 'siStripClusters' ) # siPixelDigis : SiPixelRawToDigi, siStripDigis : SiStripRawToDigi (SiStripRawToDigiUnpacker), siPixelClusters : SiPixelClusterProducer, siStripClusters : SiStripClusterizer
LocalRecoLogMessageMon.categories = cms.vstring( 'SiPixelRawToDigi', 'TooManyErrors', 'TooManyClusters' )
# apparently there are no LogError messages in RecoLocalTracker/SubCollectionProducers/src/TrackClusterRemover.cc
ClusterizerLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ClusterizerLogMessageMon.pluginsMonName = cms.string ( 'TrackClusterRemover' )
ClusterizerLogMessageMon.modules = cms.vstring( 'detachedTripletStepClusters', 'lowPtTripletStepClusters', 'pixelPairStepClusters', 'mixedTripletStepClusters', 'pixelLessStepClusters', 'tobTecStepClusters' ) # TrackClusterRemover
ClusterizerLogMessageMon.categories = cms.vstring( )
# initialStepSeeds,lowPtTripletStepSeeds, pixelPairStepSeeds, detachedTripletStepSeeds, : TooManyClusters (SeedGeneratorFromRegionHitsEDProducer),
# photonConvTrajSeedFromSingleLeg : (PhotonConversionTrajectorySeedProducerFromSingleLeg)
SeedingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
SeedingLogMessageMon.pluginsMonName = cms.string ( 'Seeding' )
SeedingLogMessageMon.modules = cms.vstring( 'initialStepSeedsPreSplitting', 'initialStepSeeds', 'detachedTripletStepSeeds', 'lowPtTripletStepSeeds', 'pixelPairStepSeeds', 'mixedTripletStepSeedsA', 'mixedTripletStepSeedsB', 'pixelLessStepSeeds', 'tobTecStepSeeds', 'jetCoreRegionalStepSeeds', 'muonSeededSeedsOutIn', 'muonSeededSeedsInOut', 'photonConvTrajSeedFromSingleLeg')
SeedingLogMessageMon.categories = cms.vstring( 'TooManyClusters', 'TooManyPairs', 'TooManyTriplets', 'TooManySeeds' )
# RecoTracker/CkfPattern/src/CkfTrackCandidateMakerBase.cc
TrackCandidateLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackCandidateLogMessageMon.pluginsMonName = cms.string ( 'TrackCandidate' )
TrackCandidateLogMessageMon.modules = cms.vstring( 'initialStepTrackCandidatesPreSplitting', 'initialStepTrackCandidates', 'detachedTripletStepTrackCandidates', 'lowPtTripletStepTrackCandidates', 'pixelPairStepTrackCandidates', 'mixedTripletStepTrackCandidates', 'pixelLessStepTrackCandidates', 'tobTecStepTrackCandidates', 'jetCoreRegionalStepTrackCandidates', 'muonSeededTrackCandidatesInOut', 'muonSeededTrackCandidatesOutIn', 'convTrackCandidates' )
TrackCandidateLogMessageMon.categories = cms.vstring( 'TooManySeeds' )
# TrackProducer:FailedPropagation
TrackFinderLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackFinderLogMessageMon.pluginsMonName = cms.string ( 'TrackFinder' )
TrackFinderLogMessageMon.modules = cms.vstring( 'pixelTracks', 'initialStepTracks', 'lowPtTripletStepTracks', 'pixelPairStepTracks', 'detachedTripletStepTracks', 'mixedTripletStepTracks', 'pixelLessStepTracks', 'tobTecStepTracks', 'jetCoreRegionalStepTracks', 'muonSeededTracksOutIn', 'muonSeededTracksInOut', 'convStepTracks', 'generalTracks' )
TrackFinderLogMessageMon.categories = cms.vstring(
'FailedPropagation', 'RKPropagatorInS'
)
FullIterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
FullIterTrackingLogMessageMon.pluginsMonName = cms.string ( 'FullIterTracking' )
FullIterTrackingLogMessageMon.modules = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
'photonConvTrajSeedFromSingleLeg',
'convTrackCandidates',
'convStepTracks',
)
FullIterTrackingLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
IterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
IterTrackingLogMessageMon.pluginsMonName = cms.string ( 'IterTracking' )
IterTrackingLogMessageMon.modules = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
)
IterTrackingLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
ConversionLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ConversionLogMessageMon.pluginsMonName = cms.string ( 'Conversion' )
ConversionLogMessageMon.modules = cms.vstring(
'photonConvTrajSeedFromSingleLeg',
'convTrackCandidates',
'convStepTracks',
)
ConversionLogMessageMon.categories = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
) | 0.345768 | 0.193719 |
import os
import sys
import time
import numpy as np
import torch
from torch import nn
from torchvision import transforms
# (N, C, H, W)
#t = torch.randint(0, 255, size = (1, 3, 720, 1280), dtype = torch.uint8)
def set_resize_layers(p_ls):
    """Build one bilinear ``nn.Upsample`` module per scale factor in ``p_ls``."""
    return [
        nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
        for factor in p_ls
    ]
def np_to_uint_tensor(np_data):
    """Convert an (H, W, C) uint8 image to a CUDA float tensor in [0, 1].

    The channel axis is moved first, giving a (C, H, W) float32 tensor.
    (Despite the name, the result is float32, not uint.)
    """
    # Scale to [0, 1] and reorder HWC -> CHW before moving to the GPU.
    scaled = np.float32(np_data) / 255
    chw = scaled.transpose(2, 0, 1)
    return torch.from_numpy(chw).cuda()
def ImageLoad_torch(data, ensemble_n, resize_layers, is_silent):
    """Preprocess one RGB frame into an ensemble of normalized, resized tensors.

    input:
        data -- np array (H, W, 3), uint8 image
        ensemble_n -- number of resized copies to produce
            (uses resize_layers[1] .. resize_layers[ensemble_n])
        resize_layers -- list of nn.Upsample modules; index 0 is the base
            rescale (e.g. p=0.4: (720, 1280) --> (288, 512))
        is_silent -- suppress the size printout when True
    output:
        dict with 'img_ori' (the original array) and 'img_data'
        (list of contiguous (1, 3, h_i, w_i) CUDA tensors)
    """
    # ImageNet channel statistics, applied per channel after the /255 scale.
    normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406],
                                     std = [0.229, 0.224, 0.225])
    img_t = np_to_uint_tensor(data.copy())
    # normalize and then resize
    img_t = normalize(img_t)
    img_t = img_t.unsqueeze(0)
    img_t = resize_layers[0](img_t)
    _, _, ori_height, ori_width = img_t.shape
    #imgSizes = [300, 375, 450, 525, 600]
    #imgMaxSize = 1000
    # Produce one additional resized copy per ensemble member.
    img_resized_list = []
    for i in range(ensemble_n):
        m = resize_layers[i + 1]
        img_t_resized = m(img_t)
        img_resized_list.append(img_t_resized)
    output=dict()
    output['img_ori'] = data
    if not is_silent:
        print('img size', img_t.shape)
    # contiguous() so downstream ops can rely on dense memory layout.
    output['img_data'] = [x.contiguous() for x in img_resized_list]
    return output
####################### TESTING ###########################
def test_resize_layers():
    """Smoke-build the upsampling layers used by the demo loop."""
    scale_factors = [0.4, 1.041667, 1.302083, 1.5625, 1.82292]
    return set_resize_layers(scale_factors)
def test_np_to_t():
    """Smoke-convert a random uint8 720p frame to a CUDA tensor."""
    frame = np.random.randint(0, 255, (720, 1280, 3), dtype = np.uint8)
    return np_to_uint_tensor(frame)
if __name__ == '__main__':
    #t = test_np_to_t()
    # Benchmark: preprocess 10 random 720p frames, timing each full
    # ImageLoad_torch call (cuda.synchronize brackets each measurement so
    # asynchronous GPU work is included).
    resize_layers = test_resize_layers()
    for i in range(10):
        torch.cuda.synchronize()
        start = time.time()
        img = np.random.randint(0, 255, (720, 1280, 3), dtype = np.uint8)
        out = ImageLoad_torch(img, 3, resize_layers, is_silent = True)
        torch.cuda.synchronize()
        end = time.time()
        print('torch resize runtime: {}s'.format(end - start))
import sys
import time
import numpy as np
import torch
from torch import nn
from torchvision import transforms
# (N, C, H, W)
#t = torch.randint(0, 255, size = (1, 3, 720, 1280), dtype = torch.uint8)
def set_resize_layers(p_ls):
resize_m_ls = []
for p in p_ls:
m = nn.Upsample(scale_factor = p, mode = 'bilinear', align_corners = False)
resize_m_ls.append(m)
return resize_m_ls
def np_to_uint_tensor(np_data):
"""
permute and put numpy to cuda tensor, convert to float 0-1
input:
np_data -- np array, (H, W, C), uint8
output:
t -- torch tensor, (C, H, W), float32
"""
np_data = np.float32(np_data) / 255
np_data = np_data.transpose(2, 0, 1)
t = torch.from_numpy(np_data).cuda()
return t
def ImageLoad_torch(data, ensemble_n, resize_layers, is_silent):
"""
input:
data -- np array (H, W, 3)
p -- rescale factor. e.g. p=0.4: (720, 1280) --> (288, 512)
"""
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406],
std = [0.229, 0.224, 0.225])
img_t = np_to_uint_tensor(data.copy())
# normalize and then resize
img_t = normalize(img_t)
img_t = img_t.unsqueeze(0)
img_t = resize_layers[0](img_t)
_, _, ori_height, ori_width = img_t.shape
#imgSizes = [300, 375, 450, 525, 600]
#imgMaxSize = 1000
img_resized_list = []
for i in range(ensemble_n):
m = resize_layers[i + 1]
img_t_resized = m(img_t)
img_resized_list.append(img_t_resized)
output=dict()
output['img_ori'] = data
if not is_silent:
print('img size', img_t.shape)
output['img_data'] = [x.contiguous() for x in img_resized_list]
return output
####################### TESTING ###########################
def test_resize_layers():
p_ls = [0.4, 1.041667, 1.302083, 1.5625, 1.82292]
resize_layers = set_resize_layers(p_ls)
return resize_layers
def test_np_to_t():
x = np.random.randint(0, 255, (720, 1280, 3), dtype = np.uint8)
t = np_to_uint_tensor(x)
return t
if __name__ == '__main__':
#t = test_np_to_t()
resize_layers = test_resize_layers()
for i in range(10):
torch.cuda.synchronize()
start = time.time()
img = np.random.randint(0, 255, (720, 1280, 3), dtype = np.uint8)
out = ImageLoad_torch(img, 3, resize_layers, is_silent = True)
torch.cuda.synchronize()
end = time.time()
print('torch resize runtime: {}s'.format(end - start)) | 0.40439 | 0.352146 |
from collections import defaultdict
train_data = [['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', 'Yes'],
['Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', 'No'],
['No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', 'Yes'],
['Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '10-30', 'Yes'],
['Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', 'No'],
['No', 'Yes', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', 'Yes'],
['No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', 'No'],
['No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', 'Yes'],
['No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', 'No'],
['Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', 'No'],
['No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', 'No'],
['Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', 'Yes']
]
n = len(train_data)
classif_dist = defaultdict(int)
attrib_dist = []
for row in train_data:
classif_dist[row[10]] += 1
# Fill classif_dist or count of negative positives
for i in range(10):
one_attrib_dist = {"Yes": defaultdict(int), "No": defaultdict(int)}
for row in train_data:
one_attrib_dist[row[10]][row[i]] += 1
attrib_dist.append(one_attrib_dist)
print(classif_dist)
print(attrib_dist)
#print(classif_dist["Yes"])
#print(attrib_dist[3]["No"]["No"])
def classif_probability(attr, hyp):
    """Naive-Bayes score of hypothesis `hyp` ("Yes"/"No") for attribute vector `attr`.

    Returns the unnormalised posterior P(hyp) * prod_i P(attr[i] | hyp),
    using add-one smoothing on the conditional counts. Reads the module-level
    `n`, `classif_dist` and `attrib_dist` tables built above.
    """
    n_hyp = classif_dist[hyp]
    p = n_hyp / n  # prior P(hyp)
    for i in range(10):
        # BUG FIX: the original wrote `count + 1 / n_hyp`, which `/`-binds as
        # `count + (1 / n_hyp)` and yields values >> 1; the smoothed estimate
        # intended is (count + 1) / n_hyp.
        # NOTE(review): textbook Laplace smoothing would also add the number
        # of distinct values of attribute i to the denominator -- confirm intent.
        p *= (attrib_dist[i][hyp][attr[i]] + 1) / n_hyp
    return p  # one unnormalised probability
print("hyp: Yes", classif_probability(['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10'], "Yes"))
print("hyp: No", classif_probability(['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10'], "No")) | ht10.py | from collections import defaultdict
train_data = [['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', 'Yes'],
['Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', 'No'],
['No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', 'Yes'],
['Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '10-30', 'Yes'],
['Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', 'No'],
['No', 'Yes', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', 'Yes'],
['No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', 'No'],
['No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', 'Yes'],
['No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', 'No'],
['Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', 'No'],
['No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', 'No'],
['Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', 'Yes']
]
n = len(train_data)
classif_dist = defaultdict(int)
attrib_dist = []
for row in train_data:
classif_dist[row[10]] += 1
# Fill classif_dist or count of negative positives
for i in range(10):
one_attrib_dist = {"Yes": defaultdict(int), "No": defaultdict(int)}
for row in train_data:
one_attrib_dist[row[10]][row[i]] += 1
attrib_dist.append(one_attrib_dist)
print(classif_dist)
print(attrib_dist)
#print(classif_dist["Yes"])
#print(attrib_dist[3]["No"]["No"])
def classif_probability(attr, hyp):
    """Naive-Bayes score of hypothesis `hyp` ("Yes"/"No") for attribute vector `attr`.

    Returns the unnormalised posterior P(hyp) * prod_i P(attr[i] | hyp),
    using add-one smoothing on the conditional counts. Reads the module-level
    `n`, `classif_dist` and `attrib_dist` tables built above.
    """
    n_hyp = classif_dist[hyp]
    p = n_hyp / n  # prior P(hyp)
    for i in range(10):
        # BUG FIX: the original wrote `count + 1 / n_hyp`, which `/`-binds as
        # `count + (1 / n_hyp)` and yields values >> 1; the smoothed estimate
        # intended is (count + 1) / n_hyp.
        # NOTE(review): textbook Laplace smoothing would also add the number
        # of distinct values of attribute i to the denominator -- confirm intent.
        p *= (attrib_dist[i][hyp][attr[i]] + 1) / n_hyp
    return p  # one unnormalised probability
print("hyp: Yes", classif_probability(['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10'], "Yes"))
print("hyp: No", classif_probability(['Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10'], "No")) | 0.237046 | 0.244848 |
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.streetview.publish_v1.types import resources
__protobuf__ = proto.module(
package='google.streetview.publish.v1',
manifest={
'PhotoView',
'CreatePhotoRequest',
'GetPhotoRequest',
'BatchGetPhotosRequest',
'BatchGetPhotosResponse',
'PhotoResponse',
'ListPhotosRequest',
'ListPhotosResponse',
'UpdatePhotoRequest',
'BatchUpdatePhotosRequest',
'BatchUpdatePhotosResponse',
'DeletePhotoRequest',
'BatchDeletePhotosRequest',
'BatchDeletePhotosResponse',
},
)
class PhotoView(proto.Enum):
    r"""Specifies which view of the
    [Photo][google.streetview.publish.v1.Photo] to include in the
    response.
    """
    # Photo metadata only.
    BASIC = 0
    # Photo metadata plus the download URL for the photo bytes.
    INCLUDE_DOWNLOAD_URL = 1
class CreatePhotoRequest(proto.Message):
r"""Request to create a [Photo][google.streetview.publish.v1.Photo].
Attributes:
photo (google.streetview.publish_v1.types.Photo):
Required. Photo to create.
"""
photo = proto.Field(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
class GetPhotoRequest(proto.Message):
r"""Request to get a [Photo][google.streetview.publish.v1.Photo].
By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
Attributes:
photo_id (str):
Required. ID of the
[Photo][google.streetview.publish.v1.Photo].
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photo bytes should be
returned in the [Photo][google.streetview.publish.v1.Photo]
response.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
photo_id = proto.Field(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum='PhotoView',
)
language_code = proto.Field(
proto.STRING,
number=3,
)
class BatchGetPhotosRequest(proto.Message):
r"""Request to get one or more
[Photos][google.streetview.publish.v1.Photo]. By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
Attributes:
photo_ids (Sequence[str]):
Required. IDs of the
[Photos][google.streetview.publish.v1.Photo]. HTTP GET
requests require the following syntax for the URL query
parameter: ``photoIds=<id1>&photoIds=<id2>&...``.
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photo
bytes should be returned in the Photo response.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
photo_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum='PhotoView',
)
language_code = proto.Field(
proto.STRING,
number=3,
)
class BatchGetPhotosResponse(proto.Message):
r"""Response to batch get of
[Photos][google.streetview.publish.v1.Photo].
Attributes:
results (Sequence[google.streetview.publish_v1.types.PhotoResponse]):
List of results for each individual
[Photo][google.streetview.publish.v1.Photo] requested, in
the same order as the requests in
[BatchGetPhotos][google.streetview.publish.v1.StreetViewPublishService.BatchGetPhotos].
"""
results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PhotoResponse',
)
class PhotoResponse(proto.Message):
r"""Response payload for a single
[Photo][google.streetview.publish.v1.Photo] in batch operations
including
[BatchGetPhotos][google.streetview.publish.v1.StreetViewPublishService.BatchGetPhotos]
and
[BatchUpdatePhotos][google.streetview.publish.v1.StreetViewPublishService.BatchUpdatePhotos].
Attributes:
status (google.rpc.status_pb2.Status):
The status for the operation to get or update
a single photo in the batch request.
photo (google.streetview.publish_v1.types.Photo):
The [Photo][google.streetview.publish.v1.Photo] resource, if
the request was successful.
"""
status = proto.Field(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
photo = proto.Field(
proto.MESSAGE,
number=2,
message=resources.Photo,
)
class ListPhotosRequest(proto.Message):
r"""Request to list all photos that belong to the user sending the
request.
By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
- ``pageSize`` determines the maximum number of photos to return.
- ``pageToken`` is the next page token value returned from a
previous
[ListPhotos][google.streetview.publish.v1.StreetViewPublishService.ListPhotos]
request, if any.
- ``filter`` allows filtering by a given parameter. 'placeId' is
the only parameter supported at the moment.
Attributes:
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photos
bytes should be returned in the Photos response.
page_size (int):
The maximum number of photos to return. ``pageSize`` must be
non-negative. If ``pageSize`` is zero or is not provided,
the default page size of 100 is used. The number of photos
returned in the response may be less than ``pageSize`` if
the number of photos that belong to the user is less than
``pageSize``.
page_token (str):
The
[nextPageToken][google.streetview.publish.v1.ListPhotosResponse.next_page_token]
value returned from a previous
[ListPhotos][google.streetview.publish.v1.StreetViewPublishService.ListPhotos]
request, if any.
filter (str):
The filter expression. For example:
``placeId=ChIJj61dQgK6j4AR4GeTYWZsKWw``.
The only filter supported at the moment is ``placeId``.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
view = proto.Field(
proto.ENUM,
number=1,
enum='PhotoView',
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
filter = proto.Field(
proto.STRING,
number=4,
)
language_code = proto.Field(
proto.STRING,
number=5,
)
class ListPhotosResponse(proto.Message):
r"""Response to list all photos that belong to a user.
Attributes:
photos (Sequence[google.streetview.publish_v1.types.Photo]):
List of photos. The
[pageSize][google.streetview.publish.v1.ListPhotosRequest.page_size]
field in the request determines the number of items
returned.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
photos = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class UpdatePhotoRequest(proto.Message):
r"""Request to update the metadata of a
[Photo][google.streetview.publish.v1.Photo]. Updating the pixels of
a photo is not supported.
Attributes:
photo (google.streetview.publish_v1.types.Photo):
Required. [Photo][google.streetview.publish.v1.Photo] object
containing the new metadata.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask that identifies fields on the photo metadata to update.
If not present, the old
[Photo][google.streetview.publish.v1.Photo] metadata is
entirely replaced with the new
[Photo][google.streetview.publish.v1.Photo] metadata in this
request. The update fails if invalid fields are specified.
Multiple fields can be specified in a comma-delimited list.
The following fields are valid:
- ``pose.heading``
- ``pose.latLngPair``
- ``pose.pitch``
- ``pose.roll``
- ``pose.level``
- ``pose.altitude``
- ``connections``
- ``places``
.. raw:: html
<aside class="note"><b>Note:</b> When
[updateMask][google.streetview.publish.v1.UpdatePhotoRequest.update_mask]
contains repeated fields, the entire set of repeated values get replaced
with the new contents. For example, if
[updateMask][google.streetview.publish.v1.UpdatePhotoRequest.update_mask]
contains `connections` and `UpdatePhotoRequest.photo.connections` is empty,
all connections are removed.</aside>
"""
photo = proto.Field(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class BatchUpdatePhotosRequest(proto.Message):
r"""Request to update the metadata of photos.
Updating the pixels of photos is not supported.
Attributes:
update_photo_requests (Sequence[google.streetview.publish_v1.types.UpdatePhotoRequest]):
Required. List of
[UpdatePhotoRequests][google.streetview.publish.v1.UpdatePhotoRequest].
"""
update_photo_requests = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='UpdatePhotoRequest',
)
class BatchUpdatePhotosResponse(proto.Message):
r"""Response to batch update of metadata of one or more
[Photos][google.streetview.publish.v1.Photo].
Attributes:
results (Sequence[google.streetview.publish_v1.types.PhotoResponse]):
List of results for each individual
[Photo][google.streetview.publish.v1.Photo] updated, in the
same order as the request.
"""
results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PhotoResponse',
)
class DeletePhotoRequest(proto.Message):
r"""Request to delete a [Photo][google.streetview.publish.v1.Photo].
Attributes:
photo_id (str):
Required. ID of the
[Photo][google.streetview.publish.v1.Photo].
"""
photo_id = proto.Field(
proto.STRING,
number=1,
)
class BatchDeletePhotosRequest(proto.Message):
r"""Request to delete multiple
[Photos][google.streetview.publish.v1.Photo].
Attributes:
photo_ids (Sequence[str]):
Required. IDs of the
[Photos][google.streetview.publish.v1.Photo]. HTTP GET
requests require the following syntax for the URL query
parameter: ``photoIds=<id1>&photoIds=<id2>&...``.
"""
photo_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
class BatchDeletePhotosResponse(proto.Message):
r"""Response to batch delete of one or more
[Photos][google.streetview.publish.v1.Photo].
Attributes:
status (Sequence[google.rpc.status_pb2.Status]):
The status for the operation to delete a single
[Photo][google.streetview.publish.v1.Photo] in the batch
request.
"""
status = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
__all__ = tuple(sorted(__protobuf__.manifest)) | google/streetview/publish/v1/streetview-publish-v1-py/google/streetview/publish_v1/types/rpcmessages.py | import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.streetview.publish_v1.types import resources
__protobuf__ = proto.module(
package='google.streetview.publish.v1',
manifest={
'PhotoView',
'CreatePhotoRequest',
'GetPhotoRequest',
'BatchGetPhotosRequest',
'BatchGetPhotosResponse',
'PhotoResponse',
'ListPhotosRequest',
'ListPhotosResponse',
'UpdatePhotoRequest',
'BatchUpdatePhotosRequest',
'BatchUpdatePhotosResponse',
'DeletePhotoRequest',
'BatchDeletePhotosRequest',
'BatchDeletePhotosResponse',
},
)
class PhotoView(proto.Enum):
r"""Specifies which view of the
[Photo][google.streetview.publish.v1.Photo] to include in the
response.
"""
BASIC = 0
INCLUDE_DOWNLOAD_URL = 1
class CreatePhotoRequest(proto.Message):
r"""Request to create a [Photo][google.streetview.publish.v1.Photo].
Attributes:
photo (google.streetview.publish_v1.types.Photo):
Required. Photo to create.
"""
photo = proto.Field(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
class GetPhotoRequest(proto.Message):
r"""Request to get a [Photo][google.streetview.publish.v1.Photo].
By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
Attributes:
photo_id (str):
Required. ID of the
[Photo][google.streetview.publish.v1.Photo].
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photo bytes should be
returned in the [Photo][google.streetview.publish.v1.Photo]
response.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
photo_id = proto.Field(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum='PhotoView',
)
language_code = proto.Field(
proto.STRING,
number=3,
)
class BatchGetPhotosRequest(proto.Message):
r"""Request to get one or more
[Photos][google.streetview.publish.v1.Photo]. By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
Attributes:
photo_ids (Sequence[str]):
Required. IDs of the
[Photos][google.streetview.publish.v1.Photo]. HTTP GET
requests require the following syntax for the URL query
parameter: ``photoIds=<id1>&photoIds=<id2>&...``.
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photo
bytes should be returned in the Photo response.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
photo_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum='PhotoView',
)
language_code = proto.Field(
proto.STRING,
number=3,
)
class BatchGetPhotosResponse(proto.Message):
r"""Response to batch get of
[Photos][google.streetview.publish.v1.Photo].
Attributes:
results (Sequence[google.streetview.publish_v1.types.PhotoResponse]):
List of results for each individual
[Photo][google.streetview.publish.v1.Photo] requested, in
the same order as the requests in
[BatchGetPhotos][google.streetview.publish.v1.StreetViewPublishService.BatchGetPhotos].
"""
results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PhotoResponse',
)
class PhotoResponse(proto.Message):
r"""Response payload for a single
[Photo][google.streetview.publish.v1.Photo] in batch operations
including
[BatchGetPhotos][google.streetview.publish.v1.StreetViewPublishService.BatchGetPhotos]
and
[BatchUpdatePhotos][google.streetview.publish.v1.StreetViewPublishService.BatchUpdatePhotos].
Attributes:
status (google.rpc.status_pb2.Status):
The status for the operation to get or update
a single photo in the batch request.
photo (google.streetview.publish_v1.types.Photo):
The [Photo][google.streetview.publish.v1.Photo] resource, if
the request was successful.
"""
status = proto.Field(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
photo = proto.Field(
proto.MESSAGE,
number=2,
message=resources.Photo,
)
class ListPhotosRequest(proto.Message):
r"""Request to list all photos that belong to the user sending the
request.
By default
- does not return the download URL for the photo bytes.
Parameters:
- ``view`` controls if the download URL for the photo bytes is
returned.
- ``pageSize`` determines the maximum number of photos to return.
- ``pageToken`` is the next page token value returned from a
previous
[ListPhotos][google.streetview.publish.v1.StreetViewPublishService.ListPhotos]
request, if any.
- ``filter`` allows filtering by a given parameter. 'placeId' is
the only parameter supported at the moment.
Attributes:
view (google.streetview.publish_v1.types.PhotoView):
Specifies if a download URL for the photos
bytes should be returned in the Photos response.
page_size (int):
The maximum number of photos to return. ``pageSize`` must be
non-negative. If ``pageSize`` is zero or is not provided,
the default page size of 100 is used. The number of photos
returned in the response may be less than ``pageSize`` if
the number of photos that belong to the user is less than
``pageSize``.
page_token (str):
The
[nextPageToken][google.streetview.publish.v1.ListPhotosResponse.next_page_token]
value returned from a previous
[ListPhotos][google.streetview.publish.v1.StreetViewPublishService.ListPhotos]
request, if any.
filter (str):
The filter expression. For example:
``placeId=ChIJj61dQgK6j4AR4GeTYWZsKWw``.
The only filter supported at the moment is ``placeId``.
language_code (str):
The BCP-47 language code, such as "en-US" or "sr-Latn". For
more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
If language_code is unspecified, the user's language
preference for Google services is used.
"""
view = proto.Field(
proto.ENUM,
number=1,
enum='PhotoView',
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
filter = proto.Field(
proto.STRING,
number=4,
)
language_code = proto.Field(
proto.STRING,
number=5,
)
class ListPhotosResponse(proto.Message):
r"""Response to list all photos that belong to a user.
Attributes:
photos (Sequence[google.streetview.publish_v1.types.Photo]):
List of photos. The
[pageSize][google.streetview.publish.v1.ListPhotosRequest.page_size]
field in the request determines the number of items
returned.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
photos = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class UpdatePhotoRequest(proto.Message):
r"""Request to update the metadata of a
[Photo][google.streetview.publish.v1.Photo]. Updating the pixels of
a photo is not supported.
Attributes:
photo (google.streetview.publish_v1.types.Photo):
Required. [Photo][google.streetview.publish.v1.Photo] object
containing the new metadata.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask that identifies fields on the photo metadata to update.
If not present, the old
[Photo][google.streetview.publish.v1.Photo] metadata is
entirely replaced with the new
[Photo][google.streetview.publish.v1.Photo] metadata in this
request. The update fails if invalid fields are specified.
Multiple fields can be specified in a comma-delimited list.
The following fields are valid:
- ``pose.heading``
- ``pose.latLngPair``
- ``pose.pitch``
- ``pose.roll``
- ``pose.level``
- ``pose.altitude``
- ``connections``
- ``places``
.. raw:: html
<aside class="note"><b>Note:</b> When
[updateMask][google.streetview.publish.v1.UpdatePhotoRequest.update_mask]
contains repeated fields, the entire set of repeated values get replaced
with the new contents. For example, if
[updateMask][google.streetview.publish.v1.UpdatePhotoRequest.update_mask]
contains `connections` and `UpdatePhotoRequest.photo.connections` is empty,
all connections are removed.</aside>
"""
photo = proto.Field(
proto.MESSAGE,
number=1,
message=resources.Photo,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class BatchUpdatePhotosRequest(proto.Message):
r"""Request to update the metadata of photos.
Updating the pixels of photos is not supported.
Attributes:
update_photo_requests (Sequence[google.streetview.publish_v1.types.UpdatePhotoRequest]):
Required. List of
[UpdatePhotoRequests][google.streetview.publish.v1.UpdatePhotoRequest].
"""
update_photo_requests = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='UpdatePhotoRequest',
)
class BatchUpdatePhotosResponse(proto.Message):
r"""Response to batch update of metadata of one or more
[Photos][google.streetview.publish.v1.Photo].
Attributes:
results (Sequence[google.streetview.publish_v1.types.PhotoResponse]):
List of results for each individual
[Photo][google.streetview.publish.v1.Photo] updated, in the
same order as the request.
"""
results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PhotoResponse',
)
class DeletePhotoRequest(proto.Message):
r"""Request to delete a [Photo][google.streetview.publish.v1.Photo].
Attributes:
photo_id (str):
Required. ID of the
[Photo][google.streetview.publish.v1.Photo].
"""
photo_id = proto.Field(
proto.STRING,
number=1,
)
class BatchDeletePhotosRequest(proto.Message):
r"""Request to delete multiple
[Photos][google.streetview.publish.v1.Photo].
Attributes:
photo_ids (Sequence[str]):
Required. IDs of the
[Photos][google.streetview.publish.v1.Photo]. HTTP GET
requests require the following syntax for the URL query
parameter: ``photoIds=<id1>&photoIds=<id2>&...``.
"""
photo_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
class BatchDeletePhotosResponse(proto.Message):
r"""Response to batch delete of one or more
[Photos][google.streetview.publish.v1.Photo].
Attributes:
status (Sequence[google.rpc.status_pb2.Status]):
The status for the operation to delete a single
[Photo][google.streetview.publish.v1.Photo] in the batch
request.
"""
status = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
__all__ = tuple(sorted(__protobuf__.manifest)) | 0.723016 | 0.171269 |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
# Load the raw claims data (`path` is supplied by the execution environment).
df = pd.read_csv(path)
print(df.head())
# BUG FIX: df.info is a method -- the original printed its repr instead of
# the column/dtype summary.
print(df.info())
# Currency-formatted columns: strip '$' and thousands separators so they can
# be coerced to numeric later.
columns = ['INCOME', 'HOME_VAL', 'BLUEBOOK', 'OLDCLAIM', 'CLM_AMT']
for col in columns:
    # raw string avoids the invalid '\$' escape-sequence warning
    df[col].replace({r'\$': '', ',': ''}, regex=True, inplace=True)
# Split features/target; stratify preserves the class ratio in both splits.
X = df.drop('CLAIM_FLAG', axis=1)
y = df['CLAIM_FLAG']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=6)
# Code ends here
# --------------
# Code starts here
# The currency columns were stripped of '$'/',' above but are still strings;
# coerce them to numeric dtypes in both splits.
for col in columns:
    X_test[col] = X_test[[col]].apply(pd.to_numeric)
    X_train[col] = X_train[[col]].apply(pd.to_numeric)
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
# Code starts here
# drop missing values: rows lacking YOJ or OCCUPATION are discarded entirely
X_train.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
X_test.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
# realign the label Series with the surviving feature rows
y_train=y_train[X_train.index]
y_test=y_test[X_test.index]
# fill missing values with mean -- note the TRAIN mean is deliberately used
# for the test split too, so no test statistics leak into the imputation
X_train['AGE'].fillna((X_train['AGE'].mean()), inplace=True)
X_test['AGE'].fillna((X_train['AGE'].mean()), inplace=True)
X_train['CAR_AGE'].fillna((X_train['CAR_AGE'].mean()), inplace=True)
X_test['CAR_AGE'].fillna((X_train['CAR_AGE'].mean()), inplace=True)
X_train['INCOME'].fillna((X_train['INCOME'].mean()), inplace=True)
X_test['INCOME'].fillna((X_train['INCOME'].mean()), inplace=True)
X_train['HOME_VAL'].fillna((X_train['HOME_VAL'].mean()), inplace=True)
X_test['HOME_VAL'].fillna((X_train['HOME_VAL'].mean()), inplace=True)
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]
# Code starts here.
# BUG FIX: the original called fit_transform on BOTH splits with one shared
# encoder, so the same category could map to different integer codes in train
# and test. Fit one encoder per column on the union of values, then transform
# each split with that fixed mapping.
for col in columns:
    le = LabelEncoder()
    le.fit(pd.concat([X_train[col], X_test[col]]).astype(str))
    X_train[col] = le.transform(X_train[col].astype(str))
    X_test[col] = le.transform(X_test[col].astype(str))
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# Baseline classifier on the still-imbalanced data (SMOTE is applied below).
model = LogisticRegression(random_state = 6)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
# mean accuracy on the held-out split
score = model.score(X_test, y_test)
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# code starts here
# Oversample the minority class on the TRAIN split only.
smote = SMOTE(random_state=6)
# BUG FIX: SMOTE.fit_sample was deprecated in imbalanced-learn 0.4 and
# removed in 0.8; fit_resample is the supported API.
X_train, y_train = smote.fit_resample(X_train, y_train)
# Standardize features: fit the scaler on train only, reuse it for test.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
# Refit logistic regression on the SMOTE-balanced, standardized training data.
model = LogisticRegression()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
# accuracy on the untouched (not oversampled) test split
score = accuracy_score(y_test,y_pred)
# Code ends here
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
# Load the raw claims data (`path` is supplied by the execution environment).
df = pd.read_csv(path)
print(df.head())
# BUG FIX: df.info is a method -- the original printed its repr instead of
# the column/dtype summary.
print(df.info())
# Currency-formatted columns: strip '$' and thousands separators so they can
# be coerced to numeric later.
columns = ['INCOME', 'HOME_VAL', 'BLUEBOOK', 'OLDCLAIM', 'CLM_AMT']
for col in columns:
    # raw string avoids the invalid '\$' escape-sequence warning
    df[col].replace({r'\$': '', ',': ''}, regex=True, inplace=True)
# Split features/target; stratify preserves the class ratio in both splits.
X = df.drop('CLAIM_FLAG', axis=1)
y = df['CLAIM_FLAG']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=6)
# Code ends here
# --------------
# Code starts here
for col in columns:
X_test[col] = X_test[[col]].apply(pd.to_numeric)
X_train[col] = X_train[[col]].apply(pd.to_numeric)
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
# Code starts here
# drop missing values
X_train.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
X_test.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
y_train=y_train[X_train.index]
y_test=y_test[X_test.index]
# fill missing values with mean
X_train['AGE'].fillna((X_train['AGE'].mean()), inplace=True)
X_test['AGE'].fillna((X_train['AGE'].mean()), inplace=True)
X_train['CAR_AGE'].fillna((X_train['CAR_AGE'].mean()), inplace=True)
X_test['CAR_AGE'].fillna((X_train['CAR_AGE'].mean()), inplace=True)
X_train['INCOME'].fillna((X_train['INCOME'].mean()), inplace=True)
X_test['INCOME'].fillna((X_train['INCOME'].mean()), inplace=True)
X_train['HOME_VAL'].fillna((X_train['HOME_VAL'].mean()), inplace=True)
X_test['HOME_VAL'].fillna((X_train['HOME_VAL'].mean()), inplace=True)
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]
# Code starts here.
# BUG FIX: the original called fit_transform on BOTH splits with one shared
# encoder, so the same category could map to different integer codes in train
# and test. Fit one encoder per column on the union of values, then transform
# each split with that fixed mapping.
for col in columns:
    le = LabelEncoder()
    le.fit(pd.concat([X_train[col], X_test[col]]).astype(str))
    X_train[col] = le.transform(X_train[col].astype(str))
    X_test[col] = le.transform(X_test[col].astype(str))
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state = 6)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = model.score(X_test, y_test)
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# code starts here
# Oversample the minority class on the TRAIN split only.
smote = SMOTE(random_state=6)
# BUG FIX: SMOTE.fit_sample was deprecated in imbalanced-learn 0.4 and
# removed in 0.8; fit_resample is the supported API.
X_train, y_train = smote.fit_resample(X_train, y_train)
# Standardize features: fit the scaler on train only, reuse it for test.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
model = LogisticRegression()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test,y_pred)
# Code ends here | 0.314471 | 0.300348 |
import numpy as np
import scipy
from sklearn.utils import sparsefuncs
def normalize_by_umi(matrix):
    """Median-normalize barcode (column) totals: scale every barcode so its
    total count equals the median total across barcodes (floored at 1.0)."""
    totals = matrix.get_counts_per_bc()
    target = max(1.0, np.median(totals))
    normalized = matrix.m.copy().astype(np.float64)
    sparsefuncs.inplace_column_scale(normalized, target / totals)
    return normalized
def normalize_by_idf(matrix):
    """Weight each feature (row) by its inverse document frequency:
    log((n_barcodes + 1) / (1 + n_barcodes_containing_feature))."""
    doc_freq = matrix.get_numbcs_per_feature()
    idf = np.log(matrix.bcs_dim + 1) - np.log(1 + doc_freq)
    weighted = matrix.m.copy().astype(np.float64)
    sparsefuncs.inplace_row_scale(weighted, idf)
    return weighted
def summarize_columns(matrix):
    '''Per-column mean and variance of a sparse matrix, computed without densifying.'''
    col_mean = matrix.mean(axis=0).A
    # Var(X) = E[X^2] - E[X]^2; matrix.multiply(matrix) squares elementwise
    # while keeping the zeros implicit.
    col_mean_sq = matrix.multiply(matrix).mean(axis=0).A
    return col_mean, col_mean_sq - np.square(col_mean)
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    """ Calculates the normalized dispersion. The dispersion is calculated for each feature
    and then normalized to see how its dispersion compares to samples that had a
    similar mean value.

    Args:
        mat_mean: 1-D array of per-feature means.
        mat_var: 1-D array of per-feature variances.
        nbins: number of mean-quantile bins used for the normalization.

    Returns:
        Per-feature dispersion, median-centered and MAD-scaled within its mean
        bin; the raw dispersion if all means are identical.
    """
    # BUG FIX: this module only executes `import scipy`, which does not
    # reliably expose scipy.stats (submodules were not auto-imported before
    # scipy's lazy loading); import the submodule explicitly.
    import scipy.stats

    # See equation in https://academic.oup.com/nar/article/40/10/4288/2411520
    # If a negative binomial is parameterized with mean m, and variance = m + d * m^2
    # then this d = dispersion as calculated below
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)

    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())

    # merge bins with no difference in value
    quantiles = np.unique(quantiles)

    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp

    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)

    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins-1]  # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)

    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins-1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
import scipy
from sklearn.utils import sparsefuncs
def normalize_by_umi(matrix):
counts_per_bc = matrix.get_counts_per_bc()
median_counts_per_bc = max(1.0, np.median(counts_per_bc))
scaling_factors = median_counts_per_bc / counts_per_bc
# Normalize each barcode's total count by median total count
m = matrix.m.copy().astype(np.float64)
sparsefuncs.inplace_column_scale(m, scaling_factors)
return m
def normalize_by_idf(matrix):
numbcs_per_feature = matrix.get_numbcs_per_feature()
scaling_factors_row = np.log(matrix.bcs_dim + 1) - np.log(1 + numbcs_per_feature)
m = matrix.m.copy().astype(np.float64)
sparsefuncs.inplace_row_scale(m, scaling_factors_row)
return m
def summarize_columns(matrix):
''' Calculate mean and variance of each column, in a sparsity-preserving way.'''
mu = matrix.mean(axis=0).A
# sparse variance = E(col^2) - E(col)^2
mu2 = matrix.multiply(matrix).mean(axis=0).A
var = mu2 - mu**2
return mu, var
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
""" Calculates the normalized dispersion. The dispersion is calculated for each feature
and then normalized to see how its dispersion compares to samples that had a
similar mean value.
"""
# See equation in https://academic.oup.com/nar/article/40/10/4288/2411520
# If a negative binomial is parameterized with mean m, and variance = m + d * m^2
# then this d = dispersion as calculated below
mat_disp = (mat_var - mat_mean) / np.square(mat_mean)
quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
quantiles = np.append(quantiles, mat_mean.max())
# merge bins with no difference in value
quantiles = np.unique(quantiles)
if len(quantiles) <= 1:
# pathological case: the means are all identical. just return raw dispersion.
return mat_disp
# calc median dispersion per bin
(disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)
# calc median absolute deviation of dispersion per bin
disp_meds_arr = disp_meds[disp_bins-1] # 0th bin is empty since our quantiles start from 0
disp_abs_dev = abs(mat_disp - disp_meds_arr)
(disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)
# calculate normalized dispersion
disp_mads_arr = disp_mads[disp_bins-1]
disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
return disp_norm | 0.870982 | 0.735547 |
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Layer
from utils import pnsm
class PyramidNSMLayer(Layer):
'''
'''
def __init__(self, ishape, num_of_rois, nsm_iou_threshold, nsm_score_threshold, anchor_4dtensors, **kwargs):
self.ishape = ishape
self.num_of_rois = num_of_rois
self.nsm_iou_threshold = nsm_iou_threshold
self.nsm_score_threshold = nsm_score_threshold
self.anchor_4dtensors = anchor_4dtensors
super(PyramidNSMLayer, self).__init__(**kwargs)
def build(self, input_shape):
'''
Arguments
input_shape: [
(batch_size, h1, w1, 6k),
(batch_size, h2, w2, 6k),
(batch_size, h3, w3, 6k),
(batch_size, h4, w4, 6k)
]
'''
assert len(input_shape) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
super(PyramidNSMLayer, self).build(input_shape)
def compute_output_shape(self, input_shape):
'''
Arguments
input_shape: [
(batch_size, h1, w1, 6k),
(batch_size, h2, w2, 6k),
(batch_size, h3, w3, 6k),
(batch_size, h4, w4, 6k)
]
Return
None, num_of_rois, 4
'''
assert len(input_shape) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
return None, self.num_of_rois, 4
def call(self, x):
'''
To compute rois from infered classification branches and location branches
Arguments:
x:
Return
roi_3dtensor:
'''
assert len(x) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
ishape = self.ishape
max_num_of_rois = self.num_of_rois
nsm_iou_threshold = self.nsm_iou_threshold
anchor_4dtensors = self.anchor_4dtensors
nsm_score_threshold = self.nsm_score_threshold
clzbbe_3dtensors = [x[0][0], x[1][0], x[2][0], x[3][0]]
roi_2dtensor = pnsm(
anchor_4dtensors=anchor_4dtensors,
clzbbe_3dtensors=clzbbe_3dtensors,
max_num_of_rois=max_num_of_rois,
nsm_iou_threshold=nsm_iou_threshold,
nsm_score_threshold=nsm_score_threshold,
ishape=ishape) # (num_of_rois, 4)
roi_3dtensor = tf.expand_dims(input=roi_2dtensor, axis=0) # (batch_size, num_of_rois, 4), batch_size = 1
return roi_3dtensor | maskrcnn/PyramidNSMLayer.py | import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Layer
from utils import pnsm
class PyramidNSMLayer(Layer):
'''
'''
def __init__(self, ishape, num_of_rois, nsm_iou_threshold, nsm_score_threshold, anchor_4dtensors, **kwargs):
self.ishape = ishape
self.num_of_rois = num_of_rois
self.nsm_iou_threshold = nsm_iou_threshold
self.nsm_score_threshold = nsm_score_threshold
self.anchor_4dtensors = anchor_4dtensors
super(PyramidNSMLayer, self).__init__(**kwargs)
def build(self, input_shape):
'''
Arguments
input_shape: [
(batch_size, h1, w1, 6k),
(batch_size, h2, w2, 6k),
(batch_size, h3, w3, 6k),
(batch_size, h4, w4, 6k)
]
'''
assert len(input_shape) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
super(PyramidNSMLayer, self).build(input_shape)
def compute_output_shape(self, input_shape):
'''
Arguments
input_shape: [
(batch_size, h1, w1, 6k),
(batch_size, h2, w2, 6k),
(batch_size, h3, w3, 6k),
(batch_size, h4, w4, 6k)
]
Return
None, num_of_rois, 4
'''
assert len(input_shape) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
return None, self.num_of_rois, 4
def call(self, x):
'''
To compute rois from infered classification branches and location branches
Arguments:
x:
Return
roi_3dtensor:
'''
assert len(x) == 4, 'PyramidNSMLayer must be passed 4 inputs: 4 lavels of clz_tensor & bbe_tensor'
ishape = self.ishape
max_num_of_rois = self.num_of_rois
nsm_iou_threshold = self.nsm_iou_threshold
anchor_4dtensors = self.anchor_4dtensors
nsm_score_threshold = self.nsm_score_threshold
clzbbe_3dtensors = [x[0][0], x[1][0], x[2][0], x[3][0]]
roi_2dtensor = pnsm(
anchor_4dtensors=anchor_4dtensors,
clzbbe_3dtensors=clzbbe_3dtensors,
max_num_of_rois=max_num_of_rois,
nsm_iou_threshold=nsm_iou_threshold,
nsm_score_threshold=nsm_score_threshold,
ishape=ishape) # (num_of_rois, 4)
roi_3dtensor = tf.expand_dims(input=roi_2dtensor, axis=0) # (batch_size, num_of_rois, 4), batch_size = 1
return roi_3dtensor | 0.633637 | 0.474144 |
from azure import *
from azure.servicemanagement import *
import errno
import getopt
import os
import shutil
import subprocess
import sys
import time
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.rstrip().partition("=")
os.environ[key] = value
proc.communicate()
source_env_local()
# make sure we have required parameters
AZURE_SUBSCRIPTION_ID = os.environ.get('AZURE_SUBSCRIPTION_ID')
if not AZURE_SUBSCRIPTION_ID:
print('AZURE_SUBSCRIPTION_ID is not set.')
exit(1)
AZURE_SERVICE_NAME = os.environ.get('AZURE_SERVICE_NAME')
if not AZURE_SERVICE_NAME:
print('AZURE_SERVICE_NAME is not set.')
exit(1)
AZURE_ROLE_SIZE = os.environ.get('AZURE_ROLE_SIZE')
if not AZURE_ROLE_SIZE:
print('AZURE_ROLE_SIZE is not set.')
exit(1)
AZURE_STORAGE_ACCOUNT = os.environ.get('AZURE_STORAGE_ACCOUNT')
if not AZURE_STORAGE_ACCOUNT:
print('AZURE_STORAGE_ACCOUNT is not set.')
exit(1)
# management certificate
AZURE_MGMT_CERT = 'ssh/mycert.pem'
# service certificate
AZURE_SERVICE_PEM = 'ssh/bazaar.pem'
# vm settings
linux_image_name = 'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB'
container_name = 'bazaarctr'
location = 'West US'
class AzureClient:
def __init__(self):
self.sms = ServiceManagementService(AZURE_SUBSCRIPTION_ID, AZURE_MGMT_CERT)
def service_exists(self):
try:
props = self.sms.get_hosted_service_properties(AZURE_SERVICE_NAME)
return props is not None
except:
return False
def create_hosted_service(self):
if not self.service_exists():
print('Creating service ' + AZURE_SERVICE_NAME)
result = self.sms.create_hosted_service(
AZURE_SERVICE_NAME,
AZURE_SERVICE_NAME + 'label',
AZURE_SERVICE_NAME + 'description',
location)
self._wait_for_async(result.request_id)
self.create_service_certificate()
def list_services(self):
result = self.sms.list_hosted_services()
for hosted_service in result:
print('- Service name: ' + hosted_service.service_name)
print(' Management URL: ' + hosted_service.url)
print(' Location: ' + hosted_service.hosted_service_properties.location)
def delete_service():
self.sms.delete_hosted_service(AZURE_SERVICE_NAME)
def delete_deployment():
self.sms.delete_deployment('myhostedservice', 'v1')
def _linux_role(self, role_name, subnet_name=None, port='22'):
container_name = 'bazaarctr' + role_name
host_name = 'hn' + role_name
system = self._linux_config(host_name)
os_hd = self._os_hd(linux_image_name,
container_name,
role_name + '.vhd')
network = self._network_config(subnet_name, port)
return (system, os_hd, network)
def get_fingerprint(self):
import hashlib
with open (AZURE_SERVICE_PEM, "r") as myfile:
data = myfile.readlines()
lines = data[1:-1]
all = ''.join([x.rstrip() for x in lines])
key = base64.b64decode(all.encode('ascii'))
fp = hashlib.sha1(key).hexdigest()
return fp.upper()
def _linux_config(self, hostname):
SERVICE_CERT_THUMBPRINT = self.get_fingerprint()
pk = PublicKey(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/authorized_keys')
pair = KeyPair(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/id_rsa')
system = LinuxConfigurationSet(hostname, 'bazaar', 'u7;9jbp!', True)
system.ssh.public_keys.public_keys.append(pk)
system.ssh.key_pairs.key_pairs.append(pair)
system.disable_ssh_password_authentication = True
return system
def _network_config(self, subnet_name=None, port='22'):
network = ConfigurationSet()
network.configuration_set_type = 'NetworkConfiguration'
network.input_endpoints.input_endpoints.append(
ConfigurationSetInputEndpoint('SSH', 'tcp', port, '22'))
if subnet_name:
network.subnet_names.append(subnet_name)
return network
def _os_hd(self, image_name, target_container_name, target_blob_name):
media_link = self._make_blob_url(
AZURE_STORAGE_ACCOUNT,
target_container_name, target_blob_name)
os_hd = OSVirtualHardDisk(image_name, media_link,
disk_label=target_blob_name)
return os_hd
def _make_blob_url(self, storage_account_name, container_name, blob_name):
return 'http://{0}.blob.core.windows.net/{1}/{2}'.format(
storage_account_name, container_name, blob_name)
def create_storage(self):
name = AZURE_STORAGE_ACCOUNT
label = 'mystorageaccount'
location = 'West US'
desc = 'My storage account description.'
result = self.sms.create_storage_account(name, desc, label, location=location)
self._wait_for_async(result.request_id)
def storage_account_exists(self, name):
try:
props = self.sms.get_storage_account_properties(name)
return props is not None
except:
return False
def list_storage(self):
result = self.sms.list_storage_accounts()
for account in result:
print('Service name: ' + account.service_name)
print('Location: ' + account.storage_service_properties.location)
print('')
def delete_storage(self):
self.sms.delete_storage_account(AZURE_STORAGE_ACCOUNT)
def list_role_sizes(self):
result = self.sms.list_role_sizes()
for rs in result:
print('Name: ' + rs.name)
def _wait_for_async(self, request_id):
try:
self.sms.wait_for_operation_status(request_id, timeout=600)
except azure.WindowsAzureAsyncOperationError as e:
from pprint import pprint
pprint (vars(e.result.error))
def _wait_for_deployment(self, service_name, deployment_name,
status='Running'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while props.status != status:
count = count + 1
if count > 120:
self.assertTrue(
False, 'Timed out waiting for deployment status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _wait_for_role(self, service_name, deployment_name, role_instance_name,
status='ReadyRole'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while self._get_role_instance_status(props, role_instance_name) != status:
count = count + 1
if count > 120:
self.assertTrue(
False, 'Timed out waiting for role instance status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _get_role_instance_status(self, deployment, role_instance_name):
for role_instance in deployment.role_instance_list:
if role_instance.instance_name == role_instance_name:
return role_instance.instance_status
return None
def delete_hosted_service(self):
print('Terminating service')
try:
self.sms.delete_hosted_service(AZURE_SERVICE_NAME, complete=True)
except:
pass
if os.path.exists('.state'):
shutil.rmtree('.state')
def create_state_dir(self):
try:
os.makedirs('.state')
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir('.state'):
print("Found existing .state dir. Please terminate instances first.")
exit(1)
else: raise
def list_os_images_public(self):
result = self.sms.list_os_images()
for img in result:
print(img.name)
def create_service_certificate(self):
with open(AZURE_SERVICE_PEM, "rb") as bfile:
cert_data = base64.b64encode(bfile.read()).decode()
cert_format = 'pfx'
cert_password = ''
cert_res = self.sms.add_service_certificate(service_name=AZURE_SERVICE_NAME,
data=cert_data,
certificate_format=cert_format,
password=<PASSWORD>)
self._wait_for_async(cert_res.request_id)
def create_deployment_and_roles(self, num_machines = 1):
deployment_name = AZURE_SERVICE_NAME
# one role for each machine
roles = []
for i in range(0, num_machines):
roles.append(AZURE_SERVICE_NAME + str(i))
system, os_hd, network = self._linux_role(roles[0], port='2000')
result = self.sms.create_virtual_machine_deployment(
AZURE_SERVICE_NAME, deployment_name, 'production',
deployment_name + 'label', roles[0], system, os_hd,
network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_deployment(AZURE_SERVICE_NAME, deployment_name)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[0])
for i in range(1, len(roles)):
system, os_hd, network = self._linux_role(roles[i], port=str(2000+i))
result = self.sms.add_role(AZURE_SERVICE_NAME, deployment_name, roles[i],
system, os_hd, network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[i])
# write to .state
with open('.state/HOSTS', 'w') as f:
for i in range(0, len(roles)):
f.write('bazaar@' + AZURE_SERVICE_NAME + '.cloudapp.net:' + str(2000+i) + '\n')
with open('.state/DIRS', 'w') as f:
for i in range(0, len(roles)):
f.write('/mnt\n')
with open('.state/CLOUD', 'w') as f:
f.write('azure')
def launch(argv):
num_instances = 1
try:
opts, args = getopt.getopt(argv,"n:",[])
except getopt.GetoptError:
#print " -n <numinstances>"
sys.exit(2)
for opt, arg in opts:
if opt == '-n':
num_instances = int(arg)
print('Launching ' + str(num_instances) + ' instances on Azure')
client = AzureClient()
client.create_state_dir()
client.create_hosted_service()
if not client.storage_account_exists(AZURE_STORAGE_ACCOUNT):
client.create_storage()
client.create_deployment_and_roles(num_instances)
def terminate():
client = AzureClient()
client.delete_hosted_service()
# We don't delete storage account, because it takes a long time to re-create.
#client.delete_storage()
def usage():
print("Usage: azure-client.py launch|terminate|role_sizes [OPTIONS]")
exit(1)
def main(argv):
if len(argv) < 1:
usage()
cmd = argv[0]
if cmd == 'launch':
launch(argv[1:])
elif cmd == 'terminate':
terminate()
elif cmd == 'role_sizes':
client = AzureClient()
client.list_role_sizes()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:]) | udf/bazaar/distribute/azure-client.py |
from azure import *
from azure.servicemanagement import *
import errno
import getopt
import os
import shutil
import subprocess
import sys
import time
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.rstrip().partition("=")
os.environ[key] = value
proc.communicate()
source_env_local()
# make sure we have required parameters
AZURE_SUBSCRIPTION_ID = os.environ.get('AZURE_SUBSCRIPTION_ID')
if not AZURE_SUBSCRIPTION_ID:
print('AZURE_SUBSCRIPTION_ID is not set.')
exit(1)
AZURE_SERVICE_NAME = os.environ.get('AZURE_SERVICE_NAME')
if not AZURE_SERVICE_NAME:
print('AZURE_SERVICE_NAME is not set.')
exit(1)
AZURE_ROLE_SIZE = os.environ.get('AZURE_ROLE_SIZE')
if not AZURE_ROLE_SIZE:
print('AZURE_ROLE_SIZE is not set.')
exit(1)
AZURE_STORAGE_ACCOUNT = os.environ.get('AZURE_STORAGE_ACCOUNT')
if not AZURE_STORAGE_ACCOUNT:
print('AZURE_STORAGE_ACCOUNT is not set.')
exit(1)
# management certificate
AZURE_MGMT_CERT = 'ssh/mycert.pem'
# service certificate
AZURE_SERVICE_PEM = 'ssh/bazaar.pem'
# vm settings
linux_image_name = 'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB'
container_name = 'bazaarctr'
location = 'West US'
class AzureClient:
def __init__(self):
self.sms = ServiceManagementService(AZURE_SUBSCRIPTION_ID, AZURE_MGMT_CERT)
def service_exists(self):
try:
props = self.sms.get_hosted_service_properties(AZURE_SERVICE_NAME)
return props is not None
except:
return False
def create_hosted_service(self):
if not self.service_exists():
print('Creating service ' + AZURE_SERVICE_NAME)
result = self.sms.create_hosted_service(
AZURE_SERVICE_NAME,
AZURE_SERVICE_NAME + 'label',
AZURE_SERVICE_NAME + 'description',
location)
self._wait_for_async(result.request_id)
self.create_service_certificate()
def list_services(self):
result = self.sms.list_hosted_services()
for hosted_service in result:
print('- Service name: ' + hosted_service.service_name)
print(' Management URL: ' + hosted_service.url)
print(' Location: ' + hosted_service.hosted_service_properties.location)
def delete_service():
self.sms.delete_hosted_service(AZURE_SERVICE_NAME)
def delete_deployment():
self.sms.delete_deployment('myhostedservice', 'v1')
def _linux_role(self, role_name, subnet_name=None, port='22'):
container_name = 'bazaarctr' + role_name
host_name = 'hn' + role_name
system = self._linux_config(host_name)
os_hd = self._os_hd(linux_image_name,
container_name,
role_name + '.vhd')
network = self._network_config(subnet_name, port)
return (system, os_hd, network)
def get_fingerprint(self):
import hashlib
with open (AZURE_SERVICE_PEM, "r") as myfile:
data = myfile.readlines()
lines = data[1:-1]
all = ''.join([x.rstrip() for x in lines])
key = base64.b64decode(all.encode('ascii'))
fp = hashlib.sha1(key).hexdigest()
return fp.upper()
def _linux_config(self, hostname):
SERVICE_CERT_THUMBPRINT = self.get_fingerprint()
pk = PublicKey(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/authorized_keys')
pair = KeyPair(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/id_rsa')
system = LinuxConfigurationSet(hostname, 'bazaar', 'u7;9jbp!', True)
system.ssh.public_keys.public_keys.append(pk)
system.ssh.key_pairs.key_pairs.append(pair)
system.disable_ssh_password_authentication = True
return system
def _network_config(self, subnet_name=None, port='22'):
network = ConfigurationSet()
network.configuration_set_type = 'NetworkConfiguration'
network.input_endpoints.input_endpoints.append(
ConfigurationSetInputEndpoint('SSH', 'tcp', port, '22'))
if subnet_name:
network.subnet_names.append(subnet_name)
return network
def _os_hd(self, image_name, target_container_name, target_blob_name):
media_link = self._make_blob_url(
AZURE_STORAGE_ACCOUNT,
target_container_name, target_blob_name)
os_hd = OSVirtualHardDisk(image_name, media_link,
disk_label=target_blob_name)
return os_hd
def _make_blob_url(self, storage_account_name, container_name, blob_name):
return 'http://{0}.blob.core.windows.net/{1}/{2}'.format(
storage_account_name, container_name, blob_name)
def create_storage(self):
name = AZURE_STORAGE_ACCOUNT
label = 'mystorageaccount'
location = 'West US'
desc = 'My storage account description.'
result = self.sms.create_storage_account(name, desc, label, location=location)
self._wait_for_async(result.request_id)
def storage_account_exists(self, name):
try:
props = self.sms.get_storage_account_properties(name)
return props is not None
except:
return False
def list_storage(self):
result = self.sms.list_storage_accounts()
for account in result:
print('Service name: ' + account.service_name)
print('Location: ' + account.storage_service_properties.location)
print('')
def delete_storage(self):
self.sms.delete_storage_account(AZURE_STORAGE_ACCOUNT)
def list_role_sizes(self):
result = self.sms.list_role_sizes()
for rs in result:
print('Name: ' + rs.name)
def _wait_for_async(self, request_id):
try:
self.sms.wait_for_operation_status(request_id, timeout=600)
except azure.WindowsAzureAsyncOperationError as e:
from pprint import pprint
pprint (vars(e.result.error))
def _wait_for_deployment(self, service_name, deployment_name,
status='Running'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while props.status != status:
count = count + 1
if count > 120:
self.assertTrue(
False, 'Timed out waiting for deployment status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _wait_for_role(self, service_name, deployment_name, role_instance_name,
status='ReadyRole'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while self._get_role_instance_status(props, role_instance_name) != status:
count = count + 1
if count > 120:
self.assertTrue(
False, 'Timed out waiting for role instance status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _get_role_instance_status(self, deployment, role_instance_name):
for role_instance in deployment.role_instance_list:
if role_instance.instance_name == role_instance_name:
return role_instance.instance_status
return None
def delete_hosted_service(self):
print('Terminating service')
try:
self.sms.delete_hosted_service(AZURE_SERVICE_NAME, complete=True)
except:
pass
if os.path.exists('.state'):
shutil.rmtree('.state')
def create_state_dir(self):
try:
os.makedirs('.state')
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir('.state'):
print("Found existing .state dir. Please terminate instances first.")
exit(1)
else: raise
def list_os_images_public(self):
result = self.sms.list_os_images()
for img in result:
print(img.name)
def create_service_certificate(self):
with open(AZURE_SERVICE_PEM, "rb") as bfile:
cert_data = base64.b64encode(bfile.read()).decode()
cert_format = 'pfx'
cert_password = ''
cert_res = self.sms.add_service_certificate(service_name=AZURE_SERVICE_NAME,
data=cert_data,
certificate_format=cert_format,
password=<PASSWORD>)
self._wait_for_async(cert_res.request_id)
def create_deployment_and_roles(self, num_machines = 1):
deployment_name = AZURE_SERVICE_NAME
# one role for each machine
roles = []
for i in range(0, num_machines):
roles.append(AZURE_SERVICE_NAME + str(i))
system, os_hd, network = self._linux_role(roles[0], port='2000')
result = self.sms.create_virtual_machine_deployment(
AZURE_SERVICE_NAME, deployment_name, 'production',
deployment_name + 'label', roles[0], system, os_hd,
network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_deployment(AZURE_SERVICE_NAME, deployment_name)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[0])
for i in range(1, len(roles)):
system, os_hd, network = self._linux_role(roles[i], port=str(2000+i))
result = self.sms.add_role(AZURE_SERVICE_NAME, deployment_name, roles[i],
system, os_hd, network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[i])
# write to .state
with open('.state/HOSTS', 'w') as f:
for i in range(0, len(roles)):
f.write('bazaar@' + AZURE_SERVICE_NAME + '.cloudapp.net:' + str(2000+i) + '\n')
with open('.state/DIRS', 'w') as f:
for i in range(0, len(roles)):
f.write('/mnt\n')
with open('.state/CLOUD', 'w') as f:
f.write('azure')
def launch(argv):
num_instances = 1
try:
opts, args = getopt.getopt(argv,"n:",[])
except getopt.GetoptError:
#print " -n <numinstances>"
sys.exit(2)
for opt, arg in opts:
if opt == '-n':
num_instances = int(arg)
print('Launching ' + str(num_instances) + ' instances on Azure')
client = AzureClient()
client.create_state_dir()
client.create_hosted_service()
if not client.storage_account_exists(AZURE_STORAGE_ACCOUNT):
client.create_storage()
client.create_deployment_and_roles(num_instances)
def terminate():
client = AzureClient()
client.delete_hosted_service()
# We don't delete storage account, because it takes a long time to re-create.
#client.delete_storage()
def usage():
print("Usage: azure-client.py launch|terminate|role_sizes [OPTIONS]")
exit(1)
def main(argv):
if len(argv) < 1:
usage()
cmd = argv[0]
if cmd == 'launch':
launch(argv[1:])
elif cmd == 'terminate':
terminate()
elif cmd == 'role_sizes':
client = AzureClient()
client.list_role_sizes()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:]) | 0.273186 | 0.05875 |
from typing import List
from aws_cdk.aws_lambda import Runtime
import jsii
from aws_cdk import core as cdk
from aws_cdk import aws_lambda_nodejs
from aws_cdk.aws_ec2 import IInstance, IVpc, SubnetSelection
from aws_cdk.aws_secretsmanager import ISecret
from aws_cdk.aws_lambda_nodejs import ICommandHooks, NodejsFunction, BundlingOptions
from aws_cdk.aws_apigateway import ApiKeySourceType, Cors, CorsOptions, LambdaRestApi
class GraphqlApiStack(cdk.Stack):
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        """Build the GraphQL API: a Node.js Lambda in the VPC plus an
        API-key-protected API Gateway REST endpoint at ``/graphql``.

        Args:
            scope: Parent construct.
            construct_id: Logical ID for this stack.
            config: Not referenced in this body; presumably kept for
                signature parity with sibling stacks — TODO confirm.
            vpc: VPC whose 'Private' subnet group hosts the Lambda.
            instance: EC2 instance running Neo4j; its private IP is passed
                to the Lambda via the ``neo4j_address`` environment variable.
            neo4j_user_secret: Secrets Manager secret with Neo4j credentials;
                the Lambda gets read access and receives its name via
                ``neo4j_user_secret_name``.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Node.js handler placed in the private subnets so it can reach the
        # Neo4j instance on its private IP.
        graphql_api_function = NodejsFunction(self, 'lambda-function-graphql-api',
            function_name="function-altimeter--graphql-api",
            runtime=Runtime.NODEJS_12_X, # TODO: Check out if NODEJS_14_X also works with graphql handler.
            entry='../graphql-api/app.ts',
            memory_size=512,
            timeout=cdk.Duration.seconds(10),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            },
            bundling=BundlingOptions(
                source_map=True,
                target= 'es2018',
                command_hooks=self.CommandHooks(),
                node_modules=[
                    # Something goes wrong when these modules are bundled, so leave them out
                    "graphql",
                    "neo4j-graphql-js"
                ]
            )
        )
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(graphql_api_function.role)
        # proxy=False: only the explicitly added /graphql resource is exposed.
        api = LambdaRestApi(self, 'apigateway-api-altimeter-graphql',
            rest_api_name='api-altimeter--graphql-api',
            handler=graphql_api_function,
            proxy=False
        )
        # Minimal security: require an API key - the user must fetch the key value and configure the client to send it along.
        default_key = api.add_api_key('default')
        default_usage_plan = api.add_usage_plan('apigateway-usageplan-altimeter-graphql', name='default')
        default_usage_plan.add_api_key(default_key)
        default_usage_plan.add_api_stage(stage=api.deployment_stage)
        # CORS preflight allows GET/POST from any origin on /graphql.
        items = api.root.add_resource('graphql',
            default_cors_preflight_options=CorsOptions(
                allow_origins=Cors.ALL_ORIGINS, # TODO: Limit to GUI?
                allow_methods=['GET','POST']
            )
        )
        items.add_method('GET', api_key_required=True)
        items.add_method('POST', api_key_required=True)
@jsii.implements(ICommandHooks)
class CommandHooks:
        def before_install(self, input_dir: str, output_dir: str):
            # ICommandHooks hook: no extra shell commands at this stage.
            return []
        def before_bundling(self, input_dir: str, output_dir: str):
            # ICommandHooks hook: no extra shell commands at this stage.
            return []
def after_bundling(self, input_dir: str, output_dir: str):
commands: List[str] = []
commands.append(f"cp {input_dir}/../graphql-api/schema.graphql {output_dir}")
commands.append(f"cp {input_dir}/../graphql-api/accounts.json {output_dir}")
commands.append("echo 'AFTER BUNDLING COMMANDS DONE'")
return commands | scanner/stacks/graphql_api_stack.py | from typing import List
from aws_cdk.aws_lambda import Runtime
import jsii
from aws_cdk import core as cdk
from aws_cdk import aws_lambda_nodejs
from aws_cdk.aws_ec2 import IInstance, IVpc, SubnetSelection
from aws_cdk.aws_secretsmanager import ISecret
from aws_cdk.aws_lambda_nodejs import ICommandHooks, NodejsFunction, BundlingOptions
from aws_cdk.aws_apigateway import ApiKeySourceType, Cors, CorsOptions, LambdaRestApi
class GraphqlApiStack(cdk.Stack):
    """CDK stack exposing the Altimeter graph data through a GraphQL API.

    Provisions a Node.js Lambda (entry ``../graphql-api/app.ts``) running in
    the VPC's 'Private' subnet group so it can reach the Neo4j instance, and
    fronts it with an API-key-protected API Gateway REST endpoint at
    ``/graphql``.
    """

    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        """Build all resources of the GraphQL API stack.

        Args:
            scope: Parent construct.
            construct_id: Logical ID for this stack.
            config: Not referenced in this body; presumably kept for
                signature parity with sibling stacks — TODO confirm.
            vpc: VPC whose 'Private' subnet group hosts the Lambda.
            instance: EC2 instance running Neo4j; its private IP is passed
                to the Lambda via the ``neo4j_address`` environment variable.
            neo4j_user_secret: Secrets Manager secret with Neo4j credentials;
                the Lambda gets read access and receives its name via
                ``neo4j_user_secret_name``.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Node.js handler placed in the private subnets so it can reach the
        # Neo4j instance on its private IP.
        graphql_api_function = NodejsFunction(self, 'lambda-function-graphql-api',
            function_name="function-altimeter--graphql-api",
            runtime=Runtime.NODEJS_12_X, # TODO: Check out if NODEJS_14_X also works with graphql handler.
            entry='../graphql-api/app.ts',
            memory_size=512,
            timeout=cdk.Duration.seconds(10),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            },
            bundling=BundlingOptions(
                source_map=True,
                target= 'es2018',
                command_hooks=self.CommandHooks(),
                node_modules=[
                    # Something goes wrong when these modules are bundled, so leave them out
                    "graphql",
                    "neo4j-graphql-js"
                ]
            )
        )
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(graphql_api_function.role)
        # proxy=False: only the explicitly added /graphql resource is exposed.
        api = LambdaRestApi(self, 'apigateway-api-altimeter-graphql',
            rest_api_name='api-altimeter--graphql-api',
            handler=graphql_api_function,
            proxy=False
        )
        # Minimal security: require an API key - the user must fetch the key value and configure the client to send it along.
        default_key = api.add_api_key('default')
        default_usage_plan = api.add_usage_plan('apigateway-usageplan-altimeter-graphql', name='default')
        default_usage_plan.add_api_key(default_key)
        default_usage_plan.add_api_stage(stage=api.deployment_stage)
        # CORS preflight allows GET/POST from any origin on /graphql.
        items = api.root.add_resource('graphql',
            default_cors_preflight_options=CorsOptions(
                allow_origins=Cors.ALL_ORIGINS, # TODO: Limit to GUI?
                allow_methods=['GET','POST']
            )
        )
        items.add_method('GET', api_key_required=True)
        items.add_method('POST', api_key_required=True)

    @jsii.implements(ICommandHooks)
    class CommandHooks:
        """jsii-compatible ICommandHooks implementation used while bundling
        the Lambda: copies the GraphQL schema and accounts fixture into the
        output bundle after bundling."""

        def before_install(self, input_dir: str, output_dir: str):
            # No extra shell commands at this stage.
            return []

        def before_bundling(self, input_dir: str, output_dir: str):
            # No extra shell commands at this stage.
            return []

        def after_bundling(self, input_dir: str, output_dir: str):
            # Place schema.graphql and accounts.json next to the bundled
            # handler so they are available at runtime.
            commands: List[str] = []
            commands.append(f"cp {input_dir}/../graphql-api/schema.graphql {output_dir}")
            commands.append(f"cp {input_dir}/../graphql-api/accounts.json {output_dir}")
            commands.append("echo 'AFTER BUNDLING COMMANDS DONE'")
            return commands
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
class GenCoe:
    """Convert an image into pixel bit-data and emit Xilinx COE ROM files.

    The image is loaded with OpenCV; the gray/color/alpha conversions index
    channel 3, so a 4-channel (BGRA) image is assumed — TODO confirm all
    source PNGs carry an alpha channel.  Supported modes:
      * "gray":  4-bit grayscale per pixel plus a 1-bit alpha mask
      * "color": 4 bits per R/G/B channel plus a 1-bit alpha mask
      * "mono":  1 bit per pixel (dark pixels -> 1)
    """

    def __init__(self, dir: str, filename: str, mode: str = "gray"):
        """Load ``dir``/``filename`` and run the conversion for ``mode``.

        Raises:
            FileNotFoundError: if OpenCV cannot read the image.
        """
        self.dir = dir
        self.filename = filename
        # os.path.join generalizes the original hard-coded "\\" separator,
        # so the class also works outside Windows (same result on Windows).
        loc = os.path.join(self.dir, self.filename)
        self.img = cv2.imread(loc, cv2.IMREAD_UNCHANGED)
        if self.img is None:
            # cv2.imread returns None instead of raising; fail loudly here
            # rather than with an opaque unpack error on .shape below.
            raise FileNotFoundError("cannot read image: " + loc)
        self.height, self.width, _ = self.img.shape
        num_pixels = self.height * self.width
        self.grayinfo = np.empty(num_pixels, dtype=np.int32)        # 4-bit gray values
        self.alphainfo = np.empty(num_pixels, dtype=np.int32)       # 1-bit alpha mask
        self.colorinfo = np.empty((num_pixels, 3), dtype=np.int32)  # 4-bit R, G, B
        self.monoinfo = np.empty((self.height, self.width), dtype=np.int8)  # 1-bit mono
        if mode == "gray":
            self.gray()
        elif mode == "mono":
            self.mono()
        elif mode == "color":
            self.color()
        # Unknown modes are silently ignored, matching the original behavior.

    def readimage(self, dir, filename, mode="gray"):
        """Re-initialize this instance from another image file.

        Bug fix: the original called ``GenCoe.__init__(dir, filename, mode)``
        without ``self``, binding ``dir`` to ``self`` and corrupting state.
        """
        self.__init__(dir, filename, mode)

    def gray(self):
        """Fill ``grayinfo``/``alphainfo`` from the image (gray mode)."""
        for row_idx in range(self.height):
            for col_idx in range(self.width):
                pixel = self.img[row_idx][col_idx]
                idx = row_idx * self.width + col_idx
                # Mean of the three color channels, scaled 0-255 -> 0-15.
                self.grayinfo[idx] = int(sum(pixel[0:3] / 16) / 3)
                # Alpha collapses to a single bit (>= 128 -> opaque).
                self.alphainfo[idx] = int(pixel[3] / 128)

    def color(self):
        """Fill ``colorinfo``/``alphainfo`` from the image (color mode)."""
        for row_idx in range(self.height):
            for col_idx in range(self.width):
                pixel = self.img[row_idx][col_idx]
                idx = row_idx * self.width + col_idx
                # OpenCV stores BGR(A); emit in R, G, B order, 4 bits each.
                self.colorinfo[idx][0] = int(pixel[2] / 16)
                self.colorinfo[idx][1] = int(pixel[1] / 16)
                self.colorinfo[idx][2] = int(pixel[0] / 16)
                self.alphainfo[idx] = int(pixel[3] / 128)

    def mono(self):
        """Fill ``monoinfo``: 1 for dark pixels (B+G+R < 300), else 0."""
        for row_idx in range(self.height):
            for col_idx in range(self.width):
                pixel = self.img[row_idx][col_idx]
                # Cast to int first: uint8 sums would wrap around at 256.
                dark = int(pixel[0]) + int(pixel[1]) + int(pixel[2]) < 300
                self.monoinfo[row_idx][col_idx] = 1 if dark else 0

    def get_grayinfo(self):
        """Return the flat array of 4-bit gray values."""
        return self.grayinfo

    def get_alphainfo(self):
        """Return the flat array of 1-bit alpha values."""
        return self.alphainfo

    def get_monoinfo(self):
        """Return the 2-D (height, width) array of 1-bit mono values."""
        return self.monoinfo

    def get_colorinfo(self):
        """Return the (num_pixels, 3) array of 4-bit R/G/B values."""
        return self.colorinfo

    @staticmethod
    def to_binary(num, bitlen=-1):
        """Return ``num`` as a binary string, zero-padded to ``bitlen``.

        ``bitlen=-1`` (default) disables padding; a ``bitlen`` shorter than
        the natural width leaves the string unpadded, as before.
        """
        bits = bin(num)[2:]
        return bits if bitlen == -1 else bits.zfill(bitlen)

    @staticmethod
    def generate_coe(dir, filename, *infos):
        """Write ``dir``/``filename`` as a binary-radix COE file.

        Each ``info`` is a ``(kind, data)`` pair with kind one of
        'gray' | 'alpha' | 'mono' | 'color'; all data sequences must share
        the same length (depth is taken from the first pair).  'mono' data
        rows are 2-D and emit one output line per bit.
        """
        coefile_location = os.path.join(dir, filename)
        depth = len(infos[0][1])
        with open(coefile_location, 'w') as f:
            f.write("memory_initialization_radix = 2;\n")
            f.write("memory_initialization_vector = \n")
            for i in range(depth):
                rowinfo = ""
                for kind, data in infos:
                    if kind == 'gray':
                        rowinfo += GenCoe.to_binary(data[i], bitlen=4)
                    elif kind == 'alpha':
                        rowinfo += str(data[i])
                    elif kind == 'mono':
                        # mono rows carry their own ",\n" separators.
                        for bit in data[i]:
                            rowinfo += str(bit) + ",\n"
                    elif kind == 'color':
                        rowinfo += GenCoe.to_binary(data[i][0], bitlen=4)
                        rowinfo += GenCoe.to_binary(data[i][1], bitlen=4)
                        rowinfo += GenCoe.to_binary(data[i][2], bitlen=4)
                # NOTE(review): as in the original, the trailing separator is
                # keyed off the *last* info's kind (loop-variable leak kept
                # intentionally for byte-identical output).
                if kind == 'mono':
                    f.write(rowinfo)
                else:
                    f.write(rowinfo + ",\n")
        print("Generate COE file " + filename + " successfully, the depth is " + str(depth))
if __name__ == "__main__":
ori_dir = "D:\\fpga\\project\PlaneWar\\src\\img\\origin"
des_dir = "D:\\fpga\\project\PlaneWar\\src\\img"
def gen_me():
me1 = GenCoe(ori_dir, "me1.png")
me2 = GenCoe(ori_dir, "me2.png")
me_destroy_1 = GenCoe(ori_dir, "me_destroy_1.png")
me_destroy_3 = GenCoe(ori_dir, "me_destroy_3.png")
me_destroy_4 = GenCoe(ori_dir, "me_destroy_4.png")
# GenCoe.generate_coe(des_dir, 'me.coe', ('alpha', me1.get_alphainfo()), ('gray', me1.get_grayinfo()), \
# ('alpha', me2.get_alphainfo()), ('gray', me2.get_grayinfo()), \
# ('gray', me_destroy_1.get_grayinfo()), ('gray', me_destroy_3.get_grayinfo()), \
# ('gray', me_destroy_4.get_grayinfo()))
GenCoe.generate_coe(des_dir, 'me.coe', ('alpha', me1.get_alphainfo()), ('gray', me1.get_grayinfo()),\
('alpha', me2.get_alphainfo()), ('gray', me2.get_grayinfo()),\
('alpha', me_destroy_1.get_alphainfo()), ('gray', me_destroy_1.get_grayinfo()), \
('alpha', me_destroy_3.get_alphainfo()), ('gray', me_destroy_3.get_grayinfo()))
def gen_enemy1():
enemy1 = GenCoe(ori_dir, "enemy1.png")
enemy1_down1 = GenCoe(ori_dir, "enemy1_down1.png")
enemy1_down2 = GenCoe(ori_dir, "enemy1_down2.png")
enemy1_down3 = GenCoe(ori_dir, "enemy1_down3.png")
# enemy1_down4 = GenCoe(ori_dir, "enemy1_down4.png")
# GenCoe.generate_coe(des_dir, 'enemy1.coe', ('alpha', enemy1.get_alphainfo()), ('gray', enemy1.get_grayinfo()), \
# ('gray', enemy1_down1.get_grayinfo()), ('gray', enemy1_down2.get_grayinfo()), \
# ('alpha', enemy1_down3.get_alphainfo()), ('gray', enemy1_down3.get_grayinfo()))
GenCoe.generate_coe(des_dir, 'enemy1.coe', ('alpha', enemy1.get_alphainfo()), ('gray', enemy1.get_grayinfo()), \
('alpha', enemy1_down1.get_alphainfo()), ('gray', enemy1_down1.get_grayinfo()), \
('alpha', enemy1_down2.get_alphainfo()), ('gray', enemy1_down2.get_grayinfo()), \
('alpha', enemy1_down3.get_alphainfo()), ('gray', enemy1_down3.get_grayinfo()))
def gen_enemy2():
enemy2 = GenCoe(ori_dir, "enemy2.png")
enemy2_hit = GenCoe(ori_dir, "enemy2_hit.png")
enemy2_down1 = GenCoe(ori_dir, "enemy2_down1.png")
enemy2_down2 = GenCoe(ori_dir, "enemy2_down2.png")
enemy2_down3 = GenCoe(ori_dir, "enemy2_down3.png")
GenCoe.generate_coe(des_dir, 'enemy2.coe', \
('alpha', enemy2.get_alphainfo()), ('gray', enemy2.get_grayinfo()),\
('alpha', enemy2_hit.get_alphainfo()), ('gray', enemy2_hit.get_grayinfo()),\
('alpha', enemy2_down1.get_alphainfo()), ('gray', enemy2_down1.get_grayinfo()),\
('alpha', enemy2_down2.get_alphainfo()), ('gray', enemy2_down2.get_grayinfo()),\
('alpha', enemy2_down3.get_alphainfo()), ('gray', enemy2_down3.get_grayinfo()))
def gen_enemy3():
enemy3_n1 = GenCoe(ori_dir, 'enemy3_n1.png')
enemy3_n2 = GenCoe(ori_dir, 'enemy3_n2.png')
enemy3_hit = GenCoe(ori_dir, 'enemy3_hit.png')
enemy3_down1 = GenCoe(ori_dir, 'enemy3_down1.png')
enemy3_down2 = GenCoe(ori_dir, 'enemy3_down2.png')
enemy3_down3 = GenCoe(ori_dir, 'enemy3_down3.png')
enemy3_down4 = GenCoe(ori_dir, 'enemy3_down4.png')
enemy3_down5 = GenCoe(ori_dir, 'enemy3_down5.png')
GenCoe.generate_coe(des_dir, 'enemy3.coe', \
('alpha', enemy3_n1.get_alphainfo()), ('gray', enemy3_n1.get_grayinfo()), \
# ('alpha', enemy3_n2.get_alphainfo()), ('gray', enemy3_n2.get_grayinfo()), \
('alpha', enemy3_hit.get_alphainfo()), ('gray', enemy3_hit.get_grayinfo()), \
# ('alpha', enemy3_down1.get_alphainfo()), ('gray', enemy3_down1.get_grayinfo()), \
# ('alpha', enemy3_down2.get_alphainfo()), ('gray', enemy3_down2.get_grayinfo()), \
('alpha', enemy3_down3.get_alphainfo()), ('gray', enemy3_down3.get_grayinfo()), \
# ('alpha', enemy3_down4.get_alphainfo()), ('gray', enemy3_down4.get_grayinfo()), \
('alpha', enemy3_down5.get_alphainfo()), ('gray', enemy3_down5.get_grayinfo()))
def gen_startinfo():
startinfo = GenCoe(ori_dir, 'startinfo.png', mode="mono")
GenCoe.generate_coe(des_dir, 'startinfo.coe', ('mono', startinfo.get_monoinfo()))
# gen_enemy1()
def gen_bomb():
bomb_supply = GenCoe(ori_dir, 'bomb_supply.png', mode='color')
GenCoe.generate_coe(des_dir, 'bomb.coe', ('alpha', bomb_supply.get_alphainfo()),('color', bomb_supply.get_colorinfo()))
def gen_bullet_supply():
bullet_supply = GenCoe(ori_dir, 'bullet_supply.png', mode='color')
GenCoe.generate_coe(des_dir, 'bullet_supply.coe', ('alpha', bullet_supply.get_alphainfo()), ('color', bullet_supply.get_colorinfo()))
def gen_number():
number_dir = "D:\\fpga\\project\\PlaneWar\\src\\img\\origin\\numbers"
for i in range(10):
filename = str(i) + ".png"
number = GenCoe(number_dir, filename, mode='mono')
GenCoe.generate_coe(des_dir, str(i) + ".coe", ('mono', number.get_monoinfo()))
gen_me() | utils/gen_coe.py | import numpy as np
import matplotlib.pyplot as plt
import cv2
class GenCoe:
def __init__(self, dir:str, filename:str, mode="gray"):
self.dir = dir
self.filename = filename
loc = self.dir + "\\" + self.filename
self.img = cv2.imread(loc, cv2.IMREAD_UNCHANGED)
self.height, self.width, g = (self.img.shape)
self.grayinfo = np.empty((self.height * self.width)).astype(np.int32)
self.alphainfo = np.empty((self.height * self.width)).astype(np.int32)
self.colorinfo = np.empty((self.height * self.width, 3)).astype(np.int32)
self.monoinfo = np.empty((self.height, self.width)).astype(np.int8)
if mode == "gray":
self.gray()
elif mode == "mono":
self.mono()
elif mode == "color":
self.color()
def readimage(self, dir, filename, mode="gray"):
GenCoe.__init__(dir, filename, mode);
def gray(self):
def list_aver(list):
aver = 0
for item in list:
aver += item
aver /= len(list)
return aver
for row_idx in range(self.height):
for col_idx in range(self.width):
self.grayinfo[row_idx * self.width + col_idx] = (int)(list_aver(self.img[row_idx][col_idx][0:3]/16))
self.alphainfo[row_idx * self.width + col_idx] = (int)(self.img[row_idx][col_idx][3]/128)
def color(self):
for row_idx in range(self.height):
for col_idx in range(self.width):
self.colorinfo[row_idx * self.width + col_idx][0] = (int)(self.img[row_idx][col_idx][2] / 16)
self.colorinfo[row_idx * self.width + col_idx][1] = (int)(self.img[row_idx][col_idx][1] / 16)
self.colorinfo[row_idx * self.width + col_idx][2] = (int)(self.img[row_idx][col_idx][0] / 16)
self.alphainfo[row_idx * self.width + col_idx] = (int)(self.img[row_idx][col_idx][3] / 128)
def mono(self):
for row_idx in range(self.height):
for col_idx in range(self.width):
# self.monoinfo[row_idx][col_idx] = (int)(self.img[row_idx][col_idx][3] / 128)
pixel = self.img[row_idx][col_idx]
self.monoinfo[row_idx][col_idx] = 1 if (int(pixel[0]) + int(pixel[1]) + int(pixel[2]) < 300) else 0
def get_grayinfo(self):
return self.grayinfo
def get_alphainfo(self):
return self.alphainfo
def get_monoinfo(self):
return self.monoinfo
def get_colorinfo(self):
return self.colorinfo
def to_binary(num, bitlen=-1):
res = bin(num)[2:]
if bitlen == -1:
return res
else:
for i in range(bitlen - len(res)):
res = '0' + res
return res
def generate_coe(dir, filename, *infos):
coefile_location = dir + "\\" + filename
depth = len(infos[0][1])
with open(coefile_location, 'w') as f:
f.write("memory_initialization_radix = 2;\n")
f.write("memory_initialization_vector = \n")
for i in range(depth):
rowinfo = ""
for info in infos:
if(info[0] == 'gray'):
rowinfo += GenCoe.to_binary(info[1][i], bitlen=4)
elif(info[0] == 'alpha'):
rowinfo += str(info[1][i])
elif(info[0] == 'mono'):
for j in range(len(info[1][i])):
rowinfo += str(info[1][i][j]) + ",\n"
elif(info[0] == 'color'):
rowinfo += GenCoe.to_binary(info[1][i][0], bitlen=4)
rowinfo += GenCoe.to_binary(info[1][i][1], bitlen=4)
rowinfo += GenCoe.to_binary(info[1][i][2], bitlen=4)
if info[0] == 'mono':
f.write(rowinfo)
else:
f.write(rowinfo + ",\n")
print("Generate COE file " + filename + " successfully, the depth is " + str(depth))
if __name__ == "__main__":
ori_dir = "D:\\fpga\\project\PlaneWar\\src\\img\\origin"
des_dir = "D:\\fpga\\project\PlaneWar\\src\\img"
def gen_me():
me1 = GenCoe(ori_dir, "me1.png")
me2 = GenCoe(ori_dir, "me2.png")
me_destroy_1 = GenCoe(ori_dir, "me_destroy_1.png")
me_destroy_3 = GenCoe(ori_dir, "me_destroy_3.png")
me_destroy_4 = GenCoe(ori_dir, "me_destroy_4.png")
# GenCoe.generate_coe(des_dir, 'me.coe', ('alpha', me1.get_alphainfo()), ('gray', me1.get_grayinfo()), \
# ('alpha', me2.get_alphainfo()), ('gray', me2.get_grayinfo()), \
# ('gray', me_destroy_1.get_grayinfo()), ('gray', me_destroy_3.get_grayinfo()), \
# ('gray', me_destroy_4.get_grayinfo()))
GenCoe.generate_coe(des_dir, 'me.coe', ('alpha', me1.get_alphainfo()), ('gray', me1.get_grayinfo()),\
('alpha', me2.get_alphainfo()), ('gray', me2.get_grayinfo()),\
('alpha', me_destroy_1.get_alphainfo()), ('gray', me_destroy_1.get_grayinfo()), \
('alpha', me_destroy_3.get_alphainfo()), ('gray', me_destroy_3.get_grayinfo()))
def gen_enemy1():
enemy1 = GenCoe(ori_dir, "enemy1.png")
enemy1_down1 = GenCoe(ori_dir, "enemy1_down1.png")
enemy1_down2 = GenCoe(ori_dir, "enemy1_down2.png")
enemy1_down3 = GenCoe(ori_dir, "enemy1_down3.png")
# enemy1_down4 = GenCoe(ori_dir, "enemy1_down4.png")
# GenCoe.generate_coe(des_dir, 'enemy1.coe', ('alpha', enemy1.get_alphainfo()), ('gray', enemy1.get_grayinfo()), \
# ('gray', enemy1_down1.get_grayinfo()), ('gray', enemy1_down2.get_grayinfo()), \
# ('alpha', enemy1_down3.get_alphainfo()), ('gray', enemy1_down3.get_grayinfo()))
GenCoe.generate_coe(des_dir, 'enemy1.coe', ('alpha', enemy1.get_alphainfo()), ('gray', enemy1.get_grayinfo()), \
('alpha', enemy1_down1.get_alphainfo()), ('gray', enemy1_down1.get_grayinfo()), \
('alpha', enemy1_down2.get_alphainfo()), ('gray', enemy1_down2.get_grayinfo()), \
('alpha', enemy1_down3.get_alphainfo()), ('gray', enemy1_down3.get_grayinfo()))
def gen_enemy2():
enemy2 = GenCoe(ori_dir, "enemy2.png")
enemy2_hit = GenCoe(ori_dir, "enemy2_hit.png")
enemy2_down1 = GenCoe(ori_dir, "enemy2_down1.png")
enemy2_down2 = GenCoe(ori_dir, "enemy2_down2.png")
enemy2_down3 = GenCoe(ori_dir, "enemy2_down3.png")
GenCoe.generate_coe(des_dir, 'enemy2.coe', \
('alpha', enemy2.get_alphainfo()), ('gray', enemy2.get_grayinfo()),\
('alpha', enemy2_hit.get_alphainfo()), ('gray', enemy2_hit.get_grayinfo()),\
('alpha', enemy2_down1.get_alphainfo()), ('gray', enemy2_down1.get_grayinfo()),\
('alpha', enemy2_down2.get_alphainfo()), ('gray', enemy2_down2.get_grayinfo()),\
('alpha', enemy2_down3.get_alphainfo()), ('gray', enemy2_down3.get_grayinfo()))
def gen_enemy3():
enemy3_n1 = GenCoe(ori_dir, 'enemy3_n1.png')
enemy3_n2 = GenCoe(ori_dir, 'enemy3_n2.png')
enemy3_hit = GenCoe(ori_dir, 'enemy3_hit.png')
enemy3_down1 = GenCoe(ori_dir, 'enemy3_down1.png')
enemy3_down2 = GenCoe(ori_dir, 'enemy3_down2.png')
enemy3_down3 = GenCoe(ori_dir, 'enemy3_down3.png')
enemy3_down4 = GenCoe(ori_dir, 'enemy3_down4.png')
enemy3_down5 = GenCoe(ori_dir, 'enemy3_down5.png')
GenCoe.generate_coe(des_dir, 'enemy3.coe', \
('alpha', enemy3_n1.get_alphainfo()), ('gray', enemy3_n1.get_grayinfo()), \
# ('alpha', enemy3_n2.get_alphainfo()), ('gray', enemy3_n2.get_grayinfo()), \
('alpha', enemy3_hit.get_alphainfo()), ('gray', enemy3_hit.get_grayinfo()), \
# ('alpha', enemy3_down1.get_alphainfo()), ('gray', enemy3_down1.get_grayinfo()), \
# ('alpha', enemy3_down2.get_alphainfo()), ('gray', enemy3_down2.get_grayinfo()), \
('alpha', enemy3_down3.get_alphainfo()), ('gray', enemy3_down3.get_grayinfo()), \
# ('alpha', enemy3_down4.get_alphainfo()), ('gray', enemy3_down4.get_grayinfo()), \
('alpha', enemy3_down5.get_alphainfo()), ('gray', enemy3_down5.get_grayinfo()))
def gen_startinfo():
startinfo = GenCoe(ori_dir, 'startinfo.png', mode="mono")
GenCoe.generate_coe(des_dir, 'startinfo.coe', ('mono', startinfo.get_monoinfo()))
# gen_enemy1()
def gen_bomb():
bomb_supply = GenCoe(ori_dir, 'bomb_supply.png', mode='color')
GenCoe.generate_coe(des_dir, 'bomb.coe', ('alpha', bomb_supply.get_alphainfo()),('color', bomb_supply.get_colorinfo()))
def gen_bullet_supply():
bullet_supply = GenCoe(ori_dir, 'bullet_supply.png', mode='color')
GenCoe.generate_coe(des_dir, 'bullet_supply.coe', ('alpha', bullet_supply.get_alphainfo()), ('color', bullet_supply.get_colorinfo()))
def gen_number():
number_dir = "D:\\fpga\\project\\PlaneWar\\src\\img\\origin\\numbers"
for i in range(10):
filename = str(i) + ".png"
number = GenCoe(number_dir, filename, mode='mono')
GenCoe.generate_coe(des_dir, str(i) + ".coe", ('mono', number.get_monoinfo()))
gen_me() | 0.187839 | 0.148325 |
# Install boto before running the script
# Setup AWS keys to get details from AWS Account
import argparse
import re
import sys
import time
import boto.ec2
AMI_NAMES_TO_USER = {
'amzn' : 'ec2-user',
'ubuntu' : 'ubuntu',
'CentOS' : 'root',
'DataStax' : 'ubuntu',
'CoreOS' : 'core'
}
AMI_IDS_TO_USER = {
'ami-ada2b6c4' : 'ubuntu'
}
AMI_IDS_TO_KEY = {
'ami-ada2b6c4' : 'custom_key'
}
BLACKLISTED_REGIONS = [
'cn-north-1',
'us-gov-west-1'
]
def generate_id(instance, tags_filter, region):
instance_id = ''
if tags_filter is not None:
for tag in tags_filter.split(','):
value = instance.tags.get(tag, None)
if value:
if not instance_id:
instance_id = value
else:
instance_id += '-' + value
else:
for tag, value in instance.tags.items():
if not tag.startswith('aws'):
if not instance_id:
instance_id = value
else:
instance_id += '-' + value
if not instance_id:
instance_id = instance.id
if region:
instance_id += '-' + instance.placement
return instance_id
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--default-user', help='Default ssh username to use if it can\'t be detected from AMI name')
parser.add_argument('--keydir', default='~/.ssh/', help='Location of private keys')
parser.add_argument('--no-identities-only', action='store_true', help='Do not include IdentitiesOnly=yes in ssh config; may cause connection refused if using ssh-agent')
parser.add_argument('--prefix', default='', help='Specify a prefix to prepend to all host names')
parser.add_argument('--private', action='store_true', help='Use private IP addresses (public are used by default)')
parser.add_argument('--profile', help='Specify AWS credential profile to use')
parser.add_argument('--region', action='store_true', help='Append the region name at the end of the concatenation')
parser.add_argument('--ssh-key-name', default='', help='Override the ssh key to use')
parser.add_argument('--strict-hostkey-checking', action='store_true', help='Do not include StrictHostKeyChecking=no in ssh config')
parser.add_argument('--tags', help='A comma-separated list of tag names to be considered for concatenation. If omitted, all tags will be used')
parser.add_argument('--user', help='Override the ssh username for all hosts')
parser.add_argument('--white-list-region', default='', help='Which regions must be included. If omitted, all regions are considered', nargs='+')
args = parser.parse_args()
instances = {}
counts_total = {}
counts_incremental = {}
amis = AMI_IDS_TO_USER.copy()
print('# Generated on ' + time.asctime(time.localtime(time.time())))
print('# ' + ' '.join(sys.argv))
print('# ')
print('')
for region in boto.ec2.regions():
if args.white_list_region and region.name not in args.white_list_region:
continue
if region.name in BLACKLISTED_REGIONS:
continue
if args.profile:
conn = boto.ec2.connect_to_region(region.name, profile_name=args.profile)
else:
conn = boto.ec2.connect_to_region(region.name)
for instance in conn.get_only_instances():
if instance.state != 'running':
continue
if instance.platform == 'windows':
continue
if instance.key_name is None:
continue
if instance.launch_time not in instances:
instances[instance.launch_time] = []
instances[instance.launch_time].append(instance)
instance_id = generate_id(instance, args.tags, args.region)
if instance_id not in counts_total:
counts_total[instance_id] = 0
counts_incremental[instance_id] = 0
counts_total[instance_id] += 1
if args.user:
amis[instance.image_id] = args.user
else:
if not instance.image_id in amis:
image = conn.get_image(instance.image_id)
for ami, user in AMI_NAMES_TO_USER.items():
regexp = re.compile(ami)
if image and regexp.match(image.name):
amis[instance.image_id] = user
break
if instance.image_id not in amis:
amis[instance.image_id] = args.default_user
if args.default_user is None:
image_label = image.name if image is not None else instance.image_id
sys.stderr.write('Can\'t lookup user for AMI \'' + image_label + '\', add a rule to the script\n')
for k in sorted(instances):
for instance in instances[k]:
if args.private:
if instance.private_ip_address:
ip_addr = instance.private_ip_address
else:
if instance.ip_address:
ip_addr = instance.ip_address
elif instance.private_ip_address:
ip_addr = instance.private_ip_address
else:
sys.stderr.write('Cannot lookup ip address for instance %s, skipped it.' % instance.id)
continue
instance_id = generate_id(instance, args.tags, args.region)
if counts_total[instance_id] != 1:
counts_incremental[instance_id] += 1
instance_id += '-' + str(counts_incremental[instance_id])
hostid = args.prefix + instance_id
hostid = hostid.replace(' ', '_') # get rid of spaces
if instance.id:
print('# id: ' + instance.id)
print('Host ' + hostid)
print(' HostName ' + ip_addr)
try:
if amis[instance.image_id] is not None:
print(' User ' + amis[instance.image_id])
except:
pass
if args.keydir:
keydir = args.keydir
else:
keydir = '~/.ssh/'
if args.ssh_key_name:
print(' IdentityFile ' + keydir + args.ssh_key_name + '.pem')
else:
key_name = AMI_IDS_TO_KEY.get(instance.image_id, instance.key_name)
print(' IdentityFile ' + keydir + key_name.replace(' ', '_') + '.pem')
if not args.no_identities_only:
# ensure ssh-agent keys don't flood when we know the right file to use
print(' IdentitiesOnly yes')
if not args.strict_hostkey_checking:
print(' StrictHostKeyChecking no')
print('')
if __name__ == '__main__':
main() | create-sshconfig.py |
# Install boto before running the script
# Setup AWS keys to get details from AWS Account
import argparse
import re
import sys
import time
import boto.ec2
AMI_NAMES_TO_USER = {
'amzn' : 'ec2-user',
'ubuntu' : 'ubuntu',
'CentOS' : 'root',
'DataStax' : 'ubuntu',
'CoreOS' : 'core'
}
AMI_IDS_TO_USER = {
'ami-ada2b6c4' : 'ubuntu'
}
AMI_IDS_TO_KEY = {
'ami-ada2b6c4' : 'custom_key'
}
BLACKLISTED_REGIONS = [
'cn-north-1',
'us-gov-west-1'
]
def generate_id(instance, tags_filter, region):
instance_id = ''
if tags_filter is not None:
for tag in tags_filter.split(','):
value = instance.tags.get(tag, None)
if value:
if not instance_id:
instance_id = value
else:
instance_id += '-' + value
else:
for tag, value in instance.tags.items():
if not tag.startswith('aws'):
if not instance_id:
instance_id = value
else:
instance_id += '-' + value
if not instance_id:
instance_id = instance.id
if region:
instance_id += '-' + instance.placement
return instance_id
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--default-user', help='Default ssh username to use if it can\'t be detected from AMI name')
parser.add_argument('--keydir', default='~/.ssh/', help='Location of private keys')
parser.add_argument('--no-identities-only', action='store_true', help='Do not include IdentitiesOnly=yes in ssh config; may cause connection refused if using ssh-agent')
parser.add_argument('--prefix', default='', help='Specify a prefix to prepend to all host names')
parser.add_argument('--private', action='store_true', help='Use private IP addresses (public are used by default)')
parser.add_argument('--profile', help='Specify AWS credential profile to use')
parser.add_argument('--region', action='store_true', help='Append the region name at the end of the concatenation')
parser.add_argument('--ssh-key-name', default='', help='Override the ssh key to use')
parser.add_argument('--strict-hostkey-checking', action='store_true', help='Do not include StrictHostKeyChecking=no in ssh config')
parser.add_argument('--tags', help='A comma-separated list of tag names to be considered for concatenation. If omitted, all tags will be used')
parser.add_argument('--user', help='Override the ssh username for all hosts')
parser.add_argument('--white-list-region', default='', help='Which regions must be included. If omitted, all regions are considered', nargs='+')
args = parser.parse_args()
instances = {}
counts_total = {}
counts_incremental = {}
amis = AMI_IDS_TO_USER.copy()
print('# Generated on ' + time.asctime(time.localtime(time.time())))
print('# ' + ' '.join(sys.argv))
print('# ')
print('')
for region in boto.ec2.regions():
if args.white_list_region and region.name not in args.white_list_region:
continue
if region.name in BLACKLISTED_REGIONS:
continue
if args.profile:
conn = boto.ec2.connect_to_region(region.name, profile_name=args.profile)
else:
conn = boto.ec2.connect_to_region(region.name)
for instance in conn.get_only_instances():
if instance.state != 'running':
continue
if instance.platform == 'windows':
continue
if instance.key_name is None:
continue
if instance.launch_time not in instances:
instances[instance.launch_time] = []
instances[instance.launch_time].append(instance)
instance_id = generate_id(instance, args.tags, args.region)
if instance_id not in counts_total:
counts_total[instance_id] = 0
counts_incremental[instance_id] = 0
counts_total[instance_id] += 1
if args.user:
amis[instance.image_id] = args.user
else:
if not instance.image_id in amis:
image = conn.get_image(instance.image_id)
for ami, user in AMI_NAMES_TO_USER.items():
regexp = re.compile(ami)
if image and regexp.match(image.name):
amis[instance.image_id] = user
break
if instance.image_id not in amis:
amis[instance.image_id] = args.default_user
if args.default_user is None:
image_label = image.name if image is not None else instance.image_id
sys.stderr.write('Can\'t lookup user for AMI \'' + image_label + '\', add a rule to the script\n')
for k in sorted(instances):
for instance in instances[k]:
if args.private:
if instance.private_ip_address:
ip_addr = instance.private_ip_address
else:
if instance.ip_address:
ip_addr = instance.ip_address
elif instance.private_ip_address:
ip_addr = instance.private_ip_address
else:
sys.stderr.write('Cannot lookup ip address for instance %s, skipped it.' % instance.id)
continue
instance_id = generate_id(instance, args.tags, args.region)
if counts_total[instance_id] != 1:
counts_incremental[instance_id] += 1
instance_id += '-' + str(counts_incremental[instance_id])
hostid = args.prefix + instance_id
hostid = hostid.replace(' ', '_') # get rid of spaces
if instance.id:
print('# id: ' + instance.id)
print('Host ' + hostid)
print(' HostName ' + ip_addr)
try:
if amis[instance.image_id] is not None:
print(' User ' + amis[instance.image_id])
except:
pass
if args.keydir:
keydir = args.keydir
else:
keydir = '~/.ssh/'
if args.ssh_key_name:
print(' IdentityFile ' + keydir + args.ssh_key_name + '.pem')
else:
key_name = AMI_IDS_TO_KEY.get(instance.image_id, instance.key_name)
print(' IdentityFile ' + keydir + key_name.replace(' ', '_') + '.pem')
if not args.no_identities_only:
# ensure ssh-agent keys don't flood when we know the right file to use
print(' IdentitiesOnly yes')
if not args.strict_hostkey_checking:
print(' StrictHostKeyChecking no')
print('')
if __name__ == '__main__':
main() | 0.402979 | 0.065425 |
import pandas as pd
import pytest
from tabelio.mock import mock_table_data
from tabelio.table import (FORMATS, BaseFormat, _find_format,
convert_table_file, read_table_format,
write_table_format)
KNOWN_EXT = 'csv'
UNKNOWN_EXT = 'unknown'
@pytest.fixture
def df():
return mock_table_data(rows=3, start_date='2018-01-01')
@pytest.fixture
def double_df(df):
ddf = pd.concat([df, df])
ddf = ddf.reset_index(drop=True)
return ddf
@pytest.fixture
def triple_df(df):
ddf = pd.concat([df, df, df])
ddf = ddf.reset_index(drop=True)
return ddf
@pytest.fixture
def csv_file(df, tmpdir_factory):
fn = str(tmpdir_factory.mktemp("data").join("temp.csv"))
df.to_csv(fn, index=False)
return fn
@pytest.mark.parametrize('method', ['read', 'write', 'append'])
def test_baseformat_is_abstract(method):
with pytest.raises(NotImplementedError):
getattr(BaseFormat, method)(df=None, filename=None)
def test_one_extension_known():
assert KNOWN_EXT in FORMATS
@pytest.mark.parametrize('format, fmt_class', FORMATS.items())
class TestFormat:
def test_format_is_valid(self, format, fmt_class):
assert isinstance(format, str)
assert issubclass(fmt_class, BaseFormat)
def test_append_non_file(self, format, fmt_class, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
with pytest.raises(FileNotFoundError):
fmt_class.append(df=df, filename=filename)
def test_write_read(self, format, fmt_class, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
def test_write_append_read(
self, format, fmt_class, df, double_df, tmpdir_factory
):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, double_df)
def test_write_append_x2_read(
self, format, fmt_class, df, triple_df, tmpdir_factory
):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, triple_df)
class TestFindFormat:
@pytest.mark.parametrize(
'format, filename', [
(UNKNOWN_EXT, f'file.{UNKNOWN_EXT}'),
(UNKNOWN_EXT, f'file.{KNOWN_EXT}'), (None, f'file.{UNKNOWN_EXT}'),
(UNKNOWN_EXT, None), (None, None)
]
)
def test_unknown_format_raises(self, format, filename):
with pytest.raises(ValueError):
_find_format(format=format, filename=filename)
@pytest.mark.parametrize(
'format, filename, expected_format', [
(KNOWN_EXT, f'file.{UNKNOWN_EXT}', KNOWN_EXT),
(None, f'file.{KNOWN_EXT}', KNOWN_EXT),
]
)
def test_format_found_correctly(self, format, filename, expected_format):
found_format = _find_format(format=format, filename=filename)
assert found_format == expected_format
@pytest.mark.parametrize('format', FORMATS.keys())
class TestReadWrite:
def test_write_read(self, format, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
def test_write_append_read(self, format, df, double_df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename)
write_table_format(df=df, filename=filename, append=True)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, double_df)
def test_append_read(self, format, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename, append=True)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
@pytest.mark.parametrize('to_format', FORMATS.keys())
def test_convert(df, csv_file, to_format):
from_format = 'csv'
from_file = csv_file
to_file = convert_table_file(
filename=csv_file, from_format=from_format, to_format=to_format
)
from_df = read_table_format(filename=from_file)
to_df = read_table_format(filename=to_file)
assert to_file.endswith(to_format)
pd.testing.assert_frame_equal(to_df, from_df) | tests/test_table.py | import pandas as pd
import pytest
from tabelio.mock import mock_table_data
from tabelio.table import (FORMATS, BaseFormat, _find_format,
convert_table_file, read_table_format,
write_table_format)
KNOWN_EXT = 'csv'
UNKNOWN_EXT = 'unknown'
@pytest.fixture
def df():
return mock_table_data(rows=3, start_date='2018-01-01')
@pytest.fixture
def double_df(df):
ddf = pd.concat([df, df])
ddf = ddf.reset_index(drop=True)
return ddf
@pytest.fixture
def triple_df(df):
ddf = pd.concat([df, df, df])
ddf = ddf.reset_index(drop=True)
return ddf
@pytest.fixture
def csv_file(df, tmpdir_factory):
fn = str(tmpdir_factory.mktemp("data").join("temp.csv"))
df.to_csv(fn, index=False)
return fn
@pytest.mark.parametrize('method', ['read', 'write', 'append'])
def test_baseformat_is_abstract(method):
with pytest.raises(NotImplementedError):
getattr(BaseFormat, method)(df=None, filename=None)
def test_one_extension_known():
assert KNOWN_EXT in FORMATS
@pytest.mark.parametrize('format, fmt_class', FORMATS.items())
class TestFormat:
def test_format_is_valid(self, format, fmt_class):
assert isinstance(format, str)
assert issubclass(fmt_class, BaseFormat)
def test_append_non_file(self, format, fmt_class, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
with pytest.raises(FileNotFoundError):
fmt_class.append(df=df, filename=filename)
def test_write_read(self, format, fmt_class, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
def test_write_append_read(
self, format, fmt_class, df, double_df, tmpdir_factory
):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, double_df)
def test_write_append_x2_read(
self, format, fmt_class, df, triple_df, tmpdir_factory
):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
fmt_class.write(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
fmt_class.append(df=df, filename=filename)
new_df = fmt_class.read(filename=filename)
pd.testing.assert_frame_equal(new_df, triple_df)
class TestFindFormat:
@pytest.mark.parametrize(
'format, filename', [
(UNKNOWN_EXT, f'file.{UNKNOWN_EXT}'),
(UNKNOWN_EXT, f'file.{KNOWN_EXT}'), (None, f'file.{UNKNOWN_EXT}'),
(UNKNOWN_EXT, None), (None, None)
]
)
def test_unknown_format_raises(self, format, filename):
with pytest.raises(ValueError):
_find_format(format=format, filename=filename)
@pytest.mark.parametrize(
'format, filename, expected_format', [
(KNOWN_EXT, f'file.{UNKNOWN_EXT}', KNOWN_EXT),
(None, f'file.{KNOWN_EXT}', KNOWN_EXT),
]
)
def test_format_found_correctly(self, format, filename, expected_format):
found_format = _find_format(format=format, filename=filename)
assert found_format == expected_format
@pytest.mark.parametrize('format', FORMATS.keys())
class TestReadWrite:
def test_write_read(self, format, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
def test_write_append_read(self, format, df, double_df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename)
write_table_format(df=df, filename=filename, append=True)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, double_df)
def test_append_read(self, format, df, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("data").join(f'temp.{format}'))
write_table_format(df=df, filename=filename, append=True)
new_df = read_table_format(filename=filename)
pd.testing.assert_frame_equal(new_df, df)
@pytest.mark.parametrize('to_format', FORMATS.keys())
def test_convert(df, csv_file, to_format):
from_format = 'csv'
from_file = csv_file
to_file = convert_table_file(
filename=csv_file, from_format=from_format, to_format=to_format
)
from_df = read_table_format(filename=from_file)
to_df = read_table_format(filename=to_file)
assert to_file.endswith(to_format)
pd.testing.assert_frame_equal(to_df, from_df) | 0.392803 | 0.379005 |
import _thread
def init(port):
import zigbee;
zigbee.init(port);
def forward():
import zigbee;
zigbee.sendString("w#");
def stop():
import zigbee;
zigbee.sendString(" #");
def backward():
import zigbee;
zigbee.sendString("s#");
def left():
import zigbee;
zigbee.sendString("a#");
def right():
import zigbee;
zigbee.sendString("d#");
def buzzerOn():
import zigbee;
zigbee.sendString("h#");
def buzzerOff():
import zigbee;
zigbee.sendString("m#");
def lcdString(x):
import zigbee;
zigbee.sendString("lcd#");
zigbee.sendString(x);
zigbee.sendString("#");
def getString():
import zigbee;
return zigbee.getString();
def sendString(x):
import zigbee;
zigbee.sendString(str(x));
zigbee.sendString("#");
def setPort(portName,value):
import zigbee;
zigbee.sendString("setPort#");
zigbee.sendString(portName);
zigbee.sendString("#");
zigbee.sendString(str(value));
zigbee.sendString("#");
def getPin(portName):
import zigbee;
zigbee.sendString("getPin#");
zigbee.sendString(portName);
zigbee.sendString("#");
return int(zigbee.getString());
def strictForward():
import zigbee;
zigbee.sendString("strictForward#");
def strictBack():
import zigbee;
zigbee.sendString("strictBack#");
def moveOnArc(radius,dir):
import zigbee;
zigbee.sendString("moveOnArc#");
zigbee.sendString(str(radius));
zigbee.sendString("#");
zigbee.sendString(str(dir));
zigbee.sendString("#");
def rollLcd(data):
import zigbee;
zigbee.sendString("rollLCD#");
zigbee.sendString(data);
zigbee.sendString("#");
def getLeftWLS():
import zigbee;
zigbee.sendString("getLeftWLS#");
return int(zigbee.getString());
def getRightWS():
import zigbee;
zigbee.sendString("getRightWLS#");
return int(zigbee.getString());
def getCenterWLS():
import zigbee;
zigbee.sendString("getCenterWLS#");
return int(zigbee.getString());
def setVelocity(x,y):
import zigbee;
zigbee.sendString("setVelocity#");
zigbee.sendString(str(x));
zigbee.sendString("#");
zigbee.sendString(str(y));
zigbee.sendString("#");
def listenForInterrupt(interruptName):
import zigbee;
zigbee.sendString("listenForInterrupt#");
zigbee.sendString(interruptName);
zigbee.sendString("#");
return int(zigbee.getString());
def interruptHandler(interruptName,func,delay):
import time;
while(1==1):
y=listenForInterrupt(interruptName);
for i in range(0,y):
func();
time.sleep(delay);
def onInterrupt(interruptName,func,delay=.15):
import zigbee;
zigbee.sendString("resetInterrupt#");
zigbee.sendString(interruptName);
zigbee.sendString("#");
_thread.start_new_thread(interruptHandler,(interruptName,func,delay,));
def getIRSharp(num):
import zigbee;
zigbee.sendString("getIRSharp#");
zigbee.sendString(str(num));
zigbee.sendString("#");
return int(zigbee.getString());
def getIRProx(num):
import zigbee;
zigbee.sendString("getIRProx#");
zigbee.sendString(str(num));
zigbee.sendString("#");
return int(zigbee.getString()); | Codes/examples/functionList.py | import _thread
def init(port):
import zigbee;
zigbee.init(port);
def forward():
import zigbee;
zigbee.sendString("w#");
def stop():
import zigbee;
zigbee.sendString(" #");
def backward():
import zigbee;
zigbee.sendString("s#");
def left():
import zigbee;
zigbee.sendString("a#");
def right():
import zigbee;
zigbee.sendString("d#");
def buzzerOn():
import zigbee;
zigbee.sendString("h#");
def buzzerOff():
import zigbee;
zigbee.sendString("m#");
def lcdString(x):
import zigbee;
zigbee.sendString("lcd#");
zigbee.sendString(x);
zigbee.sendString("#");
def getString():
import zigbee;
return zigbee.getString();
def sendString(x):
import zigbee;
zigbee.sendString(str(x));
zigbee.sendString("#");
def setPort(portName,value):
import zigbee;
zigbee.sendString("setPort#");
zigbee.sendString(portName);
zigbee.sendString("#");
zigbee.sendString(str(value));
zigbee.sendString("#");
def getPin(portName):
import zigbee;
zigbee.sendString("getPin#");
zigbee.sendString(portName);
zigbee.sendString("#");
return int(zigbee.getString());
def strictForward():
import zigbee;
zigbee.sendString("strictForward#");
def strictBack():
import zigbee;
zigbee.sendString("strictBack#");
def moveOnArc(radius,dir):
import zigbee;
zigbee.sendString("moveOnArc#");
zigbee.sendString(str(radius));
zigbee.sendString("#");
zigbee.sendString(str(dir));
zigbee.sendString("#");
def rollLcd(data):
import zigbee;
zigbee.sendString("rollLCD#");
zigbee.sendString(data);
zigbee.sendString("#");
def getLeftWLS():
import zigbee;
zigbee.sendString("getLeftWLS#");
return int(zigbee.getString());
def getRightWS():
import zigbee;
zigbee.sendString("getRightWLS#");
return int(zigbee.getString());
def getCenterWLS():
import zigbee;
zigbee.sendString("getCenterWLS#");
return int(zigbee.getString());
def setVelocity(x,y):
import zigbee;
zigbee.sendString("setVelocity#");
zigbee.sendString(str(x));
zigbee.sendString("#");
zigbee.sendString(str(y));
zigbee.sendString("#");
def listenForInterrupt(interruptName):
import zigbee;
zigbee.sendString("listenForInterrupt#");
zigbee.sendString(interruptName);
zigbee.sendString("#");
return int(zigbee.getString());
def interruptHandler(interruptName,func,delay):
import time;
while(1==1):
y=listenForInterrupt(interruptName);
for i in range(0,y):
func();
time.sleep(delay);
def onInterrupt(interruptName,func,delay=.15):
import zigbee;
zigbee.sendString("resetInterrupt#");
zigbee.sendString(interruptName);
zigbee.sendString("#");
_thread.start_new_thread(interruptHandler,(interruptName,func,delay,));
def getIRSharp(num):
import zigbee;
zigbee.sendString("getIRSharp#");
zigbee.sendString(str(num));
zigbee.sendString("#");
return int(zigbee.getString());
def getIRProx(num):
import zigbee;
zigbee.sendString("getIRProx#");
zigbee.sendString(str(num));
zigbee.sendString("#");
return int(zigbee.getString()); | 0.173498 | 0.041696 |
import json
import shutil
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.plugins.callback import CallbackBase
import ansible.constants as C
from ansible import context
from optparse import Values
from ansible.utils.sentinel import Sentinel
class ResultCallback(CallbackBase):
def __init__(self, *args, **kwargs):
# super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
class AnsibleApi(object):
def __init__(self):
self.options = {'verbosity': 0, 'ask_pass': False, 'private_key_file': None, 'remote_user': None,
'connection': 'smart', 'timeout': 10, 'ssh_common_args': '', 'sftp_extra_args': '',
'scp_extra_args': '', 'ssh_extra_args': '', 'force_handlers': False, 'flush_cache': None,
'become': False, 'become_method': 'sudo', 'become_user': None, 'become_ask_pass': False,
'tags': ['all'], 'skip_tags': [], 'check': False, 'syntax': None, 'diff': False,
'inventory': '~/inventory',
'listhosts': None, 'subset': None, 'extra_vars': [], 'ask_vault_pass': False,
'vault_password_files': [], 'vault_ids': [], 'forks': 5, 'module_path': None, 'listtasks': None,
'listtags': None, 'step': None, 'start_at_task': None, 'args': ['fake']}
self.ops = Values(self.options)
self.loader = DataLoader()
self.passwords = dict()
self.results_callback = ResultCallback()
self.inventory = InventoryManager(loader=self.loader, sources=[self.options['inventory']])
self.variable_manager = VariableManager(loader=self.loader, inventory=self.inventory)
def runansible(self, host_list, task_list):
play_source = dict(
name="Ansible Play",
hosts=host_list,
gather_facts='no',
tasks=task_list
)
play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
tqm = None
try:
tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
# options=self.ops,
passwords=<PASSWORD>.passwords,
stdout_callback=self.results_callback,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
)
result = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
# shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
results_raw = {}
results_raw['success'] = {}
results_raw['failed'] = {}
results_raw['unreachable'] = {}
for host, result in self.results_callback.host_ok.items():
results_raw['success'][host] = json.dumps(result._result)
for host, result in self.results_callback.host_failed.items():
results_raw['failed'][host] = result._result['msg']
for host, result in self.results_callback.host_unreachable.items():
results_raw['unreachable'][host] = result._result['msg']
print(results_raw)
def playbookrun(self, playbook_path):
# self.variable_manager.extra_vars = {'customer': 'test', 'disabled': 'yes'}
context._init_global_context(self.ops)
playbook = PlaybookExecutor(playbooks=playbook_path,
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader, passwords=self.passwords)
print(self.inventory.hosts.get("192.168.11.21").vars)
result = playbook.run()
return result
if __name__ == "__main__":
a = AnsibleApi()
host_list = ['all']
tasks_list = [
dict(action=dict(module='command', args='ls')),
# dict(action=dict(module='shell', args='python cat.py')),
# dict(action=dict(module='synchronize', args='src=/home/test dest=/home/xx/ delete=yes')),
]
a.runansible(host_list, tasks_list)
file_dir = 'playbook.yml'
a.playbookrun(playbook_path=[file_dir]) | python/ansible/ansible_2.9_api.py |
import json
import shutil
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.plugins.callback import CallbackBase
import ansible.constants as C
from ansible import context
from optparse import Values
from ansible.utils.sentinel import Sentinel
class ResultCallback(CallbackBase):
def __init__(self, *args, **kwargs):
# super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
class AnsibleApi(object):
def __init__(self):
self.options = {'verbosity': 0, 'ask_pass': False, 'private_key_file': None, 'remote_user': None,
'connection': 'smart', 'timeout': 10, 'ssh_common_args': '', 'sftp_extra_args': '',
'scp_extra_args': '', 'ssh_extra_args': '', 'force_handlers': False, 'flush_cache': None,
'become': False, 'become_method': 'sudo', 'become_user': None, 'become_ask_pass': False,
'tags': ['all'], 'skip_tags': [], 'check': False, 'syntax': None, 'diff': False,
'inventory': '~/inventory',
'listhosts': None, 'subset': None, 'extra_vars': [], 'ask_vault_pass': False,
'vault_password_files': [], 'vault_ids': [], 'forks': 5, 'module_path': None, 'listtasks': None,
'listtags': None, 'step': None, 'start_at_task': None, 'args': ['fake']}
self.ops = Values(self.options)
self.loader = DataLoader()
self.passwords = dict()
self.results_callback = ResultCallback()
self.inventory = InventoryManager(loader=self.loader, sources=[self.options['inventory']])
self.variable_manager = VariableManager(loader=self.loader, inventory=self.inventory)
def runansible(self, host_list, task_list):
play_source = dict(
name="Ansible Play",
hosts=host_list,
gather_facts='no',
tasks=task_list
)
play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
tqm = None
try:
tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
# options=self.ops,
passwords=<PASSWORD>.passwords,
stdout_callback=self.results_callback,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
)
result = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
# shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
results_raw = {}
results_raw['success'] = {}
results_raw['failed'] = {}
results_raw['unreachable'] = {}
for host, result in self.results_callback.host_ok.items():
results_raw['success'][host] = json.dumps(result._result)
for host, result in self.results_callback.host_failed.items():
results_raw['failed'][host] = result._result['msg']
for host, result in self.results_callback.host_unreachable.items():
results_raw['unreachable'][host] = result._result['msg']
print(results_raw)
def playbookrun(self, playbook_path):
# self.variable_manager.extra_vars = {'customer': 'test', 'disabled': 'yes'}
context._init_global_context(self.ops)
playbook = PlaybookExecutor(playbooks=playbook_path,
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader, passwords=self.passwords)
print(self.inventory.hosts.get("192.168.11.21").vars)
result = playbook.run()
return result
if __name__ == "__main__":
a = AnsibleApi()
host_list = ['all']
tasks_list = [
dict(action=dict(module='command', args='ls')),
# dict(action=dict(module='shell', args='python cat.py')),
# dict(action=dict(module='synchronize', args='src=/home/test dest=/home/xx/ delete=yes')),
]
a.runansible(host_list, tasks_list)
file_dir = 'playbook.yml'
a.playbookrun(playbook_path=[file_dir]) | 0.403802 | 0.171442 |
import os
import csv
import shutil
from fama.utils.const import ENDS, STATUS_GOOD
from fama.utils.utils import autovivify, run_external_program, run_external_program_ignoreerror
from fama.gene_assembler.contig import Contig
from fama.gene_assembler.gene import Gene
from fama.gene_assembler.gene_assembly import GeneAssembly
from fama.diamond_parser.diamond_hit_list import DiamondHitList
from fama.diamond_parser.diamond_hit import DiamondHit
from fama.diamond_parser.hit_utils import compare_protein_hits_lca
from fama.output.json_util import export_gene_assembly
from fama.taxonomy.taxonomy_profile import TaxonomyProfile
from fama.output.krona_xml_writer import make_assembly_taxonomy_chart
from fama.output.report import generate_assembly_report
from fama.output.xlsx_util import make_assembly_xlsx
class GeneAssembler(object):
"""GeneAssembler is a working horse of Fama assembly pipeline. It exports
sequence reads, feeds external assembler with them, imports resulting
contigs, maps reads to contigs with Bowtie, finds genes with Prodigal,
assigns functions to the genes and sends gene assembly data to report generator
Attributes:
project (:obj:Project): Project instance storing sample data,
reference data and reads for assembly
assembler (str): external asembly program, valid values are 'metaspades'
(default) and 'megahit'
assembly (:obj:GeneAssembly): gene assembly with contigs and genes
is_paired_end (bool): True for paired-end project, False for others
assembly_dir (path): directory for assembly files
"""
def __init__(self, project, assembler='metaspades'):
"""Args:
project (:obj:Project): Project instance storing sample data,
reference data and reads for assembly
assembler (str): external asembly program, valid values are 'metaspades'
(default) and 'megahit'
"""
self.project = project
self.assembler = assembler
project.load_project()
self.assembly = GeneAssembly()
self.is_paired_end = None
self.assembly_dir = self.project.options.assembly_dir
if os.path.exists(self.assembly_dir):
raise FileExistsError('Assembly subdirectory already exists.' +
' Delete existing directory or change subdirectory name.')
if not os.path.isdir(self.assembly_dir):
os.mkdir(self.assembly_dir)
if not os.path.isdir(os.path.join(self.assembly_dir, 'out')):
os.mkdir(os.path.join(self.assembly_dir, 'out'))
    def export_reads(self, do_coassembly=True):
        """Exports annotated reads in FASTQ format.
        Args:
            do_coassembly (bool): if True, all reads are exported into
                Coassembly_pe1.fastq (and Coassembly_pe2.fastq for paired-end
                reads) files. If False, for each function a separate
                <function>_pe1.fastq file (or pair of files) will be created.
        Note:
            Side effects: sets self.is_paired_end from the processed samples,
            records exported read IDs in self.assembly.reads and releases
            each sample's reads from memory after export.
        """
        # Delete all existing FASTQ files
        for filename in os.listdir(self.assembly_dir):
            if filename.endswith('.fastq'):
                os.remove(os.path.join(self.assembly_dir, filename))
        # Load reads, export reads in FASTQ format, remove reads from memory
        for sample_id in sorted(self.project.list_samples()):
            self.is_paired_end = self.project.samples[sample_id].is_paired_end
            self.project.import_reads_json(sample_id, ENDS)
            for end in ENDS:
                # print ('Loading mapped reads: ', sample, end)
                # self.project.load_annotated_reads(sample, end) # Lazy load
                for read_id in self.project.samples[sample_id].reads[end]:
                    read = self.project.samples[sample_id].reads[end][read_id]
                    if read.status != STATUS_GOOD:
                        continue
                    for function in read.functions:
                        if do_coassembly:
                            # Pool every function into a single 'Coassembly' bin
                            function = 'Coassembly'
                        if read_id in self.assembly.reads[function]:
                            # Already exported for this bin: each read is
                            # written at most once per output file pair
                            continue
                        self.assembly.reads[function][read_id] = sample_id
                        fwd_outfile = os.path.join(self.assembly_dir, function + '_pe1.fastq')
                        rev_outfile = os.path.join(self.assembly_dir, function + '_pe2.fastq')
                        if self.is_paired_end:
                            if end == 'pe2':
                                # Swap targets so each mate lands in the file
                                # matching its own end
                                fwd_outfile = os.path.join(self.assembly_dir,
                                                           function + '_pe2.fastq')
                                rev_outfile = os.path.join(self.assembly_dir,
                                                           function + '_pe1.fastq')
                            # Write the mate (pe_* attributes) of the current read
                            with open(rev_outfile, 'a') as rev_of:
                                rev_of.write(read.pe_id + '\n')
                                rev_of.write(read.pe_sequence + '\n')
                                rev_of.write(read.pe_line3 + '\n')
                                rev_of.write(read.pe_quality + '\n')
                        with open(fwd_outfile, 'a') as fwd_of:
                            fwd_of.write(read.read_id_line + '\n')
                            fwd_of.write(read.sequence + '\n')
                            fwd_of.write(read.line3 + '\n')
                            fwd_of.write(read.quality + '\n')
            # Delete reads from memory
            # NOTE(review): 'end' here is the leftover loop variable, so only
            # the last end's reads are released — confirm whether all ends
            # should be cleared
            self.project.samples[sample_id].reads[end] = None
def assemble_contigs(self):
"""Assembles contigs from annotated reads, a separate assembly for
each of functions, runs read mapping, calculates read coverage
"""
# Run Assembler ('megahit' for Megahit or 'metaSPAdes' for metaSPAdes)
if self.assembler == 'megahit':
run_assembler(sorted(self.assembly.reads.keys()),
self.project.config.megahit_path,
self.assembly_dir)
elif self.assembler == 'metaspades':
run_assembler(sorted(self.assembly.reads.keys()),
self.project.config.metaspades_path,
self.assembly_dir,
is_paired_end=self.is_paired_end)
else:
raise ValueError('Unknown assembler: ' + self.assembler)
# Filter contigs by size
self.filter_contigs_by_length()
# Run Bowtie
run_mapper_indexing(sorted(self.assembly.reads.keys()),
self.assembly_dir,
self.project.config.bowtie_indexer_path)
run_mapper(sorted(self.assembly.reads.keys()),
self.assembly_dir,
self.project.config.bowtie_path,
is_paired_end=self.is_paired_end)
self.import_contigs()
self.import_read_mappings()
def import_contigs(self):
"""Imports assembled contigs from filtered FASTA file"""
for function in sorted(self.assembly.reads.keys()):
contig_file = os.path.join(self.assembly_dir,
function,
'final.contigs.filtered.fa')
if os.path.exists(contig_file):
with open(contig_file, 'r') as infile:
current_id = None
sequence = ''
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
if current_id is not None:
contig = Contig(contig_id=current_id, sequence=sequence)
self.assembly.contigs[function][current_id] = contig
if self.assembler == 'megahit':
line_tokens = line[1:].split(' ')
current_id = line_tokens[0]
elif self.assembler == 'metaspades':
current_id = line[1:]
else:
raise ValueError('Unknown assembler: ' + self.assembler)
sequence = ''
else:
sequence += line
if current_id is not None:
contig = Contig(contig_id=current_id, sequence=sequence)
self.assembly.contigs[function][current_id] = contig
else:
print('File ' + contig_file + ' does not exist.')
def import_read_mappings(self):
"""Imports read mapping data from SAM file(s)"""
for function in sorted(self.assembly.reads.keys()):
sam_file = os.path.join(self.assembly_dir, function, 'contigs.sam')
if os.path.exists(sam_file):
with open(sam_file, 'r') as infile:
for line in infile:
if line.startswith('@'):
continue
line_tokens = line.split('\t')
if len(line_tokens) > 9:
read_id = line_tokens[0]
contig_id = line_tokens[2]
alignment_length = len(line_tokens[9])
if contig_id in self.assembly.contigs[function]:
self.assembly.contigs[function][contig_id].update_coverage(
self.assembly.reads[function][read_id],
alignment_length
)
self.assembly.contigs[function][contig_id].reads.append(read_id)
else:
print('File ' + sam_file + ' does not exist.')
def filter_contigs_by_length(self):
"""Filters list of contigs by length
TODO:
make contig_length_threshold a parameter in ProgramConfig or constant
"""
contig_length_threshold = 300
for function in self.assembly.reads.keys():
contig_file = os.path.join(self.assembly_dir, function, 'final.contigs.fa')
if not os.path.exists(contig_file):
continue
outfile = os.path.join(self.assembly_dir, function, 'final.contigs.filtered.fa')
with open(outfile, 'w') as outfile:
with open(contig_file, 'r') as infile:
current_id = None
sequence = []
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
contig_sequence = ''.join(sequence)
if current_id and len(contig_sequence) >= contig_length_threshold:
outfile.write('\n'.join([current_id, contig_sequence, '']))
line_tokens = line.split(' ')
current_id = line_tokens[0]
sequence = []
else:
sequence.append(line)
contig_sequence = ''.join(sequence)
if len(contig_sequence) >= contig_length_threshold:
outfile.write('\n'.join([current_id, contig_sequence, '']))
    def parse_reference_output(self):
        """Reads and processes DIAMOND tabular output of the preselection
        DIAMOND search.
        Note: this function finds query sequences similar to reference
        proteins. Since a query sequence may have more than one areas of
        similarity (for instance, in fusion proteins of two subunits or
        in multi-domain proteins), it will try to find as many such areas
        as possible.
        DIAMOND hits are filtered by two parameters: length of alignment
        and amino acid identity %, which are defined in program config ini.
        """
        tsvfile = os.path.join(self.assembly_dir,
                               'all_contigs_' + self.project.options.ref_output_name)
        current_id = ''
        hit_list = DiamondHitList(current_id)
        # Cutoffs for the active reference collection, taken from program config
        identity_cutoff = self.project.config.get_identity_cutoff(
            self.project.options.get_collection())
        length_cutoff = self.project.config.get_length_cutoff(
            self.project.options.get_collection())
        print('Parse reference output: Identity cutoff: ',
              identity_cutoff,
              ', Length cutoff: ',
              length_cutoff)
        with open(tsvfile, 'r', newline='') as infile:
            tsvin = csv.reader(infile, delimiter='\t')
            # All hits of one query appear on consecutive rows, so hits are
            # accumulated per query and flushed when the query ID changes
            for row in tsvin:
                hit = DiamondHit()
                hit.create_hit(row)
                # filtering by identity and length
                if hit.identity < identity_cutoff:
                    continue  # skip this line
                if hit.length < length_cutoff:
                    continue  # skip this line
                if hit.query_id != current_id:
                    # New query started: finalize the list of the previous one.
                    # filter list for overlapping hits
                    hit_list.filter_list(self.project.config.get_overlap_cutoff(
                        self.project.options.get_collection()))
                    if hit_list.hits_number != 0:
                        # annotate_hits
                        hit_list.annotate_hits(self.project.ref_data)
                        # query IDs are gene IDs; parse_gene_id presumably
                        # splits them into function and contig parts — see
                        # its definition to confirm the exact layout
                        function_id, contig_id, _ = parse_gene_id(current_id)
                        self.assembly.contigs[function_id][contig_id].\
                            genes[current_id].hit_list = hit_list
                    current_id = hit.query_id
                    hit_list = DiamondHitList(current_id)
                hit_list.add_hit(hit)
        # Flush the hits collected for the last query in the file (same
        # steps as the in-loop flush above)
        hit_list.filter_list(
            self.project.config.get_overlap_cutoff(self.project.options.get_collection()))
        if hit_list.hits_number != 0:
            # annotate_hits
            hit_list.annotate_hits(self.project.ref_data)
            function_id, contig_id, _ = parse_gene_id(current_id)
            self.assembly.contigs[function_id][contig_id].genes[current_id].hit_list = \
                hit_list
def export_hit_fasta(self):
"""Exports hit sequences as gzipped FASTA file"""
outfile = os.path.join(
self.assembly_dir, 'all_contigs_' + self.project.options.ref_hits_fastq_name
)
with open(outfile, 'w') as outfile:
for function in sorted(self.assembly.contigs.keys()):
for contig_id in sorted(self.assembly.contigs[function].keys()):
for gene_id in self.assembly.contigs[function][contig_id].genes.keys():
gene = self.assembly.contigs[function][contig_id].genes[gene_id]
if not gene.hit_list:
continue
for hit in gene.hit_list.hits:
start = hit.q_start
end = hit.q_end
outfile.write('>' + '|'.join([gene_id, str(start), str(end)]) + '\n')
start = start - 1
try:
outfile.write(gene.protein_sequence[start:end] + '\n')
except TypeError:
print('TypeError occurred while exporting ', gene.gene_id)
    def parse_background_output(self):
        """Reads and processes DIAMOND tabular output of the classification
        DIAMOND search.
        Note: this function takes existing list of hits and compares each
        of them with results of new similarity serach (against classification DB).
        For the comparison, it calls compare_hits_lca function.
        """
        tsvfile = os.path.join(self.assembly_dir,
                               'all_contigs_' + self.project.options.background_output_name)
        current_query_id = None
        hit_list = None
        length_cutoff = self.project.config.get_length_cutoff(
            self.project.options.get_collection())
        biscore_range_cutoff = self.project.config.get_biscore_range_cutoff(
            self.project.options.get_collection())
        print('Relative bit-score cutoff: ', biscore_range_cutoff,
              ', Length cutoff: ', length_cutoff)
        average_coverage = self.assembly.calculate_average_coverage()
        with open(tsvfile, 'r', newline='') as infile:
            tsvin = csv.reader(infile, delimiter='\t')
            function_id = ''
            contig_id = ''
            gene_id = ''
            coverage = ''
            # All hits of one query appear on consecutive rows; they are
            # accumulated and processed when the query ID changes
            for row in tsvin:
                if current_query_id is None:
                    # First data row: start the first hit list
                    current_query_id = row[0]
                    hit_list = DiamondHitList(current_query_id)
                hit = DiamondHit()
                hit.create_hit(row)
                # filtering by identity and length
                # NOTE(review): only length is filtered here; identity
                # filtering appears to happen in filter_list_by_identity below
                if hit.length < length_cutoff:
                    continue  # skip this hit
                if hit.query_id != current_query_id:
                    hit_list.annotate_hits(self.project.ref_data)
                    hit_list.filter_list_by_identity(self.project.ref_data)
                    # compare list of hits from search in background DB with existing
                    # hit from search in reference DB
                    # Query IDs look like "<function>|<contig>_<gene>|...|<start>|<end>"
                    # (inferred from this tokenization — confirm against
                    # export_hit_fasta, which writes "gene_id|start|end")
                    current_query_id_tokens = current_query_id.split('|')
                    function_id = current_query_id_tokens[0]
                    contig_id = '_'.join(current_query_id_tokens[1].split('_')[:-1])
                    gene_id = '|'.join(current_query_id_tokens[:-2])
                    coverage = self.assembly.contigs[function_id][contig_id].get_coverage()
                    try:
                        compare_protein_hits_lca(
                            self.assembly.contigs[function_id][contig_id].genes[gene_id],
                            int(current_query_id_tokens[-2]),  # hit_start
                            int(current_query_id_tokens[-1]),  # hit_end
                            hit_list,
                            biscore_range_cutoff,
                            coverage,
                            average_coverage,
                            self.project.taxonomy_data,
                            self.project.ref_data
                        )
                    except KeyError:
                        print(' '.join(['Gene not found:', gene_id, 'in', function_id, contig_id]))
                    current_query_id = hit.query_id
                    hit_list = DiamondHitList(current_query_id)
                hit_list.add_hit(hit)
        # Process the hits collected for the last query in the file (same
        # steps as the in-loop processing above)
        hit_list.annotate_hits(self.project.ref_data)
        hit_list.filter_list_by_identity(self.project.ref_data)
        current_query_id_tokens = current_query_id.split('|')
        function_id = current_query_id_tokens[0]
        contig_id = '_'.join(current_query_id_tokens[1].split('_')[:-1])
        gene_id = '|'.join(current_query_id_tokens[:-2])
        coverage = self.assembly.contigs[function_id][contig_id].get_coverage()
        try:
            compare_protein_hits_lca(
                self.assembly.contigs[function_id][contig_id].genes[gene_id],
                int(current_query_id_tokens[-2]),  # hit_start
                int(current_query_id_tokens[-1]),  # hit_end
                hit_list,
                biscore_range_cutoff,
                coverage,
                average_coverage,
                self.project.taxonomy_data,
                self.project.ref_data
            )
        except KeyError:
            print(' '.join(['Gene not found:', gene_id, 'in', function_id, contig_id]))
def predict_genes(self):
"""Filters contigs by coverage, runs Prodigal on remaining contigs,
Todo:
make contig_coverage_cutoff a parameter or a constant
"""
# Filter contigs by coverage
contig_coverage_cutoff = 3.0
prodigal_infile = os.path.join(self.assembly_dir, 'all_contigs.fa')
with open(prodigal_infile, 'w') as outfile:
for function in sorted(self.assembly.contigs.keys()):
for contig in sorted(self.assembly.contigs[function].keys()):
if self.assembly.contigs[function][
contig
].get_coverage() >= contig_coverage_cutoff:
outfile.write('>' + function + '|' + contig + '\n')
outfile.write(self.assembly.contigs[function][contig].sequence + '\n')
# Run Prodigal
prodigal_outfile = os.path.join(self.assembly_dir, 'all_contigs.prodigal.out.faa')
run_prodigal(prodigal_infile, prodigal_outfile, self.project.config.prodigal_path)
with open(prodigal_outfile, 'r') as infile:
current_id = None
sequence = ''
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
if current_id:
line_tokens = current_id.split(' # ')
function_id, contig_id, _ = parse_gene_id(line_tokens[0])
gene = Gene(contig_id=contig_id,
gene_id=line_tokens[0],
sequence=sequence,
start=line_tokens[1],
end=line_tokens[2],
strand=line_tokens[3])
self.assembly.contigs[function_id][contig_id].add_gene(gene)
line_tokens = line.split(' ')
current_id = line[1:] # line_tokens[0][1:]
sequence = ''
else:
sequence += line
line_tokens = current_id.split(' # ')
function_id, contig_id, _ = parse_gene_id(line_tokens[0])
gene = Gene(contig_id=contig_id,
gene_id=line_tokens[0],
sequence=sequence,
start=line_tokens[1],
end=line_tokens[2],
strand=line_tokens[3])
self.assembly.contigs[function_id][contig_id].add_gene(gene)
def annotate_genes(self):
"""Runs pre-selection DIAMOND search, runs classification DIAMOND search,
exports assembly in JSON format
Todo:
make contig_coverage_cutoff a parameter or a constant
"""
# Search in reference database
run_ref_search(self.project)
# Process output of reference DB search
self.parse_reference_output()
export_gene_assembly(
self.assembly, os.path.join(self.assembly_dir, 'all_contigs_assembly.json'))
# Import sequence data for selected sequence reads
print('Reading FASTQ file')
self.export_hit_fasta()
# Search in background database
run_bgr_search(self.project)
# Process output of reference DB search
self.parse_background_output()
print('Exporting JSON')
export_gene_assembly(self.assembly,
os.path.join(self.assembly_dir,
'all_contigs_assembly.json'))
    def generate_taxonomy_chart(self, taxonomy_data):
        '''
        Collects data about functional genes in assembly and
        creates one Krona chart for all functions.

        For every good gene, each DIAMOND hit's functions are credited to the
        gene's taxon. Contig-level metrics (RPKM, read count) are apportioned
        to the gene by its share of contig length: len(protein) * 3 nt divided
        by contig length.

        Args:
            taxonomy_data (:obj:TaxonomyData): NCBI taxonomy data
        '''
        functions_list = set()
        genes = autovivify(2)  # genes[gene][function][parameter] = parameter_value
        scores = autovivify(2)  # scores[taxonomy ID][function][parameter] = parameter_value
        # Total read count over all samples, used for RPKM normalization
        total_read_count = 0
        for sample in self.project.list_samples():
            total_read_count += self.project.options.get_fastq1_readcount(sample)
        for function in self.assembly.contigs:
            functions_list.add(function)
            for _, contig in self.assembly.contigs[function].items():
                for gene_id, gene in contig.genes.items():
                    if gene.status != STATUS_GOOD:
                        continue
                    taxonomy_id = gene.taxonomy  # Was get_taxonomy_id()
                    for hit in gene.hit_list.hits:
                        identity = hit.identity
                        for hit_function in hit.functions:
                            functions_list.add(hit_function)
                            # Accumulate per-taxon scores; first assignment
                            # initializes, later ones add up
                            if 'rpkm' in scores[taxonomy_id][hit_function]:
                                scores[taxonomy_id][hit_function]['rpkm'] += \
                                    contig.get_rpkm(total_read_count) * \
                                    len(gene.protein_sequence) * 3 / len(contig.sequence)
                            else:
                                scores[taxonomy_id][hit_function]['rpkm'] = \
                                    contig.get_rpkm(total_read_count) * \
                                    len(gene.protein_sequence) * 3 / len(contig.sequence)
                            if 'count' in scores[taxonomy_id][hit_function]:
                                scores[taxonomy_id][hit_function]['count'] += \
                                    contig.get_read_count() * len(gene.protein_sequence) * 3 / \
                                    len(contig.sequence)
                            else:
                                scores[taxonomy_id][hit_function]['count'] = \
                                    contig.get_read_count() * len(gene.protein_sequence) * 3 / \
                                    len(contig.sequence)
                            if 'hit_count' in scores[taxonomy_id][hit_function]:
                                scores[taxonomy_id][hit_function]['hit_count'] += 1
                            else:
                                scores[taxonomy_id][hit_function]['hit_count'] = 1
                            # 'identity' is a running sum; presumably averaged
                            # by hit_count downstream -- TODO confirm
                            if 'identity' in scores[taxonomy_id][hit_function]:
                                scores[taxonomy_id][hit_function]['identity'] += \
                                    identity
                            else:
                                scores[taxonomy_id][hit_function]['identity'] = \
                                    identity
                            if 'genes' in scores[taxonomy_id][hit_function]:
                                scores[taxonomy_id][hit_function]['genes'] += gene_id + ' '
                            else:
                                scores[taxonomy_id][hit_function]['genes'] = gene_id + ' '
                            # Per-gene display attributes (strings for the chart)
                            genes[gene_id][hit_function]['Length'] = \
                                str(len(gene.protein_sequence)) + 'aa'
                            genes[gene_id][hit_function]['Completeness'] = '{0:.0f}'.format(
                                len(gene.protein_sequence) * 100 / hit.s_len
                            )
                            genes[gene_id][hit_function]['identity'] = '{0:.1f}'.format(
                                identity
                            )
                            genes[gene_id][hit_function]['rpkm'] = '{0:.6f}'.format(
                                contig.get_rpkm(
                                    total_read_count
                                ) * len(gene.protein_sequence) * 3 / len(contig.sequence)
                            )
                            genes[gene_id][hit_function]['count'] = '{0:.0f}'.format(
                                contig.get_read_count() * len(
                                    gene.protein_sequence
                                ) * 3 / len(contig.sequence)
                            )
                            genes[gene_id][hit_function]['coverage'] = '{0:.1f}'.format(
                                contig.get_coverage()
                            )
        # Build the taxonomy profile and render one Krona chart for all functions
        taxonomic_profile = TaxonomyProfile()
        taxonomic_profile.make_assembly_taxonomy_profile(taxonomy_data, scores)
        outfile = os.path.join(self.assembly_dir, 'assembly_taxonomic_profile.xml')
        make_assembly_taxonomy_chart(
            taxonomic_profile, genes, sorted(functions_list), outfile,
            self.project.config.krona_path, metric='rpkm'
        )
def generate_function_taxonomy_charts(self, taxonomy_data):
'''
Generates series of Krona charts visualizing functions in assembly:
one function per file, separate stats for each sample
Args:
taxonomy_data (:obj:TaxonomyData): NCBI taxonomy data
'''
functions_list = set()
samples_list = sorted(self.project.list_samples())
total_read_count = 0
for sample in self.project.list_samples():
total_read_count += self.project.options.get_fastq1_readcount(sample)
# Make list of functions
for function in self.assembly.contigs:
for contig in self.assembly.contigs[function]:
for gene_id, gene in self.assembly.contigs[function][contig].genes.items():
if gene.status == STATUS_GOOD:
for gene_function in gene.functions:
functions_list.add(gene_function)
for function in sorted(functions_list):
genes = autovivify(2) # genes[gene][sample][parameter] = parameter_value
scores = autovivify(2) # scores[taxonomy ID][sample][parameter] = parameter_value
outfile = os.path.join(self.assembly_dir, 'out', function + '_taxonomic_profile.xml')
for assembly_function in self.assembly.contigs:
for _, contig in self.assembly.contigs[assembly_function].items():
for gene_id, gene in contig.genes.items():
function_counted = False
if gene.status != STATUS_GOOD or function not in gene.functions:
continue
taxonomy_id = gene.taxonomy
if taxonomy_id not in scores:
for sample_id in samples_list:
scores[taxonomy_id][sample_id]['rpkm'] = 0.0
scores[taxonomy_id][sample_id]['count'] = 0
scores[taxonomy_id][sample_id]['hit_count'] = 0
scores[taxonomy_id][sample_id]['identity'] = 0.0
scores[taxonomy_id][sample_id]['genes'] = ''
scores[taxonomy_id]['All samples']['rpkm'] = 0.0
scores[taxonomy_id]['All samples']['count'] = 0
scores[taxonomy_id]['All samples']['hit_count'] = 0
scores[taxonomy_id]['All samples']['identity'] = 0.0
scores[taxonomy_id]['All samples']['genes'] = ''
for hit in gene.hit_list.hits:
identity = hit.identity
if function in hit.functions:
if function_counted:
continue
for sample in samples_list:
if sample in contig.read_count:
scores[taxonomy_id][sample]['rpkm'] += contig.get_rpkm(
self.project.options.get_fastq1_readcount(sample),
sample
) * len(gene.protein_sequence) * 3 / len(
contig.sequence
)
scores[taxonomy_id][sample]['count'] += \
contig.get_read_count(sample) * \
len(gene.protein_sequence) * 3 / \
len(contig.sequence)
scores[taxonomy_id][sample]['hit_count'] += 1
scores[taxonomy_id][sample]['identity'] += identity
scores[taxonomy_id][sample]['genes'] += gene_id + ' '
genes[gene_id][sample]['Length'] = \
str(len(gene.protein_sequence)) + 'aa'
genes[gene_id][sample]['Completeness'] = '{0:.0f}'.format(
len(gene.protein_sequence) * 100 / hit.s_len
)
genes[gene_id][sample]['identity'] = '{0:.1f}'.format(
identity
)
genes[gene_id][sample]['rpkm'] = '{0:.7f}'.format(
contig.get_rpkm(
self.project.options.get_fastq1_readcount(
sample
),
sample
) * len(gene.protein_sequence) * 3 / len(
contig.sequence
)
)
genes[gene_id][sample]['count'] = 3 * '{0:.0f}'.format(
contig.get_read_count(sample) * len(
gene.protein_sequence
) / len(contig.sequence)
)
genes[gene_id][sample]['coverage'] = '{0:.1f}'.format(
contig.get_coverage(sample)
)
scores[taxonomy_id]['All samples']['rpkm'] += \
contig.get_rpkm(total_read_count) * \
len(gene.protein_sequence) \
* 3 / len(contig.sequence)
scores[taxonomy_id]['All samples']['count'] += \
contig.get_read_count() * len(gene.protein_sequence) \
* 3 / len(contig.sequence)
scores[taxonomy_id]['All samples']['hit_count'] += 1
scores[taxonomy_id]['All samples']['identity'] += identity
scores[taxonomy_id]['All samples']['genes'] += gene_id + ' '
genes[gene_id]['All samples']['Length'] = \
str(len(gene.protein_sequence)) + 'aa'
genes[gene_id]['All samples']['Completeness'] = '{0:.0f}'.format(
len(gene.protein_sequence) * 100 / hit.s_len
)
genes[gene_id]['All samples']['identity'] = \
'{0:.1f}'.format(identity)
genes[gene_id]['All samples']['rpkm'] = '{0:.7f}'.format(
contig.get_rpkm(total_read_count) * len(
gene.protein_sequence
) * 3 / len(contig.sequence)
)
genes[gene_id]['All samples']['count'] = '{0:.0f}'.format(
contig.get_read_count() * len(
gene.protein_sequence
) * 3 / len(contig.sequence)
)
genes[gene_id]['All samples']['coverage'] = '{0:.1f}'.format(
contig.get_coverage()
)
function_counted = True
taxonomic_profile = TaxonomyProfile()
taxonomic_profile.make_assembly_taxonomy_profile(taxonomy_data, scores)
output_sample_ids = sorted(self.project.list_samples())
output_sample_ids.append('All samples')
make_assembly_taxonomy_chart(
taxonomic_profile, genes, output_sample_ids, outfile,
self.project.config.krona_path, metric='rpkm'
)
def write_sequences(self):
"""Exports gene and protein sequences in FASTA format"""
genes = autovivify(2) # genes[function][gene][parameter] = parameter_value
for function in self.assembly.contigs:
for contig in self.assembly.contigs[function]:
for gene_id, gene in self.assembly.contigs[function][contig].genes.items():
if gene.status == STATUS_GOOD:
for hit in gene.hit_list.data:
taxonomy_id = gene.taxonomy
for hit_function in hit.functions:
start = gene.start
end = gene.end
strand = gene.strand
genes[hit_function][gene_id]['start'] = start
genes[hit_function][gene_id]['end'] = end
genes[hit_function][gene_id]['strand'] = strand
genes[hit_function][gene_id]['taxonomy'] = taxonomy_id
gene_sequence = self.assembly.contigs[function][contig].\
sequence[int(start) - 1: int(end)]
if strand == '-1':
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
gene_sequence = ''.join(
[complement[nucl] for nucl in reversed(gene_sequence)]
)
genes[hit_function][gene_id]['sequence'] = gene_sequence
genes[hit_function][gene_id]['protein'] = gene.protein_sequence
genes[hit_function][gene_id]['aai'] = hit.identity
genes[hit_function][gene_id]['completeness'] = \
len(gene.protein_sequence) * 100 / hit.s_len
for function in genes:
outfile = os.path.join(self.project.options.assembly_dir,
'out',
function + '_genes_Fama.fna')
with open(outfile, 'w') as outfile:
for gene_id in genes[function]:
lineage = self.project.taxonomy_data.get_taxonomy_lineage(
genes[function][gene_id]['taxonomy'])
outfile.write('>' + gene_id + '|' +
genes[function][gene_id]['start'] + '|' +
genes[function][gene_id]['end'] + '|' +
genes[function][gene_id]['strand'] + '|' +
lineage + '\n') # '|'
outfile.write(genes[function][gene_id]['sequence'] + '\n')
outfile = os.path.join(self.project.options.assembly_dir,
'out',
function + '_proteins_Fama.faa')
with open(outfile, 'w') as outfile:
for gene_id in genes[function]:
lineage = self.project.taxonomy_data.get_taxonomy_lineage(
genes[function][gene_id]['taxonomy'])
outfile.write('>' + gene_id + '|' +
genes[function][gene_id]['start'] + '|' +
genes[function][gene_id]['end'] + '|' +
genes[function][gene_id]['strand'] + '|' +
lineage + '\n') # '|'
outfile.write(genes[function][gene_id]['protein'] + '\n')
    def generate_output(self):
        """Sends assembly data to Excel report generator, exports genes and
        proteins, calls methods for taxonomy chart generation.

        The order matters: sequences are written first, then the XLSX report,
        then Krona charts, then the text report.
        """
        # Export gene/protein FASTA files
        self.write_sequences()
        # Excel summary of the assembly
        make_assembly_xlsx(self)
        # One combined Krona chart, then one chart per function
        self.generate_taxonomy_chart(self.project.taxonomy_data)
        self.generate_function_taxonomy_charts(self.project.taxonomy_data)
        # Plain-text report
        generate_assembly_report(self)
def run_assembler(functions, assembler, output_dir, is_paired_end=True):
    """Fires up an external assembler, dispatching on the binary name:
    MEGAHIT or metaSPAdes. Raises RuntimeError for single-end metaSPAdes runs.
    """
    if assembler.endswith('megahit'):
        run_megahit(functions, output_dir, assembler, is_paired_end)
    elif assembler.endswith('metaspades.py'):
        if not is_paired_end:
            raise RuntimeError(
                'Current version of metaSPAdes does not support single-end libraries.'
            )
        run_spades(functions, output_dir, assembler, is_paired_end)
def run_megahit(functions, output_dir, assembler_command, is_paired_end=True):
    """Runs MEGAHIT assembler on exported reads, one assembly per function.

    Reads are taken from <output_dir>/<function>_pe1.fastq (and _pe2.fastq
    for paired-end data); results go to <output_dir>/<function>.
    """
    print('Starting assembly')
    for function in functions:
        print('Run assembler for function', function)
        if is_paired_end:
            read_args = ['-1',
                         os.path.join(output_dir, function + '_pe1.fastq'),
                         '-2',
                         os.path.join(output_dir, function + '_pe2.fastq')]
        else:
            read_args = ['-r',
                         os.path.join(output_dir, function + '_pe1.fastq')]
        assembler_args = [assembler_command] + read_args + \
            ['-o', os.path.join(output_dir, function)]
        run_external_program(assembler_args)
        print('Assembler finished for function ', function)
    print('Assembly finished')
def run_spades(functions, output_dir, assembler_command, is_paired_end=True):
    """Runs metaSPAdes assembler on exported reads, one assembly per function.

    Args:
        functions (list of str): function identifiers; reads are read from
            <output_dir>/<function>_pe1.fastq (and _pe2.fastq for paired-end)
        output_dir (str): assembly directory
        assembler_command (str): path to metaspades.py
        is_paired_end (bool): True for paired-end reads

    metaSPAdes exit errors are ignored so one failed function does not abort
    the loop; contigs.fasta is copied to final.contigs.fa (the name MEGAHIT
    produces) so downstream steps can use a single filename.
    """
    print('Starting metaSPAdes')
    tmp_dir = os.path.join(output_dir, 'tmp')
    for function in functions:
        print('Run metaSPAdes for function', function)
        assembler_args = [assembler_command,
                          '--meta',
                          '-t',
                          '12',
                          '-m',
                          '50',  # TODO: make a parameter
                          '-k',
                          '33,55,71,91,111',  # TODO: adjust automatically
                          '-o',
                          os.path.join(output_dir, function),
                          '--tmp-dir',
                          tmp_dir]
        if is_paired_end:
            assembler_args.extend(['-1',
                                   os.path.join(output_dir, function + '_pe1.fastq'),
                                   '-2',
                                   os.path.join(output_dir, function + '_pe2.fastq')])
        else:
            # Note: run_assembler blocks this path; kept for direct callers
            assembler_args.extend(['-s',
                                   os.path.join(output_dir, function + '_pe1.fastq')])
        run_external_program_ignoreerror(assembler_args)
        if os.path.exists(os.path.join(output_dir, function, 'contigs.fasta')):
            # Normalize output name to match MEGAHIT's convention
            shutil.copyfile(os.path.join(output_dir, function, 'contigs.fasta'),
                            os.path.join(output_dir, function, 'final.contigs.fa'))
        print('Assembler finished for function ', function)
    print('metaSPAdes finished')
def run_mapper_indexing(functions, output_dir, mapper_command):
    """Runs Bowtie2 indexer (bowtie2-build) on filtered contigs.

    Args:
        functions (list of str): function identifiers (subdirectory names)
        output_dir (str): assembly directory
        mapper_command (str): path to the bowtie2-build executable

    Fix: mapper_command was previously overwritten with the bare string
    'bowtie2-build', silently ignoring the configured indexer path passed
    by the caller (project.config.bowtie_indexer_path).
    """
    for function in functions:
        contigs_file = os.path.join(output_dir, function, 'final.contigs.filtered.fa')
        if not os.path.exists(contigs_file):
            print('Contigs file for function', function, 'not found')
            continue
        print('Run indexing for function', function)
        # Skip empty contig files: bowtie2-build fails on them
        if os.path.getsize(contigs_file) > 0:
            index_dir = os.path.join(output_dir, function, 'index')
            if not os.path.exists(index_dir):
                os.mkdir(index_dir)
            mapper_args = [mapper_command,
                           '-f',
                           contigs_file,
                           os.path.join(output_dir, function, 'index', 'index')]
            run_external_program(mapper_args)
def run_mapper(functions, output_dir, mapper_command, is_paired_end=True):
    """Runs Bowtie2 mapper on filtered contigs, writing contigs.sam per function.

    Args:
        functions (list of str): function identifiers (subdirectory names)
        output_dir (str): assembly directory
        mapper_command (str): path to the bowtie2 executable
        is_paired_end (bool): True for paired-end reads

    Fix: mapper_command was previously overwritten with the bare string
    'bowtie2', silently ignoring the configured path passed by the caller
    (project.config.bowtie_path).
    """
    for function in functions:
        contigs_file = os.path.join(output_dir, function, 'final.contigs.filtered.fa')
        if not os.path.exists(contigs_file):
            continue
        if os.path.getsize(contigs_file) > 0:
            print('Run read mapping for function', function)
            # NOTE(review): the trailing '>file' token assumes
            # run_external_program interprets shell-style redirection;
            # confirm contigs.sam is actually produced this way.
            if is_paired_end:
                mapper_args = [mapper_command,
                               '-q',
                               '--very-sensitive',
                               '--quiet',
                               '-x',
                               os.path.join(output_dir, function, 'index', 'index'),
                               '-1',
                               os.path.join(output_dir, function + '_pe1.fastq'),
                               '-2',
                               os.path.join(output_dir, function + '_pe2.fastq'),
                               '>' + os.path.join(output_dir, function, 'contigs.sam')]
            else:
                mapper_args = [mapper_command,
                               '-q',
                               '--very-sensitive',
                               '--quiet',
                               '-x',
                               os.path.join(output_dir, function, 'index', 'index'),
                               '-U',
                               os.path.join(output_dir, function + '_pe1.fastq'),
                               '>' + os.path.join(output_dir, function, 'contigs.sam')]
            run_external_program(mapper_args)
def run_prodigal(infile, outfile, prodigal_path):
    """Runs Prodigal gene prediction on filtered contigs.

    Predicted proteins go to `outfile` (-a), the gene coordinate report to
    `outfile` + 'prodigal.txt' (-o); metagenome mode (-p meta) is used.
    """
    print('Starting Prodigal')
    prodigal_args = [
        prodigal_path,
        '-p', 'meta',
        '-a', outfile,
        '-i', infile,
        '-o', outfile + 'prodigal.txt',
    ]
    run_external_program(prodigal_args)
    print('Prodigal finished')
def run_ref_search(project):
    """Runs DIAMOND pre-selection search on predicted genes.

    Searches all predicted proteins (all_contigs.prodigal.out.faa) against
    the reference DIAMOND database of the current collection and writes
    tabular (outfmt 6) output consumed by parse_reference_output.

    Args:
        project (:obj:Project): current project
    """
    print('Starting DIAMOND')
    diamond_args = [project.config.diamond_path,
                    'blastp',
                    '--db',
                    project.config.get_reference_diamond_db(project.options.get_collection()),
                    '--query',
                    os.path.join(project.options.assembly_dir,
                                 'all_contigs.prodigal.out.faa'),
                    '--out',
                    os.path.join(project.options.assembly_dir,
                                 'all_contigs_' + project.options.ref_output_name),
                    '--max-target-seqs',
                    '50',
                    '--evalue',
                    str(project.config.get_evalue_cutoff(project.options.get_collection())),
                    '--threads',
                    project.config.threads,
                    # Column order must match DiamondHit.create_hit expectations
                    '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
                    'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
                    'evalue', 'bitscore']
    run_external_program(diamond_args)
    print('DIAMOND finished')
def run_bgr_search(project):
    """Runs DIAMOND classification search on predicted genes.

    Searches the exported hit regions against the (larger) background
    database. The e-value cutoff is rescaled by the ratio of background to
    reference database sizes so that the significance threshold stays
    comparable between the two searches.

    Args:
        project (:obj:Project): current project
    """
    print('Starting DIAMOND')
    diamond_args = [project.config.diamond_path,
                    'blastp',
                    '--db',
                    project.config.get_background_diamond_db(project.options.get_collection()),
                    '--query',
                    os.path.join(
                        project.options.assembly_dir,
                        'all_contigs_' + project.options.ref_hits_fastq_name
                    ),
                    '--out',
                    os.path.join(
                        project.options.assembly_dir,
                        'all_contigs_' + project.options.background_output_name
                    ),
                    '--max-target-seqs',
                    '50',
                    '--evalue',
                    # e-value scaled by background/reference DB size ratio
                    str(project.config.get_background_db_size(project.options.get_collection())
                        * project.config.get_evalue_cutoff(project.options.get_collection())
                        / project.config.get_reference_db_size(project.options.get_collection())),
                    '--threads',
                    project.config.threads,
                    # Column order must match DiamondHit.create_hit expectations
                    '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
                    'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
                    'evalue', 'bitscore']
    run_external_program(diamond_args)
    print('DIAMOND finished')
def parse_gene_id(gene_id):
    """Extracts contig identifier and function identifier from gene identifier.

    Gene identifiers look like '<function>|<contig id>_<gene number>', where
    the contig id itself may contain underscores.

    Args:
        gene_id (str): gene identifier

    Returns:
        tuple: (function_id, contig_id, gene number) as strings
    """
    (function_id, gene) = gene_id.split('|')
    gene_id_tokens = gene.split('_')
    gene_id = gene_id_tokens[-1]
    contig_id = '_'.join(gene_id_tokens[:-1])
    return function_id, contig_id, gene_id
import os
import csv
import shutil
from fama.utils.const import ENDS, STATUS_GOOD
from fama.utils.utils import autovivify, run_external_program, run_external_program_ignoreerror
from fama.gene_assembler.contig import Contig
from fama.gene_assembler.gene import Gene
from fama.gene_assembler.gene_assembly import GeneAssembly
from fama.diamond_parser.diamond_hit_list import DiamondHitList
from fama.diamond_parser.diamond_hit import DiamondHit
from fama.diamond_parser.hit_utils import compare_protein_hits_lca
from fama.output.json_util import export_gene_assembly
from fama.taxonomy.taxonomy_profile import TaxonomyProfile
from fama.output.krona_xml_writer import make_assembly_taxonomy_chart
from fama.output.report import generate_assembly_report
from fama.output.xlsx_util import make_assembly_xlsx
class GeneAssembler(object):
"""GeneAssembler is a working horse of Fama assembly pipeline. It exports
sequence reads, feeds external assembler with them, imports resulting
contigs, maps reads to contigs with Bowtie, finds genes with Prodigal,
assigns functions to the genes and sends gene assembly data to report generator
Attributes:
project (:obj:Project): Project instance storing sample data,
reference data and reads for assembly
        assembler (str): external assembly program, valid values are 'metaspades'
(default) and 'megahit'
assembly (:obj:GeneAssembly): gene assembly with contigs and genes
is_paired_end (bool): True for paired-end project, False for others
assembly_dir (path): directory for assembly files
"""
    def __init__(self, project, assembler='metaspades'):
        """Args:
            project (:obj:Project): Project instance storing sample data,
                reference data and reads for assembly
            assembler (str): external assembly program, valid values are 'metaspades'
                (default) and 'megahit'

        Raises:
            FileExistsError: if the assembly subdirectory already exists
        """
        self.project = project
        self.assembler = assembler
        project.load_project()
        self.assembly = GeneAssembly()
        # Set later from sample metadata in export_reads()
        self.is_paired_end = None
        self.assembly_dir = self.project.options.assembly_dir
        # Refuse to overwrite a previous assembly
        if os.path.exists(self.assembly_dir):
            raise FileExistsError('Assembly subdirectory already exists.' +
                                  ' Delete existing directory or change subdirectory name.')
        # NOTE(review): after the guard above the directory never exists,
        # so this isdir check always triggers mkdir
        if not os.path.isdir(self.assembly_dir):
            os.mkdir(self.assembly_dir)
        if not os.path.isdir(os.path.join(self.assembly_dir, 'out')):
            os.mkdir(os.path.join(self.assembly_dir, 'out'))
    def export_reads(self, do_coassembly=True):
        """Exports annotated reads in FASTQ format.

        Args:
            do_coassembly (bool): if True, all reads are exported into
                Coassembly_1.fastq (and Coassembly_2.fastq for paired-end
                reads) files. If False, for each function a separate file
                (or pair of files) will be created.

        Files are opened in append mode, so any stale .fastq files are
        deleted first. Reads for each sample are loaded from JSON, written
        out, then dropped from memory.
        """
        # Delete all existing FASTQ files
        for filename in os.listdir(self.assembly_dir):
            if filename.endswith('.fastq'):
                os.remove(os.path.join(self.assembly_dir, filename))
        # Load reads, export reads in FASTQ format, remove reads from memory
        for sample_id in sorted(self.project.list_samples()):
            self.is_paired_end = self.project.samples[sample_id].is_paired_end
            self.project.import_reads_json(sample_id, ENDS)
            for end in ENDS:
                # print ('Loading mapped reads: ', sample, end)
                # self.project.load_annotated_reads(sample, end) # Lazy load
                for read_id in self.project.samples[sample_id].reads[end]:
                    read = self.project.samples[sample_id].reads[end][read_id]
                    if read.status != STATUS_GOOD:
                        continue
                    for function in read.functions:
                        # In coassembly mode all functions collapse into one bin
                        if do_coassembly:
                            function = 'Coassembly'
                        # Each read is exported at most once per function
                        if read_id in self.assembly.reads[function]:
                            continue
                        self.assembly.reads[function][read_id] = sample_id
                        fwd_outfile = os.path.join(self.assembly_dir, function + '_pe1.fastq')
                        rev_outfile = os.path.join(self.assembly_dir, function + '_pe2.fastq')
                        if self.is_paired_end:
                            # When processing the pe2 end, swap targets so the
                            # read itself lands in _pe2 and its mate in _pe1
                            if end == 'pe2':
                                fwd_outfile = os.path.join(self.assembly_dir,
                                                           function + '_pe2.fastq')
                                rev_outfile = os.path.join(self.assembly_dir,
                                                           function + '_pe1.fastq')
                            # Write the paired-end mate (4 FASTQ lines)
                            with open(rev_outfile, 'a') as rev_of:
                                rev_of.write(read.pe_id + '\n')
                                rev_of.write(read.pe_sequence + '\n')
                                rev_of.write(read.pe_line3 + '\n')
                                rev_of.write(read.pe_quality + '\n')
                        # Write the read itself (4 FASTQ lines)
                        with open(fwd_outfile, 'a') as fwd_of:
                            fwd_of.write(read.read_id_line + '\n')
                            fwd_of.write(read.sequence + '\n')
                            fwd_of.write(read.line3 + '\n')
                            fwd_of.write(read.quality + '\n')
                # Delete reads from memory
                self.project.samples[sample_id].reads[end] = None
def assemble_contigs(self):
"""Assembles contigs from annotated reads, a separate assembly for
each of functions, runs read mapping, calculates read coverage
"""
# Run Assembler ('megahit' for Megahit or 'metaSPAdes' for metaSPAdes)
if self.assembler == 'megahit':
run_assembler(sorted(self.assembly.reads.keys()),
self.project.config.megahit_path,
self.assembly_dir)
elif self.assembler == 'metaspades':
run_assembler(sorted(self.assembly.reads.keys()),
self.project.config.metaspades_path,
self.assembly_dir,
is_paired_end=self.is_paired_end)
else:
raise ValueError('Unknown assembler: ' + self.assembler)
# Filter contigs by size
self.filter_contigs_by_length()
# Run Bowtie
run_mapper_indexing(sorted(self.assembly.reads.keys()),
self.assembly_dir,
self.project.config.bowtie_indexer_path)
run_mapper(sorted(self.assembly.reads.keys()),
self.assembly_dir,
self.project.config.bowtie_path,
is_paired_end=self.is_paired_end)
self.import_contigs()
self.import_read_mappings()
def import_contigs(self):
"""Imports assembled contigs from filtered FASTA file"""
for function in sorted(self.assembly.reads.keys()):
contig_file = os.path.join(self.assembly_dir,
function,
'final.contigs.filtered.fa')
if os.path.exists(contig_file):
with open(contig_file, 'r') as infile:
current_id = None
sequence = ''
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
if current_id is not None:
contig = Contig(contig_id=current_id, sequence=sequence)
self.assembly.contigs[function][current_id] = contig
if self.assembler == 'megahit':
line_tokens = line[1:].split(' ')
current_id = line_tokens[0]
elif self.assembler == 'metaspades':
current_id = line[1:]
else:
raise ValueError('Unknown assembler: ' + self.assembler)
sequence = ''
else:
sequence += line
if current_id is not None:
contig = Contig(contig_id=current_id, sequence=sequence)
self.assembly.contigs[function][current_id] = contig
else:
print('File ' + contig_file + ' does not exist.')
def import_read_mappings(self):
"""Imports read mapping data from SAM file(s)"""
for function in sorted(self.assembly.reads.keys()):
sam_file = os.path.join(self.assembly_dir, function, 'contigs.sam')
if os.path.exists(sam_file):
with open(sam_file, 'r') as infile:
for line in infile:
if line.startswith('@'):
continue
line_tokens = line.split('\t')
if len(line_tokens) > 9:
read_id = line_tokens[0]
contig_id = line_tokens[2]
alignment_length = len(line_tokens[9])
if contig_id in self.assembly.contigs[function]:
self.assembly.contigs[function][contig_id].update_coverage(
self.assembly.reads[function][read_id],
alignment_length
)
self.assembly.contigs[function][contig_id].reads.append(read_id)
else:
print('File ' + sam_file + ' does not exist.')
def filter_contigs_by_length(self):
"""Filters list of contigs by length
TODO:
make contig_length_threshold a parameter in ProgramConfig or constant
"""
contig_length_threshold = 300
for function in self.assembly.reads.keys():
contig_file = os.path.join(self.assembly_dir, function, 'final.contigs.fa')
if not os.path.exists(contig_file):
continue
outfile = os.path.join(self.assembly_dir, function, 'final.contigs.filtered.fa')
with open(outfile, 'w') as outfile:
with open(contig_file, 'r') as infile:
current_id = None
sequence = []
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
contig_sequence = ''.join(sequence)
if current_id and len(contig_sequence) >= contig_length_threshold:
outfile.write('\n'.join([current_id, contig_sequence, '']))
line_tokens = line.split(' ')
current_id = line_tokens[0]
sequence = []
else:
sequence.append(line)
contig_sequence = ''.join(sequence)
if len(contig_sequence) >= contig_length_threshold:
outfile.write('\n'.join([current_id, contig_sequence, '']))
    def parse_reference_output(self):
        """Reads and processes DIAMOND tabular output of the preselection
        DIAMOND search.

        Note: this function finds query sequences similar to reference
        proteins. Since a query sequence may have more than one areas of
        similarity (for instance, in fusion proteins of two subunits or
        in multi-domain proteins), it will try to find as many such areas
        as possible.
        DIAMOND hits are filtered by two parameters: length of alignment
        and amino acid identity %, which are defined in program config ini.

        The TSV is assumed to be grouped by query: hits for one query are
        collected into a DiamondHitList, then flushed (overlap-filtered,
        annotated and attached to the gene) when the query id changes and
        once more after the last row.
        """
        tsvfile = os.path.join(self.assembly_dir,
                               'all_contigs_' + self.project.options.ref_output_name)
        current_id = ''
        hit_list = DiamondHitList(current_id)
        identity_cutoff = self.project.config.get_identity_cutoff(
            self.project.options.get_collection())
        length_cutoff = self.project.config.get_length_cutoff(
            self.project.options.get_collection())
        print('Parse reference output: Identity cutoff: ',
              identity_cutoff,
              ', Length cutoff: ',
              length_cutoff)
        with open(tsvfile, 'r', newline='') as infile:
            tsvin = csv.reader(infile, delimiter='\t')
            for row in tsvin:
                hit = DiamondHit()
                hit.create_hit(row)
                # filtering by identity and length
                if hit.identity < identity_cutoff:
                    continue  # skip this line
                if hit.length < length_cutoff:
                    continue  # skip this line
                if hit.query_id != current_id:
                    # filter list for overlapping hits
                    hit_list.filter_list(self.project.config.get_overlap_cutoff(
                        self.project.options.get_collection()))
                    if hit_list.hits_number != 0:
                        # annotate hits and attach them to the gene
                        hit_list.annotate_hits(self.project.ref_data)
                        function_id, contig_id, _ = parse_gene_id(current_id)
                        self.assembly.contigs[function_id][contig_id].\
                            genes[current_id].hit_list = hit_list
                    # start collecting hits for the next query
                    current_id = hit.query_id
                    hit_list = DiamondHitList(current_id)
                hit_list.add_hit(hit)
            # flush the hit list of the last query
            hit_list.filter_list(
                self.project.config.get_overlap_cutoff(self.project.options.get_collection()))
            if hit_list.hits_number != 0:
                hit_list.annotate_hits(self.project.ref_data)
                function_id, contig_id, _ = parse_gene_id(current_id)
                self.assembly.contigs[function_id][contig_id].genes[current_id].hit_list = \
                    hit_list
def export_hit_fasta(self):
"""Exports hit sequences as gzipped FASTA file"""
outfile = os.path.join(
self.assembly_dir, 'all_contigs_' + self.project.options.ref_hits_fastq_name
)
with open(outfile, 'w') as outfile:
for function in sorted(self.assembly.contigs.keys()):
for contig_id in sorted(self.assembly.contigs[function].keys()):
for gene_id in self.assembly.contigs[function][contig_id].genes.keys():
gene = self.assembly.contigs[function][contig_id].genes[gene_id]
if not gene.hit_list:
continue
for hit in gene.hit_list.hits:
start = hit.q_start
end = hit.q_end
outfile.write('>' + '|'.join([gene_id, str(start), str(end)]) + '\n')
start = start - 1
try:
outfile.write(gene.protein_sequence[start:end] + '\n')
except TypeError:
print('TypeError occurred while exporting ', gene.gene_id)
    def parse_background_output(self):
        """Reads and processes DIAMOND tabular output of the classification
        DIAMOND search.

        Note: this function takes existing list of hits and compares each
        of them with results of new similarity serach (against classification DB).
        For the comparison, it calls compare_hits_lca function.

        Query ids have the form '<function>|<contig>_<gene>|<start>|<end>';
        the last two tokens are the alignment coordinates appended by
        export_hit_fasta. Hits are grouped by query and flushed when the
        query id changes, plus once after the last row.

        NOTE(review): if the TSV file is empty, hit_list stays None and the
        final annotate_hits call raises AttributeError -- confirm inputs are
        never empty.
        """
        tsvfile = os.path.join(self.assembly_dir,
                               'all_contigs_' + self.project.options.background_output_name)
        current_query_id = None
        hit_list = None
        length_cutoff = self.project.config.get_length_cutoff(
            self.project.options.get_collection())
        biscore_range_cutoff = self.project.config.get_biscore_range_cutoff(
            self.project.options.get_collection())
        print('Relative bit-score cutoff: ', biscore_range_cutoff,
              ', Length cutoff: ', length_cutoff)
        average_coverage = self.assembly.calculate_average_coverage()
        with open(tsvfile, 'r', newline='') as infile:
            tsvin = csv.reader(infile, delimiter='\t')
            function_id = ''
            contig_id = ''
            gene_id = ''
            coverage = ''
            for row in tsvin:
                if current_query_id is None:
                    current_query_id = row[0]
                    hit_list = DiamondHitList(current_query_id)
                hit = DiamondHit()
                hit.create_hit(row)
                # filtering by length
                if hit.length < length_cutoff:
                    continue  # skip this hit
                if hit.query_id != current_query_id:
                    hit_list.annotate_hits(self.project.ref_data)
                    hit_list.filter_list_by_identity(self.project.ref_data)
                    # compare list of hits from search in background DB with existing
                    # hit from search in reference DB
                    current_query_id_tokens = current_query_id.split('|')
                    function_id = current_query_id_tokens[0]
                    contig_id = '_'.join(current_query_id_tokens[1].split('_')[:-1])
                    # gene id is the query id without the two coordinate tokens
                    gene_id = '|'.join(current_query_id_tokens[:-2])
                    coverage = self.assembly.contigs[function_id][contig_id].get_coverage()
                    try:
                        compare_protein_hits_lca(
                            self.assembly.contigs[function_id][contig_id].genes[gene_id],
                            int(current_query_id_tokens[-2]),  # hit_start
                            int(current_query_id_tokens[-1]),  # hit_end
                            hit_list,
                            biscore_range_cutoff,
                            coverage,
                            average_coverage,
                            self.project.taxonomy_data,
                            self.project.ref_data
                        )
                    except KeyError:
                        print(' '.join(['Gene not found:', gene_id, 'in', function_id, contig_id]))
                    # start collecting hits for the next query
                    current_query_id = hit.query_id
                    hit_list = DiamondHitList(current_query_id)
                hit_list.add_hit(hit)
            # flush the hit list of the last query
            hit_list.annotate_hits(self.project.ref_data)
            hit_list.filter_list_by_identity(self.project.ref_data)
            current_query_id_tokens = current_query_id.split('|')
            function_id = current_query_id_tokens[0]
            contig_id = '_'.join(current_query_id_tokens[1].split('_')[:-1])
            gene_id = '|'.join(current_query_id_tokens[:-2])
            coverage = self.assembly.contigs[function_id][contig_id].get_coverage()
            try:
                compare_protein_hits_lca(
                    self.assembly.contigs[function_id][contig_id].genes[gene_id],
                    int(current_query_id_tokens[-2]),  # hit_start
                    int(current_query_id_tokens[-1]),  # hit_end
                    hit_list,
                    biscore_range_cutoff,
                    coverage,
                    average_coverage,
                    self.project.taxonomy_data,
                    self.project.ref_data
                )
            except KeyError:
                print(' '.join(['Gene not found:', gene_id, 'in', function_id, contig_id]))
def predict_genes(self):
"""Filters contigs by coverage, runs Prodigal on remaining contigs,
Todo:
make contig_coverage_cutoff a parameter or a constant
"""
# Filter contigs by coverage
contig_coverage_cutoff = 3.0
prodigal_infile = os.path.join(self.assembly_dir, 'all_contigs.fa')
with open(prodigal_infile, 'w') as outfile:
for function in sorted(self.assembly.contigs.keys()):
for contig in sorted(self.assembly.contigs[function].keys()):
if self.assembly.contigs[function][
contig
].get_coverage() >= contig_coverage_cutoff:
outfile.write('>' + function + '|' + contig + '\n')
outfile.write(self.assembly.contigs[function][contig].sequence + '\n')
# Run Prodigal
prodigal_outfile = os.path.join(self.assembly_dir, 'all_contigs.prodigal.out.faa')
run_prodigal(prodigal_infile, prodigal_outfile, self.project.config.prodigal_path)
with open(prodigal_outfile, 'r') as infile:
current_id = None
sequence = ''
for line in infile:
line = line.rstrip('\n\r')
if line.startswith('>'):
if current_id:
line_tokens = current_id.split(' # ')
function_id, contig_id, _ = parse_gene_id(line_tokens[0])
gene = Gene(contig_id=contig_id,
gene_id=line_tokens[0],
sequence=sequence,
start=line_tokens[1],
end=line_tokens[2],
strand=line_tokens[3])
self.assembly.contigs[function_id][contig_id].add_gene(gene)
line_tokens = line.split(' ')
current_id = line[1:] # line_tokens[0][1:]
sequence = ''
else:
sequence += line
line_tokens = current_id.split(' # ')
function_id, contig_id, _ = parse_gene_id(line_tokens[0])
gene = Gene(contig_id=contig_id,
gene_id=line_tokens[0],
sequence=sequence,
start=line_tokens[1],
end=line_tokens[2],
strand=line_tokens[3])
self.assembly.contigs[function_id][contig_id].add_gene(gene)
def annotate_genes(self):
"""Runs pre-selection DIAMOND search, runs classification DIAMOND search,
exports assembly in JSON format
Todo:
make contig_coverage_cutoff a parameter or a constant
"""
# Search in reference database
run_ref_search(self.project)
# Process output of reference DB search
self.parse_reference_output()
export_gene_assembly(
self.assembly, os.path.join(self.assembly_dir, 'all_contigs_assembly.json'))
# Import sequence data for selected sequence reads
print('Reading FASTQ file')
self.export_hit_fasta()
# Search in background database
run_bgr_search(self.project)
# Process output of reference DB search
self.parse_background_output()
print('Exporting JSON')
export_gene_assembly(self.assembly,
os.path.join(self.assembly_dir,
'all_contigs_assembly.json'))
def generate_taxonomy_chart(self, taxonomy_data):
'''
Collects data about functional genes in assembly and
creates one Krona chart for all functions
Args:
taxonomy_data (:obj:TaxonomyData): NCBI taxonomy data
'''
functions_list = set()
genes = autovivify(2) # genes[gene][function][parameter] = parameter_value
scores = autovivify(2) # scores[taxonomy ID][function][parameter] = parameter_value
total_read_count = 0
for sample in self.project.list_samples():
total_read_count += self.project.options.get_fastq1_readcount(sample)
for function in self.assembly.contigs:
functions_list.add(function)
for _, contig in self.assembly.contigs[function].items():
for gene_id, gene in contig.genes.items():
if gene.status != STATUS_GOOD:
continue
taxonomy_id = gene.taxonomy # Was get_taxonomy_id()
for hit in gene.hit_list.hits:
identity = hit.identity
for hit_function in hit.functions:
functions_list.add(hit_function)
if 'rpkm' in scores[taxonomy_id][hit_function]:
scores[taxonomy_id][hit_function]['rpkm'] += \
contig.get_rpkm(total_read_count) * \
len(gene.protein_sequence) * 3 / len(contig.sequence)
else:
scores[taxonomy_id][hit_function]['rpkm'] = \
contig.get_rpkm(total_read_count) * \
len(gene.protein_sequence) * 3 / len(contig.sequence)
if 'count' in scores[taxonomy_id][hit_function]:
scores[taxonomy_id][hit_function]['count'] += \
contig.get_read_count() * len(gene.protein_sequence) * 3 / \
len(contig.sequence)
else:
scores[taxonomy_id][hit_function]['count'] = \
contig.get_read_count() * len(gene.protein_sequence) * 3 / \
len(contig.sequence)
if 'hit_count' in scores[taxonomy_id][hit_function]:
scores[taxonomy_id][hit_function]['hit_count'] += 1
else:
scores[taxonomy_id][hit_function]['hit_count'] = 1
if 'identity' in scores[taxonomy_id][hit_function]:
scores[taxonomy_id][hit_function]['identity'] += \
identity
else:
scores[taxonomy_id][hit_function]['identity'] = \
identity
if 'genes' in scores[taxonomy_id][hit_function]:
scores[taxonomy_id][hit_function]['genes'] += gene_id + ' '
else:
scores[taxonomy_id][hit_function]['genes'] = gene_id + ' '
genes[gene_id][hit_function]['Length'] = \
str(len(gene.protein_sequence)) + 'aa'
genes[gene_id][hit_function]['Completeness'] = '{0:.0f}'.format(
len(gene.protein_sequence) * 100 / hit.s_len
)
genes[gene_id][hit_function]['identity'] = '{0:.1f}'.format(
identity
)
genes[gene_id][hit_function]['rpkm'] = '{0:.6f}'.format(
contig.get_rpkm(
total_read_count
) * len(gene.protein_sequence) * 3 / len(contig.sequence)
)
genes[gene_id][hit_function]['count'] = '{0:.0f}'.format(
contig.get_read_count() * len(
gene.protein_sequence
) * 3 / len(contig.sequence)
)
genes[gene_id][hit_function]['coverage'] = '{0:.1f}'.format(
contig.get_coverage()
)
taxonomic_profile = TaxonomyProfile()
taxonomic_profile.make_assembly_taxonomy_profile(taxonomy_data, scores)
outfile = os.path.join(self.assembly_dir, 'assembly_taxonomic_profile.xml')
make_assembly_taxonomy_chart(
taxonomic_profile, genes, sorted(functions_list), outfile,
self.project.config.krona_path, metric='rpkm'
)
def generate_function_taxonomy_charts(self, taxonomy_data):
'''
Generates series of Krona charts visualizing functions in assembly:
one function per file, separate stats for each sample
Args:
taxonomy_data (:obj:TaxonomyData): NCBI taxonomy data
'''
functions_list = set()
samples_list = sorted(self.project.list_samples())
total_read_count = 0
for sample in self.project.list_samples():
total_read_count += self.project.options.get_fastq1_readcount(sample)
# Make list of functions
for function in self.assembly.contigs:
for contig in self.assembly.contigs[function]:
for gene_id, gene in self.assembly.contigs[function][contig].genes.items():
if gene.status == STATUS_GOOD:
for gene_function in gene.functions:
functions_list.add(gene_function)
for function in sorted(functions_list):
genes = autovivify(2) # genes[gene][sample][parameter] = parameter_value
scores = autovivify(2) # scores[taxonomy ID][sample][parameter] = parameter_value
outfile = os.path.join(self.assembly_dir, 'out', function + '_taxonomic_profile.xml')
for assembly_function in self.assembly.contigs:
for _, contig in self.assembly.contigs[assembly_function].items():
for gene_id, gene in contig.genes.items():
function_counted = False
if gene.status != STATUS_GOOD or function not in gene.functions:
continue
taxonomy_id = gene.taxonomy
if taxonomy_id not in scores:
for sample_id in samples_list:
scores[taxonomy_id][sample_id]['rpkm'] = 0.0
scores[taxonomy_id][sample_id]['count'] = 0
scores[taxonomy_id][sample_id]['hit_count'] = 0
scores[taxonomy_id][sample_id]['identity'] = 0.0
scores[taxonomy_id][sample_id]['genes'] = ''
scores[taxonomy_id]['All samples']['rpkm'] = 0.0
scores[taxonomy_id]['All samples']['count'] = 0
scores[taxonomy_id]['All samples']['hit_count'] = 0
scores[taxonomy_id]['All samples']['identity'] = 0.0
scores[taxonomy_id]['All samples']['genes'] = ''
for hit in gene.hit_list.hits:
identity = hit.identity
if function in hit.functions:
if function_counted:
continue
for sample in samples_list:
if sample in contig.read_count:
scores[taxonomy_id][sample]['rpkm'] += contig.get_rpkm(
self.project.options.get_fastq1_readcount(sample),
sample
) * len(gene.protein_sequence) * 3 / len(
contig.sequence
)
scores[taxonomy_id][sample]['count'] += \
contig.get_read_count(sample) * \
len(gene.protein_sequence) * 3 / \
len(contig.sequence)
scores[taxonomy_id][sample]['hit_count'] += 1
scores[taxonomy_id][sample]['identity'] += identity
scores[taxonomy_id][sample]['genes'] += gene_id + ' '
genes[gene_id][sample]['Length'] = \
str(len(gene.protein_sequence)) + 'aa'
genes[gene_id][sample]['Completeness'] = '{0:.0f}'.format(
len(gene.protein_sequence) * 100 / hit.s_len
)
genes[gene_id][sample]['identity'] = '{0:.1f}'.format(
identity
)
genes[gene_id][sample]['rpkm'] = '{0:.7f}'.format(
contig.get_rpkm(
self.project.options.get_fastq1_readcount(
sample
),
sample
) * len(gene.protein_sequence) * 3 / len(
contig.sequence
)
)
genes[gene_id][sample]['count'] = 3 * '{0:.0f}'.format(
contig.get_read_count(sample) * len(
gene.protein_sequence
) / len(contig.sequence)
)
genes[gene_id][sample]['coverage'] = '{0:.1f}'.format(
contig.get_coverage(sample)
)
scores[taxonomy_id]['All samples']['rpkm'] += \
contig.get_rpkm(total_read_count) * \
len(gene.protein_sequence) \
* 3 / len(contig.sequence)
scores[taxonomy_id]['All samples']['count'] += \
contig.get_read_count() * len(gene.protein_sequence) \
* 3 / len(contig.sequence)
scores[taxonomy_id]['All samples']['hit_count'] += 1
scores[taxonomy_id]['All samples']['identity'] += identity
scores[taxonomy_id]['All samples']['genes'] += gene_id + ' '
genes[gene_id]['All samples']['Length'] = \
str(len(gene.protein_sequence)) + 'aa'
genes[gene_id]['All samples']['Completeness'] = '{0:.0f}'.format(
len(gene.protein_sequence) * 100 / hit.s_len
)
genes[gene_id]['All samples']['identity'] = \
'{0:.1f}'.format(identity)
genes[gene_id]['All samples']['rpkm'] = '{0:.7f}'.format(
contig.get_rpkm(total_read_count) * len(
gene.protein_sequence
) * 3 / len(contig.sequence)
)
genes[gene_id]['All samples']['count'] = '{0:.0f}'.format(
contig.get_read_count() * len(
gene.protein_sequence
) * 3 / len(contig.sequence)
)
genes[gene_id]['All samples']['coverage'] = '{0:.1f}'.format(
contig.get_coverage()
)
function_counted = True
taxonomic_profile = TaxonomyProfile()
taxonomic_profile.make_assembly_taxonomy_profile(taxonomy_data, scores)
output_sample_ids = sorted(self.project.list_samples())
output_sample_ids.append('All samples')
make_assembly_taxonomy_chart(
taxonomic_profile, genes, output_sample_ids, outfile,
self.project.config.krona_path, metric='rpkm'
)
def write_sequences(self):
"""Exports gene and protein sequences in FASTA format"""
genes = autovivify(2) # genes[function][gene][parameter] = parameter_value
for function in self.assembly.contigs:
for contig in self.assembly.contigs[function]:
for gene_id, gene in self.assembly.contigs[function][contig].genes.items():
if gene.status == STATUS_GOOD:
for hit in gene.hit_list.data:
taxonomy_id = gene.taxonomy
for hit_function in hit.functions:
start = gene.start
end = gene.end
strand = gene.strand
genes[hit_function][gene_id]['start'] = start
genes[hit_function][gene_id]['end'] = end
genes[hit_function][gene_id]['strand'] = strand
genes[hit_function][gene_id]['taxonomy'] = taxonomy_id
gene_sequence = self.assembly.contigs[function][contig].\
sequence[int(start) - 1: int(end)]
if strand == '-1':
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
gene_sequence = ''.join(
[complement[nucl] for nucl in reversed(gene_sequence)]
)
genes[hit_function][gene_id]['sequence'] = gene_sequence
genes[hit_function][gene_id]['protein'] = gene.protein_sequence
genes[hit_function][gene_id]['aai'] = hit.identity
genes[hit_function][gene_id]['completeness'] = \
len(gene.protein_sequence) * 100 / hit.s_len
for function in genes:
outfile = os.path.join(self.project.options.assembly_dir,
'out',
function + '_genes_Fama.fna')
with open(outfile, 'w') as outfile:
for gene_id in genes[function]:
lineage = self.project.taxonomy_data.get_taxonomy_lineage(
genes[function][gene_id]['taxonomy'])
outfile.write('>' + gene_id + '|' +
genes[function][gene_id]['start'] + '|' +
genes[function][gene_id]['end'] + '|' +
genes[function][gene_id]['strand'] + '|' +
lineage + '\n') # '|'
outfile.write(genes[function][gene_id]['sequence'] + '\n')
outfile = os.path.join(self.project.options.assembly_dir,
'out',
function + '_proteins_Fama.faa')
with open(outfile, 'w') as outfile:
for gene_id in genes[function]:
lineage = self.project.taxonomy_data.get_taxonomy_lineage(
genes[function][gene_id]['taxonomy'])
outfile.write('>' + gene_id + '|' +
genes[function][gene_id]['start'] + '|' +
genes[function][gene_id]['end'] + '|' +
genes[function][gene_id]['strand'] + '|' +
lineage + '\n') # '|'
outfile.write(genes[function][gene_id]['protein'] + '\n')
def generate_output(self):
"""Sends assembly data to Excel report generator, exports genes and
proteins, calls methods for taxonomy chart generation
"""
self.write_sequences()
make_assembly_xlsx(self)
self.generate_taxonomy_chart(self.project.taxonomy_data)
self.generate_function_taxonomy_charts(self.project.taxonomy_data)
generate_assembly_report(self)
def run_assembler(functions, assembler, output_dir, is_paired_end=True):
"""Fires up external assembler, either metaSPAdes or MEGAHIT"""
if assembler.endswith('megahit'):
run_megahit(functions, output_dir, assembler, is_paired_end)
elif assembler.endswith('metaspades.py'):
if is_paired_end:
run_spades(functions, output_dir, assembler, is_paired_end)
else:
raise RuntimeError(
'Current version of metaSPAdes does not support single-end libraries.'
)
def run_megahit(functions, output_dir, assembler_command, is_paired_end=True):
"""Runs MEGAHIT assembler on exported reads"""
print('Starting assembly')
for function in functions:
print('Run assembler for function', function)
if is_paired_end:
assembler_args = [assembler_command,
'-1',
os.path.join(output_dir, function + '_pe1.fastq'),
'-2',
os.path.join(output_dir, function + '_pe2.fastq'),
'-o',
os.path.join(output_dir, function)]
else:
assembler_args = [assembler_command,
'-r',
os.path.join(output_dir, function + '_pe1.fastq'),
'-o',
os.path.join(output_dir, function)]
run_external_program(assembler_args)
print('Assembler finished for function ', function)
print('Assembly finished')
def run_spades(functions, output_dir, assembler_command, is_paired_end=True):
"""Runs metaSPAdes assembler on exported reads"""
print('Starting metaSPAdes')
tmp_dir = os.path.join(output_dir, 'tmp')
for function in functions:
print('Run metaSPAdes for function', function)
assembler_args = [assembler_command,
'--meta',
'-t',
'12',
'-m',
'50', # TODO: make a parameter
'-k',
'33,55,71,91,111', # TODO: adjust automatically
'-o',
os.path.join(output_dir, function),
'--tmp-dir',
tmp_dir]
if is_paired_end:
assembler_args.extend(['-1',
os.path.join(output_dir, function + '_pe1.fastq'),
'-2',
os.path.join(output_dir, function + '_pe2.fastq')])
else:
assembler_args.extend(['-s',
os.path.join(output_dir, function + '_pe1.fastq')])
run_external_program_ignoreerror(assembler_args)
if os.path.exists(os.path.join(output_dir, function, 'contigs.fasta')):
shutil.copyfile(os.path.join(output_dir, function, 'contigs.fasta'),
os.path.join(output_dir, function, 'final.contigs.fa'))
print('Assembler finished for function ', function)
print('metaSPAdes finished')
def run_mapper_indexing(functions, output_dir, mapper_command):
"""Runs Bowtie2 indexer on filtered contigs"""
mapper_command = 'bowtie2-build'
for function in functions:
if not os.path.exists(os.path.join(output_dir, function, 'final.contigs.filtered.fa')):
print('Contigs file for function', function, 'not found')
continue
print('Run indexing for function', function)
if os.path.getsize(os.path.join(output_dir, function, 'final.contigs.filtered.fa')) > 0:
if not os.path.exists(os.path.join(output_dir, function, 'index')):
os.mkdir(os.path.join(output_dir, function, 'index'))
mapper_args = [mapper_command,
'-f',
os.path.join(output_dir, function, 'final.contigs.filtered.fa'),
os.path.join(output_dir, function, 'index', 'index')]
run_external_program(mapper_args)
def run_mapper(functions, output_dir, mapper_command, is_paired_end=True):
"""Runs Bowtie2 mapper on filtered contigs"""
mapper_command = 'bowtie2'
for function in functions:
if not os.path.exists(os.path.join(output_dir, function, 'final.contigs.filtered.fa')):
continue
if os.path.getsize(os.path.join(output_dir, function, 'final.contigs.filtered.fa')) > 0:
print('Run read mapping for function', function)
if is_paired_end:
mapper_args = [mapper_command,
'-q',
'--very-sensitive',
'--quiet',
'-x',
os.path.join(output_dir, function, 'index', 'index'),
'-1',
os.path.join(output_dir, function + '_pe1.fastq'),
'-2',
os.path.join(output_dir, function + '_pe2.fastq'),
'>' + os.path.join(output_dir, function, 'contigs.sam')]
else:
mapper_args = [mapper_command,
'-q',
'--very-sensitive',
'--quiet',
'-x',
os.path.join(output_dir, function, 'index', 'index'),
'-U',
os.path.join(output_dir, function + '_pe1.fastq'),
'>' + os.path.join(output_dir, function, 'contigs.sam')]
run_external_program(mapper_args)
def run_prodigal(infile, outfile, prodigal_path):
"""Runs Prodigal gene prediction on filtered contigs"""
print('Starting Prodigal')
prodigal_args = [prodigal_path,
'-p',
'meta',
'-a',
outfile,
'-i',
infile,
'-o',
outfile+'prodigal.txt']
run_external_program(prodigal_args)
print('Prodigal finished')
def run_ref_search(project):
"""Runs DIAMOND pre-selection search on predicted genes"""
print('Starting DIAMOND')
diamond_args = [project.config.diamond_path,
'blastp',
'--db',
project.config.get_reference_diamond_db(project.options.get_collection()),
'--query',
os.path.join(project.options.assembly_dir,
'all_contigs.prodigal.out.faa'),
'--out',
os.path.join(project.options.assembly_dir,
'all_contigs_' + project.options.ref_output_name),
'--max-target-seqs',
'50',
'--evalue',
str(project.config.get_evalue_cutoff(project.options.get_collection())),
'--threads',
project.config.threads,
'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore']
run_external_program(diamond_args)
print('DIAMOND finished')
def run_bgr_search(project):
"""Runs DIAMOND classification search on predicted genes"""
print('Starting DIAMOND')
diamond_args = [project.config.diamond_path,
'blastp',
'--db',
project.config.get_background_diamond_db(project.options.get_collection()),
'--query',
os.path.join(
project.options.assembly_dir,
'all_contigs_' + project.options.ref_hits_fastq_name
),
'--out',
os.path.join(
project.options.assembly_dir,
'all_contigs_' + project.options.background_output_name
),
'--max-target-seqs',
'50',
'--evalue',
str(project.config.get_background_db_size(project.options.get_collection())
* project.config.get_evalue_cutoff(project.options.get_collection())
/ project.config.get_reference_db_size(project.options.get_collection())),
'--threads',
project.config.threads,
'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore']
run_external_program(diamond_args)
print('DIAMOND finished')
def parse_gene_id(gene_id):
"""Extracts contig identifier and function identifier from gene identifier"""
(function_id, gene) = gene_id.split('|')
gene_id_tokens = gene.split('_')
gene_id = gene_id_tokens[-1]
contig_id = '_'.join(gene_id_tokens[:-1])
return function_id, contig_id, gene_id | 0.539954 | 0.175467 |
import os
tf_version = float(os.environ["TF_VERSION"][:3])
tf_keras = bool(os.environ["TF_KERAS"] == "True")
tf_python = bool(os.environ["TF_PYTHON"] == "True")
if tf_version >= 2:
if tf_keras:
from keras_adamw.optimizers_v2 import AdamW, NadamW, SGDW
elif tf_python:
from keras_adamw.optimizers_tfpy import AdamW, NadamW, SGDW
else:
from keras_adamw.optimizers import AdamW, NadamW, SGDW
else:
if tf_keras:
from keras_adamw.optimizers_225tf import AdamW, NadamW, SGDW
else:
from keras_adamw.optimizers_225 import AdamW, NadamW, SGDW
if tf_keras:
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Dense, GRU, Bidirectional, Embedding
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.constraints import MaxNorm as maxnorm
from tensorflow.keras.optimizers import Adam, Nadam, SGD
elif tf_python:
import tensorflow.keras.backend as K # tf.python.keras.backend is very buggy
from tensorflow.python.keras.layers import Input, Dense, GRU, Bidirectional
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.constraints import MaxNorm as maxnorm
from tensorflow.python.keras.optimizers import Adam, Nadam, SGD
else:
import keras.backend as K
from keras.layers import Input, Dense, GRU, Bidirectional, Embedding
from keras.models import Model, load_model
from keras.regularizers import l2
from keras.constraints import MaxNorm as maxnorm
from keras.optimizers import Adam, Nadam, SGD
if tf_version < 2 and tf_keras:
from keras_adamw.utils_225tf import get_weight_decays, fill_dict_in_order
from keras_adamw.utils_225tf import reset_seeds, K_eval
else:
from keras_adamw.utils import get_weight_decays, fill_dict_in_order
from keras_adamw.utils import reset_seeds, K_eval
# ALL TESTS (7 total):
# - keras (TF 1.14.0, Keras 2.2.5) [test_optimizers.py]
# - tf.keras (TF 1.14.0, Keras 2.2.5) [test_optimizers_v2.py]
# - keras (TF 2.0.0, Keras 2.3.0) [test_optimizers.py --TF_EAGER=True]
# - keras (TF 2.0.0, Keras 2.3.0) [test_optimizers.py --TF_EAGER=False]
# - tf.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_v2.py, --TF_EAGER=True]
# - tf.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_v2.py, --TF_EAGER=False]
# - tf.python.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_tfpy.py] | tests/import_selection.py | import os
tf_version = float(os.environ["TF_VERSION"][:3])
tf_keras = bool(os.environ["TF_KERAS"] == "True")
tf_python = bool(os.environ["TF_PYTHON"] == "True")
if tf_version >= 2:
if tf_keras:
from keras_adamw.optimizers_v2 import AdamW, NadamW, SGDW
elif tf_python:
from keras_adamw.optimizers_tfpy import AdamW, NadamW, SGDW
else:
from keras_adamw.optimizers import AdamW, NadamW, SGDW
else:
if tf_keras:
from keras_adamw.optimizers_225tf import AdamW, NadamW, SGDW
else:
from keras_adamw.optimizers_225 import AdamW, NadamW, SGDW
if tf_keras:
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Dense, GRU, Bidirectional, Embedding
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.constraints import MaxNorm as maxnorm
from tensorflow.keras.optimizers import Adam, Nadam, SGD
elif tf_python:
import tensorflow.keras.backend as K # tf.python.keras.backend is very buggy
from tensorflow.python.keras.layers import Input, Dense, GRU, Bidirectional
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.constraints import MaxNorm as maxnorm
from tensorflow.python.keras.optimizers import Adam, Nadam, SGD
else:
import keras.backend as K
from keras.layers import Input, Dense, GRU, Bidirectional, Embedding
from keras.models import Model, load_model
from keras.regularizers import l2
from keras.constraints import MaxNorm as maxnorm
from keras.optimizers import Adam, Nadam, SGD
if tf_version < 2 and tf_keras:
from keras_adamw.utils_225tf import get_weight_decays, fill_dict_in_order
from keras_adamw.utils_225tf import reset_seeds, K_eval
else:
from keras_adamw.utils import get_weight_decays, fill_dict_in_order
from keras_adamw.utils import reset_seeds, K_eval
# ALL TESTS (7 total):
# - keras (TF 1.14.0, Keras 2.2.5) [test_optimizers.py]
# - tf.keras (TF 1.14.0, Keras 2.2.5) [test_optimizers_v2.py]
# - keras (TF 2.0.0, Keras 2.3.0) [test_optimizers.py --TF_EAGER=True]
# - keras (TF 2.0.0, Keras 2.3.0) [test_optimizers.py --TF_EAGER=False]
# - tf.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_v2.py, --TF_EAGER=True]
# - tf.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_v2.py, --TF_EAGER=False]
# - tf.python.keras (TF 2.0.0, Keras 2.3.0) [test_optimizers_tfpy.py] | 0.726717 | 0.325346 |
import numpy as np
from qa_tools.utils import *
from qa_tools.prediction import *
def qa_pes_errors(
        df_qc, n_electrons, excitation_level=0, basis_set='aug-cc-pV5Z',
        bond_length=None, return_energies=False, energy_type='total'):
    """Computes the error associated with predicting a system's absolute
    electronic energy using quantum alchemy.

    In other words, this quantifies the error when using a quantum alchemy
    reference and nuclear charge perturbation to model a target. For example,
    how accurate is using C- basis set with a lambda of 1 to predict N.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    n_electrons : :obj:`int`
        Total number of electrons for the quantum alchemical PES.
    excitation_level : :obj:`int`, optional
        Electronic state of the system with respect to the ground state. ``0``
        represents the ground state, ``1`` the first excited state, etc.
        Defaults to ground state.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    bond_length : :obj:`float`, optional
        Desired bond length for dimers; must be specified when the dataframe
        contains dimers.
    return_energies : :obj:`bool`, optional
        Return quantum alchemy energies instead of errors. Defaults to
        ``False``.
    energy_type : :obj:`str`, optional
        Species the energy type/contributions to examine. Can be ``'total'``
        energies, ``'hf'`` for Hartree-Fock contributions, or
        ``'correlation'`` energies. Defaults to ``'total'``.

    Returns
    -------
    :obj:`list` [:obj:`str`]
        System and state labels (e.g., `'c.chrg0.mult1'`) in the order of
        increasing atomic number (and charge).
    :obj:`numpy.ndarray`
        Quantum alchemy errors (or energies, in Hartree) with respect to
        standard quantum chemistry.

    Raises
    ------
    ValueError
        If ``energy_type`` is not one of ``'total'``, ``'hf'``, or
        ``'correlation'``, or if ``bond_length`` is not given for dimer data.
    """
    # Map the user-facing energy_type keyword onto the dataframe column name.
    if energy_type == 'total':
        df_energy_type = 'electronic_energy'
    elif energy_type == 'hf':
        df_energy_type = 'hf_energy'
    elif energy_type == 'correlation':
        df_energy_type = 'correlation_energy'
    else:
        # Previously an unknown energy_type fell through this chain and left
        # df_energy_type unbound, causing a confusing NameError downstream.
        # Fail fast with a clear message instead.
        raise ValueError(
            "energy_type must be 'total', 'hf', or 'correlation', "
            f"not {energy_type!r}"
        )
    # All rows on the same alchemical PES: fixed electron count and basis set.
    df_qa_pes = df_qc.query(
        'n_electrons == @n_electrons'
        '& basis_set == @basis_set'
    )
    sys_labels = list(set(df_qa_pes['system'].values))
    # Two atomic numbers in the first row means dimer data; otherwise atoms.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        is_dimer = True
    else:
        is_dimer = False
    # Gets data.
    sys_atomic_numbers = []
    system_labels = []
    calc_labels = []
    lambda_values = []
    energies = []
    true_energies = []
    for sys_label in sys_labels:
        df_sys = df_qa_pes.query('system == @sys_label')
        if is_dimer:
            if bond_length is None:
                # assert would be stripped under ``python -O``; raise for
                # required input instead.
                raise ValueError('bond_length must be specified for dimers.')
            df_sys = df_sys.query('bond_length == @bond_length')
        # Select multiplicity from the unperturbed (lambda = 0) calculation.
        df_state = select_state(
            df_sys.query('lambda_value == 0.0'), excitation_level,
            ignore_one_row=True
        )
        atomic_numbers = df_state.iloc[0]['atomic_numbers']
        if is_dimer:
            sys_atomic_numbers.append(atomic_numbers)
        else:
            sys_atomic_numbers.append(atomic_numbers[0])
        state_mult = df_state.iloc[0]['multiplicity']
        state_chrg = df_state.iloc[0]['charge']
        # Reference ("true") quantum chemistry energy at lambda = 0.
        true_energies.append(df_state.iloc[0][df_energy_type])
        system_labels.append(sys_label)
        calc_labels.append(f'{sys_label}.chrg{state_chrg}.mult{state_mult}')
        df_sys = df_sys.query('multiplicity == @state_mult')
        lambda_values.append(df_sys.lambda_value.values)
        energies.append(df_sys[df_energy_type].values)
    sys_atomic_numbers = np.array(sys_atomic_numbers)
    lambda_values = np.array(lambda_values)
    true_energies = np.array(true_energies)
    energies = np.array(energies)
    # Prepares stuff to organize data
    ## Lambdas: shift every system's lambda grid onto a common scale so that
    ## the same lambda value refers to the same target across systems.
    sys_min_lambda_values = lambda_values.min(axis=1).astype('int')
    global_min_lambda_value = np.min(sys_min_lambda_values)
    adjust_lambdas = global_min_lambda_value - sys_min_lambda_values
    for i in range(len(adjust_lambdas)):
        lambda_values[i] += adjust_lambdas[i]
    # Sort systems by atomic number (atoms) or by total/min atomic number
    # (dimers) so output rows/columns go from smallest to largest Z.
    if is_dimer:
        sys_atomic_numbers_sum = np.sum(sys_atomic_numbers, axis=1)
        if np.all(sys_atomic_numbers_sum == sys_atomic_numbers_sum.flatten()[0]):
            sort_z = np.argsort(np.min(sys_atomic_numbers, axis=1))
        else:
            sort_z = np.argsort(sys_atomic_numbers_sum)
    else:
        sort_z = np.argsort(sys_atomic_numbers)
    sys_energies = []
    # NOTE(review): target_idx and true_e_idx walk sort_z from opposite ends;
    # this pairs the largest-Z system's minimum lambda with the smallest-Z
    # system's true energy (and so on). Looks intentional given how lambda
    # grids were aligned above, but worth confirming against the paper.
    for target_idx, true_e_idx in zip(np.flip(sort_z), sort_z):  # Smallest to largest lambda value
        target_lambda = sys_min_lambda_values[target_idx]
        true_energy = true_energies[true_e_idx]
        errors = []
        for ref_idx in sort_z:
            # Locate the row of this reference at the target's lambda value.
            lambda_idx = np.where(lambda_values[ref_idx] == target_lambda)[0]
            if return_energies:
                errors.append(energies[ref_idx][lambda_idx][0])
            else:
                errors.append(energies[ref_idx][lambda_idx][0] - true_energy)
        sys_energies.append(errors)
    sys_energies = np.array(sys_energies)  # Hartree
    return [calc_labels[i] for i in sort_z], sys_energies
def qats_pes_errors(
        df_qc, df_qats, n_electrons, qats_order=2, excitation_level=0,
        basis_set='aug-cc-pV5Z', return_energies=False):
    """Computes the error associated with using a Taylor series to approximate
    the quantum alchemical potential energy surface.

    Errors are in reference to quantum alchemy. Only atom dataframes are
    supported.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    n_electrons : :obj:`int`
        Total number of electrons for the quantum alchemical PES.
    qats_order : :obj:`int`, optional
        Desired Taylor series order to use. Defaults to ``2``.
    excitation_level : :obj:`int`, optional
        Electronic state of the system with respect to the ground state. ``0``
        represents the ground state, ``1`` the first excited state, etc.
        Defaults to ground state.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    return_energies : :obj:`bool`, optional
        Return QATS energies instead of errors. Defaults to ``False``.

    Returns
    -------
    :obj:`list` [:obj:`str`]
        System and state labels (e.g., `'c.chrg0.mult1'`) in the order of
        increasing atomic number.
    :obj:`numpy.ndarray`
        Alchemical energy errors due to modeling a target system by changing
        the nuclear charge of a reference system (e.g., c -> n). The rows and
        columns are in the same order as the state labels.
    """
    # Only single atoms are supported; dimer rows carry two atomic numbers.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    # Restrict to the requested alchemical PES (electron count + basis set).
    df_qa_pes = df_qc.query(
        'n_electrons == @n_electrons & basis_set == @basis_set'
    )
    # Determine the multiplicity of the requested excitation level from an
    # arbitrary system on this PES, then filter every system to that state.
    mult_sys_test = df_qa_pes.iloc[0]['system']
    state_mult = get_multiplicity(
        df_qa_pes.query('system == @mult_sys_test'), excitation_level,
        ignore_one_row=False
    )
    df_qa_pes = df_qa_pes.query('multiplicity == @state_mult')
    # Unperturbed (lambda = 0) rows identify each distinct system/charge.
    df_sys_info = df_qa_pes.query('lambda_value == 0.0')
    charge_sort = np.argsort(df_sys_info['charge'].values)  # most negative to most positive
    sys_labels = df_sys_info['system'].values[charge_sort]
    sys_atomic_numbers = df_sys_info['atomic_numbers'].values[charge_sort]
    sys_charges = df_sys_info['charge'].values[charge_sort]
    # Gets data.
    calc_labels = []
    lambda_values = []
    alchemical_energies = []
    qats_energies = []
    # Goes through all possible reference systems and calculates QATS-n predictions
    # then computes the alchemical predictions and errors.
    # Loops through all systems (each one acts as the prediction target once).
    for i in range(len(sys_labels)):
        sys_alchemical_energies = []
        sys_qats_energies = []
        target_label = sys_labels[i]
        target_atomic_numbers = sys_atomic_numbers[i]
        target_charge = sys_charges[i]
        calc_labels.append(f'{target_label}.chrg{target_charge}.mult{state_mult}')
        # All valid reference systems that can alchemically reach this target.
        df_qats_ref = get_qa_refs(
            df_qc, df_qats, target_label, n_electrons, basis_set=basis_set,
            df_selection='qats', excitation_level=excitation_level,
            considered_lambdas=None
        )
        # NOTE: shadows the outer charge_sort; from here on it orders the
        # references for this particular target (most negative to most positive).
        charge_sort = np.argsort(df_qats_ref['charge'].values)  # most negative to most positive
        # Loops through all QATS references.
        for j in charge_sort:
            qats_row = df_qats_ref.iloc[j]
            ref_sys_label = qats_row['system']
            ref_atomic_numbers = qats_row['atomic_numbers']
            ref_charge = qats_row['charge']
            ref_poly_coeffs = qats_row['poly_coeffs']
            # Nuclear-charge perturbation needed to go reference -> target.
            lambda_value = get_lambda_value(
                ref_atomic_numbers, target_atomic_numbers
            )
            # Predicted alchemical energy.
            sys_alchemical_energies.append(
                qa_predictions(
                    df_qc, ref_sys_label, ref_charge, excitation_level=excitation_level,
                    lambda_values=[lambda_value], basis_set=basis_set,
                    ignore_one_row=True
                )[0]
            )
            # QATS prediction (Taylor series truncated at qats_order).
            sys_qats_energies.append(
                qats_prediction(
                    ref_poly_coeffs, qats_order, lambda_value
                )[0]
            )
        # Adds in alchemical energy and QATS reference
        # (nan placeholder on the diagonal: a system is never its own reference).
        sys_alchemical_energies.insert(i, np.nan)
        sys_qats_energies.insert(i, np.nan)
        alchemical_energies.append(sys_alchemical_energies)
        qats_energies.append(sys_qats_energies)
    alchemical_energies = np.array(alchemical_energies)
    qats_energies = np.array(qats_energies)
    e_return = qats_energies
    if not return_energies:
        # In-place subtraction; qats_energies is not used after this point.
        e_return -= alchemical_energies
    # Converts nan to 0 (diagonal placeholders become zeros in the output).
    e_return = np.nan_to_num(e_return)
    return calc_labels, e_return
def error_change_charge_qats_atoms(
        df_qc, df_qats, target_label, delta_charge, change_signs=False,
        basis_set='aug-cc-pV5Z', target_initial_charge=0, use_ts=True,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False):
    """Automates the procedure of calculating errors for changing charges on
    atoms.

    Computes the quantum-chemistry reference energy change, gathers the
    corresponding QATS/QA predictions for every reference system, and returns
    either the prediction errors (in eV) or, when ``return_qats_vs_qa`` is
    ``True``, the raw QATS-vs-QA differences.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    delta_charge : :obj:`str`
        Overall change in the initial target system.
    change_signs : :obj:`bool`, optional
        Multiply all predictions by -1. Used to correct the sign for computing
        electron affinities. Defaults to ``False``.
    basis_set : :obj:`str`, optional
        Basis set used for predictions. Defaults to ``'aug-cc-pV5Z'``.
    target_initial_charge : :obj:`int`
        Initial charge state of the target system. For example, the first
        ionization energy is the energy difference going from charge
        ``0 -> 1``, so this must equal ``0``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation (with finite differences) to make
        QATS-n predictions (where n is the order). Defaults to ``True``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Controls errors in state selection when there is missing data (i.e.,
        just one state). If ``True``, no errors are raised.
    considered_lambdas : :obj:`list`, optional
        Restrict which lambda values are considered. ``None`` allows all.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QA predictions instead of errors
        against quantum chemistry. Defaults to ``False``.

    Returns
    -------
    :obj:`pandas.DataFrame`

    Raises
    ------
    ValueError
        If the dataframe contains dimers (only atoms are supported).
    """
    # Guard clause: this routine only handles single atoms.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')

    # Quantum-chemistry reference value for the charge change, in eV.
    reference_ev = hartree_to_ev(
        energy_change_charge_qc_atom(
            df_qc, target_label, delta_charge,
            target_initial_charge=target_initial_charge,
            change_signs=change_signs, basis_set=basis_set
        )
    )

    # Per-reference-system predictions, converted from Hartree to eV.
    raw_predictions = energy_change_charge_qa_atom(
        df_qc, df_qats, target_label, delta_charge,
        target_initial_charge=target_initial_charge,
        change_signs=change_signs, basis_set=basis_set,
        use_ts=use_ts, ignore_one_row=ignore_one_row,
        considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    predictions_ev = {
        label: hartree_to_ev(energy)
        for label, energy in raw_predictions.items()
    }

    # Taylor-series results carry one row per order (QATS-0 ... QATS-n);
    # otherwise there is a single 'QATS' row.
    if use_ts or return_qats_vs_qa:
        row_index = [f'QATS-{order}' for order in range(max_qats_order + 1)]
    else:
        row_index = ['QATS']
    predictions = pd.DataFrame(predictions_ev, index=row_index)

    if return_qats_vs_qa:
        return predictions
    # Error of each prediction relative to the quantum-chemistry reference.
    return predictions.sub(reference_ev)
def error_change_charge_qats_dimer(
        df_qc, df_qats, target_label, delta_charge, change_signs=False,
        basis_set='cc-pV5Z', target_initial_charge=0, use_ts=True,
        lambda_specific_atom=0, lambda_direction=None,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False,
        n_points=2, poly_order=4, remove_outliers=False,
        zscore_cutoff=3.0):
    """Computes QATS errors in changing the charge of a system.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    delta_charge : :obj:`str`
        Overall change in the initial target system.
    change_signs : :obj:`bool`, optional
        Multiply all predictions by -1. Used to correct the sign for computing
        electron affinities. Defaults to ``False``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'cc-pV5Z'``.
    target_initial_charge : :obj:`int`
        Specifies the initial charge state of the target system. For example,
        the first ionization energy is the energy difference going from
        charge ``0 -> 1``, so ``target_initial_charge`` must equal ``0``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation (with finite differences) to make
        QATS-n predictions (where n is the order). Defaults to ``True``.
    lambda_specific_atom : :obj:`int`, optional
        Applies the entire lambda change to a single atom in dimers. For
        example, OH -> FH+ would be a lambda change of +1 only on the first
        atom. Defaults to ``0``.
    lambda_direction : :obj:`str`, optional
        Defines the direction of lambda changes for dimers. ``'counter'`` is
        where one atom increases and the other decreases their nuclear
        charge (e.g., CO -> BF).
        If the atomic numbers of the reference are the same, the first atom's
        nuclear charge is decreased and the second is increased. If they are
        different, the atom with the largest atomic number increases by
        lambda. Defaults to ``None``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised.
        Defaults to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with respect to the alchemical
        potential energy surface. Defaults to ``False``.
    n_points : :obj:`int`, optional
        The number of surrounding points on either side of the minimum bond
        length. Defaults to ``2``.
    poly_order : :obj:`int`, optional
        Maximum order of the fitted polynomial. Defaults to ``4``.
    remove_outliers : :obj:`bool`, optional
        Do not include bond lengths that are marked as outliers by their z
        score. Defaults to ``False``.
    zscore_cutoff : :obj:`float`, optional
        Bond length energies that have a z score higher than this are
        considered outliers. Defaults to ``3.0``.

    Returns
    -------
    :obj:`pandas.DataFrame`
        QATS errors in eV (or QATS-n vs. QA differences when
        ``return_qats_vs_qa`` is ``True``).
    """
    # Reference value: standard quantum chemistry prediction (converted to eV).
    qc_prediction = hartree_to_ev(
        energy_change_charge_qc_dimer(
            df_qc, target_label, delta_charge,
            target_initial_charge=target_initial_charge,
            change_signs=change_signs, basis_set=basis_set,
            ignore_one_row=ignore_one_row, n_points=n_points,
            poly_order=poly_order, remove_outliers=remove_outliers,
            zscore_cutoff=zscore_cutoff
        )
    )
    # Quantum alchemy (or Taylor-series) predictions keyed by reference system.
    qats_predictions = energy_change_charge_qa_dimer(
        df_qc, df_qats, target_label, delta_charge,
        target_initial_charge=target_initial_charge, change_signs=change_signs,
        basis_set=basis_set, use_ts=use_ts,
        lambda_specific_atom=lambda_specific_atom, lambda_direction=lambda_direction,
        ignore_one_row=ignore_one_row, poly_order=poly_order, n_points=n_points,
        remove_outliers=remove_outliers, considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {
        key: hartree_to_ev(value) for (key, value) in qats_predictions.items()
    }  # Converts to eV
    # One row per Taylor-series order when orders are resolved; otherwise a
    # single 'QATS' row.
    if use_ts or return_qats_vs_qa:
        qats_predictions = pd.DataFrame(
            qats_predictions,
            index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        )
    else:
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        )
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Errors with respect to the quantum chemistry reference value.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
        return qats_errors
def error_mult_gap_qa_atom(
        df_qc, df_qats, target_label, target_charge=0,
        basis_set='aug-cc-pV5Z', use_ts=True,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False):
    """Computes QATS errors in system multiplicity gaps.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    target_charge : :obj:`int`, optional
        The system charge. Defaults to ``0``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation to make QATS-n predictions
        (where n is the order). Defaults to ``True``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised.
        Defaults to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with respect to the alchemical
        potential energy surface. Defaults to ``False``.

    Returns
    -------
    :obj:`pandas.DataFrame`
        QATS errors in eV (or QATS-n vs. QA differences when
        ``return_qats_vs_qa`` is ``True``).
    """
    # Only single atoms are handled by this routine.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    # Reference value: standard quantum chemistry prediction (converted to eV).
    qc_prediction = hartree_to_ev(
        mult_gap_qc_atom(
            df_qc, target_label, target_charge=target_charge,
            basis_set=basis_set, ignore_one_row=ignore_one_row
        )
    )
    qats_predictions = mult_gap_qa_atom(
        df_qc, df_qats, target_label, target_charge=target_charge,
        basis_set=basis_set, use_ts=use_ts, ignore_one_row=ignore_one_row,
        considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {
        key: hartree_to_ev(value) for (key, value) in qats_predictions.items()
    }  # Converts to eV
    # One row per Taylor-series order when orders are resolved; otherwise a
    # single 'QATS' row. The ``return_qats_vs_qa`` term mirrors the analogous
    # charge-change routines, which also emit per-order rows in that mode.
    if use_ts or return_qats_vs_qa:
        qats_predictions = pd.DataFrame(
            qats_predictions,
            index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        )
    else:
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        )
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Errors with respect to the quantum chemistry reference value.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
        return qats_errors
import numpy as np
from qa_tools.utils import *
from qa_tools.prediction import *
def qa_pes_errors(
        df_qc, n_electrons, excitation_level=0, basis_set='aug-cc-pV5Z',
        bond_length=None, return_energies=False, energy_type='total'):
    """Computes the error associated with predicting a system's absolute
    electronic energy using quantum alchemy.

    In other words, this quantifies the error when using a quantum alchemy
    reference and nuclear charge perturbation to model a target. For example,
    how accurate is using C- basis set with a lambda of 1 to predict N.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    n_electrons : :obj:`int`
        Total number of electrons for the quantum alchemical PES.
    excitation_level : :obj:`int`, optional
        Electronic state of the system with respect to the ground state. ``0``
        represents the ground state, ``1`` the first excited state, etc.
        Defaults to ground state.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    bond_length : :obj:`float`, optional
        Desired bond length for dimers; must be specified.
    return_energies : :obj:`bool`, optional
        Return quantum alchemy energies instead of errors. Defaults to
        ``False``.
    energy_type : :obj:`str`, optional
        Species the energy type/contributions to examine. Can be ``'total'``
        energies, ``'hf'`` for Hartree-Fock contributions, or
        ``'correlation'`` energies. Defaults to ``'total'``.

    Returns
    -------
    :obj:`list` [:obj:`str`]
        System and state labels (e.g., `'c.chrg0.mult1'`) in the order of
        increasing atomic number (and charge).
    :obj:`numpy.ndarray`
        Quantum alchemy errors (or energies) with respect to standard quantum
        chemistry.

    Raises
    ------
    ValueError
        If ``energy_type`` is not ``'total'``, ``'hf'``, or ``'correlation'``.
    """
    # Map the requested energy type to its dataframe column. An unknown value
    # previously left ``df_energy_type`` unbound and surfaced later as a
    # confusing NameError; fail fast instead.
    if energy_type == 'total':
        df_energy_type = 'electronic_energy'
    elif energy_type == 'hf':
        df_energy_type = 'hf_energy'
    elif energy_type == 'correlation':
        df_energy_type = 'correlation_energy'
    else:
        raise ValueError(
            f"energy_type must be 'total', 'hf', or 'correlation', "
            f"not '{energy_type}'"
        )
    df_qa_pes = df_qc.query(
        'n_electrons == @n_electrons'
        '& basis_set == @basis_set'
    )
    sys_labels = list(set(df_qa_pes['system'].values))
    is_dimer = len(df_qc.iloc[0]['atomic_numbers']) == 2
    # Gets data.
    sys_atomic_numbers = []
    calc_labels = []
    lambda_values = []
    energies = []
    true_energies = []
    for sys_label in sys_labels:
        df_sys = df_qa_pes.query('system == @sys_label')
        if is_dimer:
            # Dimers require a specific bond length to select a single PES.
            assert bond_length is not None
            df_sys = df_sys.query('bond_length == @bond_length')
        # Select the multiplicity of the requested electronic state from the
        # unperturbed (lambda == 0) rows.
        df_state = select_state(
            df_sys.query('lambda_value == 0.0'), excitation_level,
            ignore_one_row=True
        )
        atomic_numbers = df_state.iloc[0]['atomic_numbers']
        if is_dimer:
            sys_atomic_numbers.append(atomic_numbers)
        else:
            sys_atomic_numbers.append(atomic_numbers[0])
        state_mult = df_state.iloc[0]['multiplicity']
        state_chrg = df_state.iloc[0]['charge']
        true_energies.append(df_state.iloc[0][df_energy_type])
        calc_labels.append(f'{sys_label}.chrg{state_chrg}.mult{state_mult}')
        df_sys = df_sys.query('multiplicity == @state_mult')
        lambda_values.append(df_sys.lambda_value.values)
        energies.append(df_sys[df_energy_type].values)
    sys_atomic_numbers = np.array(sys_atomic_numbers)
    lambda_values = np.array(lambda_values)
    true_energies = np.array(true_energies)
    energies = np.array(energies)
    # Shift every system's lambda grid so that all grids share the same
    # (global) minimum lambda origin.
    sys_min_lambda_values = lambda_values.min(axis=1).astype('int')
    global_min_lambda_value = np.min(sys_min_lambda_values)
    adjust_lambdas = global_min_lambda_value - sys_min_lambda_values
    for i in range(len(adjust_lambdas)):
        lambda_values[i] += adjust_lambdas[i]
    # Order systems by atomic number: for dimers use the summed atomic
    # numbers, falling back to the smallest constituent atom when all sums
    # are equal.
    if is_dimer:
        sys_atomic_numbers_sum = np.sum(sys_atomic_numbers, axis=1)
        if np.all(sys_atomic_numbers_sum == sys_atomic_numbers_sum.flatten()[0]):
            sort_z = np.argsort(np.min(sys_atomic_numbers, axis=1))
        else:
            sort_z = np.argsort(sys_atomic_numbers_sum)
    else:
        sort_z = np.argsort(sys_atomic_numbers)
    sys_energies = []
    for target_idx, true_e_idx in zip(np.flip(sort_z), sort_z):  # Smallest to largest lambda value
        target_lambda = sys_min_lambda_values[target_idx]
        true_energy = true_energies[true_e_idx]
        errors = []
        for ref_idx in sort_z:
            # Energy of this reference at the lambda that maps it onto the
            # target system.
            lambda_idx = np.where(lambda_values[ref_idx] == target_lambda)[0]
            if return_energies:
                errors.append(energies[ref_idx][lambda_idx][0])
            else:
                errors.append(energies[ref_idx][lambda_idx][0] - true_energy)
        sys_energies.append(errors)
    sys_energies = np.array(sys_energies)  # Hartree
    return [calc_labels[i] for i in sort_z], sys_energies
def qats_pes_errors(
        df_qc, df_qats, n_electrons, qats_order=2, excitation_level=0,
        basis_set='aug-cc-pV5Z', return_energies=False):
    """Computes the error associated with using a Taylor series to approximate
    the quantum alchemical potential energy surface.

    Errors are in reference to quantum alchemy. Only atom dataframes are
    supported.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    n_electrons : :obj:`int`
        Total number of electrons for the quantum alchemical PES.
    qats_order : :obj:`int`, optional
        Desired Taylor series order to use. Defaults to ``2``.
    excitation_level : :obj:`int`, optional
        Electronic state of the system with respect to the ground state. ``0``
        represents the ground state, ``1`` the first excited state, etc.
        Defaults to ground state.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    return_energies : :obj:`bool`, optional
        Return QATS energies instead of errors. Defaults to ``False``.

    Returns
    -------
    :obj:`list` [:obj:`str`]
        System and state labels (e.g., `'c.chrg0.mult1'`) in the order of
        increasing atomic number.
    :obj:`numpy.ndarray`
        Alchemical energy errors due to modeling a target system by changing
        the nuclear charge of a reference system (e.g., c -> n). The rows and
        columns are in the same order as the state labels.
    """
    # Only single atoms are supported by this routine.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    df_qa_pes = df_qc.query(
        'n_electrons == @n_electrons & basis_set == @basis_set'
    )
    # Use one system to determine the multiplicity of the requested state,
    # then restrict the whole PES to that multiplicity.
    mult_sys_test = df_qa_pes.iloc[0]['system']
    state_mult = get_multiplicity(
        df_qa_pes.query('system == @mult_sys_test'), excitation_level,
        ignore_one_row=False
    )
    df_qa_pes = df_qa_pes.query('multiplicity == @state_mult')
    # lambda == 0 rows describe each unperturbed system.
    df_sys_info = df_qa_pes.query('lambda_value == 0.0')
    charge_sort = np.argsort(df_sys_info['charge'].values)  # most negative to most positive
    sys_labels = df_sys_info['system'].values[charge_sort]
    sys_atomic_numbers = df_sys_info['atomic_numbers'].values[charge_sort]
    sys_charges = df_sys_info['charge'].values[charge_sort]
    # Gets data.
    calc_labels = []
    lambda_values = []  # NOTE(review): populated nowhere below; appears unused.
    alchemical_energies = []
    qats_energies = []
    # Goes through all possible reference systems and calculates QATS-n predictions
    # then computes the alchemical predictions and errors.
    # Loops through all systems.
    for i in range(len(sys_labels)):
        sys_alchemical_energies = []
        sys_qats_energies = []
        target_label = sys_labels[i]
        target_atomic_numbers = sys_atomic_numbers[i]
        target_charge = sys_charges[i]
        calc_labels.append(f'{target_label}.chrg{target_charge}.mult{state_mult}')
        # All valid QATS references for this target (same electron count).
        df_qats_ref = get_qa_refs(
            df_qc, df_qats, target_label, n_electrons, basis_set=basis_set,
            df_selection='qats', excitation_level=excitation_level,
            considered_lambdas=None
        )
        charge_sort = np.argsort(df_qats_ref['charge'].values)  # most negative to most positive
        # Loops through all QATS references.
        for j in charge_sort:
            qats_row = df_qats_ref.iloc[j]
            ref_sys_label = qats_row['system']
            ref_atomic_numbers = qats_row['atomic_numbers']
            ref_charge = qats_row['charge']
            ref_poly_coeffs = qats_row['poly_coeffs']
            # Lambda that perturbs the reference's nuclear charge onto the
            # target's.
            lambda_value = get_lambda_value(
                ref_atomic_numbers, target_atomic_numbers
            )
            # Predicted alchemical energy.
            sys_alchemical_energies.append(
                qa_predictions(
                    df_qc, ref_sys_label, ref_charge, excitation_level=excitation_level,
                    lambda_values=[lambda_value], basis_set=basis_set,
                    ignore_one_row=True
                )[0]
            )
            # QATS prediction
            sys_qats_energies.append(
                qats_prediction(
                    ref_poly_coeffs, qats_order, lambda_value
                )[0]
            )
        # Adds in alchemical energy and QATS reference
        # (nan placeholder on the diagonal: a system is never its own
        # reference).
        sys_alchemical_energies.insert(i, np.nan)
        sys_qats_energies.insert(i, np.nan)
        alchemical_energies.append(sys_alchemical_energies)
        qats_energies.append(sys_qats_energies)
    alchemical_energies = np.array(alchemical_energies)
    qats_energies = np.array(qats_energies)
    e_return = qats_energies
    if not return_energies:
        # In-place subtraction: ``qats_energies`` aliases ``e_return``.
        e_return -= alchemical_energies
    # Converts nan to 0
    e_return = np.nan_to_num(e_return)
    return calc_labels, e_return
def error_change_charge_qats_atoms(
        df_qc, df_qats, target_label, delta_charge, change_signs=False,
        basis_set='aug-cc-pV5Z', target_initial_charge=0, use_ts=True,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False):
    """Automates the procedure of calculating errors for changing charges on
    atoms.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    delta_charge : :obj:`str`
        Overall change in the initial target system.
    change_signs : :obj:`bool`, optional
        Multiply all predictions by -1. Used to correct the sign for computing
        electron affinities. Defaults to ``False``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    target_initial_charge : :obj:`int`
        Specifies the initial charge state of the target system. For example,
        the first ionization energy is the energy difference going from
        charge ``0 -> 1``, so ``target_initial_charge`` must equal ``0``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation (with finite differences) to make
        QATS-n predictions (where n is the order). Defaults to ``True``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised.
        Defaults to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QA predictions; i.e., the error of
        using a Taylor series with respect to quantum alchemy.
        Defaults to ``False``.

    Returns
    -------
    :obj:`pandas.DataFrame`
        QATS errors in eV (or QATS-n vs. QA differences when
        ``return_qats_vs_qa`` is ``True``).
    """
    # Only single atoms are handled here; dimers have a dedicated routine.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    # Reference value: standard quantum chemistry prediction (converted to eV).
    qc_prediction = hartree_to_ev(
        energy_change_charge_qc_atom(
            df_qc, target_label, delta_charge,
            target_initial_charge=target_initial_charge,
            change_signs=change_signs, basis_set=basis_set
        )
    )
    # Quantum alchemy (or Taylor-series) predictions keyed by reference system.
    qats_predictions = energy_change_charge_qa_atom(
        df_qc, df_qats, target_label, delta_charge,
        target_initial_charge=target_initial_charge,
        change_signs=change_signs, basis_set=basis_set,
        use_ts=use_ts, ignore_one_row=ignore_one_row,
        considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {
        key: hartree_to_ev(value) for (key, value) in qats_predictions.items()
    }  # Converts to eV
    # One row per Taylor-series order when orders are resolved; otherwise a
    # single 'QATS' row.
    if use_ts or return_qats_vs_qa:
        qats_predictions = pd.DataFrame(
            qats_predictions,
            index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        )
    else:
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        )
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Errors with respect to the quantum chemistry reference value.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
        return qats_errors
def error_change_charge_qats_dimer(
        df_qc, df_qats, target_label, delta_charge, change_signs=False,
        basis_set='cc-pV5Z', target_initial_charge=0, use_ts=True,
        lambda_specific_atom=0, lambda_direction=None,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False,
        n_points=2, poly_order=4, remove_outliers=False,
        zscore_cutoff=3.0):
    """Computes QATS errors in changing the charge of a system.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    delta_charge : :obj:`str`
        Overall change in the initial target system.
    change_signs : :obj:`bool`, optional
        Multiply all predictions by -1. Used to correct the sign for computing
        electron affinities. Defaults to ``False``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'cc-pV5Z'``.
    target_initial_charge : :obj:`int`
        Specifies the initial charge state of the target system. For example,
        the first ionization energy is the energy difference going from
        charge ``0 -> 1``, so ``target_initial_charge`` must equal ``0``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation (with finite differences) to make
        QATS-n predictions (where n is the order). Defaults to ``True``.
    lambda_specific_atom : :obj:`int`, optional
        Applies the entire lambda change to a single atom in dimers. For
        example, OH -> FH+ would be a lambda change of +1 only on the first
        atom. Defaults to ``0``.
    lambda_direction : :obj:`str`, optional
        Defines the direction of lambda changes for dimers. ``'counter'`` is
        where one atom increases and the other decreases their nuclear
        charge (e.g., CO -> BF).
        If the atomic numbers of the reference are the same, the first atom's
        nuclear charge is decreased and the second is increased. If they are
        different, the atom with the largest atomic number increases by
        lambda. Defaults to ``None``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised.
        Defaults to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with respect to the alchemical
        potential energy surface. Defaults to ``False``.
    n_points : :obj:`int`, optional
        The number of surrounding points on either side of the minimum bond
        length. Defaults to ``2``.
    poly_order : :obj:`int`, optional
        Maximum order of the fitted polynomial. Defaults to ``4``.
    remove_outliers : :obj:`bool`, optional
        Do not include bond lengths that are marked as outliers by their z
        score. Defaults to ``False``.
    zscore_cutoff : :obj:`float`, optional
        Bond length energies that have a z score higher than this are
        considered outliers. Defaults to ``3.0``.

    Returns
    -------
    :obj:`pandas.DataFrame`
        QATS errors in eV (or QATS-n vs. QA differences when
        ``return_qats_vs_qa`` is ``True``).
    """
    # Reference value: standard quantum chemistry prediction (converted to eV).
    qc_prediction = hartree_to_ev(
        energy_change_charge_qc_dimer(
            df_qc, target_label, delta_charge,
            target_initial_charge=target_initial_charge,
            change_signs=change_signs, basis_set=basis_set,
            ignore_one_row=ignore_one_row, n_points=n_points,
            poly_order=poly_order, remove_outliers=remove_outliers,
            zscore_cutoff=zscore_cutoff
        )
    )
    # Quantum alchemy (or Taylor-series) predictions keyed by reference system.
    qats_predictions = energy_change_charge_qa_dimer(
        df_qc, df_qats, target_label, delta_charge,
        target_initial_charge=target_initial_charge, change_signs=change_signs,
        basis_set=basis_set, use_ts=use_ts,
        lambda_specific_atom=lambda_specific_atom, lambda_direction=lambda_direction,
        ignore_one_row=ignore_one_row, poly_order=poly_order, n_points=n_points,
        remove_outliers=remove_outliers, considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {
        key: hartree_to_ev(value) for (key, value) in qats_predictions.items()
    }  # Converts to eV
    # One row per Taylor-series order when orders are resolved; otherwise a
    # single 'QATS' row.
    if use_ts or return_qats_vs_qa:
        qats_predictions = pd.DataFrame(
            qats_predictions,
            index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        )
    else:
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        )
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Errors with respect to the quantum chemistry reference value.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
        return qats_errors
def error_mult_gap_qa_atom(
        df_qc, df_qats, target_label, target_charge=0,
        basis_set='aug-cc-pV5Z', use_ts=True,
        max_qats_order=4, ignore_one_row=False,
        considered_lambdas=None, return_qats_vs_qa=False):
    """Computes QATS errors in system multiplicity gaps.

    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    target_charge : :obj:`int`, optional
        The system charge. Defaults to ``0``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation to make QATS-n predictions
        (where n is the order). Defaults to ``True``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised.
        Defaults to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with respect to the alchemical
        potential energy surface. Defaults to ``False``.

    Returns
    -------
    :obj:`pandas.DataFrame`
        QATS errors in eV (or QATS-n vs. QA differences when
        ``return_qats_vs_qa`` is ``True``).
    """
    # Only single atoms are handled by this routine.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    # Reference value: standard quantum chemistry prediction (converted to eV).
    qc_prediction = hartree_to_ev(
        mult_gap_qc_atom(
            df_qc, target_label, target_charge=target_charge,
            basis_set=basis_set, ignore_one_row=ignore_one_row
        )
    )
    qats_predictions = mult_gap_qa_atom(
        df_qc, df_qats, target_label, target_charge=target_charge,
        basis_set=basis_set, use_ts=use_ts, ignore_one_row=ignore_one_row,
        considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {
        key: hartree_to_ev(value) for (key, value) in qats_predictions.items()
    }  # Converts to eV
    # One row per Taylor-series order when orders are resolved; otherwise a
    # single 'QATS' row. The ``return_qats_vs_qa`` term mirrors the analogous
    # charge-change routines, which also emit per-order rows in that mode.
    if use_ts or return_qats_vs_qa:
        qats_predictions = pd.DataFrame(
            qats_predictions,
            index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        )
    else:
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        )
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Errors with respect to the quantum chemistry reference value.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
        return qats_errors
from django.conf import settings
from django_statsd.clients import statsd
from lib.geoip import GeoIP
import mkt
class RegionMiddleware(object):
    """Figure out the user's region and store it on the request.

    For API requests the region is resolved in priority order: explicit
    ``?region=`` query parameter, GeoIP lookup on the client IP, then
    language matching. Non-API requests are pinned to rest-of-world.
    """

    def __init__(self):
        self.geoip = GeoIP(settings)

    def region_from_request(self, request):
        """Return the region for the client IP, or rest-of-world."""
        ip_reg = self.geoip.lookup(request.META.get('REMOTE_ADDR'))
        return mkt.regions.REGIONS_DICT.get(ip_reg, mkt.regions.RESTOFWORLD)

    def process_request(self, request):
        """Attach ``request.REGION`` and sync it to the user profile."""
        regions = mkt.regions.REGION_LOOKUP
        user_region = restofworld = mkt.regions.RESTOFWORLD
        # Region detection is API-only; everything else gets rest-of-world.
        if not getattr(request, 'API', False):
            request.REGION = restofworld
            mkt.regions.set_region(restofworld)
            return

        # ?region= -> geoip -> lang
        url_region = request.REQUEST.get('region')
        if url_region in regions:
            statsd.incr('z.regions.middleware.source.url')
            user_region = regions[url_region]
        else:
            user_region = self.region_from_request(request)
            # If the above fails, let's try `Accept-Language`.
            if user_region == restofworld:
                statsd.incr('z.regions.middleware.source.accept-lang')
                if request.LANG == settings.LANGUAGE_CODE:
                    # Skip the first (default) region choice when the request
                    # language is the site default.
                    choices = mkt.regions.REGIONS_CHOICES[1:]
                else:
                    choices = mkt.regions.REGIONS_CHOICES
                if request.LANG:
                    for name, region in choices:
                        if name.lower() in request.LANG.lower():
                            user_region = region
                            break
                # All else failed, try to match against our forced Language.
                if user_region == mkt.regions.RESTOFWORLD:
                    # Try to find a suitable region.
                    for name, region in choices:
                        if region.default_language == request.LANG:
                            user_region = region
                            break
                accept_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
                if (user_region == mkt.regions.US
                        and accept_language is not None
                        and not accept_language.startswith('en')):
                    # Let us default to restofworld if it's not English.
                    user_region = mkt.regions.RESTOFWORLD
            else:
                statsd.incr('z.regions.middleware.source.geoip')

        # Only update the user's region if it changed.
        amo_user = getattr(request, 'amo_user', None)
        if amo_user and amo_user.region != user_region.slug:
            amo_user.region = user_region.slug
            amo_user.save()

        request.REGION = user_region
        mkt.regions.set_region(user_region)
from django_statsd.clients import statsd
from lib.geoip import GeoIP
import mkt
class RegionMiddleware(object):
    """Figure out the user's region and store it on the request.

    For API requests the region is resolved in priority order: explicit
    ``?region=`` query parameter, GeoIP lookup on the client IP, then
    language matching. Non-API requests are pinned to rest-of-world.
    """

    def __init__(self):
        self.geoip = GeoIP(settings)

    def region_from_request(self, request):
        """Return the region for the client IP, or rest-of-world."""
        ip_reg = self.geoip.lookup(request.META.get('REMOTE_ADDR'))
        return mkt.regions.REGIONS_DICT.get(ip_reg, mkt.regions.RESTOFWORLD)

    def process_request(self, request):
        """Attach ``request.REGION`` and sync it to the user profile."""
        regions = mkt.regions.REGION_LOOKUP
        user_region = restofworld = mkt.regions.RESTOFWORLD
        # Region detection is API-only; everything else gets rest-of-world.
        if not getattr(request, 'API', False):
            request.REGION = restofworld
            mkt.regions.set_region(restofworld)
            return

        # ?region= -> geoip -> lang
        url_region = request.REQUEST.get('region')
        if url_region in regions:
            statsd.incr('z.regions.middleware.source.url')
            user_region = regions[url_region]
        else:
            user_region = self.region_from_request(request)
            # If the above fails, let's try `Accept-Language`.
            if user_region == restofworld:
                statsd.incr('z.regions.middleware.source.accept-lang')
                if request.LANG == settings.LANGUAGE_CODE:
                    # Skip the first (default) region choice when the request
                    # language is the site default.
                    choices = mkt.regions.REGIONS_CHOICES[1:]
                else:
                    choices = mkt.regions.REGIONS_CHOICES
                if request.LANG:
                    for name, region in choices:
                        if name.lower() in request.LANG.lower():
                            user_region = region
                            break
                # All else failed, try to match against our forced Language.
                if user_region == mkt.regions.RESTOFWORLD:
                    # Try to find a suitable region.
                    for name, region in choices:
                        if region.default_language == request.LANG:
                            user_region = region
                            break
                accept_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
                if (user_region == mkt.regions.US
                        and accept_language is not None
                        and not accept_language.startswith('en')):
                    # Let us default to restofworld if it's not English.
                    user_region = mkt.regions.RESTOFWORLD
            else:
                statsd.incr('z.regions.middleware.source.geoip')

        # Only update the user's region if it changed.
        amo_user = getattr(request, 'amo_user', None)
        if amo_user and amo_user.region != user_region.slug:
            amo_user.region = user_region.slug
            amo_user.save()

        request.REGION = user_region
        mkt.regions.set_region(user_region)
"""Khronos OpenGL gl.xml to C++ GL wrapper generator."""
import argparse
import json
import os
import re
import xml.etree.ElementTree as ET
from collections import defaultdict
from config import (
EXTENSION_SUFFIXES,
RESERVED_NAMES,
FUNCTION_SUFFIXES,
HANDLE_TYPES,
EXCLUDED_ENUMS,
EXTRA_ENUM_GROUPS
)
import templates
import util
class ParsedNode:
    """A registry XML element parsed into its type and name pieces.

    Collects the raw type-text fragments (``wrapper_type``/``native_type``),
    the declared ``<name>``, and — for enum or bitmask groups — the
    wrapper-side enum type name.
    """

    def __init__(self, generator, node):
        """Parse ``node`` and populate all fields."""
        self.group = node.attrib.get('group', '')
        self.node_type = ''
        self.node_name = ''
        self.enum_type = ''
        self.group_type = 'basic'
        self.ptype = ''
        self.wrapper_type = []
        self.native_type = []
        # Groups registered as bitmasks get dedicated type handling below
        # and are recorded as used so their definitions get emitted.
        if self.group and self.group in generator.bitmask_groups:
            self.group_type = 'bitmask'
            generator.used_enum_groups.add(self.group)
        # Leading free text (e.g. 'const ') belongs to both type spellings.
        if node.text:
            self.wrapper_type.append(node.text)
            self.native_type.append(node.text)
        for child in node:
            if child.text:
                self._parse_elem_text(generator, child)
            # Trailing text (e.g. ' *') also belongs to both type spellings.
            if child.tail:
                self.wrapper_type.append(child.tail)
                self.native_type.append(child.tail)

    def _parse_elem_text(self, generator, element):
        """Record the type (``<ptype>``) or name (``<name>``) of one child."""
        tag = element.tag
        if tag == 'name':
            self.node_name = element.text
            return
        if tag != 'ptype':
            print(f"Warning: Unknown node element: '{element.tag}'")
            return
        self.ptype = element.text
        bitmask_field = (
            self.ptype == 'GLbitfield' and self.group_type == 'bitmask'
        )
        grouped_enum = self.ptype == 'GLenum' and self.group != ''
        if bitmask_field or grouped_enum:
            # Replace the raw GL type with the generated enum type name.
            self.enum_type = self.group
            self.node_type = generator.to_type_name(self.enum_type)
            if grouped_enum:
                generator.used_enum_groups.add(self.group)
        else:
            self.node_type = self.ptype
        self.wrapper_type.append(self.node_type)
        self.native_type.append(self.ptype)
class Node:
    """Node is either one argument to Khronos API function or return value."""
    def __init__(self, generator, node):
        """Constructor for node.

        Derives from the parsed XML element:
          - ``native_type`` / ``wrapper_type``: full C type text for each side,
          - ``format_string`` / ``format_entry``: fmt-library pieces used to
            log the argument value inside generated wrapper functions.

        The elif chain below is ordered by precedence: InternalFormat special
        case, then pointers, then grouped enums/bitmasks, then plain types.
        """
        parsed = ParsedNode(generator, node)
        self.group = parsed.group
        self.wrapper_type = ''.join(parsed.wrapper_type).strip()
        self.native_type = ''.join(parsed.native_type).strip()
        self.name = parsed.node_name
        self.node_type = parsed.node_type
        self.element_type = parsed.ptype
        self.is_pointer = False
        # Patch internalformat as special case
        self.format_string = f'{parsed.node_name} = {{}}'
        if (parsed.group == 'InternalFormat' and parsed.ptype == 'GLint'):
            # Treat like enum type;
            # Cast native type to strongly typed enum type and use c_str()
            format_entry = 'gl::c_str(static_cast<gl::{0}>({{}}))'.format(
                generator.to_type_name(parsed.group)
            )
        elif (
            parsed.ptype in generator.pointer_types
            or '*' in self.native_type
            or parsed.ptype in HANDLE_TYPES
        ):
            # Pointer types are formatted with fmt::ptr()
            self.is_pointer = True
            format_entry = 'fmt::ptr(reinterpret_cast<const void*>({}))'
        elif parsed.enum_type != '':
            if (
                parsed.ptype == 'GLbitfield'
                or parsed.enum_type in generator.bitmask_enums
            ):
                # Bitmask types use to_string() which builds a temporary string
                format_entry = 'gl::to_string({})'
            else:
                # Enum types:
                # Cast native type to strongly typed enum type and use c_str()
                #format_entry = 'gl::c_str({})'
                format_entry = 'gl::c_str(static_cast<gl::{0}>({{}}))'.format(
                    generator.to_type_name(parsed.group)
                )
        elif parsed.node_type == 'GLboolean':
            format_entry = 'gl::c_str({})'
        elif parsed.node_type == 'GLbitfield':
            # Bitfield without a known group: log the raw integer value.
            format_entry = "{}"
        elif parsed.ptype == 'GLenum':
            # Ungrouped GLenum: resolve the name at run time.
            format_entry = 'gl::enum_string({})'
        elif parsed.ptype:
            format_entry = "{}"
        else:
            # No <ptype> at all (e.g. bare 'void' return): nothing to log.
            format_entry = ''
            self.format_string = ''
        self.format_entry = format_entry.format(parsed.node_name)
class GLGenerator:
"""Convert Khronos gl.xml to C++ GL wrapper."""
def _read_json(self, filename):
"""Read json file relative to script path."""
path = os.path.join(self.script_path, filename)
try:
with open(path) as file:
return json.load(file)
except Exception as exception:
print('Error parsing {}: {}'.format(path, str(exception)))
raise
def _choose_enum_names(self, items):
"""Pick enum name."""
suffix_items = set()
non_suffix_items = set()
for item in sorted(items):
suffix_found = False
for suffix in EXTENSION_SUFFIXES:
if item.endswith(suffix):
suffix_found = True
suffix_items.add((item, item[:-len(suffix)]))
break
if not suffix_found:
non_suffix_items.add(item)
res = set()
for item_tuple in suffix_items:
match_found = False
for non_suffix in non_suffix_items:
if item_tuple[1] == non_suffix:
res.add(item_tuple[1])
match_found = True
break
if not match_found:
if item_tuple[0] in self.enum_list:
res.add(item_tuple[0])
for non_suffix in non_suffix_items:
if non_suffix in self.enum_list:
res.add(non_suffix)
return sorted(res)
@staticmethod
def split_to_body_and_ext(text):
"""Split GL extension name to body and extension."""
for suffix in EXTENSION_SUFFIXES:
suffix = suffix[1:]
if text.endswith(suffix):
return (text[:-len(suffix)], suffix)
return (text, '')
def to_type_name(self, text) -> str:
"""Generate wrapper name for type."""
return util.to_snake_case(self.split_to_body_and_ext(text)[0]).capitalize()
@staticmethod
def wrapper_function_name(text):
"""Generate wrappe name for function."""
text = GLGenerator.split_to_body_and_ext(text)
body = text[0]
ext = text[1]
for suffix, replacement in FUNCTION_SUFFIXES.items():
if body.endswith(suffix):
body = body[:-len(suffix)] + replacement
break
text = body + ext
res = util.to_snake_case(text[2:])
return res
    def __init__(self, outpath):
        """Constructor for GLGenerator.

        Initializes all accumulator containers and parses the XML registry
        files ('gl.xml' plus local additions in 'gl_extra.xml') located next
        to this script.

        :param outpath: directory the generated C++ sources are written to.
        """
        self.script_path = os.path.dirname(os.path.realpath(__file__))
        self.outpath = outpath
        self.func_prefix = 'gl'
        # Accumulators filled by the parse/build passes; the formatters in
        # _generate_files() consume them.
        self.all_enum_string_cases = []
        self.command_list = []
        self.command_required_by_feature = defaultdict(list)
        self.command_removed_by_feature = defaultdict(list)
        self.command_required_by_extension = defaultdict(list)
        self.command_enum_declarations = ''
        self.command_map_entries = ''
        self.command_case_entries = ''
        self.command_info_entries = ''
        self.extensions = []
        self.extension_enum_declarations = ''
        self.extension_map_entries = ''
        self.extension_case_entries = ''
        self.enum_helper_definitions = []
        self.enum_list = []
        self.enum_required_by_feature = defaultdict(list)
        self.enum_removed_by_feature = defaultdict(list)
        self.enum_required_by_extension = defaultdict(list)
        self.enum_name_to_value_str = {}  # key: string, value: string
        self.enum_name_to_value = {}  # key: string, value: int
        self.enum_string_function_declarations = []
        self.enum_string_function_definitions = []
        self.untyped_enum_string_function_declarations = []
        self.untyped_enum_string_function_definitions = []
        self.enum_base_zero_function_declarations = []
        self.enum_base_zero_function_definitions = []
        self.get_proc_address_calls = []
        self.group_to_enum_list = defaultdict(list)
        self.wrapper_function_declarations = []
        self.wrapper_function_definitions = []
        self.wrapper_enum_declarations = []
        self.used_enum_groups = set()
        self.bitmask_groups = []
        self.bitmask_enums = []
        self.map_make_entries = []
        self.dynamic_function_declarations = []
        self.dynamic_function_get_statements = []
        self.dynamic_function_definitions = []
        self.pointer_types = []
        self.roots = []
        # Parse both registry files; a parse failure is fatal but reported
        # with the offending path first.
        for filename in [ 'gl.xml', 'gl_extra.xml' ]:
            try:
                xml_path = os.path.join(self.script_path, filename)
                tree = ET.parse(xml_path)
                self.roots.append(tree.getroot())
            except Exception as exception:
                print('Error parsing {}: {}'.format(xml_path, str(exception)))
                raise
@staticmethod
def get_text(node) -> str:
"""Recursively build strint contents from XML node."""
result = ''
if node.text:
result = node.text
for elem in node:
result += GLGenerator.get_text(elem)
if node.tail:
result += node.tail
return result
@staticmethod
def get_name(node) -> str:
"""Get name for XML node."""
if 'name' in node.attrib:
return node.attrib['name']
for elem in node:
if elem.tag == 'name':
return elem.text
return ''
def _parse_types(self):
"""Parse GL types from XML."""
for root in self.roots:
for types in root.iter('types'):
for node in types.iter('type'):
type_name = GLGenerator.get_name(node)
text = GLGenerator.get_text(node).strip()
if '*' in text and not text.startswith('struct'):
self.pointer_types.append(type_name)
def _parse_groups(self):
"""Parse GL enum groups from XML."""
for root in self.roots:
for group in root.iter('group'):
group_name = group.attrib.get('name', '')
for enum in group.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.group_to_enum_list[group_name].append(enum_name)
def _parse_enums(self):
"""Parse GL enums from XML."""
for root in self.roots:
for enums in root.iter('enums'):
enums_group = enums.attrib.get('group', '')
group_type = enums.attrib.get('type', '')
for enum in enums.iter('enum'):
value_str = enum.attrib['value']
enum_value = util.to_int(value_str)
enum_name = enum.attrib['name']
enum_type = enum.attrib.get('type', '')
if enum_name in EXCLUDED_ENUMS:
continue
if enums_group:
group = enums_group
else:
group = enum.attrib.get('group', '')
if group_type == 'bitmask':
self.bitmask_groups.append(group)
self.bitmask_enums.append(enum_name)
elif enum_type == 'bitmask':
self.bitmask_enums.append(enum_name)
self.enum_name_to_value[enum_name] = enum_value
self.enum_name_to_value_str[enum_name] = value_str
self.group_to_enum_list[group].append(enum_name)
def _parse_features(self):
"""Parse GL features from XML."""
for root in self.roots:
for feature in root.iter('feature'):
api = feature.attrib.get('api', '')
feature_name = feature.attrib.get('name', '')
feature_number = int(float(feature.attrib.get('number', '')) * 10.0)
# filter by api
if api != 'gl':
continue
for require in feature.iter('require'):
require_profile = require.attrib.get('profile', '')
if require_profile and require_profile != 'core':
# filter by profile
continue
for enum in require.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_list.append(enum_name)
self.enum_required_by_feature[enum_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': require_profile
})
for command in require.iter('command'):
command_name = command.attrib['name']
self.command_list.append(command_name)
self.command_required_by_feature[command_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': require_profile
})
for remove in feature.iter('remove'):
remove_profile = remove.attrib.get('profile', '')
if require_profile and require_profile != 'core':
# filter by profile
continue
for enum in remove.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_removed_by_feature[enum_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': remove_profile
})
for command in remove.iter('command'):
command_name = command.attrib['name']
self.command_removed_by_feature[command_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': remove_profile
})
ext_re = re.compile(r'GL_([0-9A-Z]+)_[0-9a-zA-Z_]*')
def _parse_extensions(self):
"""Parse GL extensions from XML."""
for root in self.roots:
for extensions in root.iter('extensions'):
for extension in extensions.iter('extension'):
extension_name = extension.attrib.get('name', '')
self.extensions.append(extension_name)
extension_apis = extension.attrib.get('supported', '')
extension_api_list = set(extension_apis.split('|'))
# filter by api
if 'gl' not in extension_apis:
continue
for require in extension.iter('require'):
for enum in require.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_list.append(enum_name)
self.enum_required_by_extension[enum_name].append({
"name": extension_name,
"api_list": extension_api_list})
for command in require.iter('command'):
command_name = command.attrib['name']
self.command_list.append(command_name)
self.command_required_by_extension[command_name].append({
"name": extension_name,
"api_list": extension_api_list})
def _add_extra_enums(self):
"""Add extra enums from EXTRA_ENUM_GROUPS."""
for group_name, group in EXTRA_ENUM_GROUPS.items():
self.used_enum_groups.add(group_name)
for enum in group:
self.enum_list.append(enum)
self.group_to_enum_list[group_name].append(enum)
    def _parse_node(self, node):
        """Parse an XML element into a Node (one parameter or the <proto> return)."""
        return Node(self, node)
def _case_value(self, enum_name) -> str:
"""Generate enum name case value."""
return self.enum_name_to_value_str[enum_name]
@staticmethod
def get_command_name(command_element) -> str:
"""Get name for GL command."""
proto_element = command_element.find('proto')
for name in proto_element.iter('name'):
return name.text
return ''
    def _collect_command(self, command_element):
        """Collect GL command information.

        For one <command> element that is part of core GL (present in
        command_list and passing the version check) this builds and stores:
          - wrapper function declaration and definition,
          - per-command version / extension availability check entries,
          - dynamic loader declaration / definition / get statements.
        Commands not required by the targeted profile are skipped silently.
        """
        command_name = GLGenerator.get_command_name(command_element)
        if command_name not in self.command_list:
            return
        if not self._command_version_check(command_name):
            return
        func_prefix_len = len(self.func_prefix)
        proto_element = command_element.find('proto')
        # The <proto> node yields the native name and both return types.
        proto_info = self._parse_node(proto_element)
        native_name = proto_info.name
        short_name = native_name[func_prefix_len:]
        wrapper_name = GLGenerator.wrapper_function_name(command_name)
        native_return_type = proto_info.native_type
        wrapper_return_type = proto_info.wrapper_type
        capture_result = ''
        native_return_statement = ''
        wrapper_return_statement = ''
        if native_return_type != 'void':
            # Non-void commands capture the native result and cast it to
            # the wrapper return type on the way out.
            capture_result = 'auto res = '
            native_return_statement = '    return res;\n'
            wrapper_return_statement = f'    return static_cast<{wrapper_return_type}>(res);\n'
        native_params = []
        wrapper_params = []
        format_strings = []
        format_entries = []
        argument_list = []
        native_arg_type_list = []
        wrapper_arg_type_list = []
        for param in command_element.findall('param'):
            param_info = self._parse_node(param)
            param_name = param_info.name
            native_params.append(param_info.native_type + ' ' + param_name)
            wrapper_params.append(param_info.wrapper_type + ' ' + param_name)
            native_arg_type_list.append(param_info.native_type)
            wrapper_arg_type_list.append(param_info.wrapper_type)
            format_strings.append(param_info.format_string)
            format_entries.append(param_info.format_entry)
            # Wrapper-to-native argument forwarding: pointers must be
            # reinterpret_cast, everything else is a value cast.
            if param_info.is_pointer:
                argument_list.append(f'reinterpret_cast<{param_info.native_type}>({param_name})')
            else:
                argument_list.append(f'static_cast<{param_info.native_type}>({param_name})')
        log_format_entries = ''
        if len(format_entries) > 0:
            separator = ',\n        '
            log_format_entries = separator + separator.join(format_entries)
        # Keys consumed by the templates module when formatting the
        # generated C++ source fragments.
        formatting = {
            'COMMAND_NAME': command_name,
            'SHORT_NAME': short_name,
            'NATIVE_RETURN_TYPE': native_return_type.strip(),
            'NATIVE_NAME': native_name,
            'NATIVE_ARGUMENTS': ', '.join(native_params),
            'NATIVE_ARG_TYPE_LIST': ', '.join(native_arg_type_list),
            'NATIVE_RETURN_STATEMENT': native_return_statement,
            'COMMAND_VERSION': self._command_version(command_name),
            'WRAPPER_RETURN_TYPE': wrapper_return_type.strip(),
            'WRAPPER_NAME': wrapper_name,
            'WRAPPER_ARGUMENTS': ', '.join(wrapper_params),
            'WRAPPER_ARG_TYPE_LIST': ', '.join(wrapper_arg_type_list),
            'WRAPPER_RETURN_STATEMENT': wrapper_return_statement,
            'LOG_FORMAT_STRING': ', '.join(format_strings),
            'LOG_FORMAT_ENTRIES': log_format_entries,
            'CAPTURE_RESULT': capture_result,
            'ARGUMENT_LIST': ', '.join(argument_list),
        }
        wrapper_function_declaration = templates.WRAPPER_FUNCTION_DECLARATION.format(**formatting)
        self.wrapper_function_declarations.append(wrapper_function_declaration)
        wrapper_function_definition = templates.WRAPPER_FUNCTION_DEFINITION.format(**formatting)
        self.wrapper_function_definitions.append(wrapper_function_definition)
        # Availability checks emitted into command_info.cpp.
        for feature in self.command_required_by_feature.get(command_name, []):
            number = feature["number"]
            self.command_info_entries += (
                f'    check_version(Command::Command_{command_name}, {number});\n'
            )
        for extension in self.command_required_by_extension.get(command_name, []):
            extension_name = extension["name"]
            self.command_info_entries += (
                f'    check_extension(Command::Command_{command_name}, '
                f'Extension::Extension_{extension_name});\n'
            )
        decl_entry = templates.DYNAMIC_LOAD_FUNCTION_DECLARATION .format(**formatting)
        defn_entry = templates.DYNAMIC_LOAD_FUNCTION_DEFINITION .format(**formatting)
        get_entry  = templates.DYNAMIC_LOAD_FUNCTION_GET_STATEMENT.format(**formatting)
        self.dynamic_function_declarations   .append(decl_entry)
        self.dynamic_function_definitions    .append(defn_entry)
        self.dynamic_function_get_statements.append(get_entry)
def _parse_and_build_commands(self):
"""Parse and process GL commands from XML."""
for root in self.roots:
for commands in root.iter('commands'):
for command_element in commands.iter('command'):
try:
self._collect_command(command_element)
except Exception as exception:
command_name = GLGenerator.get_command_name(command_element)
print('Error processing command {}: {}'.format(command_name, str(exception)))
raise
extension_name_max_len = 0
for extension in self.extensions:
extension_name_max_len = max(extension_name_max_len, len(extension))
enum_value = 1
declarations = []
map_entries = []
case_entries = []
for extension in sorted(set(self.extensions)):
quoted_extension = '"' + extension + '"'
declaration = f' Extension_{extension:{extension_name_max_len}} = {enum_value:>6}'
map_entry = ' {{ {0:{1}}, Extension::Extension_{2:{3}} }}'.format(
quoted_extension, extension_name_max_len + 2, extension, extension_name_max_len
)
case_entry = ' case Extension::Extension_{0:{1}}: return "{0}";'.format(
extension, extension_name_max_len
)
declarations.append(declaration)
map_entries.append (map_entry)
case_entries.append(case_entry)
enum_value += 1
declarations.append(f' Extension_Count = {enum_value:>6}')
self.extension_enum_declarations = ',\n'.join(declarations)
self.extension_map_entries = ',\n'.join(map_entries)
self.extension_case_entries = '\n'.join(case_entries)
commands = set(self.command_list)
commands = sorted(commands)
command_name_max_len = 0
for command in commands:
command_name_max_len = max(command_name_max_len, len(command))
enum_value = 1
declarations = []
map_entries = []
case_entries = []
for command in commands:
declaration = f' Command_{command:{command_name_max_len}} = {enum_value:>6}'
map_entry = ' {{ "{0:{1}}", Command::Command_{0:{1}} }}'.format(
command, command_name_max_len
)
case_entry = ' case Command::Command_{0:{1}}: return "{0}";'.format(
command, command_name_max_len
)
declarations.append(declaration)
map_entries.append (map_entry)
case_entries.append(case_entry)
enum_value += 1
declarations.append(' Command_Count = {:>6}'.format(enum_value))
self.command_enum_declarations = ',\n'.join(declarations)
self.command_map_entries = ',\n'.join(map_entries)
self.command_case_entries = '\n'.join(case_entries)
def _enum_version_check(self, enum):
"""Check if GL enum is required and not removed."""
last_require_version = 0
for feature in self.enum_required_by_feature[enum]:
last_require_version = max(last_require_version, feature['number'])
last_remove_version = 0
for feature in self.enum_removed_by_feature[enum]:
last_remove_version = max(last_remove_version, feature['number'])
# filter by command not required by core profile
if last_require_version == 0:
return False
# filter by removed
if last_remove_version > last_require_version:
return False
return True
def _enum_version(self, name):
"""Get GL enum version."""
last_remove_version = 0
for feature in self.enum_removed_by_feature[name]:
last_remove_version = max(last_remove_version, feature['number'])
earliest_non_remove_version_number = 9999
for feature in self.enum_required_by_feature[name]:
number = feature['number']
if number > last_remove_version:
if number < earliest_non_remove_version_number:
earliest_non_remove_version_number = number
version = feature['name']
return version
def _command_version(self, name):
"""Get GL command version."""
last_remove_version = 0
for feature in self.command_removed_by_feature[name]:
last_remove_version = max(last_remove_version, feature['number'])
earliest_non_remove_version_number = 9999
version = ''
for feature in self.command_required_by_feature[name]:
number = feature['number']
if number > last_remove_version:
if number < earliest_non_remove_version_number:
earliest_non_remove_version_number = number
version = feature['name']
return version
def _command_version_check(self, command_name):
"""Check if GL command is required and not removed."""
last_require_version = 0
for feature in self.command_required_by_feature[command_name]:
last_require_version = max(last_require_version, feature['number'])
last_remove_version = 0
for feature in self.command_removed_by_feature[command_name]:
last_remove_version = max(last_remove_version, feature['number'])
# filter by command not required by core profile
if last_require_version == 0:
return False
# filter by removed
if last_remove_version > last_require_version:
return False
return True
def _build_all_enums(self):
"""Parse and process GL enums."""
uniq_enums = []
used_values = set()
enum_value_to_name_list = defaultdict(set) # key: int, value: list of strings (enum name
for enum in self.enum_list:
if enum in self.bitmask_enums:
continue
if enum not in self.enum_name_to_value:
print(f'Warning: enum {enum} has no value')
continue
if not self._enum_version_check(enum):
continue
value = self.enum_name_to_value[enum]
enum_value_to_name_list[value].add(enum)
if value in used_values:
continue
uniq_enums.append((value, enum))
used_values.add(value)
uniq_enums.sort()
for value, enum in uniq_enums:
name_list = self._choose_enum_names(enum_value_to_name_list[value])
if name_list:
list_str = ' / '.join(name_list)
enum_value_str = self._case_value(enum)
case = f' case {enum_value_str}: return "{list_str}";'
self.all_enum_string_cases.append(case)
self.map_make_entries.append(f' {{ "{enum}", {enum_value_str} }}')
    def _build_enum_groups(self):
        """Build strongly typed wrapper enums and helpers for each used group.

        For every group referenced by a parameter (or configured extra group)
        this emits: the C++ enum declaration, string-conversion function
        declaration/definition, and — for non-bitmask groups — base-zero
        index helpers.  Groups with no surviving values are skipped.
        """
        for group in self.used_enum_groups:
            if group not in self.group_to_enum_list:
                print(f'Warning: Enum group {group} has no values defined.')
                continue
            group_type = 'basic'
            if group in self.bitmask_groups:
                group_type = 'bitmask'
            values_used = set()
            enum_name_list = self.group_to_enum_list[group]
            enum_tuple_list = []
            # First pass: keep one (value, literal, name) per distinct value,
            # filtering to core-profile enums with known values.  Seeing any
            # bitmask enum promotes the whole group to 'bitmask'.
            for enum_name in enum_name_list:
                if enum_name not in self.enum_list:
                    continue
                if enum_name not in self.enum_name_to_value:
                    print(f'Warning: enum {enum_name} has no value')
                    continue
                if not self._enum_version_check(enum_name):
                    continue
                enum_value = self.enum_name_to_value[enum_name]
                if enum_value not in values_used:
                    enum_value_str = self._case_value(enum_name)
                    enum_tuple_list.append((enum_value, enum_value_str, enum_name))
                    values_used.add(enum_value)
                if enum_name in self.bitmask_enums:
                    group_type = 'bitmask'
            enum_tuple_list.sort()
            # Width of the longest wrapper value name, used to align the
            # generated declarations.
            group_max_len = 0
            for enum_info in enum_tuple_list:
                # Wrapper value name: strip the 'GL_' prefix, lowercase, and
                # suffix '_' when it collides with a reserved C++ name.
                wrapper_enum_value_name = enum_info[2][3:].lower()
                if wrapper_enum_value_name in RESERVED_NAMES:
                    wrapper_enum_value_name = wrapper_enum_value_name + '_'
                if len(wrapper_enum_value_name) > group_max_len:
                    group_max_len = len(wrapper_enum_value_name)
            group_enum_string_entries = []
            group_enum_base_zero_entries = []
            group_wrapper_enum_value_definitions = []
            base_zero_value = 0
            # Second pass: emit per-value template entries.
            for enum_info in enum_tuple_list:
                wrapper_enum_value_name = enum_info[2][3:].lower()
                if wrapper_enum_value_name in RESERVED_NAMES:
                    wrapper_enum_value_name = wrapper_enum_value_name + '_'
                formatting = {
                    'ENUM_VALUE': enum_info[1],
                    'ENUM_STRING': enum_info[2],
                    'ENUM_BASE_ZERO_VALUE': base_zero_value,
                    'ENUM_VERSION': self._enum_version(enum_info[2])
                }
                string_entry = templates.ENUM_STRING_MAKE_ENTRY[group_type].format(**formatting)
                group_enum_string_entries.append(string_entry)
                group_wrapper_enum_value_definitions.append(
                    '    {:{}} = {:>6}u /* {} */'.format(
                        wrapper_enum_value_name,
                        group_max_len,
                        enum_info[1],
                        self._enum_version(enum_info[2])))
                if group_type == 'basic':
                    # Base-zero indices exist only for non-bitmask groups.
                    base_zero_make_entry = templates.ENUM_BASE_ZERO_MAKE_ENTRY.format(**formatting)
                    group_enum_base_zero_entries.append(base_zero_make_entry)
                    base_zero_value = base_zero_value + 1
            if enum_tuple_list:
                wrapper_enum_name = self.to_type_name(group)
                definitions = ',\n'.join(sorted(group_wrapper_enum_value_definitions))
                string_entries = '\n'.join(sorted(group_enum_string_entries))
                formatting = {
                    'WRAPPER_ENUM_TYPE_NAME': wrapper_enum_name,
                    'WRAPPER_ENUM_STRING_FN_NAME': self.split_to_body_and_ext(group)[0],
                    'GROUP_NAME': group,
                    'WRAPPER_ENUM_VALUE_DEFINITIONS': definitions,
                    'GROUP_ENUM_STRING_ENTRIES': string_entries,
                    'GROUP_ENUM_BASE_ZERO_ENTRIES': '\n'.join(group_enum_base_zero_entries),
                }
                if group_type == 'basic':
                    self.enum_base_zero_function_declarations.append(
                        templates.ENUM_BASE_ZERO_FUNCTION_DECLARATION.format(**formatting)
                    )
                    self.enum_base_zero_function_definitions.append(
                        templates.ENUM_BASE_ZERO_FUNCTION_DEFINITION.format(**formatting)
                    )
                    self.untyped_enum_string_function_declarations.append(
                        templates.UNTYPED_ENUM_STRING_FUNCTION_DECLARATION.format(**formatting)
                    )
                    self.untyped_enum_string_function_definitions.append(
                        templates.UNTYPED_ENUM_STRING_FUNCTION_DEFINITION.format(**formatting)
                    )
                self.enum_string_function_declarations.append(
                    templates.ENUM_STRING_FUNCTION_DECLARATION[group_type].format(**formatting)
                )
                self.enum_string_function_definitions.append(
                    templates.ENUM_STRING_FUNCTION_DEFINITION[group_type].format(**formatting)
                )
                self.wrapper_enum_declarations.append(
                    templates.WRAPPER_ENUM_DECLARATION.format(**formatting)
                )
                self.enum_helper_definitions.append(
                    templates.ENUM_HELPER_DEFINITION[group_type].format(**formatting)
                )
    def _generate_files(self):
        """Write output files.

        Assembles all accumulated fragments into the template formatter map,
        then renders each template into its file under self.outpath.  Any
        write or format failure is reported and re-raised.
        """
        # Placeholder values substituted into every template.
        formatters = {
            'AUTOGENERATION_WARNING': templates.AUTOGENERATION_WARNING,
            'COMMAND_INFO_H': 'command_info.h',
            'MAP_MAKE_ENTRIES': ',\n'.join(sorted(self.map_make_entries)),
            'WRAPPER_FUNCTION_DEFINITIONS': '\n'.join(self.wrapper_function_definitions),
            'WRAPPER_FUNCTION_DECLARATIONS': '\n'.join(self.wrapper_function_declarations),
            'WRAPPER_ENUM_DECLARATIONS': util.sjoin(self.wrapper_enum_declarations),
            'ENUM_STRING_FUNCTION_DECLARATIONS': util.sjoin(self.enum_string_function_declarations),
            'ENUM_STRING_FUNCTION_DEFINITIONS': util.sjoin(self.enum_string_function_definitions),
            'UNTYPED_ENUM_STRING_FUNCTION_DECLARATIONS': util.sjoin(self.untyped_enum_string_function_declarations),
            'UNTYPED_ENUM_STRING_FUNCTION_DEFINITIONS': util.sjoin(self.untyped_enum_string_function_definitions),
            'ENUM_BASE_ZERO_FUNCTION_DECLARATIONS': util.sjoin(self.enum_base_zero_function_declarations),
            'ENUM_BASE_ZERO_FUNCTION_DEFINITIONS': util.sjoin(self.enum_base_zero_function_definitions),
            'ENUM_HELPER_DEFINITIONS': util.sjoin(self.enum_helper_definitions),
            'ALL_ENUM_STRING_CASES': util.sjoin(self.all_enum_string_cases),
            'DYNAMIC_FUNCTION_DECLARATIONS': '\n'.join(self.dynamic_function_declarations),
            'DYNAMIC_FUNCTION_DEFINITIONS': '\n'.join(self.dynamic_function_definitions),
            'DYNAMIC_FUNCTION_GET_STATEMENTS': '\n    '.join(self.dynamic_function_get_statements),
            'EXTENSION_ENUM_DECLARATIONS': self.extension_enum_declarations,
            'EXTENSION_MAP_ENTRIES': self.extension_map_entries,
            'EXTENSION_CASE_ENTRIES': self.extension_case_entries,
            'COMMAND_ENUM_DECLARATIONS': self.command_enum_declarations,
            'COMMAND_MAP_ENTRIES': self.command_map_entries,
            'COMMAND_CASE_ENTRIES': self.command_case_entries,
            'COMMAND_INFO_ENTRIES': self.command_info_entries,
        }
        # Output filename -> template string.
        content = {
            'command_info.cpp': templates.COMMAND_INFO_CPP,
            'command_info.hpp': templates.COMMAND_INFO_HPP,
            'dynamic_load.hpp': templates.DYNAMIC_LOAD_HPP,
            'dynamic_load.cpp': templates.DYNAMIC_LOAD_CPP,
            'enum_base_zero_functions.hpp': templates.ENUM_BASE_ZERO_FUNCTIONS_HPP,
            'enum_base_zero_functions.cpp': templates.ENUM_BASE_ZERO_FUNCTIONS_CPP,
            'enum_string_functions.hpp': templates.ENUM_STRING_FUNCTIONS_HPP,
            'enum_string_functions.cpp': templates.ENUM_STRING_FUNCTIONS_CPP,
            'wrapper_enums.hpp': templates.WRAPPER_ENUMS_HPP,
            'wrapper_functions.cpp': templates.WRAPPER_FUNCTIONS_CPP,
            'wrapper_functions.hpp': templates.WRAPPER_FUNCTIONS_HPP
        }
        os.makedirs(self.outpath, exist_ok=True)
        for filename, template in content.items():
            filename = os.path.join(self.outpath, filename)
            print('GEN\t{}'.format(os.path.basename(filename)))
            try:
                with open(filename, 'w') as out_file:
                    try:
                        out_file.write(template.format(**formatters))
                    except Exception:
                        # Dump the template so a bad placeholder is findable.
                        print(f'template = {template}')
                        raise
            except Exception as exception:
                print('Writing {} failed: {}'.format(filename, (exception)))
                raise
def generate(self):
"""Pipeline parsing input XML and generating output."""
try:
self._parse_groups()
self._parse_types()
self._parse_enums()
self._parse_features()
self._parse_extensions()
self._add_extra_enums()
self._parse_and_build_commands()
self._build_all_enums()
self._build_enum_groups()
self._generate_files()
except Exception as exception:
print('Generate failed: {}'.format(str(exception)))
raise
def main():
    """Command-line entry point: parse arguments and run the generator."""
    argument_parser = argparse.ArgumentParser(description='Generate GL wrapper from gl.xml')
    argument_parser.add_argument('outpath', help='Output path')
    parsed_args = argument_parser.parse_args()
    GLGenerator(parsed_args.outpath).generate()
    print('Done.')
main() | src/erhe/gl/generate_sources.py | """Khronos OpenGL gl.xml to C++ GL wrapper generator."""
import argparse
import json
import os
import re
import xml.etree.ElementTree as ET
from collections import defaultdict
from config import (
EXTENSION_SUFFIXES,
RESERVED_NAMES,
FUNCTION_SUFFIXES,
HANDLE_TYPES,
EXCLUDED_ENUMS,
EXTRA_ENUM_GROUPS
)
import templates
import util
class ParsedNode:
"""XML element parsed into a node."""
def _parse_elem_text(self, generator, elem):
"""Parse XML element."""
if elem.tag == 'ptype':
self.ptype = elem.text
if self.ptype == 'GLbitfield' and self.group_type == 'bitmask':
self.enum_type = self.group
self.node_type = generator.to_type_name(self.enum_type)
elif self.ptype == 'GLenum':
if self.group == '':
self.node_type = self.ptype
else:
self.enum_type = self.group
self.node_type = generator.to_type_name(self.enum_type)
generator.used_enum_groups.add(self.group)
else:
self.node_type = self.ptype
self.wrapper_type.append(self.node_type)
self.native_type.append(self.ptype)
elif elem.tag == 'name':
self.node_name = elem.text
else:
print(f"Warning: Unknown node element: '{elem.tag}'")
def __init__(self, generator, node):
"""Contructor for XML element parsed node."""
self.group = ''
self.node_type = ''
self.node_name = ''
self.enum_type = ''
self.group_type = 'basic'
self.ptype = ''
self.wrapper_type = []
self.native_type = []
if 'group' in node.attrib:
self.group = node.attrib['group']
if self.group and self.group in generator.bitmask_groups:
self.group_type = 'bitmask'
generator.used_enum_groups.add(self.group)
if node.text:
self.wrapper_type.append(node.text)
self.native_type.append(node.text)
for elem in node:
if elem.text:
self._parse_elem_text(generator, elem)
if elem.tail:
self.wrapper_type.append(elem.tail)
self.native_type.append(elem.tail)
class Node:
"""Node is either one argument to Khronos API function or return value."""
def __init__(self, generator, node):
"""Contructor for node."""
parsed = ParsedNode(generator, node)
self.group = parsed.group
self.wrapper_type = ''.join(parsed.wrapper_type).strip()
self.native_type = ''.join(parsed.native_type).strip()
self.name = parsed.node_name
self.node_type = parsed.node_type
self.element_type = parsed.ptype
self.is_pointer = False
# Patch internalformat as special case
self.format_string = f'{parsed.node_name} = {{}}'
if (parsed.group == 'InternalFormat' and parsed.ptype == 'GLint'):
# Treat like enum type;
# Cast native type to strongly typed enum type and use c_str()
format_entry = 'gl::c_str(static_cast<gl::{0}>({{}}))'.format(
generator.to_type_name(parsed.group)
)
elif (
parsed.ptype in generator.pointer_types
or '*' in self.native_type
or parsed.ptype in HANDLE_TYPES
):
# Pointer types are formatted with fmt::ptr()
self.is_pointer = True
format_entry = 'fmt::ptr(reinterpret_cast<const void*>({}))'
elif parsed.enum_type != '':
if (
parsed.ptype == 'GLbitfield'
or parsed.enum_type in generator.bitmask_enums
):
# Bitmask types use to_string() which builds a temporary string
format_entry = 'gl::to_string({})'
else:
# Enum types:
# Cast native type to strongly typed enum type and use c_str()
#format_entry = 'gl::c_str({})'
format_entry = 'gl::c_str(static_cast<gl::{0}>({{}}))'.format(
generator.to_type_name(parsed.group)
)
elif parsed.node_type == 'GLboolean':
format_entry = 'gl::c_str({})'
elif parsed.node_type == 'GLbitfield':
format_entry = "{}"
elif parsed.ptype == 'GLenum':
format_entry = 'gl::enum_string({})'
elif parsed.ptype:
format_entry = "{}"
else:
format_entry = ''
self.format_string = ''
self.format_entry = format_entry.format(parsed.node_name)
class GLGenerator:
"""Convert Khronos gl.xml to C++ GL wrapper."""
def _read_json(self, filename):
"""Read json file relative to script path."""
path = os.path.join(self.script_path, filename)
try:
with open(path) as file:
return json.load(file)
except Exception as exception:
print('Error parsing {}: {}'.format(path, str(exception)))
raise
def _choose_enum_names(self, items):
    """Pick the preferred enum spellings from a set of aliases.

    Suffix-less names are preferred over extension-suffixed aliases;
    otherwise names are kept only when present in self.enum_list.
    """
    suffixed = set()   # (full name, name with the extension suffix stripped)
    plain = set()      # names carrying no known extension suffix
    for name in sorted(items):
        for suffix in EXTENSION_SUFFIXES:
            if name.endswith(suffix):
                suffixed.add((name, name[:-len(suffix)]))
                break
        else:
            plain.add(name)
    chosen = set()
    for full, stripped in suffixed:
        if stripped in plain:
            # A suffix-less alias exists; prefer it over the suffixed name.
            chosen.add(stripped)
        elif full in self.enum_list:
            chosen.add(full)
    chosen.update(name for name in plain if name in self.enum_list)
    return sorted(chosen)
@staticmethod
def split_to_body_and_ext(text):
    """Split a GL name into (body, extension suffix); suffix is '' if none."""
    for suffix in EXTENSION_SUFFIXES:
        # Drop the leading separator character of the configured suffix
        # (assumed, e.g. '_EXT' -> 'EXT') -- confirm against EXTENSION_SUFFIXES.
        bare = suffix[1:]
        if text.endswith(bare):
            return (text[:-len(bare)], bare)
    return (text, '')
def to_type_name(self, text) -> str:
    """Generate the wrapper (C++ type) name for a GL type/group name."""
    body = self.split_to_body_and_ext(text)[0]
    return util.to_snake_case(body).capitalize()
@staticmethod
def wrapper_function_name(text):
    """Generate the wrapper name for a GL function."""
    body, ext = GLGenerator.split_to_body_and_ext(text)
    # Normalize known function-name suffixes (first match wins).
    for suffix, replacement in FUNCTION_SUFFIXES.items():
        if body.endswith(suffix):
            body = body[:-len(suffix)] + replacement
            break
    # Drop the leading 'gl' prefix, then snake_case the remainder.
    return util.to_snake_case((body + ext)[2:])
def __init__(self, outpath):
    """Constructor for GLGenerator.

    outpath -- directory where the generated C++ sources are written.
    Parses gl.xml and gl_extra.xml from the script directory and prepares
    the accumulator containers filled in by the parse/build passes.
    """
    self.script_path = os.path.dirname(os.path.realpath(__file__))
    self.outpath = outpath
    self.func_prefix = 'gl'
    # Accumulators for generated C++ fragments, filled by later passes.
    self.all_enum_string_cases = []
    self.command_list = []
    self.command_required_by_feature = defaultdict(list)
    self.command_removed_by_feature = defaultdict(list)
    self.command_required_by_extension = defaultdict(list)
    self.command_enum_declarations = ''
    self.command_map_entries = ''
    self.command_case_entries = ''
    self.command_info_entries = ''
    self.extensions = []
    self.extension_enum_declarations = ''
    self.extension_map_entries = ''
    self.extension_case_entries = ''
    self.enum_helper_definitions = []
    self.enum_list = []
    self.enum_required_by_feature = defaultdict(list)
    self.enum_removed_by_feature = defaultdict(list)
    self.enum_required_by_extension = defaultdict(list)
    self.enum_name_to_value_str = {}  # key: string, value: string
    self.enum_name_to_value = {}  # key: string, value: int
    self.enum_string_function_declarations = []
    self.enum_string_function_definitions = []
    self.untyped_enum_string_function_declarations = []
    self.untyped_enum_string_function_definitions = []
    self.enum_base_zero_function_declarations = []
    self.enum_base_zero_function_definitions = []
    self.get_proc_address_calls = []
    self.group_to_enum_list = defaultdict(list)
    self.wrapper_function_declarations = []
    self.wrapper_function_definitions = []
    self.wrapper_enum_declarations = []
    self.used_enum_groups = set()
    self.bitmask_groups = []
    self.bitmask_enums = []
    self.map_make_entries = []
    self.dynamic_function_declarations = []
    self.dynamic_function_get_statements = []
    self.dynamic_function_definitions = []
    self.pointer_types = []
    # Parsed XML registry roots: the Khronos registry plus local additions.
    self.roots = []
    for filename in [ 'gl.xml', 'gl_extra.xml' ]:
        try:
            xml_path = os.path.join(self.script_path, filename)
            tree = ET.parse(xml_path)
            self.roots.append(tree.getroot())
        except Exception as exception:
            print('Error parsing {}: {}'.format(xml_path, str(exception)))
            raise
@staticmethod
def get_text(node) -> str:
"""Recursively build strint contents from XML node."""
result = ''
if node.text:
result = node.text
for elem in node:
result += GLGenerator.get_text(elem)
if node.tail:
result += node.tail
return result
@staticmethod
def get_name(node) -> str:
"""Get name for XML node."""
if 'name' in node.attrib:
return node.attrib['name']
for elem in node:
if elem.tag == 'name':
return elem.text
return ''
def _parse_types(self):
"""Parse GL types from XML."""
for root in self.roots:
for types in root.iter('types'):
for node in types.iter('type'):
type_name = GLGenerator.get_name(node)
text = GLGenerator.get_text(node).strip()
if '*' in text and not text.startswith('struct'):
self.pointer_types.append(type_name)
def _parse_groups(self):
"""Parse GL enum groups from XML."""
for root in self.roots:
for group in root.iter('group'):
group_name = group.attrib.get('name', '')
for enum in group.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.group_to_enum_list[group_name].append(enum_name)
def _parse_enums(self):
    """Parse GL enum values, bitmask markers and group membership from XML."""
    for root in self.roots:
        for enums_node in root.iter('enums'):
            block_group = enums_node.attrib.get('group', '')
            block_type = enums_node.attrib.get('type', '')
            for enum_node in enums_node.iter('enum'):
                value_str = enum_node.attrib['value']
                value = util.to_int(value_str)
                name = enum_node.attrib['name']
                own_type = enum_node.attrib.get('type', '')
                if name in EXCLUDED_ENUMS:
                    continue
                # Prefer the enclosing block's group; fall back to the
                # enum's own 'group' attribute.
                group = block_group or enum_node.attrib.get('group', '')
                if block_type == 'bitmask':
                    self.bitmask_groups.append(group)
                    self.bitmask_enums.append(name)
                elif own_type == 'bitmask':
                    self.bitmask_enums.append(name)
                self.enum_name_to_value[name] = value
                self.enum_name_to_value_str[name] = value_str
                self.group_to_enum_list[group].append(name)
def _parse_features(self):
"""Parse GL features from XML."""
for root in self.roots:
for feature in root.iter('feature'):
api = feature.attrib.get('api', '')
feature_name = feature.attrib.get('name', '')
feature_number = int(float(feature.attrib.get('number', '')) * 10.0)
# filter by api
if api != 'gl':
continue
for require in feature.iter('require'):
require_profile = require.attrib.get('profile', '')
if require_profile and require_profile != 'core':
# filter by profile
continue
for enum in require.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_list.append(enum_name)
self.enum_required_by_feature[enum_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': require_profile
})
for command in require.iter('command'):
command_name = command.attrib['name']
self.command_list.append(command_name)
self.command_required_by_feature[command_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': require_profile
})
for remove in feature.iter('remove'):
remove_profile = remove.attrib.get('profile', '')
if require_profile and require_profile != 'core':
# filter by profile
continue
for enum in remove.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_removed_by_feature[enum_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': remove_profile
})
for command in remove.iter('command'):
command_name = command.attrib['name']
self.command_removed_by_feature[command_name].append({
'api': api,
'name': feature_name,
'number': feature_number,
'profile': remove_profile
})
# Matches extension-style names such as 'GL_ARB_foo', capturing the vendor
# tag.  NOTE(review): appears unused within this module -- confirm before
# removing.
ext_re = re.compile(r'GL_([0-9A-Z]+)_[0-9a-zA-Z_]*')
def _parse_extensions(self):
"""Parse GL extensions from XML."""
for root in self.roots:
for extensions in root.iter('extensions'):
for extension in extensions.iter('extension'):
extension_name = extension.attrib.get('name', '')
self.extensions.append(extension_name)
extension_apis = extension.attrib.get('supported', '')
extension_api_list = set(extension_apis.split('|'))
# filter by api
if 'gl' not in extension_apis:
continue
for require in extension.iter('require'):
for enum in require.iter('enum'):
enum_name = enum.attrib.get('name', '')
self.enum_list.append(enum_name)
self.enum_required_by_extension[enum_name].append({
"name": extension_name,
"api_list": extension_api_list})
for command in require.iter('command'):
command_name = command.attrib['name']
self.command_list.append(command_name)
self.command_required_by_extension[command_name].append({
"name": extension_name,
"api_list": extension_api_list})
def _add_extra_enums(self):
    """Register the hand-maintained enums from EXTRA_ENUM_GROUPS."""
    for group_name, members in EXTRA_ENUM_GROUPS.items():
        self.used_enum_groups.add(group_name)
        for enum_name in members:
            self.enum_list.append(enum_name)
            self.group_to_enum_list[group_name].append(enum_name)
def _parse_node(self, node):
    """Wrap an XML <proto>/<param> element in a Node helper."""
    return Node(self, node)
def _case_value(self, enum_name) -> str:
"""Generate enum name case value."""
return self.enum_name_to_value_str[enum_name]
@staticmethod
def get_command_name(command_element) -> str:
"""Get name for GL command."""
proto_element = command_element.find('proto')
for name in proto_element.iter('name'):
return name.text
return ''
def _collect_command(self, command_element):
    """Collect GL command information.

    Skips commands that are not in the required list or fail the core
    version check; otherwise renders the wrapper function, dynamic-load
    entries and version/extension check entries from the templates module.
    """
    command_name = GLGenerator.get_command_name(command_element)
    if command_name not in self.command_list:
        return
    if not self._command_version_check(command_name):
        return
    func_prefix_len = len(self.func_prefix)
    proto_element = command_element.find('proto')
    proto_info = self._parse_node(proto_element)
    native_name = proto_info.name
    # e.g. 'glDrawArrays' -> 'DrawArrays'
    short_name = native_name[func_prefix_len:]
    wrapper_name = GLGenerator.wrapper_function_name(command_name)
    native_return_type = proto_info.native_type
    wrapper_return_type = proto_info.wrapper_type
    capture_result = ''
    native_return_statement = ''
    wrapper_return_statement = ''
    if native_return_type != 'void':
        # Non-void commands capture the result; the wrapper casts it to
        # the strongly typed wrapper return type.
        capture_result = 'auto res = '
        native_return_statement = ' return res;\n'
        wrapper_return_statement = f' return static_cast<{wrapper_return_type}>(res);\n'
    native_params = []
    wrapper_params = []
    format_strings = []
    format_entries = []
    argument_list = []
    native_arg_type_list = []
    wrapper_arg_type_list = []
    for param in command_element.findall('param'):
        param_info = self._parse_node(param)
        param_name = param_info.name
        native_params.append(param_info.native_type + ' ' + param_name)
        wrapper_params.append(param_info.wrapper_type + ' ' + param_name)
        native_arg_type_list.append(param_info.native_type)
        wrapper_arg_type_list.append(param_info.wrapper_type)
        format_strings.append(param_info.format_string)
        format_entries.append(param_info.format_entry)
        # Pointers must be reinterpret_cast back to the native type;
        # everything else (strong enums etc.) is static_cast.
        if param_info.is_pointer:
            argument_list.append(f'reinterpret_cast<{param_info.native_type}>({param_name})')
        else:
            argument_list.append(f'static_cast<{param_info.native_type}>({param_name})')
    log_format_entries = ''
    if len(format_entries) > 0:
        separator = ',\n '
        log_format_entries = separator + separator.join(format_entries)
    # Substitution dictionary shared by all templates below.
    formatting = {
        'COMMAND_NAME': command_name,
        'SHORT_NAME': short_name,
        'NATIVE_RETURN_TYPE': native_return_type.strip(),
        'NATIVE_NAME': native_name,
        'NATIVE_ARGUMENTS': ', '.join(native_params),
        'NATIVE_ARG_TYPE_LIST': ', '.join(native_arg_type_list),
        'NATIVE_RETURN_STATEMENT': native_return_statement,
        'COMMAND_VERSION': self._command_version(command_name),
        'WRAPPER_RETURN_TYPE': wrapper_return_type.strip(),
        'WRAPPER_NAME': wrapper_name,
        'WRAPPER_ARGUMENTS': ', '.join(wrapper_params),
        'WRAPPER_ARG_TYPE_LIST': ', '.join(wrapper_arg_type_list),
        'WRAPPER_RETURN_STATEMENT': wrapper_return_statement,
        'LOG_FORMAT_STRING': ', '.join(format_strings),
        'LOG_FORMAT_ENTRIES': log_format_entries,
        'CAPTURE_RESULT': capture_result,
        'ARGUMENT_LIST': ', '.join(argument_list),
    }
    wrapper_function_declaration = templates.WRAPPER_FUNCTION_DECLARATION.format(**formatting)
    self.wrapper_function_declarations.append(wrapper_function_declaration)
    wrapper_function_definition = templates.WRAPPER_FUNCTION_DEFINITION.format(**formatting)
    self.wrapper_function_definitions.append(wrapper_function_definition)
    # Runtime availability checks: by minimum GL version ...
    for feature in self.command_required_by_feature.get(command_name, []):
        number = feature["number"]
        self.command_info_entries += (
            f' check_version(Command::Command_{command_name}, {number});\n'
        )
    # ... and by providing extension.
    for extension in self.command_required_by_extension.get(command_name, []):
        extension_name = extension["name"]
        self.command_info_entries += (
            f' check_extension(Command::Command_{command_name}, '
            f'Extension::Extension_{extension_name});\n'
        )
    decl_entry = templates.DYNAMIC_LOAD_FUNCTION_DECLARATION .format(**formatting)
    defn_entry = templates.DYNAMIC_LOAD_FUNCTION_DEFINITION .format(**formatting)
    get_entry = templates.DYNAMIC_LOAD_FUNCTION_GET_STATEMENT.format(**formatting)
    self.dynamic_function_declarations .append(decl_entry)
    self.dynamic_function_definitions .append(defn_entry)
    self.dynamic_function_get_statements.append(get_entry)
def _parse_and_build_commands(self):
    """Parse and process GL commands from XML.

    First renders every required command via _collect_command, then
    builds the aligned Extension and Command enum declarations, name->enum
    map entries and enum->name case entries.
    """
    for root in self.roots:
        for commands in root.iter('commands'):
            for command_element in commands.iter('command'):
                try:
                    self._collect_command(command_element)
                except Exception as exception:
                    command_name = GLGenerator.get_command_name(command_element)
                    print('Error processing command {}: {}'.format(command_name, str(exception)))
                    raise
    # Column width so the generated declarations line up.
    extension_name_max_len = 0
    for extension in self.extensions:
        extension_name_max_len = max(extension_name_max_len, len(extension))
    enum_value = 1
    declarations = []
    map_entries = []
    case_entries = []
    for extension in sorted(set(self.extensions)):
        quoted_extension = '"' + extension + '"'
        declaration = f' Extension_{extension:{extension_name_max_len}} = {enum_value:>6}'
        map_entry = ' {{ {0:{1}}, Extension::Extension_{2:{3}} }}'.format(
            quoted_extension, extension_name_max_len + 2, extension, extension_name_max_len
        )
        case_entry = ' case Extension::Extension_{0:{1}}: return "{0}";'.format(
            extension, extension_name_max_len
        )
        declarations.append(declaration)
        map_entries.append (map_entry)
        case_entries.append(case_entry)
        enum_value += 1
    # Sentinel: one past the last extension value.
    declarations.append(f' Extension_Count = {enum_value:>6}')
    self.extension_enum_declarations = ',\n'.join(declarations)
    self.extension_map_entries = ',\n'.join(map_entries)
    self.extension_case_entries = '\n'.join(case_entries)
    # Same three tables for commands.
    commands = set(self.command_list)
    commands = sorted(commands)
    command_name_max_len = 0
    for command in commands:
        command_name_max_len = max(command_name_max_len, len(command))
    enum_value = 1
    declarations = []
    map_entries = []
    case_entries = []
    for command in commands:
        declaration = f' Command_{command:{command_name_max_len}} = {enum_value:>6}'
        map_entry = ' {{ "{0:{1}}", Command::Command_{0:{1}} }}'.format(
            command, command_name_max_len
        )
        case_entry = ' case Command::Command_{0:{1}}: return "{0}";'.format(
            command, command_name_max_len
        )
        declarations.append(declaration)
        map_entries.append (map_entry)
        case_entries.append(case_entry)
        enum_value += 1
    declarations.append(' Command_Count = {:>6}'.format(enum_value))
    self.command_enum_declarations = ',\n'.join(declarations)
    self.command_map_entries = ',\n'.join(map_entries)
    self.command_case_entries = '\n'.join(case_entries)
def _enum_version_check(self, enum):
"""Check if GL enum is required and not removed."""
last_require_version = 0
for feature in self.enum_required_by_feature[enum]:
last_require_version = max(last_require_version, feature['number'])
last_remove_version = 0
for feature in self.enum_removed_by_feature[enum]:
last_remove_version = max(last_remove_version, feature['number'])
# filter by command not required by core profile
if last_require_version == 0:
return False
# filter by removed
if last_remove_version > last_require_version:
return False
return True
def _enum_version(self, name):
"""Get GL enum version."""
last_remove_version = 0
for feature in self.enum_removed_by_feature[name]:
last_remove_version = max(last_remove_version, feature['number'])
earliest_non_remove_version_number = 9999
for feature in self.enum_required_by_feature[name]:
number = feature['number']
if number > last_remove_version:
if number < earliest_non_remove_version_number:
earliest_non_remove_version_number = number
version = feature['name']
return version
def _command_version(self, name):
"""Get GL command version."""
last_remove_version = 0
for feature in self.command_removed_by_feature[name]:
last_remove_version = max(last_remove_version, feature['number'])
earliest_non_remove_version_number = 9999
version = ''
for feature in self.command_required_by_feature[name]:
number = feature['number']
if number > last_remove_version:
if number < earliest_non_remove_version_number:
earliest_non_remove_version_number = number
version = feature['name']
return version
def _command_version_check(self, command_name):
"""Check if GL command is required and not removed."""
last_require_version = 0
for feature in self.command_required_by_feature[command_name]:
last_require_version = max(last_require_version, feature['number'])
last_remove_version = 0
for feature in self.command_removed_by_feature[command_name]:
last_remove_version = max(last_remove_version, feature['number'])
# filter by command not required by core profile
if last_require_version == 0:
return False
# filter by removed
if last_remove_version > last_require_version:
return False
return True
def _build_all_enums(self):
    """Build the untyped enum-to-string case table and name->value map.

    Each unique numeric value gets one case whose string lists every
    accepted alias for that value.
    """
    names_by_value = defaultdict(set)  # value -> all accepted names sharing it
    representative = []                # (value, first accepted name)
    seen_values = set()
    for enum in self.enum_list:
        if enum in self.bitmask_enums:
            continue
        if enum not in self.enum_name_to_value:
            print(f'Warning: enum {enum} has no value')
            continue
        if not self._enum_version_check(enum):
            continue
        value = self.enum_name_to_value[enum]
        names_by_value[value].add(enum)
        if value not in seen_values:
            seen_values.add(value)
            representative.append((value, enum))
    for value, enum in sorted(representative):
        name_list = self._choose_enum_names(names_by_value[value])
        if not name_list:
            continue
        list_str = ' / '.join(name_list)
        enum_value_str = self._case_value(enum)
        self.all_enum_string_cases.append(
            f' case {enum_value_str}: return "{list_str}";')
        self.map_make_entries.append(f' {{ "{enum}", {enum_value_str} }}')
def _build_enum_groups(self):
    """Parse and process GL enums by group.

    For every used enum group, renders the strongly typed C++ enum, its
    string helpers and (for non-bitmask groups) the base-zero helpers
    from the templates module.
    """
    for group in self.used_enum_groups:
        if group not in self.group_to_enum_list:
            print(f'Warning: Enum group {group} has no values defined.')
            continue
        group_type = 'basic'
        if group in self.bitmask_groups:
            group_type = 'bitmask'
        values_used = set()
        enum_name_list = self.group_to_enum_list[group]
        enum_tuple_list = []
        for enum_name in enum_name_list:
            if enum_name not in self.enum_list:
                continue
            if enum_name not in self.enum_name_to_value:
                print(f'Warning: enum {enum_name} has no value')
                continue
            if not self._enum_version_check(enum_name):
                continue
            enum_value = self.enum_name_to_value[enum_name]
            # Keep only the first accepted name per numeric value.
            if enum_value not in values_used:
                enum_value_str = self._case_value(enum_name)
                enum_tuple_list.append((enum_value, enum_value_str, enum_name))
                values_used.add(enum_value)
            if enum_name in self.bitmask_enums:
                group_type = 'bitmask'
        enum_tuple_list.sort()
        # First pass: longest wrapper value name, for aligned output.
        group_max_len = 0
        for enum_info in enum_tuple_list:
            # Wrapper value name: drop the 'GL_' prefix and lower-case.
            wrapper_enum_value_name = enum_info[2][3:].lower()
            if wrapper_enum_value_name in RESERVED_NAMES:
                wrapper_enum_value_name = wrapper_enum_value_name + '_'
            if len(wrapper_enum_value_name) > group_max_len:
                group_max_len = len(wrapper_enum_value_name)
        group_enum_string_entries = []
        group_enum_base_zero_entries = []
        group_wrapper_enum_value_definitions = []
        base_zero_value = 0
        for enum_info in enum_tuple_list:
            wrapper_enum_value_name = enum_info[2][3:].lower()
            if wrapper_enum_value_name in RESERVED_NAMES:
                wrapper_enum_value_name = wrapper_enum_value_name + '_'
            formatting = {
                'ENUM_VALUE': enum_info[1],
                'ENUM_STRING': enum_info[2],
                'ENUM_BASE_ZERO_VALUE': base_zero_value,
                'ENUM_VERSION': self._enum_version(enum_info[2])
            }
            string_entry = templates.ENUM_STRING_MAKE_ENTRY[group_type].format(**formatting)
            group_enum_string_entries.append(string_entry)
            group_wrapper_enum_value_definitions.append(
                ' {:{}} = {:>6}u /* {} */'.format(
                    wrapper_enum_value_name,
                    group_max_len,
                    enum_info[1],
                    self._enum_version(enum_info[2])))
            if group_type == 'basic':
                base_zero_make_entry = templates.ENUM_BASE_ZERO_MAKE_ENTRY.format(**formatting)
                group_enum_base_zero_entries.append(base_zero_make_entry)
                base_zero_value = base_zero_value + 1
        if enum_tuple_list:
            wrapper_enum_name = self.to_type_name(group)
            definitions = ',\n'.join(sorted(group_wrapper_enum_value_definitions))
            string_entries = '\n'.join(sorted(group_enum_string_entries))
            formatting = {
                'WRAPPER_ENUM_TYPE_NAME': wrapper_enum_name,
                'WRAPPER_ENUM_STRING_FN_NAME': self.split_to_body_and_ext(group)[0],
                'GROUP_NAME': group,
                'WRAPPER_ENUM_VALUE_DEFINITIONS': definitions,
                'GROUP_ENUM_STRING_ENTRIES': string_entries,
                'GROUP_ENUM_BASE_ZERO_ENTRIES': '\n'.join(group_enum_base_zero_entries),
            }
            # Base-zero and untyped helpers only make sense for basic
            # (non-bitmask) groups.
            if group_type == 'basic':
                self.enum_base_zero_function_declarations.append(
                    templates.ENUM_BASE_ZERO_FUNCTION_DECLARATION.format(**formatting)
                )
                self.enum_base_zero_function_definitions.append(
                    templates.ENUM_BASE_ZERO_FUNCTION_DEFINITION.format(**formatting)
                )
                self.untyped_enum_string_function_declarations.append(
                    templates.UNTYPED_ENUM_STRING_FUNCTION_DECLARATION.format(**formatting)
                )
                self.untyped_enum_string_function_definitions.append(
                    templates.UNTYPED_ENUM_STRING_FUNCTION_DEFINITION.format(**formatting)
                )
            self.enum_string_function_declarations.append(
                templates.ENUM_STRING_FUNCTION_DECLARATION[group_type].format(**formatting)
            )
            self.enum_string_function_definitions.append(
                templates.ENUM_STRING_FUNCTION_DEFINITION[group_type].format(**formatting)
            )
            self.wrapper_enum_declarations.append(
                templates.WRAPPER_ENUM_DECLARATION.format(**formatting)
            )
            self.enum_helper_definitions.append(
                templates.ENUM_HELPER_DEFINITION[group_type].format(**formatting)
            )
def _generate_files(self):
    """Write output files.

    Renders each template with the accumulated fragments and writes the
    generated C++ sources into self.outpath.
    """
    # Substitution values shared by every template below.
    formatters = {
        'AUTOGENERATION_WARNING': templates.AUTOGENERATION_WARNING,
        'COMMAND_INFO_H': 'command_info.h',
        'MAP_MAKE_ENTRIES': ',\n'.join(sorted(self.map_make_entries)),
        'WRAPPER_FUNCTION_DEFINITIONS': '\n'.join(self.wrapper_function_definitions),
        'WRAPPER_FUNCTION_DECLARATIONS': '\n'.join(self.wrapper_function_declarations),
        'WRAPPER_ENUM_DECLARATIONS': util.sjoin(self.wrapper_enum_declarations),
        'ENUM_STRING_FUNCTION_DECLARATIONS': util.sjoin(self.enum_string_function_declarations),
        'ENUM_STRING_FUNCTION_DEFINITIONS': util.sjoin(self.enum_string_function_definitions),
        'UNTYPED_ENUM_STRING_FUNCTION_DECLARATIONS': util.sjoin(self.untyped_enum_string_function_declarations),
        'UNTYPED_ENUM_STRING_FUNCTION_DEFINITIONS': util.sjoin(self.untyped_enum_string_function_definitions),
        'ENUM_BASE_ZERO_FUNCTION_DECLARATIONS': util.sjoin(self.enum_base_zero_function_declarations),
        'ENUM_BASE_ZERO_FUNCTION_DEFINITIONS': util.sjoin(self.enum_base_zero_function_definitions),
        'ENUM_HELPER_DEFINITIONS': util.sjoin(self.enum_helper_definitions),
        'ALL_ENUM_STRING_CASES': util.sjoin(self.all_enum_string_cases),
        'DYNAMIC_FUNCTION_DECLARATIONS': '\n'.join(self.dynamic_function_declarations),
        'DYNAMIC_FUNCTION_DEFINITIONS': '\n'.join(self.dynamic_function_definitions),
        'DYNAMIC_FUNCTION_GET_STATEMENTS': '\n '.join(self.dynamic_function_get_statements),
        'EXTENSION_ENUM_DECLARATIONS': self.extension_enum_declarations,
        'EXTENSION_MAP_ENTRIES': self.extension_map_entries,
        'EXTENSION_CASE_ENTRIES': self.extension_case_entries,
        'COMMAND_ENUM_DECLARATIONS': self.command_enum_declarations,
        'COMMAND_MAP_ENTRIES': self.command_map_entries,
        'COMMAND_CASE_ENTRIES': self.command_case_entries,
        'COMMAND_INFO_ENTRIES': self.command_info_entries,
    }
    # Output filename -> template that produces it.
    content = {
        'command_info.cpp': templates.COMMAND_INFO_CPP,
        'command_info.hpp': templates.COMMAND_INFO_HPP,
        'dynamic_load.hpp': templates.DYNAMIC_LOAD_HPP,
        'dynamic_load.cpp': templates.DYNAMIC_LOAD_CPP,
        'enum_base_zero_functions.hpp': templates.ENUM_BASE_ZERO_FUNCTIONS_HPP,
        'enum_base_zero_functions.cpp': templates.ENUM_BASE_ZERO_FUNCTIONS_CPP,
        'enum_string_functions.hpp': templates.ENUM_STRING_FUNCTIONS_HPP,
        'enum_string_functions.cpp': templates.ENUM_STRING_FUNCTIONS_CPP,
        'wrapper_enums.hpp': templates.WRAPPER_ENUMS_HPP,
        'wrapper_functions.cpp': templates.WRAPPER_FUNCTIONS_CPP,
        'wrapper_functions.hpp': templates.WRAPPER_FUNCTIONS_HPP
    }
    os.makedirs(self.outpath, exist_ok=True)
    for filename, template in content.items():
        filename = os.path.join(self.outpath, filename)
        print('GEN\t{}'.format(os.path.basename(filename)))
        try:
            with open(filename, 'w') as out_file:
                try:
                    out_file.write(template.format(**formatters))
                except Exception:
                    # Dump the offending template before re-raising so the
                    # bad placeholder can be located.
                    print(f'template = {template}')
                    raise
        except Exception as exception:
            print('Writing {} failed: {}'.format(filename, (exception)))
            raise
def generate(self):
    """Run the full pipeline: parse the registry, then emit output files."""
    steps = (
        self._parse_groups,
        self._parse_types,
        self._parse_enums,
        self._parse_features,
        self._parse_extensions,
        self._add_extra_enums,
        self._parse_and_build_commands,
        self._build_all_enums,
        self._build_enum_groups,
        self._generate_files,
    )
    try:
        for step in steps:
            step()
    except Exception as exception:
        print('Generate failed: {}'.format(str(exception)))
        raise
def main():
    """Entry function: parse CLI arguments and generate the GL wrapper."""
    parser = argparse.ArgumentParser(description='Generate GL wrapper from gl.xml')
    parser.add_argument('outpath', help='Output path')
    args = parser.parse_args()
    generator = GLGenerator(args.outpath)
    generator.generate()
    print('Done.')


# Guarded so importing this module has no side effects; the previous bare
# call also carried trailing extraction artifacts that broke the syntax.
if __name__ == '__main__':
    main()
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ContactModel import ContactModel
class AlipayOpenAgentCreateModel(object):
    """Request model for the alipay.open.agent.create OpenAPI call.

    Fixed: the final ``return`` line carried trailing extraction artifacts
    ('| <repo path> | import json') that made the class a syntax error.
    """

    def __init__(self):
        self._account = None
        self._contact_info = None
        self._order_ticket = None

    @property
    def account(self):
        return self._account

    @account.setter
    def account(self, value):
        self._account = value

    @property
    def contact_info(self):
        return self._contact_info

    @contact_info.setter
    def contact_info(self, value):
        # Accept either a ready ContactModel or a plain dict to convert.
        if isinstance(value, ContactModel):
            self._contact_info = value
        else:
            self._contact_info = ContactModel.from_alipay_dict(value)

    @property
    def order_ticket(self):
        return self._order_ticket

    @order_ticket.setter
    def order_ticket(self, value):
        self._order_ticket = value

    def to_alipay_dict(self):
        """Serialize set fields to a request dict.

        NOTE(review): falsy-but-set values (e.g. '') are skipped; this
        matches the convention of the other SDK models.
        """
        params = dict()
        if self.account:
            if hasattr(self.account, 'to_alipay_dict'):
                params['account'] = self.account.to_alipay_dict()
            else:
                params['account'] = self.account
        if self.contact_info:
            if hasattr(self.contact_info, 'to_alipay_dict'):
                params['contact_info'] = self.contact_info.to_alipay_dict()
            else:
                params['contact_info'] = self.contact_info
        if self.order_ticket:
            if hasattr(self.order_ticket, 'to_alipay_dict'):
                params['order_ticket'] = self.order_ticket.to_alipay_dict()
            else:
                params['order_ticket'] = self.order_ticket
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayOpenAgentCreateModel()
        if 'account' in d:
            o.account = d['account']
        if 'contact_info' in d:
            o.contact_info = d['contact_info']
        if 'order_ticket' in d:
            o.order_ticket = d['order_ticket']
        return o
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ContactModel import ContactModel
# NOTE(review): duplicate of the definition above (the dataset's
# 'parsed_code' column); kept in place, cleaned of the trailing extraction
# artifacts that broke its final line.
class AlipayOpenAgentCreateModel(object):
    """Request model for the alipay.open.agent.create OpenAPI call."""

    def __init__(self):
        self._account = None
        self._contact_info = None
        self._order_ticket = None

    @property
    def account(self):
        return self._account

    @account.setter
    def account(self, value):
        self._account = value

    @property
    def contact_info(self):
        return self._contact_info

    @contact_info.setter
    def contact_info(self, value):
        # Accept either a ready ContactModel or a plain dict to convert.
        if isinstance(value, ContactModel):
            self._contact_info = value
        else:
            self._contact_info = ContactModel.from_alipay_dict(value)

    @property
    def order_ticket(self):
        return self._order_ticket

    @order_ticket.setter
    def order_ticket(self, value):
        self._order_ticket = value

    def to_alipay_dict(self):
        """Serialize set fields to a request dict (falsy values skipped)."""
        params = dict()
        if self.account:
            if hasattr(self.account, 'to_alipay_dict'):
                params['account'] = self.account.to_alipay_dict()
            else:
                params['account'] = self.account
        if self.contact_info:
            if hasattr(self.contact_info, 'to_alipay_dict'):
                params['contact_info'] = self.contact_info.to_alipay_dict()
            else:
                params['contact_info'] = self.contact_info
        if self.order_ticket:
            if hasattr(self.order_ticket, 'to_alipay_dict'):
                params['order_ticket'] = self.order_ticket.to_alipay_dict()
            else:
                params['order_ticket'] = self.order_ticket
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayOpenAgentCreateModel()
        if 'account' in d:
            o.account = d['account']
        if 'contact_info' in d:
            o.contact_info = d['contact_info']
        if 'order_ticket' in d:
            o.order_ticket = d['order_ticket']
        return o
from multiprocessing import Process, Pool
from time import sleep, time
from express.database import *
from express.settings import *
from express.logging import Log, f
from express.prices import update_pricelist
from express.config import *
from express.client import Client
from express.offer import Offer, valuate
from express.utils import to_refined, refinedify
import socketio
def run(bot: dict) -> None:
    """Main polling loop for one bot account (executed in a pool worker).

    Logs in, then forever: fetch incoming offers, accept/decline them
    according to owner status, gift/scam detection and valuation, and
    re-poll processed offers until they reach a final state.  Any raised
    exception (including KeyboardInterrupt propagated to pool workers)
    stops the loop and logs the bot out.
    """
    # Create the logger before anything that can raise, so the except
    # handler can always report.  (Previously `log` was assigned after
    # login(), so an early failure raised NameError in the handler.)
    log = Log(bot["name"])
    try:
        client = Client(bot)
        client.login()
        processed = []   # offer ids already handled, awaiting a final state
        values = {}      # offer id -> valuation snapshot for saved trades
        log.info(f"Polling offers every {TIMEOUT} seconds")
        while True:
            log = Log(bot["name"])
            log.debug("Polling offers...")
            offers = client.get_offers()
            for offer in offers:
                offer_id = offer["tradeofferid"]
                if offer_id not in processed:
                    log = Log(bot["name"], offer_id)
                    trade = Offer(offer)
                    steam_id = trade.get_partner()
                    if trade.is_active() and not trade.is_our_offer():
                        log.trade(f"Received a new offer from {f.YELLOW + steam_id}")
                        if trade.is_from_owner():
                            log.trade("Offer is from owner")
                            client.accept(offer_id)
                        elif trade.is_gift() and accept_donations:
                            log.trade("User is trying to give items")
                            client.accept(offer_id)
                        elif trade.is_scam() and decline_scam_offers:
                            log.trade("User is trying to take items")
                            client.decline(offer_id)
                        elif trade.is_valid():
                            log.trade("Processing offer...")
                            their_items = offer["items_to_receive"]
                            our_items = offer["items_to_give"]
                            items = get_items()
                            # Their side valued at our buy prices, our side
                            # at our sell prices.
                            their_value = valuate(their_items, "buy", items)
                            our_value = valuate(our_items, "sell", items)
                            item_amount = len(their_items) + len(our_items)
                            log.trade(f"Offer contains {item_amount} items")
                            difference = to_refined(their_value - our_value)
                            summary = "User value: {} ref, our value: {} ref, difference: {} ref"
                            log.trade(
                                summary.format(
                                    to_refined(their_value),
                                    to_refined(our_value),
                                    refinedify(difference),
                                )
                            )
                            if their_value >= our_value:
                                values[offer_id] = {
                                    "our_value": to_refined(our_value),
                                    "their_value": to_refined(their_value),
                                }
                                client.accept(offer_id)
                            else:
                                if decline_bad_trade:
                                    client.decline(offer_id)
                                else:
                                    log.trade(
                                        "Ignoring offer as automatic decline is disabled"
                                    )
                        else:
                            log.trade("Offer is invalid")
                    else:
                        log.trade("Offer is not active")
                    processed.append(offer_id)
            del offers
            # Re-poll previously seen offers until they reach a final state.
            # Iterate over a copy: removing from the list while iterating it
            # directly would silently skip the following entry.
            for offer_id in list(processed):
                offer = client.get_offer(offer_id)
                trade = Offer(offer)
                log = Log(bot["name"], offer_id)
                if not trade.is_active():
                    state = trade.get_state()
                    log.trade(f"Offer state changed to {f.YELLOW + state}")
                    if trade.is_accepted() and "tradeid" in offer:
                        if save_trades:
                            Log().info("Saving offer data...")
                            if offer_id in values:
                                offer["our_value"] = values[offer_id]["our_value"]
                                offer["their_value"] = values[offer_id]["their_value"]
                            offer["receipt"] = client.get_receipt(offer["tradeid"])
                            add_trade(offer)
                        if offer_id in values:
                            values.pop(offer_id)
                    processed.remove(offer_id)
            sleep(TIMEOUT)
    except BaseException as e:
        # BaseException on purpose: also catches KeyboardInterrupt sent to
        # pool workers, so the bot can log out cleanly.
        log.info(f"Caught {type(e).__name__}")
        try:
            client.logout()
        except Exception:
            # Best-effort logout (narrowed from a bare except).
            pass
        log.info("Stopped")
def database() -> None:
    """Watch the item database and refresh the pricelist when it changes.

    Runs in a separate process; polls every 10 seconds.  The blanket
    BaseException handler silently ends the process (e.g. on Ctrl-C or
    terminate()) -- NOTE(review): it also hides real errors from
    get_items()/update_pricelist(); confirm this is intended.
    """
    try:
        items_in_database = get_items()
        log = Log()
        while True:
            # Any change in the item set triggers a full price refresh.
            if not items_in_database == get_items():
                log.info("Item(s) were added or removed from the database")
                items_in_database = get_items()
                update_pricelist(items_in_database)
                log.info("Successfully updated all prices")
            sleep(10)  # poll interval in seconds
    except BaseException:
        pass
if __name__ == "__main__":
    # Wall-clock start, for the final runtime report.
    t1 = time()
    log = Log()
    try:
        socket = socketio.Client()
        # Initial full price refresh before going live.
        items = get_items()
        update_pricelist(items)
        log.info("Successfully updated all prices")
        del items

        @socket.event
        def connect():
            socket.emit("authentication")
            log.info("Successfully connected to Prices.TF socket server")

        @socket.event
        def authenticated(data):
            pass

        @socket.event
        def price(data):
            # Push notification with new prices; only track stocked items.
            if data["name"] in get_items():
                update_price(data["name"], True, data["buy"], data["sell"])

        @socket.event
        def unauthorized(sid):
            pass

        socket.connect("https://api.prices.tf")
        log.info("Listening to Prices.TF for price updates")
        # Database watcher in its own process; one pool worker per bot.
        process = Process(target=database)
        process.start()
        with Pool(len(BOTS)) as p:
            p.map(run, BOTS)
    except BaseException as e:
        if e:
            log.error(e)
    finally:
        # Tear everything down regardless of how the pool exited.
        socket.disconnect()
        process.terminate()
        t2 = time()
        log.info(f"Done. Bot ran for {round(t2-t1, 1)} seconds")
        log.close()
quit() | main.py | from multiprocessing import Process, Pool
from time import sleep, time
from express.database import *
from express.settings import *
from express.logging import Log, f
from express.prices import update_pricelist
from express.config import *
from express.client import Client
from express.offer import Offer, valuate
from express.utils import to_refined, refinedify
import socketio
def run(bot: dict) -> None:
    """Main trade loop for a single bot account.

    Polls incoming trade offers every TIMEOUT seconds, accepts/declines them
    according to the configured rules, and records accepted trades once they
    settle. Runs until an exception (e.g. KeyboardInterrupt) stops it, at
    which point the client is logged out best-effort.
    """
    try:
        client = Client(bot)
        client.login()
        log = Log(bot["name"])
        processed = []  # offer ids we have already responded to
        values = {}  # offer id -> valuation snapshot, kept until the offer settles
        log.info(f"Polling offers every {TIMEOUT} seconds")
        while True:
            log = Log(bot["name"])
            log.debug("Polling offers...")
            offers = client.get_offers()
            for offer in offers:
                offer_id = offer["tradeofferid"]
                if offer_id not in processed:
                    log = Log(bot["name"], offer_id)
                    trade = Offer(offer)
                    steam_id = trade.get_partner()
                    if trade.is_active() and not trade.is_our_offer():
                        log.trade(f"Received a new offer from {f.YELLOW + steam_id}")
                        if trade.is_from_owner():
                            log.trade("Offer is from owner")
                            client.accept(offer_id)
                        elif trade.is_gift() and accept_donations:
                            log.trade("User is trying to give items")
                            client.accept(offer_id)
                        elif trade.is_scam() and decline_scam_offers:
                            log.trade("User is trying to take items")
                            client.decline(offer_id)
                        elif trade.is_valid():
                            log.trade("Processing offer...")
                            their_items = offer["items_to_receive"]
                            our_items = offer["items_to_give"]
                            items = get_items()
                            their_value = valuate(their_items, "buy", items)
                            our_value = valuate(our_items, "sell", items)
                            item_amount = len(their_items) + len(our_items)
                            log.trade(f"Offer contains {item_amount} items")
                            difference = to_refined(their_value - our_value)
                            summary = "User value: {} ref, our value: {} ref, difference: {} ref"
                            log.trade(
                                summary.format(
                                    to_refined(their_value),
                                    to_refined(our_value),
                                    refinedify(difference),
                                )
                            )
                            # Accept any offer where we do not lose value.
                            if their_value >= our_value:
                                values[offer_id] = {
                                    "our_value": to_refined(our_value),
                                    "their_value": to_refined(their_value),
                                }
                                client.accept(offer_id)
                            else:
                                if decline_bad_trade:
                                    client.decline(offer_id)
                                else:
                                    log.trade(
                                        "Ignoring offer as automatic decline is disabled"
                                    )
                        else:
                            log.trade("Offer is invalid")
                    else:
                        log.trade("Offer is not active")
                    processed.append(offer_id)
            del offers
            # Iterate over a snapshot: the body removes settled offers from
            # `processed`, and mutating a list while iterating it skips the
            # element after every removal (original bug).
            for offer_id in list(processed):
                offer = client.get_offer(offer_id)
                trade = Offer(offer)
                log = Log(bot["name"], offer_id)
                if not trade.is_active():
                    state = trade.get_state()
                    log.trade(f"Offer state changed to {f.YELLOW + state}")
                    if trade.is_accepted() and "tradeid" in offer:
                        if save_trades:
                            Log().info("Saving offer data...")
                            if offer_id in values:
                                offer["our_value"] = values[offer_id]["our_value"]
                                offer["their_value"] = values[offer_id]["their_value"]
                            offer["receipt"] = client.get_receipt(offer["tradeid"])
                            add_trade(offer)
                    if offer_id in values:
                        values.pop(offer_id)
                    processed.remove(offer_id)
            sleep(TIMEOUT)
    except BaseException as e:
        log.info(f"Caught {type(e).__name__}")
        try:
            client.logout()
        except Exception:
            # Best-effort logout; never let cleanup mask the original error.
            pass
        log.info("Stopped")
def database() -> None:
    """Poll the item database every 10 seconds and refresh prices on change.

    Runs as a daemon loop in its own Process (started from __main__). Exits
    silently on any exception — including KeyboardInterrupt at shutdown — so
    this watcher never prints a traceback or crashes the bot.
    """
    try:
        items_in_database = get_items()
        log = Log()
        while True:
            # Re-fetch and compare; only refresh the pricelist when the item
            # set actually changed.
            if items_in_database != get_items():
                log.info("Item(s) were added or removed from the database")
                items_in_database = get_items()
                update_pricelist(items_in_database)
                log.info("Successfully updated all prices")
            sleep(10)  # poll interval in seconds
    except BaseException:
        # Deliberate best-effort: this background loop must never crash the bot.
        pass
if __name__ == "__main__":
    t1 = time()
    log = Log()
    # Pre-declare so the finally block can clean up safely even when an
    # exception is raised before these are assigned (the original code could
    # hit a NameError in finally and mask the real failure).
    socket = None
    process = None
    try:
        socket = socketio.Client()
        # Prime the local pricelist once before listening for live updates.
        items = get_items()
        update_pricelist(items)
        log.info("Successfully updated all prices")
        del items

        @socket.event
        def connect():
            socket.emit("authentication")
            log.info("Successfully connected to Prices.TF socket server")

        @socket.event
        def authenticated(data):
            pass

        @socket.event
        def price(data):
            # Only track price changes for items we actually stock.
            if data["name"] in get_items():
                update_price(data["name"], True, data["buy"], data["sell"])

        @socket.event
        def unauthorized(sid):
            pass

        socket.connect("https://api.prices.tf")
        log.info("Listening to Prices.TF for price updates")
        # Run the database watcher in its own process so a slow price refresh
        # cannot block the trading bots.
        process = Process(target=database)
        process.start()
        # One pool worker per configured bot account.
        with Pool(len(BOTS)) as p:
            p.map(run, BOTS)
    except BaseException as e:
        # An exception instance is always truthy, so log unconditionally
        # (the original `if e:` guard was a no-op).
        log.error(e)
    finally:
        if socket is not None:
            socket.disconnect()
        if process is not None:
            process.terminate()
        t2 = time()
        log.info(f"Done. Bot ran for {round(t2-t1, 1)} seconds")
        log.close()
quit() | 0.287068 | 0.161949 |
import bcrypt
from datetime import datetime
from app.database import BaseMixin, db
class User(BaseMixin, db.Model):
    """A registered user with bcrypt-hashed credentials and coffee bookkeeping."""
    __tablename__ = 'users'

    userID = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, nullable=False)
    _password = db.Column(db.Binary(60))  # bcrypt digest (always 60 bytes)
    vorname = db.Column(db.String)
    nachname = db.Column(db.String)
    email = db.Column(db.String, nullable=False)
    user_key = db.Column(db.String)
    is_admin = db.Column(db.Boolean, default=False)
    is_active = db.Column(db.Boolean, default=True)
    coffee_count = db.Column(db.Integer, default=0)
    coffee_hist = db.relationship('CoffeeHistory', backref='users', lazy=True)
    rechnungen = db.relationship('Rechnung', backref='users', lazy=True)

    def __init__(self, username, password, email):
        self.username = username
        # Store only the bcrypt hash in the mapped column, never the plaintext.
        self._password = self._hash_pw(password)
        # Bug fix: was `self.email = self.email`, which silently dropped the argument.
        self.email = email

    def _hash_pw(self, password):
        """Hash *password* with bcrypt (work factor 12); returns the 60-byte digest."""
        # bcrypt operates on bytes; the correct API names are hashpw/gensalt
        # (the original called the nonexistent `bcrypt.hash_pw`).
        return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(12))

    def check_pw(self, password, hashed_pw):
        """Return True if *password* matches the stored bcrypt hash *hashed_pw*."""
        # `bcrypt.check_pw` does not exist; `checkpw` is the real, timing-safe API.
        return bcrypt.checkpw(password.encode('utf-8'), hashed_pw)

    @classmethod
    def find_by_username(cls, username):
        """Look up a user by exact username; returns None when absent."""
        return cls.query.filter_by(username=username).first()

    def json(self):
        """Serializable public view of the user (password hash excluded)."""
        # NOTE(review): assumes BaseMixin exposes `id`; the PK column here is
        # `userID` — confirm BaseMixin aliases it.
        return {
            "id": str(self.id),
            "username": self.username,
            "vorname": self.vorname,
            "nachname": self.nachname,
            "email": self.email,
            "user_key": self.user_key,
            "is_admin": self.is_admin,
            "coffee_count": self.coffee_count,
        }
class CoffeeHistory(BaseMixin, db.Model):
    """One coffee-consumption bookkeeping entry belonging to a user."""
    __tablename__ = 'coffeeHistory'

    coffeeHistID = db.Column(db.Integer, primary_key=True)
    # Pass the callable, not datetime.now(): calling it here would freeze the
    # default timestamp at module-import time for every row (original bug).
    date = db.Column(db.DateTime, default=datetime.now)
    coffee_count = db.Column(db.Integer)
    amount = db.Column(db.Float)
    user_id = db.Column(db.Integer, db.ForeignKey('users.userID'))

    def __init__(self, coffee_count, amount):
        self.coffee_count = coffee_count
        self.amount = amount

    def json(self):
        """Serializable view of this history entry."""
        # NOTE(review): assumes BaseMixin exposes `id`; the PK column here is
        # `coffeeHistID` — confirm BaseMixin aliases it.
        return {
            "id": str(self.id),
            "date": self.date.strftime('%a, %d, %B, %Y'),
            "coffee_count": self.coffee_count,
            "amount": self.amount
        }
from datetime import datetime
from app.database import BaseMixin, db
class User(BaseMixin, db.Model):
    """A registered user with bcrypt-hashed credentials and coffee bookkeeping."""
    __tablename__ = 'users'

    userID = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, nullable=False)
    _password = db.Column(db.Binary(60))  # bcrypt digest (always 60 bytes)
    vorname = db.Column(db.String)
    nachname = db.Column(db.String)
    email = db.Column(db.String, nullable=False)
    user_key = db.Column(db.String)
    is_admin = db.Column(db.Boolean, default=False)
    is_active = db.Column(db.Boolean, default=True)
    coffee_count = db.Column(db.Integer, default=0)
    coffee_hist = db.relationship('CoffeeHistory', backref='users', lazy=True)
    rechnungen = db.relationship('Rechnung', backref='users', lazy=True)

    def __init__(self, username, password, email):
        self.username = username
        # Store only the bcrypt hash in the mapped column, never the plaintext.
        self._password = self._hash_pw(password)
        # Bug fix: was `self.email = self.email`, which silently dropped the argument.
        self.email = email

    def _hash_pw(self, password):
        """Hash *password* with bcrypt (work factor 12); returns the 60-byte digest."""
        # bcrypt operates on bytes; the correct API names are hashpw/gensalt
        # (the original called the nonexistent `bcrypt.hash_pw`).
        return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(12))

    def check_pw(self, password, hashed_pw):
        """Return True if *password* matches the stored bcrypt hash *hashed_pw*."""
        # `bcrypt.check_pw` does not exist; `checkpw` is the real, timing-safe API.
        return bcrypt.checkpw(password.encode('utf-8'), hashed_pw)

    @classmethod
    def find_by_username(cls, username):
        """Look up a user by exact username; returns None when absent."""
        return cls.query.filter_by(username=username).first()

    def json(self):
        """Serializable public view of the user (password hash excluded)."""
        # NOTE(review): assumes BaseMixin exposes `id`; the PK column here is
        # `userID` — confirm BaseMixin aliases it.
        return {
            "id": str(self.id),
            "username": self.username,
            "vorname": self.vorname,
            "nachname": self.nachname,
            "email": self.email,
            "user_key": self.user_key,
            "is_admin": self.is_admin,
            "coffee_count": self.coffee_count,
        }
class CoffeeHistory(BaseMixin, db.Model):
    """One coffee-consumption bookkeeping entry belonging to a user."""
    __tablename__ = 'coffeeHistory'

    coffeeHistID = db.Column(db.Integer, primary_key=True)
    # Pass the callable, not datetime.now(): calling it here would freeze the
    # default timestamp at module-import time for every row (original bug).
    date = db.Column(db.DateTime, default=datetime.now)
    coffee_count = db.Column(db.Integer)
    amount = db.Column(db.Float)
    user_id = db.Column(db.Integer, db.ForeignKey('users.userID'))

    def __init__(self, coffee_count, amount):
        self.coffee_count = coffee_count
        self.amount = amount

    def json(self):
        """Serializable view of this history entry."""
        # NOTE(review): assumes BaseMixin exposes `id`; the PK column here is
        # `coffeeHistID` — confirm BaseMixin aliases it.
        return {
            "id": str(self.id),
            "date": self.date.strftime('%a, %d, %B, %Y'),
            "coffee_count": self.coffee_count,
            "amount": self.amount
        }
import logging
import os
from dataclasses import dataclass
from typing import Mapping, Optional, Tuple
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.base.specs_parser import SpecsParser
from pants.base.workunit import WorkUnit
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.internals.native import Native
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.unions import UnionMembership
from pants.goal.run_tracker import RunTracker
from pants.help.help_info_extracter import HelpInfoExtracter
from pants.help.help_printer import HelpPrinter
from pants.init.engine_initializer import EngineInitializer, GraphScheduler, GraphSession
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.init.specs_calculator import calculate_specs
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.subsystem import Subsystem
from pants.reporting.streaming_workunit_handler import StreamingWorkunitHandler
from pants.util.contextutil import maybe_profiled
logger = logging.getLogger(__name__)
@dataclass
class LocalPantsRunner:
    """Handles a single pants invocation running in the process-local context.
    build_root: The build root for this run.
    options: The parsed options for this run.
    options_bootstrapper: The OptionsBootstrapper instance to use.
    build_config: The parsed build configuration for this run.
    specs: The specs for this run, i.e. either the address or filesystem specs.
    graph_session: A LegacyGraphSession instance for graph reuse.
    profile_path: The profile path - if any (from from the `PANTS_PROFILE` env var).
    """
    build_root: str
    options: Options
    options_bootstrapper: OptionsBootstrapper
    build_config: BuildConfiguration
    specs: Specs
    graph_session: GraphSession
    union_membership: UnionMembership
    profile_path: Optional[str]
    _run_tracker: RunTracker
    @staticmethod
    def parse_options(
        options_bootstrapper: OptionsBootstrapper,
    ) -> Tuple[Options, BuildConfiguration]:
        """Parse the full options and build configuration from bootstrap options."""
        build_config = BuildConfigInitializer.get(options_bootstrapper)
        options = OptionsInitializer.create(options_bootstrapper, build_config)
        return options, build_config
    @staticmethod
    def _init_graph_session(
        options_bootstrapper: OptionsBootstrapper,
        build_config: BuildConfiguration,
        options: Options,
        scheduler: Optional[GraphScheduler] = None,
    ) -> GraphSession:
        """Create a graph session, reusing a warmed scheduler when one is provided."""
        native = Native()
        native.set_panic_handler()
        # Only build a fresh scheduler when the daemon did not hand us one.
        graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
            options_bootstrapper, build_config
        )
        global_scope = options.for_global_scope()
        # The dynamic console UI is only available for v2 runs.
        dynamic_ui = global_scope.dynamic_ui if global_scope.v2 else False
        use_colors = global_scope.get("colors", True)
        # Workunit streaming is enabled iff at least one handler is configured.
        stream_workunits = len(options.for_global_scope().streaming_workunits_handlers) != 0
        return graph_scheduler_helper.new_session(
            RunTracker.global_instance().run_id,
            dynamic_ui=dynamic_ui,
            use_colors=use_colors,
            should_report_workunits=stream_workunits,
        )
    @classmethod
    def create(
        cls,
        env: Mapping[str, str],
        options_bootstrapper: OptionsBootstrapper,
        scheduler: Optional[GraphScheduler] = None,
    ) -> "LocalPantsRunner":
        """Creates a new LocalPantsRunner instance by parsing options.
        By the time this method runs, logging will already have been initialized in either
        PantsRunner or DaemonPantsRunner.
        :param env: The environment (e.g. os.environ) for this run.
        :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
        :param scheduler: If being called from the daemon, a warmed scheduler to use.
        """
        build_root = get_buildroot()
        global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
        options, build_config = LocalPantsRunner.parse_options(options_bootstrapper)
        # Option values are usually computed lazily on demand,
        # but command line options are eagerly computed for validation.
        for scope in options.scope_to_flags.keys():
            options.for_scope(scope)
        # Verify configs.
        if global_bootstrap_options.verify_config:
            options.verify_configs(options_bootstrapper.config)
        union_membership = UnionMembership.from_rules(build_config.union_rules)
        # If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
        # to initialize a session here.
        graph_session = cls._init_graph_session(
            options_bootstrapper, build_config, options, scheduler
        )
        specs = calculate_specs(
            options_bootstrapper=options_bootstrapper,
            options=options,
            build_root=build_root,
            session=graph_session.scheduler_session,
        )
        profile_path = env.get("PANTS_PROFILE")
        return cls(
            build_root=build_root,
            options=options,
            options_bootstrapper=options_bootstrapper,
            build_config=build_config,
            specs=specs,
            graph_session=graph_session,
            union_membership=union_membership,
            profile_path=profile_path,
            _run_tracker=RunTracker.global_instance(),
        )
    def _set_start_time(self, start_time: float) -> None:
        """Start the RunTracker and record the command-line specs for this run."""
        # Propagates parent_build_id to pants runs that may be called from this pants run.
        os.environ["PANTS_PARENT_BUILD_ID"] = self._run_tracker.run_id
        self._run_tracker.start(self.options, run_start_time=start_time)
        spec_parser = SpecsParser(get_buildroot())
        specs = [str(spec_parser.parse_spec(spec)) for spec in self.options.specs]
        # Note: This will not include values from `--changed-*` flags.
        self._run_tracker.run_info.add_info("specs_from_command_line", specs, stringify=False)
    def _run_v2(self) -> ExitCode:
        """Run the requested v2 goals once, or repeatedly when `--loop` is set."""
        goals = self.options.goals
        self._run_tracker.set_v2_goal_rule_names(tuple(goals))
        if not goals:
            # Nothing to do is a success.
            return PANTS_SUCCEEDED_EXIT_CODE
        global_options = self.options.for_global_scope()
        if not global_options.get("loop", False):
            return self._maybe_run_v2_body(goals, poll=False)
        iterations = global_options.loop_max
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        while iterations:
            # NB: We generate a new "run id" per iteration of the loop in order to allow us to
            # observe fresh values for Goals. See notes in `scheduler.rs`.
            self.graph_session.scheduler_session.new_run_id()
            try:
                exit_code = self._maybe_run_v2_body(goals, poll=True)
            except ExecutionError as e:
                # In loop mode a failed iteration is logged, not fatal.
                logger.warning(e)
            iterations -= 1
        return exit_code
    def _maybe_run_v2_body(self, goals, poll: bool) -> ExitCode:
        """Execute the goal rules; with poll=True, wait on invalidated inputs."""
        return self.graph_session.run_goal_rules(
            options_bootstrapper=self.options_bootstrapper,
            union_membership=self.union_membership,
            goals=goals,
            specs=self.specs,
            poll=poll,
            poll_delay=(0.1 if poll else None),
        )
    @staticmethod
    def _merge_exit_codes(code: ExitCode, *codes: ExitCode) -> ExitCode:
        """Returns the exit code with higher abs value in case of negative values."""
        max_code = code
        # The loop variable shadows the `code` parameter; `max_code` already
        # holds the parameter's value at this point, so no information is lost.
        for code in codes:
            if abs(max_code) < abs(code):
                max_code = code
        return max_code
    def _finish_run(self, code: ExitCode) -> ExitCode:
        """Checks that the RunTracker is in good shape to exit, and then returns its exit code.
        TODO: The RunTracker's exit code will likely not be relevant in v2: the exit codes of
        individual `@goal_rule`s are everything in that case.
        """
        run_tracker_result = PANTS_SUCCEEDED_EXIT_CODE
        scheduler_session = self.graph_session.scheduler_session
        try:
            metrics = scheduler_session.metrics()
            self._run_tracker.pantsd_stats.set_scheduler_metrics(metrics)
            outcome = WorkUnit.SUCCESS if code == PANTS_SUCCEEDED_EXIT_CODE else WorkUnit.FAILURE
            self._run_tracker.set_root_outcome(outcome)
            run_tracker_result = self._run_tracker.end()
        except ValueError as e:
            # If we have been interrupted by a signal, calling .end() sometimes writes to a closed
            # file, so we just log that fact here and keep going.
            ExceptionSink.log_exception(exc=e)
        return run_tracker_result
    def run(self, start_time: float) -> ExitCode:
        """Execute the requested goals (or print help) and return the final exit code."""
        self._set_start_time(start_time)
        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()
            streaming_handlers = global_options.streaming_workunits_handlers
            report_interval = global_options.streaming_workunits_report_interval
            callbacks = Subsystem.get_streaming_workunit_callbacks(streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=report_interval,
            )
            # A help request short-circuits the whole run.
            if self.options.help_request:
                all_help_info = HelpInfoExtracter.get_all_help_info(
                    self.options,
                    self.union_membership,
                    self.graph_session.goal_consumed_subsystem_scopes,
                )
                help_printer = HelpPrinter(
                    bin_name=global_options.pants_bin_name,
                    help_request=self.options.help_request,
                    all_help_info=all_help_info,
                    use_color=global_options.colors,
                )
                return help_printer.print_help()
            with streaming_reporter.session():
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_v2()
                except Exception as e:
                    ExceptionSink.log_exception(e)
                run_tracker_result = self._finish_run(engine_result)
        return self._merge_exit_codes(engine_result, run_tracker_result)
import logging
import os
from dataclasses import dataclass
from typing import Mapping, Optional, Tuple
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.base.specs_parser import SpecsParser
from pants.base.workunit import WorkUnit
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.internals.native import Native
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.unions import UnionMembership
from pants.goal.run_tracker import RunTracker
from pants.help.help_info_extracter import HelpInfoExtracter
from pants.help.help_printer import HelpPrinter
from pants.init.engine_initializer import EngineInitializer, GraphScheduler, GraphSession
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.init.specs_calculator import calculate_specs
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.subsystem import Subsystem
from pants.reporting.streaming_workunit_handler import StreamingWorkunitHandler
from pants.util.contextutil import maybe_profiled
logger = logging.getLogger(__name__)
@dataclass
class LocalPantsRunner:
    """Handles a single pants invocation running in the process-local context.
    build_root: The build root for this run.
    options: The parsed options for this run.
    options_bootstrapper: The OptionsBootstrapper instance to use.
    build_config: The parsed build configuration for this run.
    specs: The specs for this run, i.e. either the address or filesystem specs.
    graph_session: A LegacyGraphSession instance for graph reuse.
    profile_path: The profile path - if any (from from the `PANTS_PROFILE` env var).
    """
    build_root: str
    options: Options
    options_bootstrapper: OptionsBootstrapper
    build_config: BuildConfiguration
    specs: Specs
    graph_session: GraphSession
    union_membership: UnionMembership
    profile_path: Optional[str]
    _run_tracker: RunTracker
    @staticmethod
    def parse_options(
        options_bootstrapper: OptionsBootstrapper,
    ) -> Tuple[Options, BuildConfiguration]:
        """Parse the full options and build configuration from bootstrap options."""
        build_config = BuildConfigInitializer.get(options_bootstrapper)
        options = OptionsInitializer.create(options_bootstrapper, build_config)
        return options, build_config
    @staticmethod
    def _init_graph_session(
        options_bootstrapper: OptionsBootstrapper,
        build_config: BuildConfiguration,
        options: Options,
        scheduler: Optional[GraphScheduler] = None,
    ) -> GraphSession:
        """Create a graph session, reusing a warmed scheduler when one is provided."""
        native = Native()
        native.set_panic_handler()
        # Only build a fresh scheduler when the daemon did not hand us one.
        graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
            options_bootstrapper, build_config
        )
        global_scope = options.for_global_scope()
        # The dynamic console UI is only available for v2 runs.
        dynamic_ui = global_scope.dynamic_ui if global_scope.v2 else False
        use_colors = global_scope.get("colors", True)
        # Workunit streaming is enabled iff at least one handler is configured.
        stream_workunits = len(options.for_global_scope().streaming_workunits_handlers) != 0
        return graph_scheduler_helper.new_session(
            RunTracker.global_instance().run_id,
            dynamic_ui=dynamic_ui,
            use_colors=use_colors,
            should_report_workunits=stream_workunits,
        )
    @classmethod
    def create(
        cls,
        env: Mapping[str, str],
        options_bootstrapper: OptionsBootstrapper,
        scheduler: Optional[GraphScheduler] = None,
    ) -> "LocalPantsRunner":
        """Creates a new LocalPantsRunner instance by parsing options.
        By the time this method runs, logging will already have been initialized in either
        PantsRunner or DaemonPantsRunner.
        :param env: The environment (e.g. os.environ) for this run.
        :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
        :param scheduler: If being called from the daemon, a warmed scheduler to use.
        """
        build_root = get_buildroot()
        global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
        options, build_config = LocalPantsRunner.parse_options(options_bootstrapper)
        # Option values are usually computed lazily on demand,
        # but command line options are eagerly computed for validation.
        for scope in options.scope_to_flags.keys():
            options.for_scope(scope)
        # Verify configs.
        if global_bootstrap_options.verify_config:
            options.verify_configs(options_bootstrapper.config)
        union_membership = UnionMembership.from_rules(build_config.union_rules)
        # If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
        # to initialize a session here.
        graph_session = cls._init_graph_session(
            options_bootstrapper, build_config, options, scheduler
        )
        specs = calculate_specs(
            options_bootstrapper=options_bootstrapper,
            options=options,
            build_root=build_root,
            session=graph_session.scheduler_session,
        )
        profile_path = env.get("PANTS_PROFILE")
        return cls(
            build_root=build_root,
            options=options,
            options_bootstrapper=options_bootstrapper,
            build_config=build_config,
            specs=specs,
            graph_session=graph_session,
            union_membership=union_membership,
            profile_path=profile_path,
            _run_tracker=RunTracker.global_instance(),
        )
    def _set_start_time(self, start_time: float) -> None:
        """Start the RunTracker and record the command-line specs for this run."""
        # Propagates parent_build_id to pants runs that may be called from this pants run.
        os.environ["PANTS_PARENT_BUILD_ID"] = self._run_tracker.run_id
        self._run_tracker.start(self.options, run_start_time=start_time)
        spec_parser = SpecsParser(get_buildroot())
        specs = [str(spec_parser.parse_spec(spec)) for spec in self.options.specs]
        # Note: This will not include values from `--changed-*` flags.
        self._run_tracker.run_info.add_info("specs_from_command_line", specs, stringify=False)
    def _run_v2(self) -> ExitCode:
        """Run the requested v2 goals once, or repeatedly when `--loop` is set."""
        goals = self.options.goals
        self._run_tracker.set_v2_goal_rule_names(tuple(goals))
        if not goals:
            # Nothing to do is a success.
            return PANTS_SUCCEEDED_EXIT_CODE
        global_options = self.options.for_global_scope()
        if not global_options.get("loop", False):
            return self._maybe_run_v2_body(goals, poll=False)
        iterations = global_options.loop_max
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        while iterations:
            # NB: We generate a new "run id" per iteration of the loop in order to allow us to
            # observe fresh values for Goals. See notes in `scheduler.rs`.
            self.graph_session.scheduler_session.new_run_id()
            try:
                exit_code = self._maybe_run_v2_body(goals, poll=True)
            except ExecutionError as e:
                # In loop mode a failed iteration is logged, not fatal.
                logger.warning(e)
            iterations -= 1
        return exit_code
    def _maybe_run_v2_body(self, goals, poll: bool) -> ExitCode:
        """Execute the goal rules; with poll=True, wait on invalidated inputs."""
        return self.graph_session.run_goal_rules(
            options_bootstrapper=self.options_bootstrapper,
            union_membership=self.union_membership,
            goals=goals,
            specs=self.specs,
            poll=poll,
            poll_delay=(0.1 if poll else None),
        )
    @staticmethod
    def _merge_exit_codes(code: ExitCode, *codes: ExitCode) -> ExitCode:
        """Returns the exit code with higher abs value in case of negative values."""
        max_code = code
        # The loop variable shadows the `code` parameter; `max_code` already
        # holds the parameter's value at this point, so no information is lost.
        for code in codes:
            if abs(max_code) < abs(code):
                max_code = code
        return max_code
    def _finish_run(self, code: ExitCode) -> ExitCode:
        """Checks that the RunTracker is in good shape to exit, and then returns its exit code.
        TODO: The RunTracker's exit code will likely not be relevant in v2: the exit codes of
        individual `@goal_rule`s are everything in that case.
        """
        run_tracker_result = PANTS_SUCCEEDED_EXIT_CODE
        scheduler_session = self.graph_session.scheduler_session
        try:
            metrics = scheduler_session.metrics()
            self._run_tracker.pantsd_stats.set_scheduler_metrics(metrics)
            outcome = WorkUnit.SUCCESS if code == PANTS_SUCCEEDED_EXIT_CODE else WorkUnit.FAILURE
            self._run_tracker.set_root_outcome(outcome)
            run_tracker_result = self._run_tracker.end()
        except ValueError as e:
            # If we have been interrupted by a signal, calling .end() sometimes writes to a closed
            # file, so we just log that fact here and keep going.
            ExceptionSink.log_exception(exc=e)
        return run_tracker_result
    def run(self, start_time: float) -> ExitCode:
        """Execute the requested goals (or print help) and return the final exit code."""
        self._set_start_time(start_time)
        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()
            streaming_handlers = global_options.streaming_workunits_handlers
            report_interval = global_options.streaming_workunits_report_interval
            callbacks = Subsystem.get_streaming_workunit_callbacks(streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=report_interval,
            )
            # A help request short-circuits the whole run.
            if self.options.help_request:
                all_help_info = HelpInfoExtracter.get_all_help_info(
                    self.options,
                    self.union_membership,
                    self.graph_session.goal_consumed_subsystem_scopes,
                )
                help_printer = HelpPrinter(
                    bin_name=global_options.pants_bin_name,
                    help_request=self.options.help_request,
                    all_help_info=all_help_info,
                    use_color=global_options.colors,
                )
                return help_printer.print_help()
            with streaming_reporter.session():
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_v2()
                except Exception as e:
                    ExceptionSink.log_exception(e)
                run_tracker_result = self._finish_run(engine_result)
        return self._merge_exit_codes(engine_result, run_tracker_result)
from akashic.arules.transpiler import Transpiler
from akashic.ads.data_provider import DataProvider
from akashic.env_provider import EnvProvider
from akashic.bridges.data_bridge import DataBridge
from akashic.bridges.time_bridge import TimeBridge
from os.path import join, dirname, abspath
import json
def test_rule_transpiler():
    """End-to-end smoke test of the Akashic transpiler.

    Loads two data-source definitions (user + course), transpiles one Akashic
    rule to CLIPS, injects generated facts, and runs the engine twice, printing
    the transpiled rule, return data, rules and facts for manual inspection.
    """
    # Create new env_provider
    env_provider = EnvProvider()
    # Setup User data provider
    this_folder = dirname(__file__)
    sample_path = abspath(join(this_folder, '..', 'test', 'samples', 'ads', 'user_dsd.json'))
    dsd_string = None
    with open(sample_path, 'r') as sample:
        dsd_string = sample.read()
    user_data_provider = DataProvider(env_provider)
    user_data_provider.load(dsd_string)
    user_data_provider.setup()
    # Setup Course data provider
    this_folder = dirname(__file__)
    sample_path = abspath(join(this_folder, '..', 'test', 'samples', 'ads', 'course_dsd.json'))
    dsd_string = None
    with open(sample_path, 'r') as sample:
        dsd_string = sample.read()
    course_data_provider = DataProvider(env_provider)
    course_data_provider.load(dsd_string)
    course_data_provider.setup()
    # Insert data providers in env provider
    env_provider.insert_data_provider(user_data_provider)
    env_provider.insert_data_provider(course_data_provider)
    # Setup akashic transpiler
    transpiler = Transpiler(env_provider)
    # Load Akashic rule
    #--------------------
    # Available sample rules (swap the filename below to try another one):
    # simple_return
    # time_return
    # rhs_return
    # rhs_create
    # rhs_update
    # rhs_update_pure
    # rhs_delete
    # test_assistance
    # test_count
    this_folder = dirname(__file__)
    sample_path = abspath(join(this_folder, '..', 'test', 'samples', 'arules', 'test_assistance.json'))
    with open(sample_path, 'r') as sample:
        akashic_rule = sample.read()
    transpiler.load(akashic_rule)
    # Print transpiled LHS commands
    print("\n----------------")
    print("Transpiled Rule:")
    print()
    print(transpiler.tranpiled_rule)
    print("\n----------------")
    print("\n")
    # Insert transpiled rule in env_provider
    env_provider.insert_rule(transpiler.rule.rule_name, transpiler.tranpiled_rule)
    ##### ADD FACTS FROM THE WEB
    # Read users from DS
    multiple_courses = course_data_provider.read_multiple()
    # Generate CLIPS facts from JSON objects
    course_clips_facts = course_data_provider.generate_multiple_clips_facts(multiple_courses, 5)
    # Insert CLIPS facts in env_provider
    for u in course_clips_facts:
        env_provider.insert_fact(u)
    # NOTE(review): the rule is undefined immediately after checking
    # deletability, i.e. before the engine runs — presumably to exercise
    # rule removal itself; confirm this is the intended scenario.
    rule = env_provider.env.find_rule("Test_assistance")
    print("DELETABLE: " + str(rule.deletable))
    rule.undefine()
    ###### RUN CLIPS ENGINE
    print("\n\n-> RUN 1\n*********************************" \
        "***************************************" \
        "***************************************" )
    print("*********************************" \
        "***************************************" \
        "***************************************\n" )
    # Run CLIPS engine
    env_provider.run()
    print("\n\nREUTRN DATA: ")
    for e in env_provider.get_return_data():
        print(e)
        print("------------------------------")
        print("\n")
    print("\n")
    print("RULES: ")
    print("-------------------------START")
    for r in env_provider.env.rules():
        print(r)
    print("-------------------------END")
    print("\n")
    print("FACTS: ")
    print("-------------------------START")
    for f in env_provider.env.facts():
        print(f)
    print("-------------------------END")
    # Second run verifies engine behaviour after the first activation cycle.
    print("\n\n-> RUN 2\n*********************************" \
        "***************************************" \
        "***************************************" )
    print("*********************************" \
        "***************************************" \
        "***************************************\n" )
    # Run CLIPS engine
    env_provider.run()
    print("\n\nREUTRN DATA: ")
    for e in env_provider.get_return_data():
        print(e)
        print("------------------------------")
        print("\n")
    print("\n")
    print("RULES: ")
    print("-------------------------START")
    for r in env_provider.env.rules():
        print(r)
    print("-------------------------END")
    print("\n")
    print("FACTS: ")
    print("-------------------------START")
    for f in env_provider.env.facts():
        print(f)
    print("-------------------------END")
if __name__ == "__main__":
test_rule_transpiler() | test/main.py | from akashic.arules.transpiler import Transpiler
from akashic.ads.data_provider import DataProvider
from akashic.env_provider import EnvProvider
from akashic.bridges.data_bridge import DataBridge
from akashic.bridges.time_bridge import TimeBridge
from os.path import join, dirname, abspath
import json
def _load_data_provider(env_provider, dsd_filename):
    """Create, load and set up a DataProvider from a DSD sample file."""
    this_folder = dirname(__file__)
    sample_path = abspath(join(this_folder, '..', 'test', 'samples',
                               'ads', dsd_filename))
    with open(sample_path, 'r') as sample:
        dsd_string = sample.read()
    data_provider = DataProvider(env_provider)
    data_provider.load(dsd_string)
    data_provider.setup()
    return data_provider


def _run_engine_and_report(env_provider, run_label):
    """Run the CLIPS engine once and print return data, rules and facts."""
    stars = ("*********************************"
             "***************************************"
             "***************************************")
    print("\n\n-> " + run_label + "\n" + stars)
    print(stars + "\n")
    # Run CLIPS engine
    env_provider.run()
    print("\n\nRETURN DATA: ")  # fixed typo: was "REUTRN DATA"
    for entry in env_provider.get_return_data():
        print(entry)
        print("------------------------------")
    print("\n")
    print("\n")
    print("RULES: ")
    print("-------------------------START")
    for rule in env_provider.env.rules():
        print(rule)
    print("-------------------------END")
    print("\n")
    print("FACTS: ")
    print("-------------------------START")
    for fact in env_provider.env.facts():
        print(fact)
    print("-------------------------END")


def test_rule_transpiler():
    """End-to-end check: set up data providers, transpile one akashic rule
    into CLIPS, inject facts, and run the engine twice, dumping the engine
    state after each run."""
    # Create new env_provider
    env_provider = EnvProvider()

    # Set up the user and course data providers from their DSD samples.
    user_data_provider = _load_data_provider(env_provider, 'user_dsd.json')
    course_data_provider = _load_data_provider(env_provider, 'course_dsd.json')

    # Insert data providers in env provider
    env_provider.insert_data_provider(user_data_provider)
    env_provider.insert_data_provider(course_data_provider)

    # Setup akashic transpiler
    transpiler = Transpiler(env_provider)

    # Load the Akashic rule under test.  Other available samples:
    # simple_return, time_return, rhs_return, rhs_create, rhs_update,
    # rhs_update_pure, rhs_delete, test_assistance, test_count
    this_folder = dirname(__file__)
    sample_path = abspath(join(this_folder, '..', 'test', 'samples',
                               'arules', 'test_assistance.json'))
    with open(sample_path, 'r') as sample:
        akashic_rule = sample.read()
    transpiler.load(akashic_rule)

    # Print transpiled LHS commands
    print("\n----------------")
    print("Transpiled Rule:")
    print()
    # NOTE: "tranpiled_rule" (sic) is the attribute name exposed by Transpiler.
    print(transpiler.tranpiled_rule)
    print("\n----------------")
    print("\n")

    # Insert transpiled rule in env_provider
    env_provider.insert_rule(transpiler.rule.rule_name,
                             transpiler.tranpiled_rule)

    # Add facts from the web: read courses and turn them into CLIPS facts.
    multiple_courses = course_data_provider.read_multiple()
    course_clips_facts = course_data_provider.generate_multiple_clips_facts(
        multiple_courses, 5)
    for fact in course_clips_facts:
        env_provider.insert_fact(fact)

    # Demonstrate rule lookup / undefine before running the engine.
    rule = env_provider.env.find_rule("Test_assistance")
    print("DELETABLE: " + str(rule.deletable))
    rule.undefine()

    # Run the CLIPS engine twice and dump its state after each run.
    _run_engine_and_report(env_provider, "RUN 1")
    _run_engine_and_report(env_provider, "RUN 2")
if __name__ == "__main__":
test_rule_transpiler() | 0.41324 | 0.267214 |
from RLBench import Bench, BenchConfig
from RLBench.bench import BenchRun
from RLBench.algo import PolicyGradient
from RLBench.envs import LinearCar
from mock import Mock, MagicMock, patch
from unittest2 import TestCase
import logging
logger = logging.getLogger(__name__)
class TestBench(TestCase):
    """Unit tests for the Bench class."""

    def test_bench_init(self):
        """Test: BENCH: initialization."""
        # Both the no-argument and explicit-config constructors must
        # yield a BenchConfig and an (initially empty) run list.
        for candidate in (Bench(), Bench(BenchConfig())):
            self.assertIsInstance(candidate.config, BenchConfig)
            self.assertIsInstance(candidate.runs, list)

    @patch('RLBench.bench.BenchRun')
    def test_bench_benchmark(self, bench_run_mock):
        """Test: BENCH: benchmark invokation."""
        # The patched BenchRun factory always hands back the same run
        # object so we can observe the optimize() call made by the bench.
        run_obj = Mock()
        bench_run_mock.side_effect = lambda a, b, c, d: run_obj
        config = MagicMock(spec=BenchConfig)
        config.__iter__.return_value = [(Mock(), Mock(), {}, {})]
        Bench(config)()
        run_obj.alg.optimize.assert_called_once_with()
class TestBenchConfig(TestCase):
    """BenchConfig tests."""

    # Shared class-level test fixtures.
    alg_config = [[
        (PolicyGradient, [{}]),
        (PolicyGradient, {})
    ], [
        (PolicyGradient, {})
    ]]
    env_config = [
        (LinearCar, {'horizon': 100}),
        (LinearCar, {'horizon': 200})
    ]
    alg_config_add = [
        (PolicyGradient, [{}, {}]),
    ]
    env_config_add = [
        (LinearCar, {'horizon': 100}),
        (LinearCar, {'horizon': 200})
    ]

    @staticmethod
    def _check_structure(lst):
        # Walk the normalized structure and verify every level's type:
        # list -> list -> (class, [dict, ...]) tuples.
        assert isinstance(lst, list)
        for group in lst:
            assert isinstance(group, list)
            for pair in group:
                assert isinstance(pair, tuple)
                assert pair[0] is PolicyGradient or pair[0] is LinearCar
                assert isinstance(pair[1], list)
                for conf in pair[1]:
                    assert isinstance(conf, dict)

    def test_benchconfig_init(self):
        """Test: BENCHCONFIG: initialization structure."""
        config = BenchConfig(self.alg_config, self.env_config)
        self._check_structure(config.algs)
        self._check_structure(config.envs)

    def test_benchconfig_add_tests(self):
        """Test: BENCHCONFIG: add_tests."""
        config = BenchConfig()
        config.add_tests(self.alg_config_add, self.env_config_add)
        self._check_structure(config.algs)
        self._check_structure(config.envs)

    def test_benchconfig_exceptions(self):
        """Test: BENCHCONFIG: exceptions."""
        # (alg part, env part) pairs that BenchConfig must reject.
        bad_configs = [
            # tuple structure broken (list / bare tuple instead of lists of tuples)
            [[PolicyGradient, {}], (LinearCar, {})],
            # algorithm entry is not an algorithm class
            [[(Mock(), {})], [(LinearCar, {})]],
            # environment entry is not an environment class
            [[(PolicyGradient, {})], [(Mock, {})]],
            # mismatched lengths (environment list empty)
            [[(PolicyGradient, {})], []],
        ]
        for bad in bad_configs:
            with self.subTest(test=bad):
                self.assertRaises(ValueError, BenchConfig, *bad)

    def test_benchconfig_iterator(self):
        """Test: BENCHCONFIG: Iterator."""
        conf = BenchConfig(self.alg_config, self.env_config)
        for alg, env, alg_conf, env_conf in conf:
            assert alg is PolicyGradient
            assert env is LinearCar
            self.assertIsInstance(alg_conf, dict)
            self.assertIsInstance(env_conf, dict)
class TestBenchRun(TestCase):
    """Test BenchRun class."""
    def test_benchrun_init(self):
        """Test: BENCHRUN: initialization."""
        # Four mock arguments must map positionally onto the four attributes.
        args = [MagicMock() for i in range(4)]
        attr = ['alg', 'env', 'alg_conf', 'env_conf']
        run = BenchRun(*args)
        for a, m in zip(attr, args):
            assert getattr(run, a) is m
    def test_benchrun_get_monitor(self):
        """Test: BENCHRUN: monitor getters."""
        # Use a real environment/algorithm pair so the monitors exist.
        env = LinearCar()
        alg = PolicyGradient(env, Mock())
        run = BenchRun(alg, env, None, None)
        alg_monitor = run.get_alg_monitor()
        self.assertEqual(alg_monitor, alg.monitor)
        env_monitor = run.get_env_monitor()
self.assertEqual(env_monitor, env.monitor) | RLBench/test/test_bench.py | from RLBench import Bench, BenchConfig
from RLBench.bench import BenchRun
from RLBench.algo import PolicyGradient
from RLBench.envs import LinearCar
from mock import Mock, MagicMock, patch
from unittest2 import TestCase
import logging
logger = logging.getLogger(__name__)
class TestBench(TestCase):
    """Unit tests for the Bench class."""

    def test_bench_init(self):
        """Test: BENCH: initialization."""
        # Both the no-argument and explicit-config constructors must
        # yield a BenchConfig and an (initially empty) run list.
        for candidate in (Bench(), Bench(BenchConfig())):
            self.assertIsInstance(candidate.config, BenchConfig)
            self.assertIsInstance(candidate.runs, list)

    @patch('RLBench.bench.BenchRun')
    def test_bench_benchmark(self, bench_run_mock):
        """Test: BENCH: benchmark invokation."""
        # The patched BenchRun factory always hands back the same run
        # object so we can observe the optimize() call made by the bench.
        run_obj = Mock()
        bench_run_mock.side_effect = lambda a, b, c, d: run_obj
        config = MagicMock(spec=BenchConfig)
        config.__iter__.return_value = [(Mock(), Mock(), {}, {})]
        Bench(config)()
        run_obj.alg.optimize.assert_called_once_with()
class TestBenchConfig(TestCase):
    """BenchConfig tests."""

    # Shared class-level test fixtures.
    alg_config = [[
        (PolicyGradient, [{}]),
        (PolicyGradient, {})
    ], [
        (PolicyGradient, {})
    ]]
    env_config = [
        (LinearCar, {'horizon': 100}),
        (LinearCar, {'horizon': 200})
    ]
    alg_config_add = [
        (PolicyGradient, [{}, {}]),
    ]
    env_config_add = [
        (LinearCar, {'horizon': 100}),
        (LinearCar, {'horizon': 200})
    ]

    @staticmethod
    def _check_structure(lst):
        # Walk the normalized structure and verify every level's type:
        # list -> list -> (class, [dict, ...]) tuples.
        assert isinstance(lst, list)
        for group in lst:
            assert isinstance(group, list)
            for pair in group:
                assert isinstance(pair, tuple)
                assert pair[0] is PolicyGradient or pair[0] is LinearCar
                assert isinstance(pair[1], list)
                for conf in pair[1]:
                    assert isinstance(conf, dict)

    def test_benchconfig_init(self):
        """Test: BENCHCONFIG: initialization structure."""
        config = BenchConfig(self.alg_config, self.env_config)
        self._check_structure(config.algs)
        self._check_structure(config.envs)

    def test_benchconfig_add_tests(self):
        """Test: BENCHCONFIG: add_tests."""
        config = BenchConfig()
        config.add_tests(self.alg_config_add, self.env_config_add)
        self._check_structure(config.algs)
        self._check_structure(config.envs)

    def test_benchconfig_exceptions(self):
        """Test: BENCHCONFIG: exceptions."""
        # (alg part, env part) pairs that BenchConfig must reject.
        bad_configs = [
            # tuple structure broken (list / bare tuple instead of lists of tuples)
            [[PolicyGradient, {}], (LinearCar, {})],
            # algorithm entry is not an algorithm class
            [[(Mock(), {})], [(LinearCar, {})]],
            # environment entry is not an environment class
            [[(PolicyGradient, {})], [(Mock, {})]],
            # mismatched lengths (environment list empty)
            [[(PolicyGradient, {})], []],
        ]
        for bad in bad_configs:
            with self.subTest(test=bad):
                self.assertRaises(ValueError, BenchConfig, *bad)

    def test_benchconfig_iterator(self):
        """Test: BENCHCONFIG: Iterator."""
        conf = BenchConfig(self.alg_config, self.env_config)
        for alg, env, alg_conf, env_conf in conf:
            assert alg is PolicyGradient
            assert env is LinearCar
            self.assertIsInstance(alg_conf, dict)
            self.assertIsInstance(env_conf, dict)
class TestBenchRun(TestCase):
    """Test BenchRun class."""
    def test_benchrun_init(self):
        """Test: BENCHRUN: initialization."""
        # Four mock arguments must map positionally onto the four attributes.
        args = [MagicMock() for i in range(4)]
        attr = ['alg', 'env', 'alg_conf', 'env_conf']
        run = BenchRun(*args)
        for a, m in zip(attr, args):
            assert getattr(run, a) is m
    def test_benchrun_get_monitor(self):
        """Test: BENCHRUN: monitor getters."""
        # Use a real environment/algorithm pair so the monitors exist.
        env = LinearCar()
        alg = PolicyGradient(env, Mock())
        run = BenchRun(alg, env, None, None)
        alg_monitor = run.get_alg_monitor()
        self.assertEqual(alg_monitor, alg.monitor)
        env_monitor = run.get_env_monitor()
self.assertEqual(env_monitor, env.monitor) | 0.898514 | 0.453201 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def train_test_compare(train_df, test_df):
"""
Comparing the details of train and test datasets
PARAMETERS
train_df : Training set pandas dataframe (dataframe)
test_df : Testing set pandas dataframe (dataframe)
OUTPUTS :
1. Returns a dataframe with the necessary comparisons
2. Columns - # columns, # instances,
3. Identifies the Target feature
4. Identifies missing values in each data set
5. Display summary statistics for each data set
"""
# Generating the Comparison table
data = ["Train Set", "Test Set"]
columns = [train_df.shape[1], test_df.shape[1]]
instances = [train_df.shape[0], test_df.shape[0]]
temp_dict = {
"Type_of_Data":data,
"Number_of_Columns":columns,
"Number_of_Instances":instances
}
print("The Comparison Table :")
print(pd.DataFrame(temp_dict).T)
print("------------------------------")
# Identifying the Target feature
train_cols = set(train_df.columns)
test_cols = set(test_df.columns)
print("\nPotential Target :", train_cols-test_cols)
print("------------------------------")
# Identifying Missing values
print("\nMissing Value Count in Train Set :")
print(train_df.isna().sum())
print("\nMissing Value Count in Test Set :")
print(test_df.isna().sum())
print("------------------------------")
# Displaying Summary Statistics
print("\nSummary Statistics for Train Set :")
print(train_df.describe())
print("\nSummary Statistics for Test Set :")
print(test_df.describe())
print("------------------------------")
return None
def train_test_dist(train_df, test_df, shade_train=False, shade_test=False):
    """
    Overlay kernel density estimates of every shared feature in the
    train and test sets, one figure per feature.

    PARAMETERS :
        train_df    : Training set pandas dataframe (dataframe)
        test_df     : Testing set pandas dataframe (dataframe)
        shade_train : Shade the area under the train density plot (boolean)
        shade_test  : Shade the area under the test density plot (boolean)

    OUTPUTS :
        Kernel density plots shown via matplotlib (returns None)
    """
    # Only columns present in both frames are comparable (the target
    # exists solely in the training set).
    shared_features = set(train_df.columns) & set(test_df.columns)
    for feature in list(shared_features):
        sns.kdeplot(train_df[feature], label="Train Set", shade=shade_train)
        sns.kdeplot(test_df[feature], label="Test Set", shade=shade_test)
        plt.title("Train vs Test Distribution for " + feature)
        plt.xlabel("Samples")
        plt.ylabel("Probability")
        plt.show()
return None | utils/data_background.py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def train_test_compare(train_df, test_df):
"""
Comparing the details of train and test datasets
PARAMETERS
train_df : Training set pandas dataframe (dataframe)
test_df : Testing set pandas dataframe (dataframe)
OUTPUTS :
1. Returns a dataframe with the necessary comparisons
2. Columns - # columns, # instances,
3. Identifies the Target feature
4. Identifies missing values in each data set
5. Display summary statistics for each data set
"""
# Generating the Comparison table
data = ["Train Set", "Test Set"]
columns = [train_df.shape[1], test_df.shape[1]]
instances = [train_df.shape[0], test_df.shape[0]]
temp_dict = {
"Type_of_Data":data,
"Number_of_Columns":columns,
"Number_of_Instances":instances
}
print("The Comparison Table :")
print(pd.DataFrame(temp_dict).T)
print("------------------------------")
# Identifying the Target feature
train_cols = set(train_df.columns)
test_cols = set(test_df.columns)
print("\nPotential Target :", train_cols-test_cols)
print("------------------------------")
# Identifying Missing values
print("\nMissing Value Count in Train Set :")
print(train_df.isna().sum())
print("\nMissing Value Count in Test Set :")
print(test_df.isna().sum())
print("------------------------------")
# Displaying Summary Statistics
print("\nSummary Statistics for Train Set :")
print(train_df.describe())
print("\nSummary Statistics for Test Set :")
print(test_df.describe())
print("------------------------------")
return None
def train_test_dist(train_df, test_df, shade_train=False, shade_test=False):
    """
    Overlay kernel density estimates of every shared feature in the
    train and test sets, one figure per feature.

    PARAMETERS :
        train_df    : Training set pandas dataframe (dataframe)
        test_df     : Testing set pandas dataframe (dataframe)
        shade_train : Shade the area under the train density plot (boolean)
        shade_test  : Shade the area under the test density plot (boolean)

    OUTPUTS :
        Kernel density plots shown via matplotlib (returns None)
    """
    # Only columns present in both frames are comparable (the target
    # exists solely in the training set).
    shared_features = set(train_df.columns) & set(test_df.columns)
    for feature in list(shared_features):
        sns.kdeplot(train_df[feature], label="Train Set", shade=shade_train)
        sns.kdeplot(test_df[feature], label="Test Set", shade=shade_test)
        plt.title("Train vs Test Distribution for " + feature)
        plt.xlabel("Samples")
        plt.ylabel("Probability")
        plt.show()
return None | 0.602763 | 0.700312 |
import json
from datatypes import Metrics
# Categories a subproject may legally be in; anything else maps to "unknown".
_VALID_STATE_CATEGORIES = ("unknown", "inproc", "analyzed", "uploaded",
                           "delivered", "stopped")


def loadMetrics(metricsFilename):
    """Load per-project / per-subproject Metrics from a JSON file.

    The file maps project names to dicts that map subproject names to
    metric dicts.  Missing fields default to 0 (counts) or "unknown"
    (state category); an unrecognized state category is coerced to
    "unknown".

    Returns {prj_name: {sp_name: Metrics}}, or {} (after printing a
    message) if the file cannot be read or parsed.
    """
    try:
        with open(metricsFilename, 'r') as f:
            js = json.load(f)
        metrics = {}
        for prj_name, prj_dict in js.items():
            prj_metrics = {}
            for sp_name, sp_metrics_dict in prj_dict.items():
                prj_metrics[sp_name] = _parseSpMetrics(prj_name, sp_name,
                                                      sp_metrics_dict)
            metrics[prj_name] = prj_metrics
        return metrics
    except (OSError, json.JSONDecodeError) as e:
        # Covers unreadable files as well as malformed JSON, matching the
        # "loading or parsing" wording of the message (previously only
        # decode errors were caught and I/O errors propagated).
        print(f'Error loading or parsing {metricsFilename}: {str(e)}')
        return {}


def _parseSpMetrics(prj_name, sp_name, sp_metrics_dict):
    """Build one Metrics object from its JSON dict (missing keys -> defaults)."""
    m = Metrics()
    m._prj_name = prj_name
    m._sp_name = sp_name
    m._state_category = sp_metrics_dict.get("state-category", "unknown")
    if m._state_category not in _VALID_STATE_CATEGORIES:
        m._state_category = "unknown"
    m._unpacked_files = sp_metrics_dict.get("unpacked-files", 0)
    m._num_repos = sp_metrics_dict.get("num-repos", 0)
    m._instances_veryhigh = sp_metrics_dict.get("instances-veryhigh", 0)
    m._instances_high = sp_metrics_dict.get("instances-high", 0)
    m._instances_medium = sp_metrics_dict.get("instances-medium", 0)
    m._instances_low = sp_metrics_dict.get("instances-low", 0)
    m._files_veryhigh = sp_metrics_dict.get("files-veryhigh", 0)
    m._files_high = sp_metrics_dict.get("files-high", 0)
    m._files_medium = sp_metrics_dict.get("files-medium", 0)
    m._files_low = sp_metrics_dict.get("files-low", 0)
    return m
class MetricsJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize Metrics objects."""

    def default(self, o):  # pylint: disable=method-hidden
        if not isinstance(o, Metrics):
            # Fallback: wrap any other object's __dict__ under its class name.
            return {'__{}__'.format(o.__class__.__name__): o.__dict__}
        # Map each Metrics attribute onto its stable JSON key.
        return {
            "state-category": o._state_category,
            "unpacked-files": o._unpacked_files,
            "num-repos": o._num_repos,
            "instances-veryhigh": o._instances_veryhigh,
            "instances-high": o._instances_high,
            "instances-medium": o._instances_medium,
            "instances-low": o._instances_low,
            "files-veryhigh": o._files_veryhigh,
            "files-high": o._files_high,
            "files-medium": o._files_medium,
            "files-low": o._files_low,
        }
def saveMetrics(metricsFilename, metrics):
    """Serialize the {prj: {sp: Metrics}} mapping to *metricsFilename* as JSON."""
    with open(metricsFilename, "w") as f:
json.dump(metrics, f, indent=4, cls=MetricsJSONEncoder) | metricsfile.py |
import json
from datatypes import Metrics
def loadMetrics(metricsFilename):
metrics = {}
try:
with open(metricsFilename, 'r') as f:
js = json.load(f)
# expecting dict of prj name to sub-dict
for prj_name, prj_dict in js.items():
prj_metrics = {}
# expecting prj_dict to be dict of sp name to metrics dict
for sp_name, sp_metrics_dict in prj_dict.items():
sp_metrics = Metrics()
sp_metrics._prj_name = prj_name
sp_metrics._sp_name = sp_name
sp_metrics._state_category = sp_metrics_dict.get("state-category", "unknown")
sp_metrics._unpacked_files = sp_metrics_dict.get("unpacked-files", 0)
sp_metrics._num_repos = sp_metrics_dict.get("num-repos", 0)
sp_metrics._instances_veryhigh = sp_metrics_dict.get("instances-veryhigh", 0)
sp_metrics._instances_high = sp_metrics_dict.get("instances-high", 0)
sp_metrics._instances_medium = sp_metrics_dict.get("instances-medium", 0)
sp_metrics._instances_low = sp_metrics_dict.get("instances-low", 0)
sp_metrics._files_veryhigh = sp_metrics_dict.get("files-veryhigh", 0)
sp_metrics._files_high = sp_metrics_dict.get("files-high", 0)
sp_metrics._files_medium = sp_metrics_dict.get("files-medium", 0)
sp_metrics._files_low = sp_metrics_dict.get("files-low", 0)
# validate state category
if sp_metrics._state_category not in ["unknown", "inproc", "analyzed", "uploaded", "delivered", "stopped"]:
sp_metrics._state_category = "unknown"
prj_metrics[sp_name] = sp_metrics
metrics[prj_name] = prj_metrics
return metrics
except json.decoder.JSONDecodeError as e:
print(f'Error loading or parsing {metricsFilename}: {str(e)}')
return {}
class MetricsJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize Metrics objects."""

    def default(self, o):  # pylint: disable=method-hidden
        if not isinstance(o, Metrics):
            # Fallback: wrap any other object's __dict__ under its class name.
            return {'__{}__'.format(o.__class__.__name__): o.__dict__}
        # Map each Metrics attribute onto its stable JSON key.
        return {
            "state-category": o._state_category,
            "unpacked-files": o._unpacked_files,
            "num-repos": o._num_repos,
            "instances-veryhigh": o._instances_veryhigh,
            "instances-high": o._instances_high,
            "instances-medium": o._instances_medium,
            "instances-low": o._instances_low,
            "files-veryhigh": o._files_veryhigh,
            "files-high": o._files_high,
            "files-medium": o._files_medium,
            "files-low": o._files_low,
        }
def saveMetrics(metricsFilename, metrics):
    """Serialize the {prj: {sp: Metrics}} mapping to *metricsFilename* as JSON."""
    with open(metricsFilename, "w") as f:
json.dump(metrics, f, indent=4, cls=MetricsJSONEncoder) | 0.386069 | 0.168925 |
from sys import stdin, stdout
from copy import deepcopy
def extendAtlas(atlas):
    """Return the risk atlas tiled 5x5 (AoC 2021 day 15, part 2).

    Each tile to the right/below has every risk level incremented by one,
    wrapping 9 back to 1.  The input grid is not modified.
    (The unused ``global showAtlas`` declaration was removed.)
    """
    # v % 9 + 1 maps 1..8 -> 2..9 and 9 -> 1, i.e. increment-with-wrap.
    bump_line = lambda line: [v % 9 + 1 for v in line]

    # Extend each row to 5x the original width.
    wide = []
    for row in atlas:
        extended = list(row)
        tile = row
        for _ in range(4):
            tile = bump_line(tile)
            extended.extend(tile)
        wide.append(extended)

    # Stack 5 vertically incremented copies of the widened atlas.
    result = [list(r) for r in wide]
    block = wide
    for _ in range(4):
        block = [bump_line(r) for r in block]
        result.extend(list(r) for r in block)
    return result
def main():
    """Read a risk map from a file, expand it 5x5 (part 2) and print the
    cost of the cheapest path from the top-left to the bottom-right cell.
    """
    global showAtlas
    stdout.write("Please enter input filename: \n")
    inputFileName = (str(stdin.readline())).replace("\n", "")
    inputText = (open(inputFileName, "r").read()).split("\n")
    # Debug helper made available globally (used only when uncommented below).
    showAtlas = lambda atlas: [print(line) for line in atlas]
    # Parse digit rows into a grid of ints and tile it 5x5.
    atlas = extendAtlas(list(map((lambda line: list(map(int, line))), inputText)))
    # Distance table seeded with a large value; this is iterative relaxation
    # to a fixpoint, not true Dijkstra despite the variable name.
    # NOTE(review): 1000 is assumed large enough as an initial value; the
    # relaxation still converges to the unique fixpoint, but verify timing
    # and correctness on full-size inputs.
    dijkstra = list(map((lambda line: list(map((lambda char: 1000), line))), atlas))
    oldDijkstra = deepcopy(dijkstra)
    # The target cell's cost is fixed; all others relax against it.
    dijkstra[-1][-1] = atlas[-1][-1]
    # Repeat full sweeps until no cell's value changes.
    while oldDijkstra != dijkstra:
        #print("iter")
        oldDijkstra = deepcopy(dijkstra)
        for i in range(len(atlas)-1, -1, -1):
            for j in range(len(atlas[0])-1, -1, -1):
                # Target cell keeps its seeded cost.
                if i == len(atlas)-1 and j == len(atlas[0])-1:
                    continue
                # Interior cell: all four neighbours exist.
                if i < len(atlas)-1 and j < len(atlas[0])-1 and i > 0 and j > 0:
                    dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j+1], dijkstra[i-1][j], dijkstra[i][j-1]]) + atlas[i][j]
                # Bottom row: no neighbour below.
                elif i == len(atlas)-1:
                    if j == 0:
                        dijkstra[i][j] = min([dijkstra[i][j+1], dijkstra[i-1][j]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i][j-1], dijkstra[i][j+1], dijkstra[i-1][j]]) + atlas[i][j]
                # Right column: no neighbour to the right.
                elif j == len(atlas[0])-1:
                    if i == 0:
                        dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j-1]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i-1][j], dijkstra[i+1][j], dijkstra[i][j-1]]) + atlas[i][j]
                # Top row: no neighbour above.
                elif i == 0:
                    if j == 0:
                        dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j+1]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i][j-1], dijkstra[i+1][j], dijkstra[i][j+1]]) + atlas[i][j]
                # Left column: no neighbour to the left.
                elif j == 0:
                    dijkstra[i][j] = min([dijkstra[i-1][j], dijkstra[i][j+1], dijkstra[i+1][j]]) + atlas[i][j]
        #showAtlas(dijkstra)
    # The starting cell's own risk is not counted, per the puzzle rules.
    stdout.write(f"{dijkstra[0][0] - atlas[0][0]}\n")
if __name__ == "__main__":
main() | 2021/day15/part2/main.py | from sys import stdin, stdout
from copy import deepcopy
def extendAtlas(atlas):
    """Return the risk atlas tiled 5x5 (AoC 2021 day 15, part 2).

    Each tile to the right/below has every risk level incremented by one,
    wrapping 9 back to 1.  The input grid is not modified.
    (The unused ``global showAtlas`` declaration was removed.)
    """
    # v % 9 + 1 maps 1..8 -> 2..9 and 9 -> 1, i.e. increment-with-wrap.
    bump_line = lambda line: [v % 9 + 1 for v in line]

    # Extend each row to 5x the original width.
    wide = []
    for row in atlas:
        extended = list(row)
        tile = row
        for _ in range(4):
            tile = bump_line(tile)
            extended.extend(tile)
        wide.append(extended)

    # Stack 5 vertically incremented copies of the widened atlas.
    result = [list(r) for r in wide]
    block = wide
    for _ in range(4):
        block = [bump_line(r) for r in block]
        result.extend(list(r) for r in block)
    return result
def main():
    """Read a risk map from a file, expand it 5x5 (part 2) and print the
    cost of the cheapest path from the top-left to the bottom-right cell.
    """
    global showAtlas
    stdout.write("Please enter input filename: \n")
    inputFileName = (str(stdin.readline())).replace("\n", "")
    inputText = (open(inputFileName, "r").read()).split("\n")
    # Debug helper made available globally (used only when uncommented below).
    showAtlas = lambda atlas: [print(line) for line in atlas]
    # Parse digit rows into a grid of ints and tile it 5x5.
    atlas = extendAtlas(list(map((lambda line: list(map(int, line))), inputText)))
    # Distance table seeded with a large value; this is iterative relaxation
    # to a fixpoint, not true Dijkstra despite the variable name.
    # NOTE(review): 1000 is assumed large enough as an initial value; the
    # relaxation still converges to the unique fixpoint, but verify timing
    # and correctness on full-size inputs.
    dijkstra = list(map((lambda line: list(map((lambda char: 1000), line))), atlas))
    oldDijkstra = deepcopy(dijkstra)
    # The target cell's cost is fixed; all others relax against it.
    dijkstra[-1][-1] = atlas[-1][-1]
    # Repeat full sweeps until no cell's value changes.
    while oldDijkstra != dijkstra:
        #print("iter")
        oldDijkstra = deepcopy(dijkstra)
        for i in range(len(atlas)-1, -1, -1):
            for j in range(len(atlas[0])-1, -1, -1):
                # Target cell keeps its seeded cost.
                if i == len(atlas)-1 and j == len(atlas[0])-1:
                    continue
                # Interior cell: all four neighbours exist.
                if i < len(atlas)-1 and j < len(atlas[0])-1 and i > 0 and j > 0:
                    dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j+1], dijkstra[i-1][j], dijkstra[i][j-1]]) + atlas[i][j]
                # Bottom row: no neighbour below.
                elif i == len(atlas)-1:
                    if j == 0:
                        dijkstra[i][j] = min([dijkstra[i][j+1], dijkstra[i-1][j]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i][j-1], dijkstra[i][j+1], dijkstra[i-1][j]]) + atlas[i][j]
                # Right column: no neighbour to the right.
                elif j == len(atlas[0])-1:
                    if i == 0:
                        dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j-1]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i-1][j], dijkstra[i+1][j], dijkstra[i][j-1]]) + atlas[i][j]
                # Top row: no neighbour above.
                elif i == 0:
                    if j == 0:
                        dijkstra[i][j] = min([dijkstra[i+1][j], dijkstra[i][j+1]]) + atlas[i][j]
                    else:
                        dijkstra[i][j] = min([dijkstra[i][j-1], dijkstra[i+1][j], dijkstra[i][j+1]]) + atlas[i][j]
                # Left column: no neighbour to the left.
                elif j == 0:
                    dijkstra[i][j] = min([dijkstra[i-1][j], dijkstra[i][j+1], dijkstra[i+1][j]]) + atlas[i][j]
        #showAtlas(dijkstra)
    # The starting cell's own risk is not counted, per the puzzle rules.
    stdout.write(f"{dijkstra[0][0] - atlas[0][0]}\n")
if __name__ == "__main__":
main() | 0.06101 | 0.409634 |
import utilAlgorithm
from numpy import *
from logger import logger
from utilfile import *
from utilconfigration import cfg
class utilAlg_Mean(utilAlgorithm.utilAlgorithm):
    """Baseline algorithm: per-car-type mean monthly sales.

    trainData() aggregates monthly sales per car type; predictData()
    answers with that car type's mean (or the global mean, key 0, for
    unseen types).
    """
    def __init__(self):
        # Constructor only announces which algorithm was instantiated.
        print('utilAlg_Mean __init__', self.__class__.__name__)
    def trainData(self, trainX, trainY, train_attri_dict, crxvalX, crxvalY):
        """Aggregate sales per (car type, month); return a dict of
        per-car-type mean monthly sales with key 0 holding the global
        mean.  crxvalX/crxvalY are accepted but unused here.
        """
        logger.info("%s trainData", self.__class__.__name__)
        # trainCarSell[cartype][month] -> summed sales for that month.
        trainCarSell = {}
        num_of_car_month = 0  # number of distinct (cartype, month) pairs
        for idx in range(shape(trainX)[0]):
            cartype = int(trainX[idx, train_attri_dict[CLASS_ID]])
            month = int(trainX[idx, train_attri_dict[SALE_DATE]])
            if cartype in trainCarSell:
                if month in trainCarSell[cartype]:
                    trainCarSell[cartype][month] += trainY[idx][0]
                else:
                    trainCarSell[cartype][month] = trainY[idx][0]
                    num_of_car_month += 1
            else:
                trainCarSell[cartype] = {}
                trainCarSell[cartype][month] = trainY[idx][0]
                num_of_car_month += 1
        # Buffers for the optional dump of the aggregated table.
        conditionX = zeros((num_of_car_month, 2));sellcountY = zeros((num_of_car_month, 1))
        trainW = {}
        totalsell = 0
        totalcarmonthcnt = 0
        totalcartype_num = 0
        idx_of_car_month = 0
        for cartype, selldict in trainCarSell.items():
            monthnum = 0
            sellsum = 0
            for month, monthsell in selldict.items():
                monthnum += 1
                sellsum += monthsell
                if 1 == cfg.getint("mean_method", "genfile"):
                    conditionX[idx_of_car_month][PREDICT_IDX_CLASS_ID] = cartype
                    conditionX[idx_of_car_month][PREDICT_IDX_DATE] = month
                    sellcountY[idx_of_car_month][0] = monthsell
                    idx_of_car_month += 1
            # Mean monthly sales for this car type.
            trainW[cartype] = sellsum/monthnum
            totalsell = totalsell + sellsum
            totalcarmonthcnt = totalcarmonthcnt + monthnum
            totalcartype_num += 1
        # Key 0 = global mean over all (cartype, month) pairs.
        # NOTE(review): this would silently overwrite a real car type 0 —
        # confirm class ids never take the value 0.
        trainW[0] = totalsell/totalcarmonthcnt
        if 1 == cfg.getint("mean_method", "genfile"):
            output_file_path = cfg.get("mean_method", "outputfile")
            utilf = utilfile("", "", output_file_path,"")
            callabels = [SALE_DATE, CLASS_ID, SALE_QUANTITY]
            utilf.writePredictData(conditionX, callabels, sellcountY)
        logger.info("total car type num is %d, total car*month is %d" % (totalcartype_num, num_of_car_month))
        return trainW
    def predictData(self, trainW, predictX):
        """Predict sales for each row of predictX from the trained means."""
        logger.info("%s predictData", self.__class__.__name__)
        # Default every prediction to the global mean (key 0).
        predictY = ones((shape(predictX)[0], 1))*trainW[0]
        for idx in range(shape(predictX)[0]):
            if predictX[idx, PREDICT_IDX_CLASS_ID] in trainW:
                car_idx = predictX[idx, PREDICT_IDX_CLASS_ID]
                # NOTE(review): trainData() above stores scalar means, yet
                # this indexes trainW[car_idx][0][0] as if it were a matrix;
                # confirm which algorithm produces the trainW consumed here.
                predictY[idx, 0] = round(float(trainW[car_idx][0][0]))
            else:
                predictY[idx, 0] = round(trainW[0][0][0])
                # NOTE(review): train_attri_dict is not defined in this scope —
                # this log line would raise NameError if ever reached.
                logger.info('car type %d is not in trainW, using mean data instead',predictX[idx][train_attri_dict[CLASS_ID]])
return predictY | Algorithm/MachineLearning/TianChi/CarSellPredict/src/utilAlg_Mean.py | import utilAlgorithm
from numpy import *
from logger import logger
from utilfile import *
from utilconfigration import cfg
class utilAlg_Mean(utilAlgorithm.utilAlgorithm):
    """Baseline algorithm: per-car-type mean monthly sales.

    trainData() aggregates monthly sales per car type; predictData()
    answers with that car type's mean (or the global mean, key 0, for
    unseen types).
    """
    def __init__(self):
        # Constructor only announces which algorithm was instantiated.
        print('utilAlg_Mean __init__', self.__class__.__name__)
    def trainData(self, trainX, trainY, train_attri_dict, crxvalX, crxvalY):
        """Aggregate sales per (car type, month); return a dict of
        per-car-type mean monthly sales with key 0 holding the global
        mean.  crxvalX/crxvalY are accepted but unused here.
        """
        logger.info("%s trainData", self.__class__.__name__)
        # trainCarSell[cartype][month] -> summed sales for that month.
        trainCarSell = {}
        num_of_car_month = 0  # number of distinct (cartype, month) pairs
        for idx in range(shape(trainX)[0]):
            cartype = int(trainX[idx, train_attri_dict[CLASS_ID]])
            month = int(trainX[idx, train_attri_dict[SALE_DATE]])
            if cartype in trainCarSell:
                if month in trainCarSell[cartype]:
                    trainCarSell[cartype][month] += trainY[idx][0]
                else:
                    trainCarSell[cartype][month] = trainY[idx][0]
                    num_of_car_month += 1
            else:
                trainCarSell[cartype] = {}
                trainCarSell[cartype][month] = trainY[idx][0]
                num_of_car_month += 1
        # Buffers for the optional dump of the aggregated table.
        conditionX = zeros((num_of_car_month, 2));sellcountY = zeros((num_of_car_month, 1))
        trainW = {}
        totalsell = 0
        totalcarmonthcnt = 0
        totalcartype_num = 0
        idx_of_car_month = 0
        for cartype, selldict in trainCarSell.items():
            monthnum = 0
            sellsum = 0
            for month, monthsell in selldict.items():
                monthnum += 1
                sellsum += monthsell
                if 1 == cfg.getint("mean_method", "genfile"):
                    conditionX[idx_of_car_month][PREDICT_IDX_CLASS_ID] = cartype
                    conditionX[idx_of_car_month][PREDICT_IDX_DATE] = month
                    sellcountY[idx_of_car_month][0] = monthsell
                    idx_of_car_month += 1
            # Mean monthly sales for this car type.
            trainW[cartype] = sellsum/monthnum
            totalsell = totalsell + sellsum
            totalcarmonthcnt = totalcarmonthcnt + monthnum
            totalcartype_num += 1
        # Key 0 = global mean over all (cartype, month) pairs.
        # NOTE(review): this would silently overwrite a real car type 0 —
        # confirm class ids never take the value 0.
        trainW[0] = totalsell/totalcarmonthcnt
        if 1 == cfg.getint("mean_method", "genfile"):
            output_file_path = cfg.get("mean_method", "outputfile")
            utilf = utilfile("", "", output_file_path,"")
            callabels = [SALE_DATE, CLASS_ID, SALE_QUANTITY]
            utilf.writePredictData(conditionX, callabels, sellcountY)
        logger.info("total car type num is %d, total car*month is %d" % (totalcartype_num, num_of_car_month))
        return trainW
    def predictData(self, trainW, predictX):
        """Predict sales for each row of predictX from the trained means."""
        logger.info("%s predictData", self.__class__.__name__)
        # Default every prediction to the global mean (key 0).
        predictY = ones((shape(predictX)[0], 1))*trainW[0]
        for idx in range(shape(predictX)[0]):
            if predictX[idx, PREDICT_IDX_CLASS_ID] in trainW:
                car_idx = predictX[idx, PREDICT_IDX_CLASS_ID]
                # NOTE(review): trainData() above stores scalar means, yet
                # this indexes trainW[car_idx][0][0] as if it were a matrix;
                # confirm which algorithm produces the trainW consumed here.
                predictY[idx, 0] = round(float(trainW[car_idx][0][0]))
            else:
                predictY[idx, 0] = round(trainW[0][0][0])
                # NOTE(review): train_attri_dict is not defined in this scope —
                # this log line would raise NameError if ever reached.
                logger.info('car type %d is not in trainW, using mean data instead',predictX[idx][train_attri_dict[CLASS_ID]])
return predictY | 0.250179 | 0.188175 |
import tensorflow as tf
import numpy as np
class VGG19:
    """VGG-19 network whose weights are loaded from a pretrained .npy file."""
    def __init__(self,VGG19_Model_Path = None):
        """Load the pretrained weight dictionary into self.wDict."""
        # NOTE(review): recent NumPy versions require allow_pickle=True for
        # this load; verify against the NumPy version the project pins.
        self.wDict = np.load(VGG19_Model_Path, encoding="bytes").item()
def build(self,picture):
self.conv1_1 = tf.nn.conv2d(
input=picture,
filter=self.wDict['conv1_1'][0],
strides=[1,1,1,1],
padding='SAME',
name='conv1_1'
)
self.relu1_1 = tf.nn.relu(tf.nn.bias_add(self.conv1_1,self.wDict['conv1_1'][1]))
self.conv1_2 = tf.nn.conv2d(
input=self.relu1_1,
filter=self.wDict['conv1_2'][0],
strides=[1,1,1,1],
padding='SAME',
name='conv1_1'
)
self.relu1_2 = tf.nn.relu(tf.nn.bias_add(self.conv1_2, self.wDict['conv1_2'][1]))
self.pool1 = tf.layers.max_pooling2d(
inputs=self.relu1_2,
pool_size=2,
strides=2,
name='pool1'
)
# block 2
self.conv2_1 = tf.nn.conv2d(
input=self.pool1,
filter=self.wDict['conv2_1'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv2_1'
)
self.relu2_1 = tf.nn.relu(tf.nn.bias_add(self.conv2_1, self.wDict['conv2_1'][1]))
self.conv2_2 = tf.nn.conv2d(
input=self.relu2_1,
filter=self.wDict['conv2_2'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv2_2'
)
self.relu2_2 = tf.nn.relu(tf.nn.bias_add(self.conv2_2, self.wDict['conv2_2'][1]))
self.pool2 = tf.layers.max_pooling2d(
inputs=self.relu2_2,
pool_size=2,
strides=2,
name='pool2'
)
# block 3
self.conv3_1 = tf.nn.conv2d(
input=self.pool2,
filter=self.wDict['conv3_1'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv3_1'
)
self.relu3_1 = tf.nn.relu(tf.nn.bias_add(self.conv3_1, self.wDict['conv3_1'][1]))
self.conv3_2 = tf.nn.conv2d(
input=self.relu3_1,
filter=self.wDict['conv3_2'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv3_2'
)
self.relu3_2 = tf.nn.relu(tf.nn.bias_add(self.conv3_2, self.wDict['conv3_2'][1]))
self.conv3_3 = tf.nn.conv2d(
input=self.relu3_2,
filter=self.wDict['conv3_3'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv3_3'
)
self.relu3_3 = tf.nn.relu(tf.nn.bias_add(self.conv3_3, self.wDict['conv3_3'][1]))
self.conv3_4 = tf.nn.conv2d(
input=self.relu3_3,
filter=self.wDict['conv3_4'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv3_4'
)
self.relu3_4 = tf.nn.relu(tf.nn.bias_add(self.conv3_4, self.wDict['conv3_4'][1]))
self.pool3 = tf.layers.max_pooling2d(
inputs=self.relu3_4,
pool_size=2,
strides=2,
name='pool3'
)
# block 4
self.conv4_1 = tf.nn.conv2d(
input=self.pool3,
filter=self.wDict['conv4_1'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv4_1'
)
self.relu4_1 = tf.nn.relu(tf.nn.bias_add(self.conv4_1, self.wDict['conv4_1'][1]))
self.conv4_2 = tf.nn.conv2d(
input=self.relu4_1,
filter=self.wDict['conv4_2'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv4_2'
)
self.relu4_2 = tf.nn.relu(tf.nn.bias_add(self.conv4_2, self.wDict['conv4_2'][1]))
self.conv4_3 = tf.nn.conv2d(
input=self.relu4_2,
filter=self.wDict['conv4_3'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv4_3'
)
self.relu4_3 = tf.nn.relu(tf.nn.bias_add(self.conv4_3, self.wDict['conv4_3'][1]))
self.conv4_4 = tf.nn.conv2d(
input=self.relu4_3,
filter=self.wDict['conv4_4'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv4_4'
)
self.relu4_4 = tf.nn.relu(tf.nn.bias_add(self.conv4_4, self.wDict['conv4_4'][1]))
self.pool4 = tf.layers.max_pooling2d(
inputs=self.relu4_4,
pool_size=2,
strides=2,
name='pool4'
)
# block 5
self.conv5_1 = tf.nn.conv2d(
input=self.pool4,
filter=self.wDict['conv5_1'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv5_1'
)
self.relu5_1 = tf.nn.relu(tf.nn.bias_add(self.conv5_1, self.wDict['conv5_1'][1]))
self.conv5_2 = tf.nn.conv2d(
input=self.relu5_1,
filter=self.wDict['conv5_2'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv5_2'
)
self.relu5_2 = tf.nn.relu(tf.nn.bias_add(self.conv5_2, self.wDict['conv5_2'][1]))
self.conv5_3 = tf.nn.conv2d(
input=self.relu5_2,
filter=self.wDict['conv5_3'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv5_3'
)
self.relu5_3 = tf.nn.relu(tf.nn.bias_add(self.conv5_3, self.wDict['conv5_3'][1]))
self.conv5_4 = tf.nn.conv2d(
input=self.relu5_3,
filter=self.wDict['conv5_4'][0],
strides=[1, 1, 1, 1],
padding='SAME',
name='conv5_4'
)
self.relu5_4 = tf.nn.relu(tf.nn.bias_add(self.conv5_4, self.wDict['conv5_4'][1]))
self.pool5 = tf.layers.max_pooling2d(
inputs=self.relu5_4,
pool_size=2,
strides=2,
name='pool5'
)
# self.fc_in = tf.layers.flatten(self.pool5)
# self.fc6 = tf.layers.dense(
# inputs=self.fc_in,
# units=4096,
# activation=tf.nn.relu,
# name='fc6'
# )
# self.dropout1 = tf.layers.dropout(self.fc6,rate=0.5)
# self.fc7 = tf.layers.dense(
# inputs=self.dropout1,
# units=4096,
# activation=tf.nn.relu,
# name='fc7'
# )
# self.dropout2 = tf.layers.dropout(self.fc7, rate=0.5)
# self.fc8 = tf.layers.dense(
# inputs=self.dropout2,
# units=1000,
# activation=tf.nn.relu,
# name='fc8'
# ) | models/vgg19_tf.py | import tensorflow as tf
import numpy as np
class VGG19:
    """VGG-19 convolutional feature extractor built from pretrained weights.

    Uses the TF 1.x graph API. After build(), each layer is available as an
    attribute (conv1_1, relu1_1, ..., pool5), matching the original layout.
    """

    # (block index, number of conv layers in that block) -- standard VGG-19.
    _BLOCKS = [(1, 2), (2, 2), (3, 4), (4, 4), (5, 4)]

    def __init__(self, VGG19_Model_Path=None):
        # Weight dict: layer name -> [kernel, bias], loaded from an .npy file.
        self.wDict = np.load(VGG19_Model_Path, encoding="bytes").item()

    def _conv_relu(self, bottom, name):
        """One conv + biased ReLU using the pretrained weights for `name`."""
        conv = tf.nn.conv2d(
            input=bottom,
            filter=self.wDict[name][0],
            strides=[1, 1, 1, 1],
            padding='SAME',
            name=name)  # fixed: conv1_2's op was mis-named 'conv1_1'
        relu = tf.nn.relu(tf.nn.bias_add(conv, self.wDict[name][1]))
        return conv, relu

    def build(self, picture):
        """Assemble the five conv blocks; `picture` is the input tensor."""
        bottom = picture
        for block, n_convs in self._BLOCKS:
            for i in range(1, n_convs + 1):
                name = 'conv%d_%d' % (block, i)
                conv, relu = self._conv_relu(bottom, name)
                # Preserve the original public attributes (convX_Y, reluX_Y).
                setattr(self, name, conv)
                setattr(self, 'relu%d_%d' % (block, i), relu)
                bottom = relu
            pool = tf.layers.max_pooling2d(
                inputs=bottom,
                pool_size=2,
                strides=2,
                name='pool%d' % block)
            setattr(self, 'pool%d' % block, pool)
            bottom = pool
        # Fully-connected head (fc6/fc7/fc8) intentionally omitted -- this
        # class is used for feature extraction only, as in the original.
import subprocess
import time
import datetime
import os
import threading
import pandas as pd
'''
shop_code = 'kkakka001'
acc = 'qtumai'
passwd = '<PASSWORD>'
ip = '192.168.0.59'
port = '554'
ch = 'stream_ch00_0'
add = 'rtsp://' + acc + ':' + passwd + '@' + ip + ':' + port + '/' + ch
save_path = './save_video/test.avi'
time = datetime.datetime.now()
file_name = '_' + shop_code + '_' + ch
cmd = 'ffmpeg.exe -y -i "' + add + '" -t 1800 -an "' + save_path +'"'
subprocess.check_output(cmd)
'''
class video_recoding(threading.Thread):
    """Thread that records an RTSP camera stream to 30-minute AVI chunks
    while the current time is inside configured working hours."""

    def __init__(self, shop_code, acc, pw, ip, port, ch, open_t, close_t):
        threading.Thread.__init__(self)
        self.shop_code = shop_code
        self.acc = acc          # RTSP account name
        self.pw = pw            # RTSP password
        self.ip = ip
        self.port = str(port)
        self.ch = str(ch)
        self.open_t = open_t    # (hour, minute) opening time
        self.close_t = close_t  # (hour, minute) closing time

    def get_rtsp_addr(self):
        """Build the RTSP URL; the channel path is wrapped in literal quotes."""
        # e.g. rtsp://admin:pw@192.168.1.111:554/"cam/realmonitor?channel=1"
        add = "rtsp://" + self.acc + ":" + self.pw + "@" + self.ip + ":" + self.port + "/\"" + self.ch + "\""
        return add

    def get_save_path(self):
        """Return (creating if missing) the video output directory."""
        save_path = os.path.join('/home/pi/workspace/stream', 'save_video')
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        return save_path

    def get_file_name(self):
        """Timestamped file name: YYYYmmddHHMMSSffffff_<shop_code>.avi"""
        ntime = datetime.datetime.now()
        file_name = ntime.strftime('%Y%m%d%H%M%S%f')
        file_name = file_name + '_' + self.shop_code + '.avi'
        return file_name

    def working_hours(self):
        """Return True iff now is strictly between open and close times."""
        now_t = datetime.datetime.now()
        open_t = datetime.datetime(now_t.year, now_t.month, now_t.day,
                                   self.open_t[0], self.open_t[1], 0)
        close_t = datetime.datetime(now_t.year, now_t.month, now_t.day,
                                    self.close_t[0], self.close_t[1], 0)
        self.shutdown = close_t.strftime('%H%M')
        # Fixed: previously fell through and returned None before opening time.
        return open_t < now_t < close_t

    def recode(self):
        """Loop forever: record 30-minute (-t 1800) chunks while open."""
        add = self.get_rtsp_addr()
        save_path = self.get_save_path()
        while True:
            if self.working_hours():
                file_name = self.get_file_name()
                cmd = ("ffmpeg -y -r 30 -stimeout 10000000 -hide_banner "
                       "-rtsp_transport tcp -i %s -vcodec copy -t 1800 -an %s/%s"
                       % (add, save_path, file_name))
                print(cmd)
                subprocess.check_output(cmd, shell=True, universal_newlines=True)
            else:
                print('Not working time')
                time.sleep(60)

    def run(self):
        while True:
            try:
                self.recode()
            # Fixed: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and hot-spun on repeated failures.
            except Exception:
                time.sleep(1)
if __name__ == '__main__':

    def get_dvr_info(idx):
        """Read one DVR's connection settings from the config CSV."""
        df = pd.read_csv('/home/pi/workspace/stream/config.txt')
        row = df.loc[idx]
        return (row['shop_code'], row['acc'], row['pw'],
                row['dvr_ip'], row['port'], row['dvr_ch'])

    config = pd.read_csv('/home/pi/workspace/stream/config.txt')
    # Record around the clock: 00:00 -> 23:59.
    open_t = (0, 0)
    close_t = (23, 59)
    # Launch one recorder thread per configured DVR.
    for i in config.index:
        shop_code, acc, pw, ip, port, ch = get_dvr_info(i)
        main = video_recoding(shop_code, acc, pw, ip, port, ch, open_t, close_t)
        main.start()
        time.sleep(0.01)
import time
import datetime
import os
import threading
import pandas as pd
'''
shop_code = 'kkakka001'
acc = 'qtumai'
passwd = '<PASSWORD>'
ip = '192.168.0.59'
port = '554'
ch = 'stream_ch00_0'
add = 'rtsp://' + acc + ':' + passwd + '@' + ip + ':' + port + '/' + ch
save_path = './save_video/test.avi'
time = datetime.datetime.now()
file_name = '_' + shop_code + '_' + ch
cmd = 'ffmpeg.exe -y -i "' + add + '" -t 1800 -an "' + save_path +'"'
subprocess.check_output(cmd)
'''
class video_recoding(threading.Thread):
    """Thread that records an RTSP camera stream to 30-minute AVI chunks
    while the current time is inside configured working hours."""

    def __init__(self, shop_code, acc, pw, ip, port, ch, open_t, close_t):
        threading.Thread.__init__(self)
        self.shop_code = shop_code
        self.acc = acc          # RTSP account name
        self.pw = pw            # RTSP password
        self.ip = ip
        self.port = str(port)
        self.ch = str(ch)
        self.open_t = open_t    # (hour, minute) opening time
        self.close_t = close_t  # (hour, minute) closing time

    def get_rtsp_addr(self):
        """Build the RTSP URL; the channel path is wrapped in literal quotes."""
        # e.g. rtsp://admin:pw@192.168.1.111:554/"cam/realmonitor?channel=1"
        add = "rtsp://" + self.acc + ":" + self.pw + "@" + self.ip + ":" + self.port + "/\"" + self.ch + "\""
        return add

    def get_save_path(self):
        """Return (creating if missing) the video output directory."""
        save_path = os.path.join('/home/pi/workspace/stream', 'save_video')
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        return save_path

    def get_file_name(self):
        """Timestamped file name: YYYYmmddHHMMSSffffff_<shop_code>.avi"""
        ntime = datetime.datetime.now()
        file_name = ntime.strftime('%Y%m%d%H%M%S%f')
        file_name = file_name + '_' + self.shop_code + '.avi'
        return file_name

    def working_hours(self):
        """Return True iff now is strictly between open and close times."""
        now_t = datetime.datetime.now()
        open_t = datetime.datetime(now_t.year, now_t.month, now_t.day,
                                   self.open_t[0], self.open_t[1], 0)
        close_t = datetime.datetime(now_t.year, now_t.month, now_t.day,
                                    self.close_t[0], self.close_t[1], 0)
        self.shutdown = close_t.strftime('%H%M')
        # Fixed: previously fell through and returned None before opening time.
        return open_t < now_t < close_t

    def recode(self):
        """Loop forever: record 30-minute (-t 1800) chunks while open."""
        add = self.get_rtsp_addr()
        save_path = self.get_save_path()
        while True:
            if self.working_hours():
                file_name = self.get_file_name()
                cmd = ("ffmpeg -y -r 30 -stimeout 10000000 -hide_banner "
                       "-rtsp_transport tcp -i %s -vcodec copy -t 1800 -an %s/%s"
                       % (add, save_path, file_name))
                print(cmd)
                subprocess.check_output(cmd, shell=True, universal_newlines=True)
            else:
                print('Not working time')
                time.sleep(60)

    def run(self):
        while True:
            try:
                self.recode()
            # Fixed: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and hot-spun on repeated failures.
            except Exception:
                time.sleep(1)
if __name__ == '__main__':

    def get_dvr_info(idx):
        """Read one DVR's connection settings from the config CSV."""
        df = pd.read_csv('/home/pi/workspace/stream/config.txt')
        row = df.loc[idx]
        return (row['shop_code'], row['acc'], row['pw'],
                row['dvr_ip'], row['port'], row['dvr_ch'])

    config = pd.read_csv('/home/pi/workspace/stream/config.txt')
    # Record around the clock: 00:00 -> 23:59.
    open_t = (0, 0)
    close_t = (23, 59)
    # Launch one recorder thread per configured DVR.
    for i in config.index:
        shop_code, acc, pw, ip, port, ch = get_dvr_info(i)
        main = video_recoding(shop_code, acc, pw, ip, port, ch, open_t, close_t)
        main.start()
        time.sleep(0.01)
import random
# Call comes in
# Simple phone-menu simulation: routes a caller to sales, parts, or a
# direct line, with random availability of each department.

print('Good morning, <NAME>, this is computer speaking.\n\nHow can I direct your call?')
call = input()

if call.lower() == 'sales':
    print('Thanks, please hold for just a moment and I will see who is available\n')
    print('***Places on HOLD***')
    print('***Announce on PA or just look to see***\n')
    availableSales = random.randint(0, 9)
    if availableSales >= 5:
        print('Jake is available\n')
        print('***Call is sent to Jake***\n\n')
        print('Good morning, <NAME>undai, Jake speaking!')
    else:
        print('No one is available\n')
        print('*Takes name and number, will call you back*')
elif call.lower() == 'parts':
    print('Thanks, I will connect you to the parts department\n')
    availableParts = random.randint(0, 9)
    if availableParts >= 5:
        print('Good morning, parts department speaking!')
    else:
        # Fixed typo: '/n' -> '\n' so the prompt prints on its own line.
        print('Our parts department are currently assisting other customers\nPress 1 to leave a message that will be responded to ASAP')
elif call.lower() == 'direct':
    print('Thanks, I will connect you directly\n')
    availableDirect = random.randint(0, 9)
    if availableDirect >= 5:
        print('Hello, you have reached me directly!')
    else:
        print('Direct is unavailable..\nPress 1 to record a voicemail\nPress 2 to be connected to the next available department member')

# TODO: Service call routing -- appointments (common), breakdown (rare:
# hold and go find someone to pick up), update/warranty (direct call),
# general inquiry (ring all, then voicemail or back to reception).
# TODO: Lunch mode (Service 1 / Sales 2 / Parts 3, or ring all, or staff
# the desk); after-5 (ring all sales); after-close handling.
# Call comes in
# Simple phone-menu simulation: routes a caller to sales, parts, or a
# direct line, with random availability of each department.

print('Good morning, <NAME>, this is computer speaking.\n\nHow can I direct your call?')
call = input()

if call.lower() == 'sales':
    print('Thanks, please hold for just a moment and I will see who is available\n')
    print('***Places on HOLD***')
    print('***Announce on PA or just look to see***\n')
    availableSales = random.randint(0, 9)
    if availableSales >= 5:
        print('Jake is available\n')
        print('***Call is sent to Jake***\n\n')
        print('Good morning, <NAME>undai, Jake speaking!')
    else:
        print('No one is available\n')
        print('*Takes name and number, will call you back*')
elif call.lower() == 'parts':
    print('Thanks, I will connect you to the parts department\n')
    availableParts = random.randint(0, 9)
    if availableParts >= 5:
        print('Good morning, parts department speaking!')
    else:
        # Fixed typo: '/n' -> '\n' so the prompt prints on its own line.
        print('Our parts department are currently assisting other customers\nPress 1 to leave a message that will be responded to ASAP')
elif call.lower() == 'direct':
    print('Thanks, I will connect you directly\n')
    availableDirect = random.randint(0, 9)
    if availableDirect >= 5:
        print('Hello, you have reached me directly!')
    else:
        print('Direct is unavailable..\nPress 1 to record a voicemail\nPress 2 to be connected to the next available department member')

# TODO: Service call routing -- appointments (common), breakdown (rare:
# hold and go find someone to pick up), update/warranty (direct call),
# general inquiry (ring all, then voicemail or back to reception).
# TODO: Lunch mode (Service 1 / Sales 2 / Parts 3, or ring all, or staff
# the desk); after-5 (ring all sales); after-close handling.
from env.tic_tac_toe_env import TicTacToe
from agent.agent import Agent
import random
import numpy as np
from PIL import Image
class TicTacToeGameManager():
    """Wraps the TicTacToe env: random opponent and grayscale image obs."""

    def __init__(self, strategy=None, saved_model=None):
        self.game = TicTacToe()
        # Grayscale value per cell owner; the agent's marks keep a constant
        # color regardless of whether it moved first.
        self.agent_first_cmap = {0: 177, 1: 255, 2: 0}
        self.agent_last_cmap = {0: 177, 1: 0, 2: 255}

    def reset(self):
        """Reset game board and return empty board or board
        with initial move played, depending on who's starting"""
        # Fixed: the history list was stored as self.game_history, shadowing
        # the game_history() accessor method below; keep it private instead.
        self._game_history = []
        observation = self.game.reset()
        self.agent_plays_first = random.choice([0, 1])
        if not self.agent_plays_first:
            self.step(self.get_env_action(observation))
            observation = self.get_obs()
        return observation

    def step(self, action):
        _, reward, done = self.game.step(action)
        new_observation = self.get_obs()
        return new_observation, reward, done

    def print_board(self):
        self.game.print_board()

    def render(self):
        self.game.render()

    def get_env_action(self, state):
        """Opponent policy: a uniformly random valid move."""
        return random.choice(self.valid_actions())

    def valid_actions(self):
        # Return a copy so callers cannot mutate the env's list.
        return self.game.valid_actions[:]

    def action_space_size(self):
        return self.game.action_space_size

    def obs_space_size(self):
        return self.game.obs_space_values

    def game_history(self):
        return self._game_history

    def win_reward(self):
        return self.game.win_reward

    def draw_reward(self):
        return self.game.draw_reward

    def loss_penalty(self):
        return self.game.loss_penalty

    def get_image(self):
        # Map colors from color dict to board, keeping agent color constant
        cmap = self.agent_first_cmap if self.agent_plays_first else self.agent_last_cmap
        board_array = np.array(
            [list(map(cmap.get, x)) for x in iter(self.game.board)],
            dtype=np.uint8)
        return Image.fromarray(board_array, 'L')

    def get_obs(self):
        # Give shape (3, 3, 1) instead of (3, 3) for grayscale image
        return np.expand_dims(np.array(self.get_image()), axis=2)
from agent.agent import Agent
import random
import numpy as np
from PIL import Image
class TicTacToeGameManager():
    """Wraps the TicTacToe env: random opponent and grayscale image obs."""

    def __init__(self, strategy=None, saved_model=None):
        self.game = TicTacToe()
        # Grayscale value per cell owner; the agent's marks keep a constant
        # color regardless of whether it moved first.
        self.agent_first_cmap = {0: 177, 1: 255, 2: 0}
        self.agent_last_cmap = {0: 177, 1: 0, 2: 255}

    def reset(self):
        """Reset game board and return empty board or board
        with initial move played, depending on who's starting"""
        # Fixed: the history list was stored as self.game_history, shadowing
        # the game_history() accessor method below; keep it private instead.
        self._game_history = []
        observation = self.game.reset()
        self.agent_plays_first = random.choice([0, 1])
        if not self.agent_plays_first:
            self.step(self.get_env_action(observation))
            observation = self.get_obs()
        return observation

    def step(self, action):
        _, reward, done = self.game.step(action)
        new_observation = self.get_obs()
        return new_observation, reward, done

    def print_board(self):
        self.game.print_board()

    def render(self):
        self.game.render()

    def get_env_action(self, state):
        """Opponent policy: a uniformly random valid move."""
        return random.choice(self.valid_actions())

    def valid_actions(self):
        # Return a copy so callers cannot mutate the env's list.
        return self.game.valid_actions[:]

    def action_space_size(self):
        return self.game.action_space_size

    def obs_space_size(self):
        return self.game.obs_space_values

    def game_history(self):
        return self._game_history

    def win_reward(self):
        return self.game.win_reward

    def draw_reward(self):
        return self.game.draw_reward

    def loss_penalty(self):
        return self.game.loss_penalty

    def get_image(self):
        # Map colors from color dict to board, keeping agent color constant
        cmap = self.agent_first_cmap if self.agent_plays_first else self.agent_last_cmap
        board_array = np.array(
            [list(map(cmap.get, x)) for x in iter(self.game.board)],
            dtype=np.uint8)
        return Image.fromarray(board_array, 'L')

    def get_obs(self):
        # Give shape (3, 3, 1) instead of (3, 3) for grayscale image
        return np.expand_dims(np.array(self.get_image()), axis=2)
from app import db
from flask_login import LoginManager, UserMixin
from datetime import date, datetime
from flask_restful import Resource, Api, abort, reqparse
class User(UserMixin, db.Model):
    """Login account; linked to Employee rows via the 'user' backref."""
    user_id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(100), nullable=False)
    # Audit columns; deleted_date doubles as a soft-delete flag.
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    employee = db.relationship('Employee', backref='user', lazy='dynamic')

    def __repr__(self):
        return "<User: {} {}>".format(self.user_id, self.email)

    def get_id(self):
        # Flask-Login hook: the identifier stored in the session.
        return (self.user_id)

    def user_is_admin(user):
        # NOTE(review): no 'self' -- called with a user object as the sole
        # argument; returns the linked Employee row (used as a truthy check).
        return (Employee.query.filter_by(user_id=user.user_id).first())

    def get_employee_id(user_id):
        # NOTE(review): no 'self' -- maps a raw user_id to its employee_id;
        # raises AttributeError when no Employee row exists.
        return (Employee.query.filter_by(user_id=user_id).first()).employee_id
class Manager(db.Model):
    """Manager lookup table plus manager-related helper queries."""
    manager_id = db.Column(db.Integer, primary_key=True)
    manager_employee_id = db.Column(db.Integer)

    def user_is_manager(user):
        # NOTE(review): no 'self'; also, Employee defines 'manager_id', not
        # 'manager_employee_id' -- this likely raises AttributeError; verify.
        return (Employee.query.filter_by(user_id=user.user_id).first()).manager_employee_id

    def get_all_managers():
        # NOTE(review): no 'self'; same attribute-name concern as above.
        return Employee.query.filter(Employee.manager_employee_id == None).all()
class Employee(db.Model):
    """Employee record; optionally linked to a login User and a Manager."""
    employee_id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'), nullable=True)
    manager_id = db.Column(db.Integer, db.ForeignKey('manager.manager_id'), nullable=True)
    first_name = db.Column(db.String(60), index=True, nullable=False)
    last_name = db.Column(db.String(60), index=True, nullable=False)
    # NOTE(review): date.today() is evaluated once at import time, so the
    # default start_date is the process start date, not the insert date.
    start_date = db.Column(db.Date, default=date.today())
    employee_is_admin = db.Column(db.Boolean, default=False, nullable=False)
    employee_is_manager = db.Column(db.Boolean, default=False, nullable=False)
    created_date = db.Column(db.DateTime, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def get_logged_in_employee_id(user):
        # NOTE(review): no 'self'; returns the Employee row itself despite
        # the _id suffix in the name.
        return Employee.query.filter_by(user_id=user.user_id).first()

    def get_employee_by_id(id):
        # NOTE(review): no 'self'; plain primary-key lookup.
        return Employee.query.get(id)

    def get_manager_name_by_id(self, id):
        # Resolve an employee id to "First Last"; returns None for falsy ids.
        name = None
        emp_id = int(id) if id else None
        emp = Employee.query.get(emp_id)
        if (type(emp_id) is int):
            name = "{} {}".format(emp.first_name, emp.last_name)
        return name

    def serialize(self):
        # JSON-friendly dict consumed by the REST API.
        return {
            'id': self.employee_id,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'is_admin': self.employee_is_admin,
            'is_manager': self.employee_is_manager,
            'manager': {
                'id': self.manager_id,
                'name': self.get_manager_name_by_id(self.manager_id)
            },
            'user_id': self.user_id,
            'start_date': self.start_date.strftime('%Y-%m-%d')
        }
# Request parser for employee create/update payloads.
employee_parser = reqparse.RequestParser(bundle_errors=True)
employee_parser.add_argument('first_name', required=True, help="first name is a required parameter!")
employee_parser.add_argument('last_name', required=True, help="last name is a required parameter!")
employee_parser.add_argument('is_admin', required=True, type=bool, help="is_admin is a required parameter!")
employee_parser.add_argument('is_manager', required=True, type=bool, help="is_manager is a required parameter!")
employee_parser.add_argument('manager', type=dict)
employee_parser.add_argument('start_date', required=True, help="start_date is a required parameter!")
employee_parser.add_argument('user_id')
# Nested parser for the embedded 'manager' object.
manager_parser = reqparse.RequestParser(bundle_errors=True)
manager_parser.add_argument('id', type=dict, location=('manager',))
# NOTE(review): parse_args runs here at import time with another parser as
# 'req'; reqparse normally parses per-request -- confirm this is intentional.
manager_parser = manager_parser.parse_args(req=employee_parser)
class LeaveType(db.Model):
    """A category of leave with an annual day allowance."""
    leave_type_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    days_per_year = db.Column(db.Integer, nullable=False)
    # Audit columns; deleted_date doubles as a soft-delete flag.
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def serialize(self):
        # JSON-friendly dict consumed by the REST API.
        return {
            'id': self.leave_type_id,
            'name': self.name,
            'days_per_year': self.days_per_year
        }
# Request parser for LeaveType payloads.
# NOTE(review): this name is rebound later in the module by the nested
# leave-request parser code -- confirm the clash is intentional.
leave_type_parser = reqparse.RequestParser(bundle_errors=True)
leave_type_parser.add_argument('name', required=True, help="name is a required parameter!")
# NOTE(review): parsed as float but the days_per_year column is db.Integer.
leave_type_parser.add_argument('days_per_year', type=float, required=True, help="days_per_year is a required parameter!")
class ApprovalStatus(db.Model):
    """Lookup table of leave-request approval states."""
    approval_status_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    # Audit columns; deleted_date doubles as a soft-delete flag.
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def serialize(self):
        # JSON-friendly dict consumed by the REST API.
        return {
            'id': self.approval_status_id,
            'name': self.name,
        }
class LeaveRequest(db.Model):
    """An employee's leave request over a date range, with approval state."""
    leave_request_id = db.Column(db.Integer, primary_key=True)
    employee_id = db.Column(db.Integer, db.ForeignKey('employee.employee_id'), nullable=False)
    leave_type_id = db.Column(db.Integer, db.ForeignKey('leave_type.leave_type_id'), nullable=False)
    approval_status_id = db.Column(db.Integer, db.ForeignKey('approval_status.approval_status_id'), nullable=False)
    start_date = db.Column(db.Date, nullable=False)
    end_date = db.Column(db.Date, nullable=False)
    comment = db.Column(db.String(60), nullable=True)
    # Audit columns; deleted_date doubles as a soft-delete flag.
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    employee = db.relationship("Employee", backref='leaverequest')
    leaveType = db.relationship("LeaveType", backref='leaverequest')
    status = db.relationship("ApprovalStatus", backref='leaverequest')

    def get_type_name(self, id):
        # Returns the LeaveType name, or (implicitly) None for falsy ids.
        lt_id = int(id) if id else None
        if (lt_id):
            lt = LeaveType.query.get(lt_id)
            return lt.name

    def get_status_name(self, id):
        # Returns the ApprovalStatus name, or (implicitly) None for falsy ids.
        status_id = int(id) if id else None
        if (status_id):
            status = ApprovalStatus.query.get(status_id)
            return status.name

    def serialize(self):
        # JSON-friendly dict consumed by the REST API; resolves related names.
        return {
            'id': self.leave_request_id,
            'employee': {
                'id': self.employee_id,
                'name': '{} {}'.format(Employee.query.get(self.employee_id).first_name, Employee.query.get(self.employee_id).last_name)
            },
            'leave_type': {
                'id': self.leave_type_id,
                'name': self.get_type_name(self.leave_type_id)
            },
            'approval_status': {
                'id': self.approval_status_id,
                'name': self.get_status_name(self.approval_status_id)
            },
            'start_date': self.start_date.strftime('%Y-%m-%d'),
            'end_date': self.end_date.strftime('%Y-%m-%d'),
            'comment': self.comment,
            'submitted_date': self.created_date.strftime('%Y-%m-%d')
        }
# Request parser for leave-request payloads.
leave_parser = reqparse.RequestParser(bundle_errors=True)
leave_parser.add_argument('start_date', required=True, help="start_date is a required parameter!")
leave_parser.add_argument('end_date', required=True, help="end_date is a required parameter!")
leave_parser.add_argument('comment')
leave_parser.add_argument('employee', type=dict)
leave_parser.add_argument('leave_type', type=dict)
leave_parser.add_argument('approval_status', type=dict)

# Nested parser for the embedded 'employee' object.
leave_employee_parser = reqparse.RequestParser(bundle_errors=True)
leave_employee_parser.add_argument('id', type=dict, location=('employee',))
leave_employee_parser = leave_employee_parser.parse_args(req=leave_parser)

# NOTE(review): this rebinds the module-level leave_type_parser defined above
# for LeaveType payloads -- confirm the name clash is intentional.
leave_type_parser = reqparse.RequestParser(bundle_errors=True)
leave_type_parser.add_argument('id', type=dict, location=('leave_type',))
leave_type_parser = leave_type_parser.parse_args(req=leave_parser)

approval_status_parser = reqparse.RequestParser(bundle_errors=True)
# Fixed copy-paste bug: location was ('leave_type',) but this parser reads
# the nested 'approval_status' object.
approval_status_parser.add_argument('id', type=dict, location=('approval_status',))
approval_status_parser = approval_status_parser.parse_args(req=leave_parser)
from flask_login import LoginManager, UserMixin
from datetime import date, datetime
from flask_restful import Resource, Api, abort, reqparse
class User(UserMixin, db.Model):
    """Authentication account; linked one-to-many to Employee rows."""
    user_id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(100), nullable=False)
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    employee = db.relationship('Employee', backref='user', lazy='dynamic')

    def __repr__(self):
        return "<User: {} {}>".format(self.user_id, self.email)

    def get_id(self):
        # Required by Flask-Login's UserMixin contract.
        return (self.user_id)

    def user_is_admin(user):
        # NOTE(review): despite the name, this returns the Employee row
        # linked to the user (truthy if one exists), not an admin flag —
        # confirm against callers.
        return (Employee.query.filter_by(user_id=user.user_id).first())

    def get_employee_id(user_id):
        # Raises AttributeError if no Employee row exists for this user_id.
        return (Employee.query.filter_by(user_id=user_id).first()).employee_id
class Manager(db.Model):
    """Managerial assignment mapping a manager row to an employee id."""
    manager_id = db.Column(db.Integer, primary_key=True)
    manager_employee_id = db.Column(db.Integer)

    def user_is_manager(user):
        # NOTE(review): Employee defines `manager_id`, not
        # `manager_employee_id` — this attribute access and the filter in
        # get_all_managers look broken; verify against the Employee model.
        # Also raises AttributeError if the user has no Employee row.
        return (Employee.query.filter_by(user_id=user.user_id).first()).manager_employee_id

    def get_all_managers():
        # Employees with no manager of their own (top-level managers).
        return Employee.query.filter(Employee.manager_employee_id == None).all()
class Employee(db.Model):
    """Employee profile, optionally linked to a User account and a Manager."""
    employee_id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'), nullable=True)
    manager_id = db.Column(db.Integer, db.ForeignKey('manager.manager_id'), nullable=True)
    first_name = db.Column(db.String(60), index=True, nullable=False)
    last_name = db.Column(db.String(60), index=True, nullable=False)
    # BUG FIX: was `default=date.today()`, which is evaluated once at import
    # time, stamping every new row with the server-start date. Passing the
    # callable makes SQLAlchemy evaluate it per INSERT.
    start_date = db.Column(db.Date, default=date.today)
    employee_is_admin = db.Column(db.Boolean, default=False, nullable=False)
    employee_is_manager = db.Column(db.Boolean, default=False, nullable=False)
    created_date = db.Column(db.DateTime, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def get_logged_in_employee_id(user):
        """Return the Employee row for the given logged-in user, or None."""
        return Employee.query.filter_by(user_id=user.user_id).first()

    def get_employee_by_id(id):
        """Return the Employee with the given primary key, or None."""
        return Employee.query.get(id)

    def get_manager_name_by_id(self, id):
        """Return "First Last" for the employee with the given id, or None.

        BUG FIX: the row was previously fetched before validating the id and
        formatted without a None check, so an id with no matching Employee
        raised AttributeError.
        """
        emp_id = int(id) if id else None
        if emp_id is not None:
            emp = Employee.query.get(emp_id)
            if emp is not None:
                return "{} {}".format(emp.first_name, emp.last_name)
        return None

    def serialize(self):
        """Return a JSON-serializable dict representation of this Employee."""
        return {
            'id': self.employee_id,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'is_admin': self.employee_is_admin,
            'is_manager': self.employee_is_manager,
            'manager': {
                'id': self.manager_id,
                'name': self.get_manager_name_by_id(self.manager_id)
            },
            'user_id': self.user_id,
            'start_date': self.start_date.strftime('%Y-%m-%d')
        }
# Request parser for employee payloads.
employee_parser = reqparse.RequestParser(bundle_errors=True)
employee_parser.add_argument('first_name', required=True, help="first name is a required parameter!")
employee_parser.add_argument('last_name', required=True, help="last name is a required parameter!")
# NOTE(review): type=bool coerces any non-empty string (including "false")
# to True; flask_restful.inputs.boolean is the usual fix — confirm clients.
employee_parser.add_argument('is_admin', required=True, type=bool, help="is_admin is a required parameter!")
employee_parser.add_argument('is_manager', required=True, type=bool, help="is_manager is a required parameter!")
employee_parser.add_argument('manager', type=dict)
employee_parser.add_argument('start_date', required=True, help="start_date is a required parameter!")
employee_parser.add_argument('user_id')
# Nested parser that extracts 'id' from the 'manager' dict.
manager_parser = reqparse.RequestParser(bundle_errors=True)
manager_parser.add_argument('id', type=dict, location=('manager',))
# NOTE(review): parse_args(req=<parser>) at import time is unusual;
# presumably intended to run per request — verify.
manager_parser = manager_parser.parse_args(req=employee_parser)
class LeaveType(db.Model):
    """Category of leave (e.g. vacation, sick) with an annual day allowance."""
    leave_type_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    days_per_year = db.Column(db.Integer, nullable=False)
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def serialize(self):
        """Return a JSON-serializable dict representation of this LeaveType."""
        return {
            'id': self.leave_type_id,
            'name': self.name,
            'days_per_year': self.days_per_year
        }
# Request parser for leave-type payloads.
leave_type_parser = reqparse.RequestParser(bundle_errors=True)
leave_type_parser.add_argument('name', required=True, help="name is a required parameter!")
# NOTE(review): parsed as float but the days_per_year column is Integer —
# fractional values will be truncated at the DB layer; confirm intent.
leave_type_parser.add_argument('days_per_year', type=float, required=True, help="days_per_year is a required parameter!")
class ApprovalStatus(db.Model):
    """Lookup table of leave-request approval states (e.g. pending/approved)."""
    approval_status_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)

    def serialize(self):
        """Return a JSON-serializable dict representation of this status."""
        return {
            'id': self.approval_status_id,
            'name': self.name,
        }
class LeaveRequest(db.Model):
    """A leave (vacation/absence) request filed by an employee."""
    leave_request_id = db.Column(db.Integer, primary_key=True)
    employee_id = db.Column(db.Integer, db.ForeignKey('employee.employee_id'), nullable=False)
    leave_type_id = db.Column(db.Integer, db.ForeignKey('leave_type.leave_type_id'), nullable=False)
    approval_status_id = db.Column(db.Integer, db.ForeignKey('approval_status.approval_status_id'), nullable=False)
    start_date = db.Column(db.Date, nullable=False)
    end_date = db.Column(db.Date, nullable=False)
    comment = db.Column(db.String(60), nullable=True)
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    updated_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    deleted_date = db.Column(db.DateTime, index=True, nullable=True, default=None)
    employee = db.relationship("Employee", backref='leaverequest')
    leaveType = db.relationship("LeaveType", backref='leaverequest')
    status = db.relationship("ApprovalStatus", backref='leaverequest')

    def get_type_name(self, id):
        """Return the LeaveType name for the given id, or None."""
        lt_id = int(id) if id else None
        if lt_id:
            lt = LeaveType.query.get(lt_id)
            # Guard against a dangling foreign key (was AttributeError).
            return lt.name if lt is not None else None
        return None

    def get_status_name(self, id):
        """Return the ApprovalStatus name for the given id, or None."""
        status_id = int(id) if id else None
        if status_id:
            status = ApprovalStatus.query.get(status_id)
            # Guard against a dangling foreign key (was AttributeError).
            return status.name if status is not None else None
        return None

    def serialize(self):
        """Return a JSON-serializable dict representation of this request."""
        # Fetch the employee once instead of two identical queries.
        employee = Employee.query.get(self.employee_id)
        return {
            'id': self.leave_request_id,
            'employee': {
                'id': self.employee_id,
                'name': '{} {}'.format(employee.first_name, employee.last_name)
            },
            'leave_type': {
                'id': self.leave_type_id,
                'name': self.get_type_name(self.leave_type_id)
            },
            'approval_status': {
                'id': self.approval_status_id,
                'name': self.get_status_name(self.approval_status_id)
            },
            'start_date': self.start_date.strftime('%Y-%m-%d'),
            'end_date': self.end_date.strftime('%Y-%m-%d'),
            'comment': self.comment,
            'submitted_date': self.created_date.strftime('%Y-%m-%d')
        }
# Request parser for leave-request payloads.
leave_parser = reqparse.RequestParser(bundle_errors=True)
leave_parser.add_argument('start_date', required=True, help="start_date is a required parameter!")
leave_parser.add_argument('end_date', required=True, help="end_date is a required parameter!")
leave_parser.add_argument('comment')
leave_parser.add_argument('employee', type=dict)
leave_parser.add_argument('leave_type', type=dict)
leave_parser.add_argument('approval_status', type=dict)
# Nested parsers that pull the 'id' field out of each nested dict.
# NOTE(review): calling parse_args(req=<parser>) at import time is unusual;
# presumably intended to run per request — verify against callers.
leave_employee_parser = reqparse.RequestParser(bundle_errors=True)
leave_employee_parser.add_argument('id', type=dict, location=('employee',))
leave_employee_parser = leave_employee_parser.parse_args(req=leave_parser)
leave_type_parser = reqparse.RequestParser(bundle_errors=True)
leave_type_parser.add_argument('id', type=dict, location=('leave_type',))
leave_type_parser = leave_type_parser.parse_args(req=leave_parser)
approval_status_parser = reqparse.RequestParser(bundle_errors=True)
# BUG FIX: this parser previously read from the 'leave_type' dict
# (copy-paste error); the approval-status id lives under 'approval_status'.
approval_status_parser.add_argument('id', type=dict, location=('approval_status',))
approval_status_parser = approval_status_parser.parse_args(req=leave_parser)
# @Author: <NAME> <valle>
# @Date: 10-May-2017
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 16-Mar-2018
# @License: Apache license vesion 2.0
from kivy.uix.anchorlayout import AnchorLayout
from kivy.storage.jsonstore import JsonStore
from kivy.properties import ObjectProperty, ListProperty, NumericProperty, StringProperty
from kivy.lang import Builder
from glob import glob
from os import rename
from components.labels import LabelClicable
from models.db import Pedidos, QSon, VentasSender
from modals import Efectivo
from datetime import datetime
from valle_libs.tpv.impresora import DocPrint
import threading
# Load the widget's layout rules; `lista` and `pedido` children referenced
# below are presumably defined in this .kv file — confirm there.
Builder.load_file('view/listadopdwidget.kv')


class ListadoPdWidget(AnchorLayout):
    """Widget that lists unpaid ("NPG") orders and handles charging them.

    Orders are fetched from the sales backend (VentasSender); the operator
    can inspect an order's lines, charge it by cash or card, open the cash
    drawer and print a ticket.

    NOTE(review): source indentation was lost upstream; statement nesting
    below was reconstructed — verify against the original repository.
    """
    tpv = ObjectProperty(None)       # parent TPV (point-of-sale) controller
    precio = NumericProperty(0)      # total of the currently selected order
    des = StringProperty('Pedido')   # widget title/description
    db_lista = ListProperty([])      # backend ids of the orders on screen

    def __init__(self, **kargs):
        super(ListadoPdWidget, self).__init__(**kargs)
        self.selected = None         # row button of the selected order
        self.file = None
        # Cash-payment modal; calls back into salir_efectivo on close.
        self.efectivo = Efectivo(onExit=self.salir_efectivo)

    def cobrar_tarjeta(self):
        """Charge the selected order by card, mark it paid and leave."""
        if self.selected != None:
            pd = self.selected.tag.get("db")
            pd['modo_pago'] = "Tarjeta"
            pd['efectivo'] = 0.00
            pd['cambio'] = 0.00
            # Flip the state from not-paid (NPG) to paid (PG).
            pd['estado'] = pd['estado'].replace("NPG", "PG")
            self.save_pedido()
            self.lista.rm_linea(self.selected)
            self.db_lista.remove(pd["id"])
            self.tpv.abrir_cajon()   # open the cash drawer
            self.salir()

    def mostrar_efectivo(self):
        """Open the cash-payment modal preloaded with the order total."""
        self.efectivo.total = str(self.precio)
        self.efectivo.open()

    def salir_efectivo(self, cancelar=True):
        """Close the cash modal; if not cancelled, register a cash payment."""
        self.efectivo.dismiss()
        if cancelar == False:
            pd = self.selected.tag.get("db")
            pd['modo_pago'] = "Efectivo"
            # Strip the euro sign; the backend stores plain numeric strings.
            pd['efectivo'] = self.efectivo.efectivo.replace("€", "")
            pd['cambio'] = self.efectivo.cambio.replace("€", "")
            pd['estado'] = pd['estado'].replace("NPG", "PG")
            self.save_pedido()
            self.lista.rm_linea(self.selected)
            self.db_lista.remove(pd['id'])
            self.tpv.abrir_cajon()
            self.salir()
            # Show the change due to the customer.
            self.tpv.mostrar_men_cobro("Cambio "+ self.efectivo.cambio)

    def salir(self):
        """Reset the widget and return to the TPV start screen."""
        self.clear_self_widget()
        self.tpv.mostrar_inicio()

    def save_pedido(self):
        """Persist the selected order to the backend (fire-and-forget)."""
        sender = VentasSender()
        pd = self.selected.tag.get("db")
        qson = QSon("Pedidos", reg=pd)
        sender.save(qson)
        sender.send(wait=False)

    def clear_self_widget(self):
        """Clear the selection, the displayed total and the order lines."""
        self.selected = None
        self.precio = 0
        self.pedido.rm_all_widgets()

    def mostrar_lista(self):
        """Asynchronously fetch all unpaid orders with lines and clients."""
        self.tpv.show_spin()
        sender = VentasSender()
        # Orders whose state contains NPG_ (not paid).
        qson = QSon("Pedidos", estado__contains="NPG_")
        qson.append_child(QSon("LineasPedido"))
        cl = QSon("Clientes")
        cl.append_child(QSon("Direcciones"))
        qson.append_child(cl)
        sender.filter(qson)
        sender.send(self.run_mostrar_lista, wait=False)

    def run_mostrar_lista(self, req, result):
        """Callback for mostrar_lista: (re)build the on-screen order list."""
        if result["success"] == True:
            result["get"]["pedidos"].reverse()   # newest first
            pedidos = result["get"]["pedidos"]
            # If orders disappeared server-side, rebuild from scratch.
            if len(pedidos) < len(self.db_lista):
                self.db_lista = []
                self.lista.rm_all_widgets()
            for db in pedidos:
                if db["id"] in self.db_lista:
                    continue                     # already displayed
                self.db_lista.append(db['id'])
                # Pick the client's delivery address: default to the first
                # one, then prefer the one whose id matches the client's
                # `direccion` field.
                direccion = ""
                if "clientes" in db:
                    if len(db["clientes"]) > 0:
                        cl = db["clientes"][0]
                        if "direcciones" in cl:
                            direcciones = cl["direcciones"]
                            if len(direcciones) > 0:
                                direccion = direcciones[0]['direccion']
                                for l in direcciones:
                                    if cl["direccion"] == l["id"]:
                                        direccion = l["direccion"]
                btn = LabelClicable(bgColor="#444444",
                                    font_size="16dp",
                                    color="#ffffff")
                btn.tag = {"db": db}
                # `fecha` may arrive as a datetime or as an ISO-ish string.
                if type(db['fecha']) is datetime:
                    fecha = db['fecha'].strftime("%H:%M:%S")
                else:
                    fecha = datetime.strptime(db['fecha'].replace("T", " "), "%Y-%m-%d %H:%M:%S.%f")
                    fecha = fecha.strftime("%H:%M:%S")
                texto = "{0: <10} Total: {1:5.2f} € {3: <20} {2: <30} ".format(fecha, float(db['total']),
                                                                              direccion, db['para_llevar'])
                btn.text = texto
                btn.bind(on_press=self.onPress)
                self.lista.add_linea(btn)
        self.tpv.hide_spin()

    def onPress(self, btn):
        """Show the pressed order's lines and compute its total."""
        self.pedido.rm_all_widgets()
        self.selected = btn
        lineas = self.selected.tag.get("db")["lineaspedido"]
        total = 0
        for item in lineas:
            btn = LabelClicable(bgColor="#444444",
                                font_size = '16dp',
                                color = "#ffffff")
            tl = float(item['total'])
            total += tl
            # Singularize the product-type label (e.g. "pizzas" -> "pizza").
            tipo = "" if not item['tipo'] in ("pizzas", "burger") else item['tipo']
            if tipo.endswith("s"):
                tipo = tipo[:-1]
            btn.text = "{0: >4} {4} {1} {2: <30} {3:.2f} €".format(item['cant'], item['text'],
                                                                   item['des'].lower(), tl, tipo)
            self.pedido.add_linea(btn)
        self.precio = total

    def cobrar(self):
        """Charge the selected order in cash (opens the cash modal)."""
        if self.selected != None:
            self.mostrar_efectivo()

    def imprimirTk(self):
        """Print the selected order's ticket on a background thread."""
        if self.selected != None:
            r = threading.Thread(target=self.tpv.imprimir_directo,
                                 args=(self.selected.tag["db"],))
            r.start()
            # NOTE(review): original indentation ambiguous — salir() may
            # belong outside the if; verify.
            self.salir()
# @Author: <NAME> <valle>
# @Date: 10-May-2017
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 16-Mar-2018
# @License: Apache license vesion 2.0
from kivy.uix.anchorlayout import AnchorLayout
from kivy.storage.jsonstore import JsonStore
from kivy.properties import ObjectProperty, ListProperty, NumericProperty, StringProperty
from kivy.lang import Builder
from glob import glob
from os import rename
from components.labels import LabelClicable
from models.db import Pedidos, QSon, VentasSender
from modals import Efectivo
from datetime import datetime
from valle_libs.tpv.impresora import DocPrint
import threading
# Load the widget's layout rules; `lista` and `pedido` children referenced
# below are presumably defined in this .kv file — confirm there.
Builder.load_file('view/listadopdwidget.kv')


class ListadoPdWidget(AnchorLayout):
    """Widget that lists unpaid ("NPG") orders and handles charging them.

    Orders are fetched from the sales backend (VentasSender); the operator
    can inspect an order's lines, charge it by cash or card, open the cash
    drawer and print a ticket.

    NOTE(review): source indentation was lost upstream; statement nesting
    below was reconstructed — verify against the original repository.
    """
    tpv = ObjectProperty(None)       # parent TPV (point-of-sale) controller
    precio = NumericProperty(0)      # total of the currently selected order
    des = StringProperty('Pedido')   # widget title/description
    db_lista = ListProperty([])      # backend ids of the orders on screen

    def __init__(self, **kargs):
        super(ListadoPdWidget, self).__init__(**kargs)
        self.selected = None         # row button of the selected order
        self.file = None
        # Cash-payment modal; calls back into salir_efectivo on close.
        self.efectivo = Efectivo(onExit=self.salir_efectivo)

    def cobrar_tarjeta(self):
        """Charge the selected order by card, mark it paid and leave."""
        if self.selected != None:
            pd = self.selected.tag.get("db")
            pd['modo_pago'] = "Tarjeta"
            pd['efectivo'] = 0.00
            pd['cambio'] = 0.00
            # Flip the state from not-paid (NPG) to paid (PG).
            pd['estado'] = pd['estado'].replace("NPG", "PG")
            self.save_pedido()
            self.lista.rm_linea(self.selected)
            self.db_lista.remove(pd["id"])
            self.tpv.abrir_cajon()   # open the cash drawer
            self.salir()

    def mostrar_efectivo(self):
        """Open the cash-payment modal preloaded with the order total."""
        self.efectivo.total = str(self.precio)
        self.efectivo.open()

    def salir_efectivo(self, cancelar=True):
        """Close the cash modal; if not cancelled, register a cash payment."""
        self.efectivo.dismiss()
        if cancelar == False:
            pd = self.selected.tag.get("db")
            pd['modo_pago'] = "Efectivo"
            # Strip the euro sign; the backend stores plain numeric strings.
            pd['efectivo'] = self.efectivo.efectivo.replace("€", "")
            pd['cambio'] = self.efectivo.cambio.replace("€", "")
            pd['estado'] = pd['estado'].replace("NPG", "PG")
            self.save_pedido()
            self.lista.rm_linea(self.selected)
            self.db_lista.remove(pd['id'])
            self.tpv.abrir_cajon()
            self.salir()
            # Show the change due to the customer.
            self.tpv.mostrar_men_cobro("Cambio "+ self.efectivo.cambio)

    def salir(self):
        """Reset the widget and return to the TPV start screen."""
        self.clear_self_widget()
        self.tpv.mostrar_inicio()

    def save_pedido(self):
        """Persist the selected order to the backend (fire-and-forget)."""
        sender = VentasSender()
        pd = self.selected.tag.get("db")
        qson = QSon("Pedidos", reg=pd)
        sender.save(qson)
        sender.send(wait=False)

    def clear_self_widget(self):
        """Clear the selection, the displayed total and the order lines."""
        self.selected = None
        self.precio = 0
        self.pedido.rm_all_widgets()

    def mostrar_lista(self):
        """Asynchronously fetch all unpaid orders with lines and clients."""
        self.tpv.show_spin()
        sender = VentasSender()
        # Orders whose state contains NPG_ (not paid).
        qson = QSon("Pedidos", estado__contains="NPG_")
        qson.append_child(QSon("LineasPedido"))
        cl = QSon("Clientes")
        cl.append_child(QSon("Direcciones"))
        qson.append_child(cl)
        sender.filter(qson)
        sender.send(self.run_mostrar_lista, wait=False)

    def run_mostrar_lista(self, req, result):
        """Callback for mostrar_lista: (re)build the on-screen order list."""
        if result["success"] == True:
            result["get"]["pedidos"].reverse()   # newest first
            pedidos = result["get"]["pedidos"]
            # If orders disappeared server-side, rebuild from scratch.
            if len(pedidos) < len(self.db_lista):
                self.db_lista = []
                self.lista.rm_all_widgets()
            for db in pedidos:
                if db["id"] in self.db_lista:
                    continue                     # already displayed
                self.db_lista.append(db['id'])
                # Pick the client's delivery address: default to the first
                # one, then prefer the one whose id matches the client's
                # `direccion` field.
                direccion = ""
                if "clientes" in db:
                    if len(db["clientes"]) > 0:
                        cl = db["clientes"][0]
                        if "direcciones" in cl:
                            direcciones = cl["direcciones"]
                            if len(direcciones) > 0:
                                direccion = direcciones[0]['direccion']
                                for l in direcciones:
                                    if cl["direccion"] == l["id"]:
                                        direccion = l["direccion"]
                btn = LabelClicable(bgColor="#444444",
                                    font_size="16dp",
                                    color="#ffffff")
                btn.tag = {"db": db}
                # `fecha` may arrive as a datetime or as an ISO-ish string.
                if type(db['fecha']) is datetime:
                    fecha = db['fecha'].strftime("%H:%M:%S")
                else:
                    fecha = datetime.strptime(db['fecha'].replace("T", " "), "%Y-%m-%d %H:%M:%S.%f")
                    fecha = fecha.strftime("%H:%M:%S")
                texto = "{0: <10} Total: {1:5.2f} € {3: <20} {2: <30} ".format(fecha, float(db['total']),
                                                                              direccion, db['para_llevar'])
                btn.text = texto
                btn.bind(on_press=self.onPress)
                self.lista.add_linea(btn)
        self.tpv.hide_spin()

    def onPress(self, btn):
        """Show the pressed order's lines and compute its total."""
        self.pedido.rm_all_widgets()
        self.selected = btn
        lineas = self.selected.tag.get("db")["lineaspedido"]
        total = 0
        for item in lineas:
            btn = LabelClicable(bgColor="#444444",
                                font_size = '16dp',
                                color = "#ffffff")
            tl = float(item['total'])
            total += tl
            # Singularize the product-type label (e.g. "pizzas" -> "pizza").
            tipo = "" if not item['tipo'] in ("pizzas", "burger") else item['tipo']
            if tipo.endswith("s"):
                tipo = tipo[:-1]
            btn.text = "{0: >4} {4} {1} {2: <30} {3:.2f} €".format(item['cant'], item['text'],
                                                                   item['des'].lower(), tl, tipo)
            self.pedido.add_linea(btn)
        self.precio = total

    def cobrar(self):
        """Charge the selected order in cash (opens the cash modal)."""
        if self.selected != None:
            self.mostrar_efectivo()

    def imprimirTk(self):
        """Print the selected order's ticket on a background thread."""
        if self.selected != None:
            r = threading.Thread(target=self.tpv.imprimir_directo,
                                 args=(self.selected.tag["db"],))
            r.start()
            # NOTE(review): original indentation ambiguous — salir() may
            # belong outside the if; verify.
            self.salir()
import unittest
from dependency_injector import containers, providers
class TraverseProviderTests(unittest.TestCase):
    """Provider traversal through a container *instance*."""

    def test_nested_providers(self):
        """traverse() yields the factory plus its nested Resource kwargs."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        container = Container()
        all_providers = list(container.traverse())

        # Exactly three providers: the factory and its two resources.
        self.assertIn(container.obj_factory, all_providers)
        self.assertIn(container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 3)

    def test_nested_providers_with_filtering(self):
        """traverse(types=...) keeps only providers of the given types."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        container = Container()
        all_providers = list(container.traverse(types=[providers.Resource]))

        # The DelegatedFactory itself is filtered out.
        self.assertIn(container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 2)
class TraverseProviderDeclarativeTests(unittest.TestCase):
    """Provider traversal through the declarative container *class* itself."""

    def test_nested_providers(self):
        """Class-level traverse() yields the factory and its Resource kwargs."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        all_providers = list(Container.traverse())

        # Exactly three providers: the factory and its two resources.
        self.assertIn(Container.obj_factory, all_providers)
        self.assertIn(Container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(Container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 3)

    def test_nested_providers_with_filtering(self):
        """Class-level traverse(types=...) filters by provider type."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        all_providers = list(Container.traverse(types=[providers.Resource]))

        # The DelegatedFactory itself is filtered out.
        self.assertIn(Container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(Container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 2)
from dependency_injector import containers, providers
class TraverseProviderTests(unittest.TestCase):
    """Provider traversal through a container *instance*."""

    def test_nested_providers(self):
        """traverse() yields the factory plus its nested Resource kwargs."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        container = Container()
        all_providers = list(container.traverse())

        # Exactly three providers: the factory and its two resources.
        self.assertIn(container.obj_factory, all_providers)
        self.assertIn(container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 3)

    def test_nested_providers_with_filtering(self):
        """traverse(types=...) keeps only providers of the given types."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        container = Container()
        all_providers = list(container.traverse(types=[providers.Resource]))

        # The DelegatedFactory itself is filtered out.
        self.assertIn(container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 2)
class TraverseProviderDeclarativeTests(unittest.TestCase):
    """Provider traversal through the declarative container *class* itself."""

    def test_nested_providers(self):
        """Class-level traverse() yields the factory and its Resource kwargs."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        all_providers = list(Container.traverse())

        # Exactly three providers: the factory and its two resources.
        self.assertIn(Container.obj_factory, all_providers)
        self.assertIn(Container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(Container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 3)

    def test_nested_providers_with_filtering(self):
        """Class-level traverse(types=...) filters by provider type."""
        class Container(containers.DeclarativeContainer):
            obj_factory = providers.DelegatedFactory(
                dict,
                foo=providers.Resource(
                    dict,
                    foo='bar'
                ),
                bar=providers.Resource(
                    dict,
                    foo='bar'
                )
            )

        all_providers = list(Container.traverse(types=[providers.Resource]))

        # The DelegatedFactory itself is filtered out.
        self.assertIn(Container.obj_factory.kwargs['foo'], all_providers)
        self.assertIn(Container.obj_factory.kwargs['bar'], all_providers)
        self.assertEqual(len(all_providers), 2)
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
    """Layer computing the masked SSIM loss between a pair of images.

    Inputs and output are channels-last ([B, H, W, C]); tensors are
    permuted to channels-first internally for the 2-D pooling layers.
    """

    def __init__(self):
        super(SSIM, self).__init__()
        # 3x3 average pools (stride 1) act as the local SSIM windows.
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.mask_pool = nn.AvgPool2d(3, 1)
        # Stabilization constants (presumably the standard SSIM
        # C1=(0.01*L)^2, C2=(0.03*L)^2 with L=1).
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y, mask):
        # [B, H, W, C] -> [B, C, H, W] for the pooling layers.
        xc, yc, mc = (t.permute(0, 3, 1, 2) for t in (x, y, mask))
        mean_x = self.mu_x_pool(xc)
        mean_y = self.mu_y_pool(yc)
        var_x = self.sig_x_pool(xc ** 2) - mean_x ** 2
        var_y = self.sig_y_pool(yc ** 2) - mean_y ** 2
        cov_xy = self.sig_xy_pool(xc * yc) - mean_x * mean_y
        numer = (2 * mean_x * mean_y + self.C1) * (2 * cov_xy + self.C2)
        denom = (mean_x ** 2 + mean_y ** 2 + self.C1) * (var_x + var_y + self.C2)
        # Dissimilarity (1 - SSIM)/2, clamped to [0, 1] and masked.
        dissim = torch.clamp((1 - numer / denom) / 2, 0, 1)
        result = self.mask_pool(mc) * dissim
        # [B, C, H, W] -> [B, H, W, C]
        return result.permute(0, 2, 3, 1)
def gradient_x(img):
    """Backward difference along the width axis (axis 2) of an NHWC tensor.

    Returns img[..., w, :] - img[..., w+1, :]; width shrinks by one.
    """
    left, right = img[:, :, :-1, :], img[:, :, 1:, :]
    return left - right
def gradient_y(img):
    """Backward difference along the height axis (axis 1) of an NHWC tensor.

    Returns img[:, h] - img[:, h+1]; height shrinks by one.
    """
    upper, lower = img[:, :-1, :, :], img[:, 1:, :, :]
    return upper - lower
def gradient(pred):
    """Forward differences of an NHWC tensor along width and height.

    Returns (D_dx, D_dy). Note the sign convention is opposite to
    gradient_x/gradient_y (next - previous rather than previous - next).
    """
    dy = pred[:, 1:, :, :] - pred[:, :-1, :, :]
    dx = pred[:, :, 1:, :] - pred[:, :, :-1, :]
    return dx, dy
def depth_smoothness(depth, img, lambda_wt=1):
    """Computes image-aware depth smoothness loss.

    Depth gradients are attenuated by exp(-lambda_wt * mean |image gradient|)
    so depth discontinuities along strong image edges are penalized less.
    Both tensors are channels-last (NHWC).
    """
    # Edge strength of the image, averaged over channels (axis 3).
    edge_x = torch.mean(torch.abs(gradient_x(img)), 3, keepdim=True)
    edge_y = torch.mean(torch.abs(gradient_y(img)), 3, keepdim=True)
    # Edge-aware weighting of the depth gradients.
    weighted_dx = gradient_x(depth) * torch.exp(-(lambda_wt * edge_x))
    weighted_dy = gradient_y(depth) * torch.exp(-(lambda_wt * edge_y))
    return torch.mean(torch.abs(weighted_dx)) + torch.mean(torch.abs(weighted_dy))
def compute_reconstr_loss(warped, ref, mask, simple=True):
    """Masked photometric reconstruction loss between warped and reference.

    With simple=True this is the plain smooth-L1 photometric loss; otherwise
    it is an equal blend of the photometric loss and a gradient (edge) loss.
    """
    masked_warped = warped * mask
    masked_ref = ref * mask
    photo_loss = F.smooth_l1_loss(masked_warped, masked_ref, reduction='mean')
    if simple:
        return photo_loss
    alpha = 0.5
    ref_dx, ref_dy = gradient(masked_ref)
    warped_dx, warped_dy = gradient(masked_warped)
    grad_loss = (F.smooth_l1_loss(warped_dx, ref_dx, reduction='mean')
                 + F.smooth_l1_loss(warped_dy, ref_dy, reduction='mean'))
    return (1 - alpha) * photo_loss + alpha * grad_loss
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
    """Layer computing the masked SSIM loss between a pair of images.

    Inputs and output are channels-last ([B, H, W, C]); tensors are
    permuted to channels-first internally for the 2-D pooling layers.
    """

    def __init__(self):
        super(SSIM, self).__init__()
        # 3x3 average pools (stride 1) act as the local SSIM windows.
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.mask_pool = nn.AvgPool2d(3, 1)
        # Stabilization constants (presumably the standard SSIM
        # C1=(0.01*L)^2, C2=(0.03*L)^2 with L=1).
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y, mask):
        # [B, H, W, C] -> [B, C, H, W] for the pooling layers.
        xc, yc, mc = (t.permute(0, 3, 1, 2) for t in (x, y, mask))
        mean_x = self.mu_x_pool(xc)
        mean_y = self.mu_y_pool(yc)
        var_x = self.sig_x_pool(xc ** 2) - mean_x ** 2
        var_y = self.sig_y_pool(yc ** 2) - mean_y ** 2
        cov_xy = self.sig_xy_pool(xc * yc) - mean_x * mean_y
        numer = (2 * mean_x * mean_y + self.C1) * (2 * cov_xy + self.C2)
        denom = (mean_x ** 2 + mean_y ** 2 + self.C1) * (var_x + var_y + self.C2)
        # Dissimilarity (1 - SSIM)/2, clamped to [0, 1] and masked.
        dissim = torch.clamp((1 - numer / denom) / 2, 0, 1)
        result = self.mask_pool(mc) * dissim
        # [B, C, H, W] -> [B, H, W, C]
        return result.permute(0, 2, 3, 1)
def gradient_x(img):
    """Backward difference along the width axis (axis 2) of an NHWC tensor.

    Returns img[..., w, :] - img[..., w+1, :]; width shrinks by one.
    """
    left, right = img[:, :, :-1, :], img[:, :, 1:, :]
    return left - right
def gradient_y(img):
    """Backward difference along the height axis (axis 1) of an NHWC tensor.

    Returns img[:, h] - img[:, h+1]; height shrinks by one.
    """
    upper, lower = img[:, :-1, :, :], img[:, 1:, :, :]
    return upper - lower
def gradient(pred):
    """Forward differences of an NHWC tensor along width and height.

    Returns (D_dx, D_dy). Note the sign convention is opposite to
    gradient_x/gradient_y (next - previous rather than previous - next).
    """
    dy = pred[:, 1:, :, :] - pred[:, :-1, :, :]
    dx = pred[:, :, 1:, :] - pred[:, :, :-1, :]
    return dx, dy
def depth_smoothness(depth, img, lambda_wt=1):
    """Computes image-aware depth smoothness loss.

    Depth gradients are attenuated by exp(-lambda_wt * mean |image gradient|)
    so depth discontinuities along strong image edges are penalized less.
    Both tensors are channels-last (NHWC).
    """
    # Edge strength of the image, averaged over channels (axis 3).
    edge_x = torch.mean(torch.abs(gradient_x(img)), 3, keepdim=True)
    edge_y = torch.mean(torch.abs(gradient_y(img)), 3, keepdim=True)
    # Edge-aware weighting of the depth gradients.
    weighted_dx = gradient_x(depth) * torch.exp(-(lambda_wt * edge_x))
    weighted_dy = gradient_y(depth) * torch.exp(-(lambda_wt * edge_y))
    return torch.mean(torch.abs(weighted_dx)) + torch.mean(torch.abs(weighted_dy))
def compute_reconstr_loss(warped, ref, mask, simple=True):
    """Masked photometric reconstruction loss between warped and reference.

    With simple=True this is the plain smooth-L1 photometric loss; otherwise
    it is an equal blend of the photometric loss and a gradient (edge) loss.
    """
    masked_warped = warped * mask
    masked_ref = ref * mask
    photo_loss = F.smooth_l1_loss(masked_warped, masked_ref, reduction='mean')
    if simple:
        return photo_loss
    alpha = 0.5
    ref_dx, ref_dy = gradient(masked_ref)
    warped_dx, warped_dy = gradient(masked_warped)
    grad_loss = (F.smooth_l1_loss(warped_dx, ref_dx, reduction='mean')
                 + F.smooth_l1_loss(warped_dy, ref_dy, reduction='mean'))
    return (1 - alpha) * photo_loss + alpha * grad_loss
import sys
import numpy as np
import pickle
import scipy
from scipy.spatial.distance import squareform
from scipy.stats import zscore
from scipy.cluster import hierarchy
from tqdm import tqdm
from collections import namedtuple
from idpflex.distances import (rmsd_matrix, extract_coordinates)
from idpflex.cnextend import Tree
from idpflex.properties import ScalarProperty, propagator_size_weighted_sum
class ClusterTrove(namedtuple('ClusterTrove', 'idx rmsd tree')):
    r"""Named-tuple container for trajectory clustering results, with a
    `keys()` helper for easy field access.

    Fields
    ------
    idx : :class:`list`
        Frame indexes for the representative structures (indexes start
        at zero).
    rmsd : :class:`~numpy:numpy.ndarray`
        Distance matrix between representative structures.
    tree : :class:`~idpflex.cnextend.Tree`
        Clustering of representative structures; each leaf carries an
        `iframe` property, the trajectory frame index of its centroid.
    """

    def keys(self):
        r"""Return the tuple of field names."""
        return self._fields

    def save(self, filename):
        r"""Serialize this trove with pickle and write it to *filename*.

        Parameters
        ----------
        filename : str
            Destination file path.
        """
        with open(filename, 'wb') as handle:
            pickle.dump(self, handle)
def trajectory_centroids(a_universe, selection='not name H*',
                         segment_length=1000, n_representatives=1000):
    r"""Cluster a set of consecutive trajectory segments into a set
    of representative structures via structural similarity (RMSD)

    The simulated trajectory is divided into consecutive segments, and
    hierarchical clustering is performed on each segment to yield a
    limited number of representative structures (centroids) per segment.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    rep_ifr : list
        Frame indexes of representative structures (centroids)
    """  # noqa: E501
    group = a_universe.select_atoms(selection)
    # Fragmentation of the trajectory. Integer division: frames beyond the
    # last whole segment are ignored.
    n_frame = len(a_universe.trajectory)
    n_segments = int(n_frame / segment_length)
    nc = max(1, int(n_representatives / n_segments))  # clusters per segment
    rep_ifr = list()  # frame indexes of representative structures
    info = """Clustering the trajectory:
    Creating {} representatives by partitioning {} frames into {} segments
    and retrieving {} representatives from each segment.
    """.format(nc * n_segments, n_frame, n_segments, nc)
    sys.stdout.write(info)
    sys.stdout.flush()
    # Hierarchical clustering on each trajectory fragment
    for i_segment in tqdm(range(n_segments)):
        indexes = range(i_segment * segment_length,
                        (i_segment + 1) * segment_length)
        xyz = extract_coordinates(a_universe, group, indexes)
        # Condensed RMSD matrix between the segment's frames.
        rmsd = rmsd_matrix(xyz, condensed=True)
        z = hierarchy.linkage(rmsd, method='complete')
        # Presumably nodes_at_depth(nc-1) yields nc cluster nodes; each
        # contributes one centroid frame — confirm in idpflex.cnextend.
        for node in Tree(z=z).nodes_at_depth(nc-1):
            # Find the frame of each representative structure
            i_frame = i_segment * segment_length + node.representative(rmsd).id
            rep_ifr.append(i_frame)
    rep_ifr.sort()
    return rep_ifr
def cluster_with_properties(a_universe, pcls, p_names=None,
                            selection='not name H*', segment_length=1000,
                            n_representatives=1000):
    r"""Cluster a set of representative structures by structural similarity
    (RMSD) and by a set of properties

    The simulated trajectory is divided into segments, and hierarchical
    clustering is performed on each segment to yield a limited number of
    representative structures (the centroids). Properties are calculated
    for each centroid, thus each centroid is described by a property
    vector. The dimensionality of the vector is related to the number of
    properties and the dimensionality of each property.

    The distances between any two centroids is calculated as the
    Euclidean distance between their respective vector properties.
    The distance matrix containing distances between all possible
    centroid pairs is employed as the similarity measure to generate
    the hierarchical tree of centroids.

    The properties calculated for the centroids are stored in the
    leaf nodes of the hierarchical tree. Properties are then propagated
    up to the tree's root node.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    pcls : list
        Property classes, such as :class:`~idpflex.properties.Asphericity`
        of :class:`~idpflex.properties.SaSa`
    p_names : list
        Property names. If None, then default property names are used
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        Hierarchical clustering tree of the centroids
    """  # noqa: E501
    rep_ifr = trajectory_centroids(a_universe, selection=selection,
                                   segment_length=segment_length,
                                   n_representatives=n_representatives)
    n_centroids = len(rep_ifr)  # can be different than n_representatives
    # Create names if not passed
    if p_names is None:
        p_names = [Property.default_name for Property in pcls]
    # Calculate properties for each centroid
    l_prop = list()
    for p_name, Pcl in zip(p_names, pcls):
        l_prop.append([Pcl(name=p_name).from_universe(a_universe, index=i)
                       for i in tqdm(rep_ifr)])
    # Calculate distances between pair of centroids.
    # Rows are properties, columns are centroids.
    xyz = np.zeros((len(pcls), n_centroids))
    for i_prop, prop in enumerate(l_prop):
        xyz[i_prop] = [p.y for p in prop]
    # zero mean and unity variance for each property
    xyz = np.transpose(zscore(xyz, axis=1))
    # Pairwise Euclidean distances between property vectors, converted to
    # condensed form for scipy's linkage.
    distance_matrix = squareform(scipy.spatial.distance_matrix(xyz, xyz))
    # Cluster the representative structures
    tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
    for i_leaf, leaf in enumerate(tree.leafs):
        # Record each centroid's trajectory frame index in its leaf node.
        prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
        leaf[prop.name] = prop
    # Propagate the properties up the tree
    [propagator_size_weighted_sum(prop, tree) for prop in l_prop]
    return ClusterTrove(rep_ifr, distance_matrix, tree)
def cluster_trajectory(a_universe, selection='not name H*',
                       segment_length=1000, n_representatives=1000):
    r"""Cluster a set of representative structures by structural similarity
    (RMSD)

    The simulated trajectory is divided into segments, and hierarchical
    clustering is performed on each segment to yield a limited number of
    representative structures. These are then clustered into the final
    hierarchical tree.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        clustering results for the representatives
    """  # noqa: E501
    rep_ifr = trajectory_centroids(a_universe, selection=selection,
                                   segment_length=segment_length,
                                   n_representatives=n_representatives)
    group = a_universe.select_atoms(selection)
    xyz = extract_coordinates(a_universe, group, rep_ifr)
    # Condensed RMSD matrix between all representative structures
    distance_matrix = rmsd_matrix(xyz, condensed=True)
    # Cluster the representative structures
    tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
    for i_leaf, leaf in enumerate(tree.leafs):
        # Store the trajectory frame index of each centroid in its leaf
        prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
        leaf[prop.name] = prop
    return ClusterTrove(rep_ifr, distance_matrix, tree)
def load_cluster_trove(filename):
    r"""Load a previously saved
    :class:`~idpflex.cluster.ClusterTrove` instance

    Parameters
    ----------
    filename: str
        File name containing the serialized
        :class:`~idpflex.cluster.ClusterTrove`

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        Cluster trove instance stored in file
    """
    # NOTE: pickle can execute arbitrary code; only load trusted files.
    with open(filename, 'rb') as infile:
        t = pickle.load(infile)
    return t
import numpy as np
import pickle
import scipy
from scipy.spatial.distance import squareform
from scipy.stats import zscore
from scipy.cluster import hierarchy
from tqdm import tqdm
from collections import namedtuple
from idpflex.distances import (rmsd_matrix, extract_coordinates)
from idpflex.cnextend import Tree
from idpflex.properties import ScalarProperty, propagator_size_weighted_sum
class ClusterTrove(namedtuple('ClusterTrove', 'idx rmsd tree')):
    r"""Immutable record of one clustering run, with a dict-like
    ``keys()`` accessor.

    Parameters
    ----------
    idx : :class:`list`
        Frame indexes for the representative structures (indexes start at zero)
    rmsd : :class:`~numpy:numpy.ndarray`
        distance matrix between representative structures.
    tree : :class:`~idpflex.cnextend.Tree`
        Clustering of representative structures. Leaf nodes associated with
        each centroid contain property `iframe`, which is the frame index
        in the trajectory pointing to the atomic structure corresponding to
        the centroid.
    """

    def keys(self):
        r"""Return the tuple of field names."""
        return self._fields

    def save(self, filename):
        r"""Serialize this trove with :mod:`pickle` and write it to disk.

        Parameters
        ----------
        filename: str
            File name
        """
        with open(filename, 'wb') as handle:
            pickle.dump(self, handle)
def trajectory_centroids(a_universe, selection='not name H*',
                         segment_length=1000, n_representatives=1000):
    r"""Cluster a set of consecutive trajectory segments into a set
    of representative structures via structural similarity (RMSD)

    The simulated trajectory is divided into consecutive segments, and
    hierarchical clustering is performed on each segment to yield a
    limited number of representative structures (centroids) per segment.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    rep_ifr : list
        Frame indexes of representative structures (centroids)
    """  # noqa: E501
    group = a_universe.select_atoms(selection)
    # Fragmentation of the trajectory
    n_frame = len(a_universe.trajectory)
    # NOTE(review): trailing frames beyond the last full segment are ignored
    n_segments = int(n_frame / segment_length)
    nc = max(1, int(n_representatives / n_segments))  # clusters per segment
    rep_ifr = list()  # frame indexes of representative structures
    info = """Clustering the trajectory:
    Creating {} representatives by partitioning {} frames into {} segments
    and retrieving {} representatives from each segment.
    """.format(nc * n_segments, n_frame, n_segments, nc)
    sys.stdout.write(info)
    sys.stdout.flush()
    # Hierarchical clustering on each trajectory fragment
    for i_segment in tqdm(range(n_segments)):
        indexes = range(i_segment * segment_length,
                        (i_segment + 1) * segment_length)
        xyz = extract_coordinates(a_universe, group, indexes)
        rmsd = rmsd_matrix(xyz, condensed=True)
        z = hierarchy.linkage(rmsd, method='complete')
        # Cut the tree at depth nc-1 — presumably yields nc cluster nodes;
        # confirm against idpflex.cnextend.Tree.nodes_at_depth
        for node in Tree(z=z).nodes_at_depth(nc-1):
            # Find the frame of each representative structure
            i_frame = i_segment * segment_length + node.representative(rmsd).id
            rep_ifr.append(i_frame)
    rep_ifr.sort()
    return rep_ifr
def cluster_with_properties(a_universe, pcls, p_names=None,
                            selection='not name H*', segment_length=1000,
                            n_representatives=1000):
    r"""Cluster a set of representative structures by structural similarity
    (RMSD) and by a set of properties

    The simulated trajectory is divided into segments, and hierarchical
    clustering is performed on each segment to yield a limited number of
    representative structures (the centroids). Properties are calculated
    for each centroid, thus each centroid is described by a property
    vector. The dimensionality of the vector is related to the number of
    properties and the dimensionality of each property.
    The distances between any two centroids is calculated as the
    Euclidean distance between their respective vector properties.
    The distance matrix containing distances between all possible
    centroid pairs is employed as the similarity measure to generate
    the hierarchical tree of centroids.
    The properties calculated for the centroids are stored in the
    leaf nodes of the hierarchical tree. Properties are then propagated
    up to the tree's root node.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    pcls : list
        Property classes, such as :class:`~idpflex.properties.Asphericity`
        or :class:`~idpflex.properties.SaSa`
    p_names : list
        Property names. If None, then default property names are used
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        Hierarchical clustering tree of the centroids
    """  # noqa: E501
    rep_ifr = trajectory_centroids(a_universe, selection=selection,
                                   segment_length=segment_length,
                                   n_representatives=n_representatives)
    n_centroids = len(rep_ifr)  # can be different than n_representatives
    # Create names if not passed
    if p_names is None:
        p_names = [Property.default_name for Property in pcls]
    # Calculate properties for each centroid
    l_prop = list()
    for p_name, Pcl in zip(p_names, pcls):
        l_prop.append([Pcl(name=p_name).from_universe(a_universe, index=i)
                       for i in tqdm(rep_ifr)])
    # Calculate distances between pair of centroids.
    # NOTE(review): xyz is (n_properties, n_centroids), i.e. each
    # property's ``y`` is assumed scalar — confirm for the classes used.
    xyz = np.zeros((len(pcls), n_centroids))
    for i_prop, prop in enumerate(l_prop):
        xyz[i_prop] = [p.y for p in prop]
    # zero mean and unity variance for each property
    xyz = np.transpose(zscore(xyz, axis=1))
    # Condensed (upper-triangle) Euclidean distance matrix between centroids
    distance_matrix = squareform(scipy.spatial.distance_matrix(xyz, xyz))
    # Cluster the representative structures
    tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
    for i_leaf, leaf in enumerate(tree.leafs):
        # Store the trajectory frame index of each centroid in its leaf
        prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
        leaf[prop.name] = prop
    # Propagate the properties up the tree
    [propagator_size_weighted_sum(prop, tree) for prop in l_prop]
    return ClusterTrove(rep_ifr, distance_matrix, tree)
def cluster_trajectory(a_universe, selection='not name H*',
                       segment_length=1000, n_representatives=1000):
    r"""Cluster a set of representative structures by structural similarity
    (RMSD)

    The simulated trajectory is divided into segments, and hierarchical
    clustering is performed on each segment to yield a limited number of
    representative structures. These are then clustered into the final
    hierarchical tree.

    Parameters
    ----------
    a_universe : :class:`~MDAnalysis.core.universe.Universe`
        Topology and trajectory.
    selection : str
        atoms for which to calculate RMSD. See the
        `selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
        for atom selection syntax.
    segment_length: int
        divide trajectory into segments of this length
    n_representatives : int
        Desired total number of representative structures. The final number
        may be close but not equal to the desired number.

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        clustering results for the representatives
    """  # noqa: E501
    rep_ifr = trajectory_centroids(a_universe, selection=selection,
                                   segment_length=segment_length,
                                   n_representatives=n_representatives)
    group = a_universe.select_atoms(selection)
    xyz = extract_coordinates(a_universe, group, rep_ifr)
    # Condensed RMSD matrix between all representative structures
    distance_matrix = rmsd_matrix(xyz, condensed=True)
    # Cluster the representative structures
    tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
    for i_leaf, leaf in enumerate(tree.leafs):
        # Store the trajectory frame index of each centroid in its leaf
        prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
        leaf[prop.name] = prop
    return ClusterTrove(rep_ifr, distance_matrix, tree)
def load_cluster_trove(filename):
    r"""Load a previously saved
    :class:`~idpflex.cluster.ClusterTrove` instance

    Parameters
    ----------
    filename: str
        File name containing the serialized
        :class:`~idpflex.cluster.ClusterTrove`

    Returns
    -------
    :class:`~idpflex.cluster.ClusterTrove`
        Cluster trove instance stored in file
    """
    # NOTE: pickle can execute arbitrary code; only load trusted files.
    with open(filename, 'rb') as infile:
        t = pickle.load(infile)
    return t
import unittest
from tplink_wr.parse import html
class TestScriptFinder(unittest.TestCase):
    """Unit tests for ``html.ScriptFinder``: extraction of the bodies of
    inline ``<script>`` elements from HTML markup."""

    def test_exist(self):
        finder = html.ScriptFinder()
        finder.feed("<script>var abc = true;</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_uppercase(self):
        # Tag matching is case-insensitive
        finder = html.ScriptFinder()
        finder.feed("<SCRIPT>var abc = true;</SCRIPT>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_multiline(self):
        # Script bodies are returned verbatim, newlines included
        finder = html.ScriptFinder()
        finder.feed("<script>\nvar abc = true;\nvar def = false;\n</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["\nvar abc = true;\nvar def = false;\n"])

    def test_exist_attrs(self):
        finder = html.ScriptFinder()
        finder.feed('<script language="javascript" type="text/javascript">var abc = true;</script>')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_empty(self):
        # Whitespace-only script bodies are discarded
        finder = html.ScriptFinder()
        finder.feed("<script>\n</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_exist_src(self):
        # External scripts (src attribute, empty body) are ignored
        finder = html.ScriptFinder()
        finder.feed('<script language="javascript" src="../dynaform/common.js" type="text/javascript"></script>')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_exist_multiple(self):
        finder = html.ScriptFinder()
        finder.feed("""
        <script>var abc = true;</script>
        <script>var def = false;</script>
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;", "var def = false;"])

    def test_exist_other_tags(self):
        finder = html.ScriptFinder()
        finder.feed(""",
        <title>Some scripts here</title>
        <script>var abc = true;</script>
        <meta charset="utf-8">
        <script>var def = false;</script>
        <script language="javascript" src="../dynaform/common.js" type="text/javascript"></script>
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;", "var def = false;"])

    def test_exist_not_closed(self):
        # An unterminated <script> contributes nothing
        finder = html.ScriptFinder()
        finder.feed('<script>var abc = "true";')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_not_exist(self):
        finder = html.ScriptFinder()
        finder.feed("""
        <title>No scripts here</title>
        <meta charset="utf-8">
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_clear(self):
        # close() resets previously collected scripts
        finder = html.ScriptFinder()
        finder.feed("""
        <script>var abc = true;</script>
        <script>var def = false;</script>
        """)
        finder.close()
        finder.feed("""
        <title>No scripts here</title>
        <meta charset="utf-8">
        """)
        scripts = finder.get_scripts()
        self.assertEqual(len(scripts), 0)
from tplink_wr.parse import html
class TestScriptFinder(unittest.TestCase):
    """Unit tests for ``html.ScriptFinder``: extraction of the bodies of
    inline ``<script>`` elements from HTML markup."""

    def test_exist(self):
        finder = html.ScriptFinder()
        finder.feed("<script>var abc = true;</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_uppercase(self):
        # Tag matching is case-insensitive
        finder = html.ScriptFinder()
        finder.feed("<SCRIPT>var abc = true;</SCRIPT>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_multiline(self):
        # Script bodies are returned verbatim, newlines included
        finder = html.ScriptFinder()
        finder.feed("<script>\nvar abc = true;\nvar def = false;\n</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["\nvar abc = true;\nvar def = false;\n"])

    def test_exist_attrs(self):
        finder = html.ScriptFinder()
        finder.feed('<script language="javascript" type="text/javascript">var abc = true;</script>')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;"])

    def test_exist_empty(self):
        # Whitespace-only script bodies are discarded
        finder = html.ScriptFinder()
        finder.feed("<script>\n</script>")
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_exist_src(self):
        # External scripts (src attribute, empty body) are ignored
        finder = html.ScriptFinder()
        finder.feed('<script language="javascript" src="../dynaform/common.js" type="text/javascript"></script>')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_exist_multiple(self):
        finder = html.ScriptFinder()
        finder.feed("""
        <script>var abc = true;</script>
        <script>var def = false;</script>
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;", "var def = false;"])

    def test_exist_other_tags(self):
        finder = html.ScriptFinder()
        finder.feed(""",
        <title>Some scripts here</title>
        <script>var abc = true;</script>
        <meta charset="utf-8">
        <script>var def = false;</script>
        <script language="javascript" src="../dynaform/common.js" type="text/javascript"></script>
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, ["var abc = true;", "var def = false;"])

    def test_exist_not_closed(self):
        # An unterminated <script> contributes nothing
        finder = html.ScriptFinder()
        finder.feed('<script>var abc = "true";')
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_not_exist(self):
        finder = html.ScriptFinder()
        finder.feed("""
        <title>No scripts here</title>
        <meta charset="utf-8">
        """)
        scripts = finder.get_scripts()
        self.assertEqual(scripts, [])

    def test_clear(self):
        # close() resets previously collected scripts
        finder = html.ScriptFinder()
        finder.feed("""
        <script>var abc = true;</script>
        <script>var def = false;</script>
        """)
        finder.close()
        finder.feed("""
        <title>No scripts here</title>
        <meta charset="utf-8">
        """)
        scripts = finder.get_scripts()
        self.assertEqual(len(scripts), 0)
"""Misc utils. Currently largely for assistance testing domain models."""
import copy
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from domain_model import DomainModel
import pytest
def copy_dict_with_key_removed(
    the_dict: Dict[Any, Any], key_to_remove: Optional[str] = None
) -> Dict[Any, Any]:
    """Return a deep copy of *the_dict*, optionally with one key removed.

    The input dict is never mutated.

    Parameters
    ----------
    the_dict: mapping to copy.
    key_to_remove: key to delete from the copy; if None (default) the
        copy is returned unchanged.  (Annotation fixed: the default of
        None made the parameter implicitly Optional.)

    Raises
    ------
    KeyError if *key_to_remove* is not None and absent from the dict.
    """
    new_dict = copy.deepcopy(the_dict)
    if key_to_remove is not None:
        del new_dict[key_to_remove]
    return new_dict
def _init_domain_model(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
) -> DomainModel:
    """Instantiate *model* for a validation test.

    When *attribute_under_test* is given, ``test_value`` is supplied as
    that keyword argument.  *additional_kwargs* is shallow-copied before
    the insertion, so the caller's dict is never mutated (the previous
    implementation wrote ``attribute_under_test`` into the caller's dict).
    """
    kwargs = dict() if additional_kwargs is None else dict(additional_kwargs)
    if attribute_under_test is not None:
        kwargs[attribute_under_test] = test_value
    domain_model = model(**kwargs)
    return domain_model
def _domain_model_validation_test(
callable_to_run: Callable[[Any], Any],
expected_error: Optional[Exception],
expected_texts_in_error: Optional[Union[List[str], Tuple[str]]],
autopopulate: bool = False,
) -> None:
if expected_error is not None:
with pytest.raises(expected_error) as e:
callable_to_run(autopopulate=autopopulate) # type: ignore
if expected_texts_in_error is not None:
if not isinstance(expected_texts_in_error, (list, tuple)):
expected_texts_in_error = [expected_texts_in_error]
for this_expected_text_in_error in expected_texts_in_error:
assert this_expected_text_in_error in str(e)
else:
callable_to_run(autopopulate=autopopulate) # type: ignore
def domain_model_validation_test(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
    expected_error: Optional[Exception] = None,
    expected_texts_in_error: Optional[Union[List[str], Tuple[str]]] = None,
    autopopulate: bool = False,
) -> None:
    """Build a domain model and exercise its ``validate`` method.

    Construction is delegated to ``_init_domain_model`` and the
    error/message assertions to ``_domain_model_validation_test``.
    """
    instance = _init_domain_model(
        model, attribute_under_test, test_value, additional_kwargs
    )
    _domain_model_validation_test(
        instance.validate,
        expected_error,
        expected_texts_in_error,
        autopopulate=autopopulate,
    )
def domain_model_validate_internals_test(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
    expected_error: Optional[Exception] = None,
    expected_texts_in_error: Optional[Union[List[str], Tuple[str]]] = None,
    autopopulate: bool = False,
) -> None:
    """Help for testing the validate_internals method.

    Builds the model via ``_init_domain_model`` and delegates the
    error/message checks to ``_domain_model_validation_test`` using the
    instance's ``validate_internals`` method.
    """
    domain_model = _init_domain_model(
        model, attribute_under_test, test_value, additional_kwargs
    )
    _domain_model_validation_test(
        domain_model.validate_internals,
        expected_error,
        expected_texts_in_error,
        autopopulate=autopopulate,
    )
import copy
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from domain_model import DomainModel
import pytest
def copy_dict_with_key_removed(
    the_dict: Dict[Any, Any], key_to_remove: Optional[str] = None
) -> Dict[Any, Any]:
    """Return a deep copy of *the_dict*, optionally with *key_to_remove*
    deleted from the copy; the input dict is never mutated.

    Raises KeyError if *key_to_remove* is not None and absent.
    """
    new_dict = copy.deepcopy(the_dict)
    if key_to_remove is not None:
        del new_dict[key_to_remove]
    return new_dict
def _init_domain_model(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
) -> DomainModel:
    """Instantiate *model* with *additional_kwargs*; when
    *attribute_under_test* is given, ``test_value`` is added as that
    keyword argument first.

    NOTE(review): when the caller supplies *additional_kwargs*, the
    insertion below mutates the caller's dict in place.
    """
    if additional_kwargs is None:
        additional_kwargs = dict()
    if attribute_under_test is not None:
        additional_kwargs[attribute_under_test] = test_value
    domain_model = model(**additional_kwargs)
    return domain_model
def _domain_model_validation_test(
    callable_to_run: Callable[[Any], Any],
    expected_error: Optional[Exception],
    expected_texts_in_error: Optional[Union[List[str], Tuple[str]]],
    autopopulate: bool = False,
) -> None:
    """Invoke *callable_to_run* with ``autopopulate`` and, when
    *expected_error* is given, assert it is raised and that each entry of
    *expected_texts_in_error* (a single string is also accepted) occurs
    in the failure text.
    """
    if expected_error is not None:
        with pytest.raises(expected_error) as e:
            callable_to_run(autopopulate=autopopulate)  # type: ignore
        if expected_texts_in_error is not None:
            if not isinstance(expected_texts_in_error, (list, tuple)):
                expected_texts_in_error = [expected_texts_in_error]
            for this_expected_text_in_error in expected_texts_in_error:
                # NOTE(review): ``str(e)`` stringifies the ExceptionInfo
                # wrapper; ``str(e.value)`` would be the exception's own
                # message — confirm no caller relies on the current text.
                assert this_expected_text_in_error in str(e)
    else:
        callable_to_run(autopopulate=autopopulate)  # type: ignore
def domain_model_validation_test(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
    expected_error: Optional[Exception] = None,
    expected_texts_in_error: Optional[Union[List[str], Tuple[str]]] = None,
    autopopulate: bool = False,
) -> None:
    """Help for testing the validate method.

    Builds the model via ``_init_domain_model`` and delegates the
    error/message checks to ``_domain_model_validation_test`` using the
    instance's ``validate`` method.
    """
    domain_model = _init_domain_model(
        model, attribute_under_test, test_value, additional_kwargs
    )
    _domain_model_validation_test(
        domain_model.validate,
        expected_error,
        expected_texts_in_error,
        autopopulate=autopopulate,
    )
def domain_model_validate_internals_test(
    model: DomainModel,
    attribute_under_test: Optional[str] = None,
    test_value: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
    expected_error: Optional[Exception] = None,
    expected_texts_in_error: Optional[Union[List[str], Tuple[str]]] = None,
    autopopulate: bool = False,
) -> None:
    """Help for testing the validate_internals method.

    Builds the model via ``_init_domain_model`` and delegates the
    error/message checks to ``_domain_model_validation_test`` using the
    instance's ``validate_internals`` method.
    """
    domain_model = _init_domain_model(
        model, attribute_under_test, test_value, additional_kwargs
    )
    _domain_model_validation_test(
        domain_model.validate_internals,
        expected_error,
        expected_texts_in_error,
        autopopulate=autopopulate,
    )
import numpy as np
import os
from numpy import linalg as LA
import matplotlib.pyplot as plt
# Root directory containing the per-part OBJ meshes.
# datapath = '../Chair_parts'
datapath = 'data/examples'
def renderBoxes2mesh_new(boxes, boxes_type, obj_names):
    """Load per-part OBJ meshes and map each part from its ground-truth
    box frame onto the corresponding predicted box.

    Parameters
    ----------
    boxes : numpy array, one row per part; each row packs the box center
        (cols 0:3), edge lengths (3:6) and two axis direction vectors
        (6:9, 9:12).
    boxes_type : one entry per part. An ``int`` entry means "emit the
        mesh untransformed"; otherwise the entry is a ground-truth box
        row (same layout as ``boxes``) defining the source frame.
    obj_names : one entry per part; each is a list of one-element
        sequences holding OBJ file names relative to ``datapath``.

    Returns
    -------
    list of ``(vertices, faces)`` tuples, one per part; face indices are
    1-based as in the OBJ format.
    """
    results = []
    for box_i in range(boxes.shape[0]):
        vertices = []
        faces = []
        obj_name = obj_names[box_i]
        v_num = 0  # vertices read so far; offsets face indices per file
        for name in obj_name:
            with open(os.path.join(datapath, name[0]), 'r') as f:
                lines = f.readlines()
            t = 0
            for line in lines:
                # Only vertex and face records are parsed.
                # NOTE(review): assumes triangular faces with plain indices
                # (no 'v/vt/vn' slash syntax) — confirm for the dataset.
                if line[0] != 'v' and line[0] != 'f':
                    continue
                line = line.strip('\n')
                items = line.split(' ')
                if items[0] == 'v':
                    vertices.append((float(items[1]), float(items[2]), float(items[3])))
                    t += 1
                if items[0] == 'f':
                    faces.append([int(items[1])+v_num, int(items[2])+v_num, int(items[3])+v_num])
            v_num += t
        if isinstance(boxes_type[box_i], int):
            results.append((vertices, faces))
        else:
            # Orthonormal frame of the ground-truth box
            gtbox = boxes_type[box_i]
            gtCenter = gtbox[0:3][np.newaxis, ...].T
            gtlengths = gtbox[3:6]
            gtdir_1 = gtbox[6:9]
            gtdir_2 = gtbox[9:12]
            gtdir_1 = gtdir_1/LA.norm(gtdir_1)
            gtdir_2 = gtdir_2/LA.norm(gtdir_2)
            gtdir_3 = np.cross(gtdir_1, gtdir_2)
            # Orthonormal frame of the predicted box
            predbox = boxes[box_i]
            predCenter = predbox[0:3][np.newaxis, ...].T
            predlengths = predbox[3:6]
            preddir_1 = predbox[6:9]
            preddir_2 = predbox[9:12]
            preddir_1 = preddir_1/LA.norm(preddir_1)
            preddir_2 = preddir_2/LA.norm(preddir_2)
            # Negated third axis mirrors the mesh; the face-order swap
            # below flips the triangle winding afterwards — presumably to
            # keep normals outward (confirm).
            preddir_3 = -np.cross(preddir_1, preddir_2)
            scale = predlengths / gtlengths
            scale = np.array([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, scale[2]]])
            x = np.array(vertices).T  # 3 x n_vertices
            # A maps world coords into the gt frame; B maps frame coords
            # onto the predicted axes.
            # NOTE(review): scale is applied after B, i.e. along world
            # axes rather than box axes — verify the intended order.
            A = np.array([gtdir_1, gtdir_2, gtdir_3])
            B = np.array([preddir_1, preddir_2, preddir_3])
            B = B.T
            y = scale.dot(B).dot(A).dot(x-gtCenter)+predCenter
            x = y.T
            vertices = []
            for i in range(x.shape[0]):
                vertices.append(x[i])
            # Swap two indices of every face to flip the winding
            for i in range(len(faces)):
                temp = faces[i][0]
                faces[i][0] = faces[i][1]
                faces[i][1] = temp
            results.append((vertices, faces))
    return results
def renderBoxes2mesh(boxes, obj_names):
    """Load each part's OBJ mesh and crop it to the part's (slightly
    enlarged) oriented bounding box.

    Parameters
    ----------
    boxes : numpy array, one row per part: center (0:3), edge lengths
        (3:6), two axis direction vectors (6:9, 9:12).
    obj_names : list of OBJ file names (relative to ``datapath``), one
        per row of ``boxes``; duplicates are parsed only once.

    Returns
    -------
    list of ``(vertices, faces)`` tuples with 1-based face indices;
    faces with any vertex outside the box are dropped.
    """
    # Parse each distinct OBJ file once and cache it by name
    obj_name_set = set(obj_names)
    obj_dict = {}
    for idx, name in enumerate(obj_name_set):
        vertices = []
        faces = []
        with open(os.path.join(datapath, name), 'r') as f:
            lines = f.readlines()
        for line in lines:
            if line[0] != 'v' and line[0] != 'f':
                continue
            line = line.strip('\n')
            items = line.split(' ')
            if items[0] == 'v':
                vertices.append((float(items[1]), float(items[2]), float(items[3])))
            if items[0] == 'f':
                faces.append((int(items[1]), int(items[2]), int(items[3])))
        vertices = np.array(vertices)
        obj_dict[name] = {'vertices': vertices, 'faces': faces, 'id': idx}
    results = []
    for box_i in range(boxes.shape[0]):
        box = boxes[box_i]
        obj = obj_dict[obj_names[box_i]]
        vertices = obj['vertices']
        faces = obj['faces']
        center = box[0:3]
        lengths = box[3:6] * 1.1  # 10% margin around the box
        dir_1 = box[6:9]
        dir_2 = box[9:12]
        dir_1 = dir_1/LA.norm(dir_1)
        dir_2 = dir_2/LA.norm(dir_2)
        dir_3 = np.cross(dir_1, dir_2)
        # Absolute distance of every vertex to the center along each box axis
        dist_v = vertices - center
        dist_1 = np.abs(np.dot(dist_v, dir_1))
        dist_2 = np.abs(np.dot(dist_v, dir_2))
        dist_3 = np.abs(np.dot(dist_v, dir_3))
        # Keep only vertices inside the enlarged box along all three axes
        clean_flag = np.logical_and(dist_1 <= lengths[0] / 2, dist_2 <= lengths[1] / 2)
        clean_flag = np.logical_and(clean_flag, dist_3 <= lengths[2] / 2)
        # Re-index surviving vertices (OBJ indices are 1-based)
        new_id = [0 for _ in range(vertices.shape[0])]
        count = 0
        new_v = []
        new_f = []
        for i in range(vertices.shape[0]):
            if clean_flag[i]:
                count += 1
                new_id[i] = count
                new_v.append(vertices[i])
        # A face survives only if all three of its vertices survive
        for i in range(len(faces)):
            a = faces[i][0]
            b = faces[i][1]
            c = faces[i][2]
            if clean_flag[a-1] and clean_flag[b-1] and clean_flag[c-1]:
                new_f.append([new_id[a-1], new_id[b-1], new_id[c-1]])
        results.append((new_v, new_f))
    return results
def saveOBJ(obj_names, outfilename, results):
    """Write all part meshes into a single OBJ file, coloring the
    vertices of each top-level part with the 'jet_r' colormap.

    Parameters
    ----------
    obj_names : per-part nested name lists; the top-level directory of
        the first name determines the part's color.
    outfilename : path of the OBJ file to write.
    results : list of ``(vertices, faces)`` with 1-based face indices,
        as produced by the renderBoxes2mesh* functions.
    """
    cmap = plt.get_cmap('jet_r')
    # Collect the distinct top-level part names to assign colors.
    # NOTE(review): iterating a set makes color assignment depend on set
    # ordering, which is not stable across runs — confirm acceptable.
    mesh_name = set()
    for obj in obj_names:
        n = obj[0][0].split('/')[0]
        mesh_name.add(n)
    obj_dict = {}
    for idx, name in enumerate(mesh_name):
        obj_dict[name] = idx
    f = open(outfilename, 'w')
    offset = 0  # global vertex offset for face re-indexing
    for box_i in range(len(results)):
        n = obj_names[box_i][0][0].split('/')[0]
        color = cmap(float(obj_dict[n]) / len(mesh_name))[:-1]  # drop alpha
        vertices = results[box_i][0]
        faces = results[box_i][1]
        for i in range(len(vertices)):
            f.write('v ' + str(vertices[i][0]) + ' ' + str(vertices[i][1]) + ' ' + str(vertices[i][2]) +
                    ' ' + str(color[0]) + ' ' + str(color[1]) + ' ' + str(color[2]) + '\n')
        for i in range(len(faces)):
            f.write('f ' + str(faces[i][0]+offset) + ' ' + str(faces[i][1]+offset) + ' ' + str(faces[i][2]+offset) + '\n')
        offset += len(vertices)
    f.close()
def directRender(boxes, boxes_type, obj_names, outfilename):
    """Render the parts with ``renderBoxes2mesh_new`` and write them to
    a single colored OBJ file."""
    results = renderBoxes2mesh_new(boxes, boxes_type, obj_names)
    saveOBJ(obj_names, outfilename, results)
def alignBoxAndRender(gtBoxes, predBoxes, boxes_type, obj_names, outfilename):
    """Render part meshes from the ground-truth boxes, transform every
    part onto its predicted box, and write the result to one OBJ file.

    Parameters
    ----------
    gtBoxes, predBoxes : numpy arrays, one row per part: center (0:3),
        edge lengths (3:6), two axis direction vectors (6:9, 9:12).
    boxes_type, obj_names : forwarded to ``renderBoxes2mesh_new``.
    outfilename : path of the OBJ file to write.
    """
    results = renderBoxes2mesh_new(gtBoxes, boxes_type, obj_names)
    for i in range(len(results)):
        # Orthonormal frame of the ground-truth box
        gtbox = gtBoxes[i]
        gtCenter = gtbox[0:3][np.newaxis, ...].T
        gtlengths = gtbox[3:6]
        gtdir_1 = gtbox[6:9]
        gtdir_2 = gtbox[9:12]
        gtdir_1 = gtdir_1/LA.norm(gtdir_1)
        gtdir_2 = gtdir_2/LA.norm(gtdir_2)
        gtdir_3 = np.cross(gtdir_1, gtdir_2)
        # Orthonormal frame of the predicted box (plain cross product
        # here, i.e. no mirroring of the third axis)
        predbox = predBoxes[i]
        predCenter = predbox[0:3][np.newaxis, ...].T
        predlengths = predbox[3:6]
        preddir_1 = predbox[6:9]
        preddir_2 = predbox[9:12]
        preddir_1 = preddir_1/LA.norm(preddir_1)
        preddir_2 = preddir_2/LA.norm(preddir_2)
        preddir_3 = np.cross(preddir_1, preddir_2)
        scale = predlengths / gtlengths
        scale = np.array([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, scale[2]]])
        x = np.array(results[i][0]).T  # 3 x n_vertices
        # A maps world coords into the gt frame; B maps frame coords onto
        # the predicted axes; scale adjusts edge lengths
        A = np.array([gtdir_1, gtdir_2, gtdir_3])
        B = np.array([preddir_1, preddir_2, preddir_3])
        B = B.T
        y = scale.dot(B).dot(A).dot(x-gtCenter)+predCenter
        x = y.T
        for t in range(len(results[i][0])):
            results[i][0][t] = x[t]
    saveOBJ(obj_names, outfilename, results)
import os
from numpy import linalg as LA
import matplotlib.pyplot as plt
# Root directory containing the per-part OBJ meshes.
# datapath = '../Chair_parts'
datapath = 'data/examples'
def renderBoxes2mesh_new(boxes, boxes_type, obj_names):
    """Load per-part OBJ meshes and map each part from its ground-truth
    box frame onto the corresponding predicted box.

    Parameters
    ----------
    boxes : numpy array, one row per part; each row packs the box center
        (cols 0:3), edge lengths (3:6) and two axis direction vectors
        (6:9, 9:12).
    boxes_type : one entry per part. An ``int`` entry means "emit the
        mesh untransformed"; otherwise the entry is a ground-truth box
        row (same layout as ``boxes``) defining the source frame.
    obj_names : one entry per part; each is a list of one-element
        sequences holding OBJ file names relative to ``datapath``.

    Returns
    -------
    list of ``(vertices, faces)`` tuples, one per part; face indices are
    1-based as in the OBJ format.
    """
    results = []
    for box_i in range(boxes.shape[0]):
        vertices = []
        faces = []
        obj_name = obj_names[box_i]
        v_num = 0  # vertices read so far; offsets face indices per file
        for name in obj_name:
            with open(os.path.join(datapath, name[0]), 'r') as f:
                lines = f.readlines()
            t = 0
            for line in lines:
                # Only vertex and face records are parsed.
                # NOTE(review): assumes triangular faces with plain indices
                # (no 'v/vt/vn' slash syntax) — confirm for the dataset.
                if line[0] != 'v' and line[0] != 'f':
                    continue
                line = line.strip('\n')
                items = line.split(' ')
                if items[0] == 'v':
                    vertices.append((float(items[1]), float(items[2]), float(items[3])))
                    t += 1
                if items[0] == 'f':
                    faces.append([int(items[1])+v_num, int(items[2])+v_num, int(items[3])+v_num])
            v_num += t
        if isinstance(boxes_type[box_i], int):
            results.append((vertices, faces))
        else:
            # Orthonormal frame of the ground-truth box
            gtbox = boxes_type[box_i]
            gtCenter = gtbox[0:3][np.newaxis, ...].T
            gtlengths = gtbox[3:6]
            gtdir_1 = gtbox[6:9]
            gtdir_2 = gtbox[9:12]
            gtdir_1 = gtdir_1/LA.norm(gtdir_1)
            gtdir_2 = gtdir_2/LA.norm(gtdir_2)
            gtdir_3 = np.cross(gtdir_1, gtdir_2)
            # Orthonormal frame of the predicted box
            predbox = boxes[box_i]
            predCenter = predbox[0:3][np.newaxis, ...].T
            predlengths = predbox[3:6]
            preddir_1 = predbox[6:9]
            preddir_2 = predbox[9:12]
            preddir_1 = preddir_1/LA.norm(preddir_1)
            preddir_2 = preddir_2/LA.norm(preddir_2)
            # Negated third axis mirrors the mesh; the face-order swap
            # below flips the triangle winding afterwards — presumably to
            # keep normals outward (confirm).
            preddir_3 = -np.cross(preddir_1, preddir_2)
            scale = predlengths / gtlengths
            scale = np.array([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, scale[2]]])
            x = np.array(vertices).T  # 3 x n_vertices
            # A maps world coords into the gt frame; B maps frame coords
            # onto the predicted axes.
            # NOTE(review): scale is applied after B, i.e. along world
            # axes rather than box axes — verify the intended order.
            A = np.array([gtdir_1, gtdir_2, gtdir_3])
            B = np.array([preddir_1, preddir_2, preddir_3])
            B = B.T
            y = scale.dot(B).dot(A).dot(x-gtCenter)+predCenter
            x = y.T
            vertices = []
            for i in range(x.shape[0]):
                vertices.append(x[i])
            # Swap two indices of every face to flip the winding
            for i in range(len(faces)):
                temp = faces[i][0]
                faces[i][0] = faces[i][1]
                faces[i][1] = temp
            results.append((vertices, faces))
    return results
def renderBoxes2mesh(boxes, obj_names):
    """Load each part's OBJ mesh and crop it to the part's (slightly
    enlarged) oriented bounding box.

    Parameters
    ----------
    boxes : numpy array, one row per part: center (0:3), edge lengths
        (3:6), two axis direction vectors (6:9, 9:12).
    obj_names : list of OBJ file names (relative to ``datapath``), one
        per row of ``boxes``; duplicates are parsed only once.

    Returns
    -------
    list of ``(vertices, faces)`` tuples with 1-based face indices;
    faces with any vertex outside the box are dropped.
    """
    # Parse each distinct OBJ file once and cache it by name
    obj_name_set = set(obj_names)
    obj_dict = {}
    for idx, name in enumerate(obj_name_set):
        vertices = []
        faces = []
        with open(os.path.join(datapath, name), 'r') as f:
            lines = f.readlines()
        for line in lines:
            if line[0] != 'v' and line[0] != 'f':
                continue
            line = line.strip('\n')
            items = line.split(' ')
            if items[0] == 'v':
                vertices.append((float(items[1]), float(items[2]), float(items[3])))
            if items[0] == 'f':
                faces.append((int(items[1]), int(items[2]), int(items[3])))
        vertices = np.array(vertices)
        obj_dict[name] = {'vertices': vertices, 'faces': faces, 'id': idx}
    results = []
    for box_i in range(boxes.shape[0]):
        box = boxes[box_i]
        obj = obj_dict[obj_names[box_i]]
        vertices = obj['vertices']
        faces = obj['faces']
        center = box[0:3]
        lengths = box[3:6] * 1.1  # 10% margin around the box
        dir_1 = box[6:9]
        dir_2 = box[9:12]
        dir_1 = dir_1/LA.norm(dir_1)
        dir_2 = dir_2/LA.norm(dir_2)
        dir_3 = np.cross(dir_1, dir_2)
        # Absolute distance of every vertex to the center along each box axis
        dist_v = vertices - center
        dist_1 = np.abs(np.dot(dist_v, dir_1))
        dist_2 = np.abs(np.dot(dist_v, dir_2))
        dist_3 = np.abs(np.dot(dist_v, dir_3))
        # Keep only vertices inside the enlarged box along all three axes
        clean_flag = np.logical_and(dist_1 <= lengths[0] / 2, dist_2 <= lengths[1] / 2)
        clean_flag = np.logical_and(clean_flag, dist_3 <= lengths[2] / 2)
        # Re-index surviving vertices (OBJ indices are 1-based)
        new_id = [0 for _ in range(vertices.shape[0])]
        count = 0
        new_v = []
        new_f = []
        for i in range(vertices.shape[0]):
            if clean_flag[i]:
                count += 1
                new_id[i] = count
                new_v.append(vertices[i])
        # A face survives only if all three of its vertices survive
        for i in range(len(faces)):
            a = faces[i][0]
            b = faces[i][1]
            c = faces[i][2]
            if clean_flag[a-1] and clean_flag[b-1] and clean_flag[c-1]:
                new_f.append([new_id[a-1], new_id[b-1], new_id[c-1]])
        results.append((new_v, new_f))
    return results
def saveOBJ(obj_names, outfilename, results):
    """Write all part meshes into a single OBJ file, coloring the
    vertices of each top-level part with the 'jet_r' colormap.

    Parameters
    ----------
    obj_names : per-part nested name lists; the top-level directory of
        the first name determines the part's color.
    outfilename : path of the OBJ file to write.
    results : list of ``(vertices, faces)`` with 1-based face indices,
        as produced by the renderBoxes2mesh* functions.
    """
    cmap = plt.get_cmap('jet_r')
    # Collect the distinct top-level part names to assign colors.
    # NOTE(review): iterating a set makes color assignment depend on set
    # ordering, which is not stable across runs — confirm acceptable.
    mesh_name = set()
    for obj in obj_names:
        n = obj[0][0].split('/')[0]
        mesh_name.add(n)
    obj_dict = {}
    for idx, name in enumerate(mesh_name):
        obj_dict[name] = idx
    f = open(outfilename, 'w')
    offset = 0  # global vertex offset for face re-indexing
    for box_i in range(len(results)):
        n = obj_names[box_i][0][0].split('/')[0]
        color = cmap(float(obj_dict[n]) / len(mesh_name))[:-1]  # drop alpha
        vertices = results[box_i][0]
        faces = results[box_i][1]
        for i in range(len(vertices)):
            f.write('v ' + str(vertices[i][0]) + ' ' + str(vertices[i][1]) + ' ' + str(vertices[i][2]) +
                    ' ' + str(color[0]) + ' ' + str(color[1]) + ' ' + str(color[2]) + '\n')
        for i in range(len(faces)):
            f.write('f ' + str(faces[i][0]+offset) + ' ' + str(faces[i][1]+offset) + ' ' + str(faces[i][2]+offset) + '\n')
        offset += len(vertices)
    f.close()
def directRender(boxes, boxes_type, obj_names, outfilename):
    """Render the parts with ``renderBoxes2mesh_new`` and write them to
    a single colored OBJ file."""
    results = renderBoxes2mesh_new(boxes, boxes_type, obj_names)
    saveOBJ(obj_names, outfilename, results)
def alignBoxAndRender(gtBoxes, predBoxes, boxes_type, obj_names, outfilename):
    """Render part meshes from the ground-truth boxes, transform every
    part onto its predicted box, and write the result to one OBJ file.

    Parameters
    ----------
    gtBoxes, predBoxes : numpy arrays, one row per part: center (0:3),
        edge lengths (3:6), two axis direction vectors (6:9, 9:12).
    boxes_type, obj_names : forwarded to ``renderBoxes2mesh_new``.
    outfilename : path of the OBJ file to write.
    """
    results = renderBoxes2mesh_new(gtBoxes, boxes_type, obj_names)
    for i in range(len(results)):
        # Orthonormal frame of the ground-truth box
        gtbox = gtBoxes[i]
        gtCenter = gtbox[0:3][np.newaxis, ...].T
        gtlengths = gtbox[3:6]
        gtdir_1 = gtbox[6:9]
        gtdir_2 = gtbox[9:12]
        gtdir_1 = gtdir_1/LA.norm(gtdir_1)
        gtdir_2 = gtdir_2/LA.norm(gtdir_2)
        gtdir_3 = np.cross(gtdir_1, gtdir_2)
        # Orthonormal frame of the predicted box (plain cross product
        # here, i.e. no mirroring of the third axis)
        predbox = predBoxes[i]
        predCenter = predbox[0:3][np.newaxis, ...].T
        predlengths = predbox[3:6]
        preddir_1 = predbox[6:9]
        preddir_2 = predbox[9:12]
        preddir_1 = preddir_1/LA.norm(preddir_1)
        preddir_2 = preddir_2/LA.norm(preddir_2)
        preddir_3 = np.cross(preddir_1, preddir_2)
        scale = predlengths / gtlengths
        scale = np.array([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, scale[2]]])
        x = np.array(results[i][0]).T  # 3 x n_vertices
        # A maps world coords into the gt frame; B maps frame coords onto
        # the predicted axes; scale adjusts edge lengths
        A = np.array([gtdir_1, gtdir_2, gtdir_3])
        B = np.array([preddir_1, preddir_2, preddir_3])
        B = B.T
        y = scale.dot(B).dot(A).dot(x-gtCenter)+predCenter
        x = y.T
        for t in range(len(results[i][0])):
            results[i][0][t] = x[t]
    saveOBJ(obj_names, outfilename, results)
__all__ = ['mahalanobis_pca_outliers']
import numpy as np
def mahalanobis_pca_outliers(X, n_components=2, threshold=2, plot=False):
    """
    Project X onto its principal components, then score each sample by its
    robust Mahalanobis distance in that PCA space.

    Params
    ------
    X: array-like of shape (n_samples, n_features)
        Data to score.
    n_components: int (default=2)
        Number of leading PCA components used to compute the distance.
    threshold: float or None (default=2)
        If None, returns the unaltered distance values.
        If a number, output is binarized to True (outlier) or False
        (not outlier) for samples more than ``threshold * stddev`` away
        from the mean distance.
    plot: bool (default=False)
        If True, displays a 2D scatter of the first two components colored
        by distance.

    Returns
    -------
    m: np.ndarray
        Raw distances or a boolean outlier mask; ``len(m) == len(X)``.

    Usage
    -----
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [0, 0], [-20, 50], [3, 5]])
    >>> m = mahalanobis_pca_outliers(X)
    >>> m.shape[0] == 6
    True
    """
    # Imported lazily so importing this module does not require
    # sklearn/matplotlib to be installed.
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    from sklearn.covariance import MinCovDet
    import matplotlib.pyplot as plt
    # PCA scores of the standardized data (all components kept; sliced below).
    T = PCA().fit_transform(StandardScaler().fit_transform(X))
    # Fit a Minimum Covariance Determinant (MCD) robust covariance estimator
    # to the leading components, then score samples by Mahalanobis distance.
    robust_cov = MinCovDet().fit(T[:, :n_components])
    md = robust_cov.mahalanobis(T[:, :n_components])
    if plot:
        colors = [plt.cm.jet(float(i) / max(md)) for i in md]
        plt.figure(figsize=(8, 6))
        with plt.style.context(('ggplot')):
            plt.scatter(T[:, 0], T[:, 1], c=colors, edgecolors='k', s=60)
            plt.xlabel('PC1')
            plt.ylabel('PC2')
            plt.xlim((-60, 60))
            plt.ylim((-60, 60))
            plt.title('Score Plot')
            plt.show()
    # `is not None` (not truthiness) so threshold=0 is honored instead of
    # silently falling through to the raw distances, matching the docstring.
    if threshold is not None:
        std = np.std(md)
        mean = np.mean(md)
        k = threshold * std
        return np.logical_or(md >= mean + k, md <= mean - k)
    return md
if __name__ == '__main__':
import doctest
doctest.testmod() | python_data_utils/sklearn/data/utils.py | __all__ = ['mahalanobis_pca_outliers']
import numpy as np
def mahalanobis_pca_outliers(X, n_components=2, threshold=2, plot=False):
"""
Compute PCA on X, then compute the malanobis distance
of all data points from the PCA components.
Params
------
X: data
n_components: int (default=2)
Number of PCA components to use to calculate the distance
threshold: float (default=2)
If None, returns the unaltered distance values.
If float, output is binarized to True (Outlier)
or False (Not outlier) based on threshold * stddev
from the mean distance.
plot: bool (default=False)
If True, displays a 2D plot of the points colored by
their distance
Returns
-------
m: np.ndarray
Distance values. len(m) == len(X).
Usage
-----
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [0, 0], [-20, 50], [3, 5]])
>>> m = mahalanobis_pca_outliers(X)
>>> m.shape[0] == 6
True
>>> print(m)
"""
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EmpiricalCovariance, MinCovDet
import matplotlib.pyplot as plt
# Define the PCA object
pca = PCA()
# Run PCA on scaled data and obtain the scores array
T = pca.fit_transform(StandardScaler().fit_transform(X))
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(T[:,:n_components])
# Get the Mahalanobis distance
md = robust_cov.mahalanobis(T[:,:n_components])
# plot
if plot:
colors = [plt.cm.jet(float(i) / max(md)) for i in md]
fig = plt.figure(figsize=(8,6))
with plt.style.context(('ggplot')):
plt.scatter(T[:, 0], T[:, 1], c=colors, edgecolors='k', s=60)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.xlim((-60, 60))
plt.ylim((-60, 60))
plt.title('Score Plot')
plt.show()
if threshold:
std = np.std(md)
m = np.mean(md)
k = threshold * std
up, lo = m + k, m - k
return np.logical_or(md >= up, md <= lo)
return md
if __name__ == '__main__':
import doctest
doctest.testmod() | 0.90652 | 0.742235 |
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
def myNetwork():
    # MiniEdit-style generated topology: s7/s8 form a controller-attached
    # core pair; s1..s6, s9 and s10 fan out to 11 hosts h1..h11.
    net = Mininet( topo=None,
                   build=False,
                   ipBase='10.0.0.0/8')

    info( '*** Adding controller\n' )
    # Local reference controller on the default OpenFlow port.
    c0=net.addController(name='c0',
                         controller=Controller,
                         protocol='tcp',
                         port=6633)

    info( '*** Add switches\n')
    s9 = net.addSwitch('s9', cls=OVSKernelSwitch)
    s8 = net.addSwitch('s8', cls=OVSKernelSwitch)
    s4 = net.addSwitch('s4', cls=OVSKernelSwitch)
    s5 = net.addSwitch('s5', cls=OVSKernelSwitch)
    s7 = net.addSwitch('s7', cls=OVSKernelSwitch)
    s6 = net.addSwitch('s6', cls=OVSKernelSwitch)
    s10 = net.addSwitch('s10', cls=OVSKernelSwitch)
    s1 = net.addSwitch('s1', cls=OVSKernelSwitch)
    s3 = net.addSwitch('s3', cls=OVSKernelSwitch)
    s2 = net.addSwitch('s2', cls=OVSKernelSwitch)

    info( '*** Add hosts\n')
    s3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None) if False else None  # (unused guard removed)
    h3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None)
    h9 = net.addHost('h9', cls=Host, ip='10.0.0.9', defaultRoute=None)
    h1 = net.addHost('h1', cls=Host, ip='10.0.0.1', defaultRoute=None)
    h7 = net.addHost('h7', cls=Host, ip='10.0.0.7', defaultRoute=None)
    h10 = net.addHost('h10', cls=Host, ip='10.0.0.10', defaultRoute=None)
    h2 = net.addHost('h2', cls=Host, ip='10.0.0.2', defaultRoute=None)
    h6 = net.addHost('h6', cls=Host, ip='10.0.0.6', defaultRoute=None)
    h4 = net.addHost('h4', cls=Host, ip='10.0.0.4', defaultRoute=None)
    h11 = net.addHost('h11', cls=Host, ip='10.0.0.11', defaultRoute=None)
    h8 = net.addHost('h8', cls=Host, ip='10.0.0.8', defaultRoute=None)
    h5 = net.addHost('h5', cls=Host, ip='10.0.0.5', defaultRoute=None)

    info( '*** Add links\n')
    # Core links are traffic-controlled (250 Mbit/s, 0% loss); all other
    # links use Mininet defaults.
    s7s1 = {'bw':250,'loss':0}
    net.addLink(s7, s1, cls=TCLink , **s7s1)
    s7s8 = {'bw':250,'loss':0}
    net.addLink(s7, s8, cls=TCLink , **s7s8)
    s8s2 = {'bw':250,'loss':0}
    net.addLink(s8, s2, cls=TCLink , **s8s2)
    # Aggregation mesh: s1 and s2 each connect to every edge switch.
    net.addLink(s1, s3)
    net.addLink(s1, s4)
    net.addLink(s1, s9)
    net.addLink(s1, s10)
    net.addLink(s1, s5)
    net.addLink(s1, s6)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(s2, s9)
    net.addLink(s2, s10)
    net.addLink(s2, s5)
    net.addLink(s2, s6)
    # Edge links: two hosts per edge switch (one for s5, h10+h11 on s6).
    net.addLink(s3, h1)
    net.addLink(s3, h2)
    net.addLink(s4, h3)
    net.addLink(s4, h4)
    net.addLink(s9, h5)
    net.addLink(s9, h6)
    net.addLink(s10, h7)
    net.addLink(s10, h8)
    net.addLink(s5, h9)
    net.addLink(s6, h10)
    net.addLink(s6, h11)

    info( '*** Starting network\n')
    net.build()
    info( '*** Starting controllers\n')
    for controller in net.controllers:
        controller.start()

    info( '*** Starting switches\n')
    # Only the core switches s7 and s8 attach to c0; every other switch is
    # started with no controller (standalone/learning-switch behavior).
    net.get('s9').start([])
    net.get('s8').start([c0])
    net.get('s4').start([])
    net.get('s5').start([])
    net.get('s7').start([c0])
    net.get('s6').start([])
    net.get('s10').start([])
    net.get('s1').start([])
    net.get('s3').start([])
    net.get('s2').start([])

    info( '*** Post configure switches and hosts\n')
    # Drop into the interactive Mininet CLI; tear down on exit.
    CLI(net)
    net.stop()
if __name__ == '__main__':
    # Run directly (Mininet requires root): build the topology and start the CLI.
    setLogLevel( 'info' )
    myNetwork()
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
def myNetwork():
net = Mininet( topo=None,
build=False,
ipBase='10.0.0.0/8')
info( '*** Adding controller\n' )
c0=net.addController(name='c0',
controller=Controller,
protocol='tcp',
port=6633)
info( '*** Add switches\n')
s9 = net.addSwitch('s9', cls=OVSKernelSwitch)
s8 = net.addSwitch('s8', cls=OVSKernelSwitch)
s4 = net.addSwitch('s4', cls=OVSKernelSwitch)
s5 = net.addSwitch('s5', cls=OVSKernelSwitch)
s7 = net.addSwitch('s7', cls=OVSKernelSwitch)
s6 = net.addSwitch('s6', cls=OVSKernelSwitch)
s10 = net.addSwitch('s10', cls=OVSKernelSwitch)
s1 = net.addSwitch('s1', cls=OVSKernelSwitch)
s3 = net.addSwitch('s3', cls=OVSKernelSwitch)
s2 = net.addSwitch('s2', cls=OVSKernelSwitch)
info( '*** Add hosts\n')
h3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None)
h9 = net.addHost('h9', cls=Host, ip='10.0.0.9', defaultRoute=None)
h1 = net.addHost('h1', cls=Host, ip='10.0.0.1', defaultRoute=None)
h7 = net.addHost('h7', cls=Host, ip='10.0.0.7', defaultRoute=None)
h10 = net.addHost('h10', cls=Host, ip='10.0.0.10', defaultRoute=None)
h2 = net.addHost('h2', cls=Host, ip='10.0.0.2', defaultRoute=None)
h6 = net.addHost('h6', cls=Host, ip='10.0.0.6', defaultRoute=None)
h4 = net.addHost('h4', cls=Host, ip='10.0.0.4', defaultRoute=None)
h11 = net.addHost('h11', cls=Host, ip='10.0.0.11', defaultRoute=None)
h8 = net.addHost('h8', cls=Host, ip='10.0.0.8', defaultRoute=None)
h5 = net.addHost('h5', cls=Host, ip='10.0.0.5', defaultRoute=None)
info( '*** Add links\n')
s7s1 = {'bw':250,'loss':0}
net.addLink(s7, s1, cls=TCLink , **s7s1)
s7s8 = {'bw':250,'loss':0}
net.addLink(s7, s8, cls=TCLink , **s7s8)
s8s2 = {'bw':250,'loss':0}
net.addLink(s8, s2, cls=TCLink , **s8s2)
net.addLink(s1, s3)
net.addLink(s1, s4)
net.addLink(s1, s9)
net.addLink(s1, s10)
net.addLink(s1, s5)
net.addLink(s1, s6)
net.addLink(s2, s3)
net.addLink(s2, s4)
net.addLink(s2, s9)
net.addLink(s2, s10)
net.addLink(s2, s5)
net.addLink(s2, s6)
net.addLink(s3, h1)
net.addLink(s3, h2)
net.addLink(s4, h3)
net.addLink(s4, h4)
net.addLink(s9, h5)
net.addLink(s9, h6)
net.addLink(s10, h7)
net.addLink(s10, h8)
net.addLink(s5, h9)
net.addLink(s6, h10)
net.addLink(s6, h11)
info( '*** Starting network\n')
net.build()
info( '*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info( '*** Starting switches\n')
net.get('s9').start([])
net.get('s8').start([c0])
net.get('s4').start([])
net.get('s5').start([])
net.get('s7').start([c0])
net.get('s6').start([])
net.get('s10').start([])
net.get('s1').start([])
net.get('s3').start([])
net.get('s2').start([])
info( '*** Post configure switches and hosts\n')
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork() | 0.644001 | 0.061312 |
import os
import unittest
import typing
import math
import collections
def get_file_contents() -> str:
    """Read and return the raw text of ``../data/d14.txt`` relative to this file."""
    here = os.path.dirname(os.path.realpath(__file__))
    data_path = os.path.join(here, "..", "data", "d14.txt")
    with open(data_path, "r") as handle:
        return handle.read()
# Type aliases describing the reaction graph (Advent of Code 2019, day 14).
ChemicalName = str  # e.g. "ORE", "FUEL"
ProductionAmount = int  # units produced per reaction batch
# An (amount, chemical) pair, e.g. (7, "A").
AmountAndChemical = typing.Tuple[int, ChemicalName]
# Inputs consumed by one batch of a reaction.
Requirements = typing.List[AmountAndChemical]
# (units produced per batch, inputs required per batch).
Reaction = typing.Tuple[ProductionAmount, Requirements]
# Maps each producible chemical to the single reaction that makes it.
Reactions = typing.Dict[ChemicalName, Reaction]
# Running tally of chemical amounts (requirements or leftovers).
CurrentChemicalAmounts = typing.MutableMapping[ChemicalName, int]
def str_to_chemical_amount(s: str) -> "AmountAndChemical":
    """Parse a string like ``"7 ORE"`` into ``(7, "ORE")``.

    Uses ``split()`` with no argument so repeated or surrounding whitespace
    is tolerated (the original single-space split produced empty fields and
    raised ValueError on such input). The return annotation is quoted so the
    function can be imported independently of the alias definitions.
    """
    amount, chemical = s.split()
    return int(amount), chemical
def get_fuel_amount_to_be_produced(r: "Reactions") -> "typing.Optional[AmountAndChemical]":
    """Return ``(batch_amount, "FUEL")`` for the FUEL reaction in ``r``, or None.

    Replaces the original O(n) scan over every key with a direct dict
    lookup. Annotations are quoted (forward references) so the function is
    importable without the module-level alias definitions.
    """
    reaction = r.get("FUEL")
    if reaction is None:
        return None
    return reaction[0], "FUEL"
def str_ro_reactions(reactions_input_str: str) -> Reactions:
    """Parse a reaction list (one ``"7 A, 1 B => 1 C"`` per line) into a Reactions map.

    (The name keeps the original's "ro" typo so existing callers still work.)
    """
    parsed: Reactions = {}
    for line in reactions_input_str.strip().splitlines(keepends=False):
        lhs, rhs = line.strip().split(" => ")
        produced_amount, produced_name = str_to_chemical_amount(rhs)
        inputs = [str_to_chemical_amount(part) for part in lhs.split(", ")]
        parsed[produced_name] = (produced_amount, inputs)
    return parsed
def get_chemical_requirements(r: Reactions,
                              unused_chemicals: CurrentChemicalAmounts,
                              chemical_amount: AmountAndChemical) -> CurrentChemicalAmounts:
    # Determine what inputs must be produced to obtain `chemical_amount`,
    # consuming leftovers first. Returns {input_chemical: amount} (empty when
    # the request is ORE or is fully covered by leftovers) and updates
    # `unused_chemicals` in place with the new leftover balance.
    # NOTE(review): reads unused_chemicals[chemical] without a default, so the
    # caller must pass a defaultdict (as get_ore_for_fuel_helper does).
    needed_amount, chemical = chemical_amount
    if chemical == "ORE":
        # ORE is the raw input; nothing has to be produced to obtain it.
        return {}
    unused_amount = unused_chemicals[chemical]
    if unused_amount >= needed_amount:
        # Fully covered by leftovers from earlier over-production.
        unused_chemicals[chemical] -= needed_amount
        return {}
    production_amount, reaction_requirements = r[chemical]
    reaction_multiplier = 1
    if (production_amount + unused_amount) < needed_amount:
        # One batch plus leftovers is not enough: run just enough batches.
        reaction_multiplier = math.ceil((needed_amount - unused_amount) / production_amount)
    # Scale the per-batch inputs by the number of batches run.
    reaction_requirements = {r_chemical: r_amount * reaction_multiplier
                             for r_amount, r_chemical in reaction_requirements}
    # Whatever the batches overproduce becomes the new leftover balance.
    unused_chemicals[chemical] = (production_amount * reaction_multiplier + unused_amount) - needed_amount
    return reaction_requirements
def merge_chemical_requirements(current_requirements: CurrentChemicalAmounts, new_requirements: CurrentChemicalAmounts):
    """Fold each amount from ``new_requirements`` into ``current_requirements`` in place."""
    for name, extra in new_requirements.items():
        current_requirements[name] = current_requirements.get(name, 0) + extra
def get_next_non_ore_chemical(current_chemicals: CurrentChemicalAmounts) -> typing.Optional[ChemicalName]:
    """Return any key other than ``"ORE"``, or None when only ORE remains."""
    return next((name for name in current_chemicals if name != "ORE"), None)
def get_ore_for_fuel(reactions_str: str) -> int:
    """Parse the reaction list and return the ORE needed for one FUEL batch."""
    return get_ore_for_fuel_helper(str_ro_reactions(reactions_str))
def get_ore_for_fuel_helper(r: Reactions) -> int:
    # Expand the FUEL reaction step by step, repeatedly replacing one non-ORE
    # requirement with its own inputs (reusing leftovers from earlier
    # batches), until only ORE remains; return that ORE amount.
    fuel_amount, fuel_key = get_fuel_amount_to_be_produced(r)
    assert fuel_key
    # Leftover units of each chemical produced by earlier over-production.
    unused_chemicals = collections.defaultdict(lambda: 0)
    current_chemical_amounts = get_chemical_requirements(r, unused_chemicals, (fuel_amount, fuel_key))
    # Loop invariant: finishes when the single remaining entry is "ORE".
    while len(current_chemical_amounts) != 1:
        chemical = get_next_non_ore_chemical(current_chemical_amounts)
        amount = current_chemical_amounts[chemical]
        del current_chemical_amounts[chemical]
        new_requirements = get_chemical_requirements(r, unused_chemicals, (amount, chemical))
        merge_chemical_requirements(current_chemical_amounts, new_requirements)
    # The one surviving key is "ORE"; its value is the total ORE required.
    needed_ore = current_chemical_amounts[next(iter(current_chemical_amounts))]
    return needed_ore
def set_fuel_requirement(r: "Reactions", fuel_amount: int) -> "Reactions":
    """Return a shallow copy of ``r`` whose FUEL reaction is scaled to produce
    ``fuel_amount`` units per batch, with inputs scaled by the same factor.

    The original bound the existing production amount to an unused local;
    that binding is dropped. Annotations are quoted (forward references) so
    the function imports independently of the alias definitions.
    """
    chemical = "FUEL"
    new_reactions = dict(r)
    _, requirements = r[chemical]
    new_requirements = [(r_amount * fuel_amount, r_chemical)
                        for r_amount, r_chemical in requirements]
    new_reactions[chemical] = (fuel_amount, new_requirements)
    return new_reactions
def bisect_max_fuel(reactions_str: str) -> int:
    """Binary-search the largest FUEL amount producible from one trillion ORE.

    Bug fix: the original success branch was ``elif mid > max_fuel``, which
    never advanced ``low`` when the midpoint fit the ORE budget but was not
    strictly greater than the best answer so far (possible when the true
    answer is 1, since ``max_fuel`` starts at 1), so the loop never
    terminated. The success branch now always records ``mid`` and advances
    ``low`` -- ``mid >= low > previous max_fuel`` holds, so the recorded
    value is always the best found.
    """
    r = str_ro_reactions(reactions_str)
    available_ore = 1000000000000
    # The answer can never exceed one fuel per unit of ore.
    low, high = 1, available_ore
    max_fuel = 1
    while low <= high:
        mid = (low + high) // 2
        needed_ore = get_ore_for_fuel_helper(set_fuel_requirement(r, mid))
        if needed_ore > available_ore:
            high = mid - 1
        else:
            # mid fuel fits in the budget: record it and search higher.
            max_fuel = mid
            low = mid + 1
    return max_fuel
def part1():
    """Solve part 1 on the puzzle input and sanity-check the known answer."""
    assert get_ore_for_fuel(get_file_contents()) == 783895
def part2():
    """Solve part 2 on the puzzle input, print it, and sanity-check the answer."""
    answer = bisect_max_fuel(get_file_contents())
    print(answer)
    assert answer == 1896688
class Tests(unittest.TestCase):
    """Worked examples from the Advent of Code 2019 day 14 problem statement."""

    def test_samples(self):
        # Part 1 examples: minimum ORE required for exactly one FUEL.
        ore_amount = get_ore_for_fuel("""
10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL""")
        self.assertEqual(ore_amount, 31)
        ore_amount = get_ore_for_fuel("""
9 ORE => 2 A
8 ORE => 3 B
7 ORE => 5 C
3 A, 4 B => 1 AB
5 B, 7 C => 1 BC
4 C, 1 A => 1 CA
2 AB, 3 BC, 4 CA => 1 FUEL""")
        self.assertEqual(ore_amount, 165)
        ore_amount = get_ore_for_fuel("""
10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL""")
        self.assertEqual(ore_amount, 31)
        ore_amount = get_ore_for_fuel("""
157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT""")
        self.assertEqual(ore_amount, 13312)
        ore_amount = get_ore_for_fuel("""
2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF""")
        self.assertEqual(ore_amount, 180697)
        ore_amount = get_ore_for_fuel("""
171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX""")
        self.assertEqual(ore_amount, 2210736)
        # Part 2 examples: maximum FUEL from one trillion ORE.
        max_fuel = bisect_max_fuel("""
157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT""")
        self.assertEqual(max_fuel, 82892753)
        max_fuel = bisect_max_fuel("""
2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF""")
        self.assertEqual(max_fuel, 5586022)
        max_fuel = bisect_max_fuel("""
171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX""")
        self.assertEqual(max_fuel, 460664)
if __name__ == '__main__':
part1()
part2()
unittest.main() | python/p14.py | import os
import unittest
import typing
import math
import collections
def get_file_contents() -> str:
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, "..", "data", "d14.txt")
with open(file_path, "r") as f:
lines = f.read()
return lines
ChemicalName = str
ProductionAmount = int
AmountAndChemical = typing.Tuple[int, ChemicalName]
Requirements = typing.List[AmountAndChemical]
Reaction = typing.Tuple[ProductionAmount, Requirements]
Reactions = typing.Dict[ChemicalName, Reaction]
CurrentChemicalAmounts = typing.MutableMapping[ChemicalName, int]
def str_to_chemical_amount(s: str) -> AmountAndChemical:
s_split = s.split(" ")
return int(s_split[0]), s_split[1]
def get_fuel_amount_to_be_produced(r: Reactions) -> typing.Optional[AmountAndChemical]:
for chemical in r.keys():
if chemical == "FUEL":
return r[chemical][0], chemical
return None
def str_ro_reactions(reactions_input_str: str) -> Reactions:
reactions: Reactions = {}
reaction_lines = reactions_input_str.strip().splitlines(keepends=False)
for reaction_line in reaction_lines:
requirements_str, produces_chemical_str = reaction_line.strip().split(" => ")
produces_chemical = str_to_chemical_amount(produces_chemical_str)
requirements_split = requirements_str.split(", ")
requirements = [str_to_chemical_amount(r) for r in requirements_split]
reaction = produces_chemical[0], requirements
reactions[produces_chemical[1]] = reaction
return reactions
def get_chemical_requirements(r: Reactions,
unused_chemicals: CurrentChemicalAmounts,
chemical_amount: AmountAndChemical) -> CurrentChemicalAmounts:
needed_amount, chemical = chemical_amount
if chemical == "ORE":
return {}
unused_amount = unused_chemicals[chemical]
if unused_amount >= needed_amount:
unused_chemicals[chemical] -= needed_amount
return {}
production_amount, reaction_requirements = r[chemical]
reaction_multiplier = 1
if (production_amount + unused_amount) < needed_amount:
reaction_multiplier = math.ceil((needed_amount - unused_amount) / production_amount)
reaction_requirements = {r_chemical: r_amount * reaction_multiplier
for r_amount, r_chemical in reaction_requirements}
unused_chemicals[chemical] = (production_amount * reaction_multiplier + unused_amount) - needed_amount
return reaction_requirements
def merge_chemical_requirements(current_requirements: CurrentChemicalAmounts, new_requirements: CurrentChemicalAmounts):
for chemical, amount in new_requirements.items():
if chemical in current_requirements:
current_requirements[chemical] += amount
else:
current_requirements[chemical] = amount
def get_next_non_ore_chemical(current_chemicals: CurrentChemicalAmounts) -> typing.Optional[ChemicalName]:
for k in current_chemicals.keys():
if k != "ORE":
return k
return None
def get_ore_for_fuel(reactions_str: str) -> int:
r = str_ro_reactions(reactions_str)
return get_ore_for_fuel_helper(r)
def get_ore_for_fuel_helper(r: Reactions) -> int:
fuel_amount, fuel_key = get_fuel_amount_to_be_produced(r)
assert fuel_key
unused_chemicals = collections.defaultdict(lambda: 0)
current_chemical_amounts = get_chemical_requirements(r, unused_chemicals, (fuel_amount, fuel_key))
while len(current_chemical_amounts) != 1:
chemical = get_next_non_ore_chemical(current_chemical_amounts)
amount = current_chemical_amounts[chemical]
del current_chemical_amounts[chemical]
new_requirements = get_chemical_requirements(r, unused_chemicals, (amount, chemical))
merge_chemical_requirements(current_chemical_amounts, new_requirements)
needed_ore = current_chemical_amounts[next(iter(current_chemical_amounts))]
return needed_ore
def set_fuel_requirement(r: Reactions, fuel_amount: int) -> Reactions:
chemical = "FUEL"
new_reactions = dict(r)
amount, requirements = r[chemical]
new_requirements = [(r_amount * fuel_amount, r_chemical)
for r_amount, r_chemical in requirements]
new_reactions[chemical] = (fuel_amount, new_requirements)
return new_reactions
def bisect_max_fuel(reactions_str: str) -> int:
r = str_ro_reactions(reactions_str)
available_ore = 1000000000000
max_fuel_heuristic = available_ore
low = 1
high = max_fuel_heuristic
max_fuel = 1
while low <= high:
mid = math.floor((low + high) / 2)
new_reactions = set_fuel_requirement(r, mid)
needed_ore = get_ore_for_fuel_helper(new_reactions)
if needed_ore > available_ore:
high = mid - 1
elif mid > max_fuel:
low = mid + 1
max_fuel = mid
return max_fuel
def part1():
input_reactions = get_file_contents()
ore_amount = get_ore_for_fuel(input_reactions)
assert ore_amount == 783895
def part2():
input_reactions = get_file_contents()
max_fuel = bisect_max_fuel(input_reactions)
print(max_fuel)
assert max_fuel == 1896688
class Tests(unittest.TestCase):
def test_samples(self):
ore_amount = get_ore_for_fuel("""
10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL""")
self.assertEqual(ore_amount, 31)
ore_amount = get_ore_for_fuel("""
9 ORE => 2 A
8 ORE => 3 B
7 ORE => 5 C
3 A, 4 B => 1 AB
5 B, 7 C => 1 BC
4 C, 1 A => 1 CA
2 AB, 3 BC, 4 CA => 1 FUEL""")
self.assertEqual(ore_amount, 165)
ore_amount = get_ore_for_fuel("""
10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL""")
self.assertEqual(ore_amount, 31)
ore_amount = get_ore_for_fuel("""
157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT""")
self.assertEqual(ore_amount, 13312)
ore_amount = get_ore_for_fuel("""
2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF""")
self.assertEqual(ore_amount, 180697)
ore_amount = get_ore_for_fuel("""
171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX""")
self.assertEqual(ore_amount, 2210736)
max_fuel = bisect_max_fuel("""
157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT""")
self.assertEqual(max_fuel, 82892753)
max_fuel = bisect_max_fuel("""
2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF""")
self.assertEqual(max_fuel, 5586022)
max_fuel = bisect_max_fuel("""
171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX""")
self.assertEqual(max_fuel, 460664)
if __name__ == '__main__':
part1()
part2()
unittest.main() | 0.409221 | 0.373333 |
from lib.cuckoo.common.abstracts import Signature
class RansomwareExtensions(Signature):
    """Flags dropped/modified files whose paths end in a file extension known
    to be appended by a ransomware family after encrypting victim files.
    """
    name = "ransomware_extensions"
    description = "Appends known ransomware file extensions to files that have been encrypted"
    severity = 3
    categories = ["ransomware"]
    authors = ["<NAME>"]

    # (regex matched against observed file paths, [family name(s)]).
    # Patterns are raw strings so the regex escapes (e.g. "\.") are not
    # interpreted as invalid Python string escapes (SyntaxWarning on
    # CPython >= 3.12); the pattern text itself is unchanged.
    indicators = [
        (r".*\.(?:R5A|R4A)$", ["7ev3n"]),
        (r".*\.Alcatraz$", ["Alcatraz-Locker"]),
        (r".*\.adk$", ["AngryDuck"]),
        (r".*\.bart\.zip$", ["Bart"]),
        (r".*\.(?:CHIP|DALE)$", ["CHIP"]),
        (r".*\.comrade$", ["Comrade-Circle"]),
        (r".*\.cry$", ["CryLocker"]),
        (r".*_luck$", ["CryptoLuck"]),
        (r".*\.CrySiS$", ["Crysis"]),
        (r".*\.(?:id_[^\/]*\.rscl|id_[^\/]*\.scl)$", ["CryptFile2"]),
        (r".*\.(?:lesli|WALLET)$", ["CryptoMix"]),
        (r".*\.CRYPTOSHIELD$", ["CryptoShield"]),
        (r".*\.(?:crypz|cryp1|[0-9A-F]{32}\.[0-9A-F]{5})$", ["CryptXXX"]),
        (r".*\.onion$", ["Dharma"]),
        (r".*\.domino$", ["Domino"]),
        (r".*\.dCrypt$", ["DummyLocker"]),
        (r".*dxxd$", ["DXXD"]),
        (r".*\.1txt$", ["Enigma"]),
        (r".*\.exotic$", ["Exotic"]),
        (r".*\.fantom$", ["Fantom"]),
        (r".*\.fs0ciety$", ["Fsociety"]),
        (r".*\.(?:purge|globe|raid10|lovewindows)$", ["Globe"]),
        (r".*\.rnsmwr$", ["Gremit"]),
        (r".*\.~HL[A-Z0-9]{5}$", ["HadesLocker"]),
        (r".*\.herbst$", ["Herbst"]),
        (r".*\.(?:hydracrypt_ID_[a-z0-9]{8}|hydracrypttmp_ID_[a-z0-9]{8})$", ["HydraCrypt"]),
        (r".*\.jaff$", ["Jaff"]),
        # NOTE(review): the entry above is subsumed by the next pattern; both
        # are kept to preserve the original matching/description behavior.
        (r".*\.(?:jaff|wlu)$", ["Jaff"]),
        (r".*\.kraken$", ["Kraken"]),
        (r".*\.grt$", ["Karmen"]),
        (r".*\.rip$", ["KillerLocker"]),
        (r".*\.k0stya$", ["Kostya"]),
        (r".*\.lock93$", ["Lock93"]),
        (r".*\.locklock$", ["LockLock"]),
        (r".*\.(?:locky|zepto|odin|shit|thor|aesir|zzzzz|osiris)$", ["Locky"]),
        (r".*\.MOLE$", ["Mole"]),
        (r".*\.mordor$", ["Mordor"]),
        (r".*\.(?:crypted|crypt|encrypted|encrypt|enc|locked|lock)$", ["multi-family"]),
        (r".*\.(?:0x5bm|nuclear55)$", ["Nuke"]),
        (r".*_nullbyte$", ["Nullbyte"]),
        (r".*\.sexy$", ["PayDay"]),
        (r".*\.razy$", ["Razy"]),
        (r".*\.REVENGE$", ["Revenge"]),
        (r".*\.sage$", ["Sage"]),
        (r".*\.serpent$", ["Serpent"]),
        (r".*\.toxcrypt$", ["ToxCrypt"]),
        (r".*\.(?:da_vinci_code|magic_software_syndicate|no_more_ransom|Dexter)$", ["Troldesh"]),
        (r".*\.Venus(f|p)$", ["VenusLocker"]),
        (r".*\.(?:WNCRY|WNCRYT|WCRY)$", ["WannaCry"]),
        (r".*\.wflx$", ["WildFire-Locker"]),
    ]

    def on_complete(self):
        """Mark every observed file matching a known ransomware extension.

        Updates the signature description to name the matched family (the
        last matching indicator wins if several match) and returns True
        when at least one file was marked.
        """
        for indicator in self.indicators:
            for filepath in self.check_file(pattern=indicator[0], regex=True, all=True):
                self.mark_ioc("file", filepath)
                if indicator[1]:
                    self.description = (
                        "Appends a known %s ransomware file extension to "
                        "files that have been encrypted" %
                        "/".join(indicator[1])
                    )
        return self.has_marks()
from lib.cuckoo.common.abstracts import Signature
class RansomwareExtensions(Signature):
name = "ransomware_extensions"
description = "Appends known ransomware file extensions to files that have been encrypted"
severity = 3
categories = ["ransomware"]
authors = ["<NAME>"]
indicators = [
(".*\.(?:R5A|R4A)$", ["7ev3n"]),
(".*\.Alcatraz$", ["Alcatraz-Locker"]),
(".*\.adk$", ["AngryDuck"]),
(".*\.bart\.zip$", ["Bart"]),
(".*\.(?:CHIP|DALE)$", ["CHIP"]),
(".*\.comrade$", ["Comrade-Circle"]),
(".*\.cry$", ["CryLocker"]),
(".*_luck$", ["CryptoLuck"]),
(".*\.CrySiS$", ["Crysis"]),
(".*\.(?:id_[^\/]*\.rscl|id_[^\/]*\.scl)$", ["CryptFile2"]),
(".*\.(?:lesli|WALLET)$", ["CryptoMix"]),
(".*\.CRYPTOSHIELD$", ["CryptoShield"]),
(".*\.(?:crypz|cryp1|[0-9A-F]{32}\.[0-9A-F]{5})$", ["CryptXXX"]),
(".*\.onion$", ["Dharma"]),
(".*\.domino$", ["Domino"]),
(".*\.dCrypt$", ["DummyLocker"]),
(".*dxxd$", ["DXXD"]),
(".*\.1txt$", ["Enigma"]),
(".*\.exotic$", ["Exotic"]),
(".*\.fantom$", ["Fantom"]),
(".*\.fs0ciety$", ["Fsociety"]),
(".*\.(?:purge|globe|raid10|lovewindows)$", ["Globe"]),
(".*\.rnsmwr$", ["Gremit"]),
(".*\.~HL[A-Z0-9]{5}$", ["HadesLocker"]),
(".*\.herbst$", ["Herbst"]),
(".*\.(?:hydracrypt_ID_[a-z0-9]{8}|hydracrypttmp_ID_[a-z0-9]{8})$", ["HydraCrypt"]),
(".*\.jaff$", ["Jaff"]),
(".*\.(?:jaff|wlu)$", ["Jaff"]),
(".*\.kraken$", ["Kraken"]),
(".*\.grt$", ["Karmen"]),
(".*\.rip$", ["KillerLocker"]),
(".*\.k0stya$", ["Kostya"]),
(".*\.lock93$", ["Lock93"]),
(".*\.locklock$", ["LockLock"]),
(".*\.(?:locky|zepto|odin|shit|thor|aesir|zzzzz|osiris)$", ["Locky"]),
(".*\.MOLE$", ["Mole"]),
(".*\.mordor$", ["Mordor"]),
(".*\.(?:crypted|crypt|encrypted|encrypt|enc|locked|lock)$", ["multi-family"]),
(".*\.(?:0x5bm|nuclear55)$", ["Nuke"]),
(".*_nullbyte$", ["Nullbyte"]),
(".*\.sexy$", ["PayDay"]),
(".*\.razy$", ["Razy"]),
(".*\.REVENGE$", ["Revenge"]),
(".*\.sage$", ["Sage"]),
(".*\.serpent$", ["Serpent"]),
(".*\.toxcrypt$", ["ToxCrypt"]),
(".*\.(?:da_vinci_code|magic_software_syndicate|no_more_ransom|Dexter)$", ["Troldesh"]),
(".*\.Venus(f|p)$", ["VenusLocker"]),
(".*\.(?:WNCRY|WNCRYT|WCRY)$", ["WannaCry"]),
(".*\.wflx$", ["WildFire-Locker"]),
]
def on_complete(self):
for indicator in self.indicators:
for filepath in self.check_file(pattern=indicator[0], regex=True, all=True):
self.mark_ioc("file", filepath)
if indicator[1]:
self.description = (
"Appends a known %s ransomware file extension to "
"files that have been encrypted" %
"/".join(indicator[1])
)
return self.has_marks() | 0.510252 | 0.224459 |
from plugin.core.constants import PLUGIN_VERSION_BASE
from plugin.core.helpers.variable import all
from lxml import etree
import shutil
import os
class FSMigrator(object):
    """Registry that collects Migration classes and runs them in order."""

    # Shared, ordered list of instantiated migrations.
    migrations = []

    @classmethod
    def register(cls, migration):
        """Instantiate ``migration`` and queue it for the next run()."""
        cls.migrations.append(migration())

    @classmethod
    def run(cls):
        """Execute every registered migration, logging each before it runs."""
        for item in cls.migrations:
            Log.Debug('Running migration: %s', item)
            item.run()
class Migration(object):
    """Base class for filesystem migrations run by FSMigrator.

    Provides path helpers rooted at the plugin's code directory, utilities
    for reading/updating the plugin preferences XML, and best-effort
    file/directory deletion helpers.

    NOTE(review): relies on the Plex plugin-framework globals ``Core`` and
    ``Log`` being in scope at runtime -- confirm against the framework.
    """

    @property
    def code_path(self):
        # Plugin code directory, supplied by the Plex framework.
        return Core.code_path

    @property
    def lib_path(self):
        return os.path.join(self.code_path, '..', 'Libraries')

    @property
    def tests_path(self):
        return os.path.join(self.code_path, '..', 'Tests')

    @property
    def plex_path(self):
        # Four levels above the code directory: the Plex data root.
        return os.path.abspath(os.path.join(self.code_path, '..', '..', '..', '..'))

    @property
    def preferences_path(self):
        return os.path.join(self.plex_path, 'Plug-in Support', 'Preferences', 'com.plexapp.plugins.trakttv.xml')

    def get_preferences(self):
        """Parse the preferences XML into a {tag: text} dict ({} if missing)."""
        if not os.path.exists(self.preferences_path):
            Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
            return {}
        data = Core.storage.load(self.preferences_path)
        doc = etree.fromstring(data)
        return dict([(elem.tag, elem.text) for elem in doc])

    def set_preferences(self, changes):
        """Apply ``changes`` ({key: value}) to the preferences XML on disk.

        Returns False when the preferences file is missing; otherwise writes
        the updated document back (implicitly returning None, as before).
        """
        if not os.path.exists(self.preferences_path):
            Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
            return False
        data = Core.storage.load(self.preferences_path)
        doc = etree.fromstring(data)
        for key, value in changes.items():
            elem = doc.find(key)
            # Ensure node exists
            if elem is None:
                elem = etree.SubElement(doc, key)
            # Update node value, ensure it is a string
            elem.text = str(value)
            Log.Debug('Updated preference with key "%s" to value %s', key, repr(value))
        # Write back new preferences
        Core.storage.save(self.preferences_path, etree.tostring(doc, pretty_print=True))

    @staticmethod
    def delete_file(path, conditions=None):
        """Delete ``path`` if every condition callable accepts it.

        Bug fix: the original iterated ``conditions`` unconditionally, so
        calling with the default ``conditions=None`` raised TypeError. A
        falsy ``conditions`` is now treated as "no conditions" (an empty
        list already vacuously passed the all() check, so behavior for
        existing callers is unchanged).
        """
        if conditions and not all([c(path) for c in conditions]):
            return False
        try:
            os.remove(path)
            return True
        except Exception as ex:
            Log.Warn('Unable to remove file %r - %s', path, ex, exc_info=True)
        return False

    @staticmethod
    def delete_directory(path, conditions=None):
        """Recursively delete directory ``path`` if every condition accepts it.

        Same ``conditions=None`` fix as delete_file.
        """
        if conditions and not all([c(path) for c in conditions]):
            return False
        try:
            shutil.rmtree(path)
            return True
        except Exception as ex:
            Log.Warn('Unable to remove directory %r - %s', path, ex, exc_info=True)
        return False
class Clean(Migration):
    """One-off cleanup migration.

    Removes source files, native binaries and directories left behind by
    earlier plugin versions from the Code, Libraries and Tests trees.
    Each task is an (action, relative paths, precondition) tuple consumed
    by execute().
    """
    # Stale files/directories relative to the plugin Code directory.
    tasks_code = [
        (
            'delete_file', [
                # /core
                'core/action.py',
                'core/cache.py',
                'core/configuration.py',
                'core/environment.py',
                'core/eventing.py',
                'core/localization.py',
                'core/logging_handler.py',
                'core/logging_reporter.py',
                'core/method_manager.py',
                'core/migrator.py',
                'core/model.py',
                'core/network.py',
                'core/numeric.py',
                'core/plugin.py',
                'core/task.py',
                'core/trakt.py',
                'core/trakt_objects.py',
                'core/update_checker.py',
                # /interface
                'interface/main_menu.py',
                'interface/sync_menu.py',
                # /
                'libraries.py',
                'sync.py'
            ], os.path.isfile
        ),
        (
            'delete_directory', [
                'data',
                'plex',
                'pts',
                'sync'
            ], os.path.isdir
        )
    ]
    # Stale entries relative to the bundled Libraries directory.
    tasks_lib = [
        (
            'delete_file', [
                # plugin
                'Shared/plugin/api/account.py',
                'Shared/plugin/core/event.py',
                'Shared/plugin/core/helpers/database.py',
                'Shared/plugin/core/io.py',
                'Shared/plugin/core/jsonw.py',
                'Shared/plugin/core/libraries/main.py',
                'Shared/plugin/core/libraries/tests/pyopenssl_.py',
                'Shared/plugin/core/logger/handlers/error_reporter.py',
                'Shared/plugin/core/session_status.py',
                'Shared/plugin/models/core/exceptions.py',
                'Shared/plugin/modules/base.py',
                'Shared/plugin/modules/manager.py',
                'Shared/plugin/preferences/options/core/base.py',
                'Shared/plugin/sync/modes/core/base.py',
                'Shared/plugin/sync/modes/fast_pull.py',
                'Shared/plugin/sync/modes/pull.py',
                'Shared/plugin/sync/modes/push.py',
                # native
                'FreeBSD/i386/apsw.so',
                'FreeBSD/i386/llist.so',
                'FreeBSD/i386/ucs2/apsw.dependencies',
                'FreeBSD/i386/ucs2/apsw.file',
                'FreeBSD/i386/ucs2/llist.dependencies',
                'FreeBSD/i386/ucs2/llist.file',
                'FreeBSD/i386/ucs4/apsw.dependencies',
                'FreeBSD/i386/ucs4/apsw.file',
                'FreeBSD/i386/ucs4/llist.dependencies',
                'FreeBSD/i386/ucs4/llist.file',
                'FreeBSD/x86_64/ucs2/apsw.dependencies',
                'FreeBSD/x86_64/ucs2/apsw.file',
                'FreeBSD/x86_64/ucs2/llist.dependencies',
                'FreeBSD/x86_64/ucs2/llist.file',
                'FreeBSD/x86_64/ucs4/apsw.dependencies',
                'FreeBSD/x86_64/ucs4/apsw.file',
                'FreeBSD/x86_64/ucs4/llist.dependencies',
                'FreeBSD/x86_64/ucs4/llist.file',
                'Windows/i386/apsw.pyd',
                'Windows/i386/llist.pyd',
                'Linux/i386/apsw.so',
                'Linux/i386/llist.so',
                'Linux/x86_64/apsw.so',
                'Linux/x86_64/llist.so',
                'Linux/armv6_hf/ucs4/apsw.dependencies',
                'Linux/armv6_hf/ucs4/apsw.file',
                'Linux/armv6_hf/ucs4/apsw.header',
                'Linux/armv6_hf/ucs4/llist.dependencies',
                'Linux/armv6_hf/ucs4/llist.file',
                'Linux/armv6_hf/ucs4/llist.header',
                'Linux/armv7_hf/ucs4/apsw.dependencies',
                'Linux/armv7_hf/ucs4/apsw.file',
                'Linux/armv7_hf/ucs4/apsw.header',
                'Linux/armv7_hf/ucs4/llist.dependencies',
                'Linux/armv7_hf/ucs4/llist.file',
                'Linux/armv7_hf/ucs4/llist.header',
                'Linux/i386/ucs2/apsw.dependencies',
                'Linux/i386/ucs2/apsw.file',
                'Linux/i386/ucs2/llist.dependencies',
                'Linux/i386/ucs2/llist.file',
                'Linux/i386/ucs4/apsw.dependencies',
                'Linux/i386/ucs4/apsw.file',
                'Linux/i386/ucs4/llist.dependencies',
                'Linux/i386/ucs4/llist.file',
                'Linux/x86_64/ucs2/apsw.dependencies',
                'Linux/x86_64/ucs2/apsw.file',
                'Linux/x86_64/ucs2/llist.dependencies',
                'Linux/x86_64/ucs2/llist.file',
                'Linux/x86_64/ucs4/apsw.dependencies',
                'Linux/x86_64/ucs4/apsw.file',
                'Linux/x86_64/ucs4/llist.dependencies',
                'Linux/x86_64/ucs4/llist.file',
                'MacOSX/i386/ucs2/apsw.dependencies',
                'MacOSX/i386/ucs2/apsw.file',
                'MacOSX/i386/ucs2/llist.dependencies',
                'MacOSX/i386/ucs2/llist.file',
                'MacOSX/i386/ucs4/apsw.dependencies',
                'MacOSX/i386/ucs4/apsw.file',
                'MacOSX/i386/ucs4/llist.dependencies',
                'MacOSX/i386/ucs4/llist.file',
                'MacOSX/x86_64/ucs2/apsw.dependencies',
                'MacOSX/x86_64/ucs2/apsw.file',
                'MacOSX/x86_64/ucs2/llist.dependencies',
                'MacOSX/x86_64/ucs2/llist.file',
                'MacOSX/x86_64/ucs4/apsw.dependencies',
                'MacOSX/x86_64/ucs4/apsw.file',
                'MacOSX/x86_64/ucs4/llist.dependencies',
                'MacOSX/x86_64/ucs4/llist.file',
                'Windows/i386/ucs2/apsw.pyd',
                'Windows/i386/ucs2/llist.pyd',
                # asio
                'Shared/asio.py',
                'Shared/asio_base.py',
                'Shared/asio_posix.py',
                'Shared/asio_windows.py',
                'Shared/asio_windows_interop.py',
                # concurrent
                'Shared/concurrent/futures/_compat.py',
                # msgpack
                'Shared/msgpack/_packer.pyx',
                'Shared/msgpack/_unpacker.pyx',
                'Shared/msgpack/pack.h',
                'Shared/msgpack/pack_template.h',
                'Shared/msgpack/sysdep.h',
                'Shared/msgpack/unpack.h',
                'Shared/msgpack/unpack_define.h',
                'Shared/msgpack/unpack_template.h',
                # playhouse
                'Shared/playhouse/pskel',
                # plex.py
                'Shared/plex/core/compat.py',
                'Shared/plex/core/event.py',
                'Shared/plex/interfaces/library.py',
                'Shared/plex/interfaces/plugin.py',
                # plex.metadata.py
                'Shared/plex_metadata/core/cache.py',
                # raven
                'Shared/raven/transport/aiohttp.py',
                'Shared/raven/transport/udp.py',
                'Shared/raven/utils/six.py',
                # requests
                'Shared/requests/packages/urllib3/util.py',
                'Shared/requests/packages/README.rst',
                # trakt.py
                'Shared/trakt/core/context.py',
                'Shared/trakt/interfaces/base/media.py',
                'Shared/trakt/interfaces/account.py',
                'Shared/trakt/interfaces/rate.py',
                'Shared/trakt/interfaces/sync/base.py',
                'Shared/trakt/media_mapper.py',
                'Shared/trakt/objects.py',
                'Shared/trakt/objects/list.py',
                'Shared/trakt/request.py',
                # tzlocal
                'Shared/tzlocal/tests.py',
                # websocket
                'Shared/websocket.py'
            ], os.path.isfile
        ),
        (
            'delete_directory', [
                # plugin
                'Shared/plugin/core/collections',
                'Shared/plugin/data',
                'Shared/plugin/modules/backup',
                'Shared/plugin/raven',
                # native
                'MacOSX/universal',
                # pytz
                'Shared/pytz/tests',
                # raven
                'Shared/raven',
                # shove
                'Shared/shove',
                # stuf
                'Shared/stuf',
                # trakt.py
                'Shared/trakt/interfaces/movie',
                'Shared/trakt/interfaces/show',
                'Shared/trakt/interfaces/user',
                # tzlocal
                'Shared/tzlocal/test_data'
            ], os.path.isdir
        )
    ]
    # Stale entries relative to the Tests directory.
    # NOTE(review): 'tests/scrobbler/engine_tests.py' is listed under
    # delete_directory with an os.path.isdir precondition, so a plain file
    # at that path would never be removed - confirm intent.
    tasks_tests = [
        (
            'delete_file', [
            ], os.path.isfile
        ),
        (
            'delete_directory', [
                'tests/core/mock',
                'tests/scrobbler/engine_tests.py',
            ], os.path.isdir
        )
    ]
    def run(self):
        """Run the cleanup, but only for plugin versions >= 0.8."""
        if PLUGIN_VERSION_BASE >= (0, 8):
            self.upgrade()
    def upgrade(self):
        """Apply each task table against its corresponding base directory."""
        self.execute(self.tasks_code, 'upgrade', self.code_path)
        self.execute(self.tasks_lib, 'upgrade', self.lib_path)
        self.execute(self.tasks_tests, 'upgrade', self.tests_path)
    def execute(self, tasks, name, base_path):
        """Apply (action, paths, conditions) *tasks* rooted at *base_path*.

        *action* names a method on this object (delete_file /
        delete_directory); *conditions* gate each removal. For removed
        .py files the matching .pyc is removed as well.
        """
        for action, paths, conditions in tasks:
            if type(paths) is not list:
                paths = [paths]
            if type(conditions) is not list:
                conditions = [conditions]
            if not hasattr(self, action):
                Log.Error('Unknown migration action "%s"', action)
                continue
            m = getattr(self, action)
            for path in paths:
                path = os.path.join(base_path, path)
                path = os.path.abspath(path)
                # Remove file
                if m(path, conditions):
                    Log.Info('(%s) %s: "%s"', name, action, path)
                # Remove .pyc files as-well
                if path.endswith('.py') and m(path + 'c', conditions):
                    Log.Info('(%s) %s: "%s"', name, action, path + 'c')
class ForceLegacy(Migration):
    """Migrates the 'force_legacy' option to the 'activity_mode' option."""

    def run(self):
        self.upgrade()

    def upgrade(self):
        """Set 'activity_mode' to "1" when the old legacy flag was enabled."""
        if not os.path.exists(self.preferences_path):
            Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
            return

        prefs = self.get_preferences()

        # The old flag is stored as the string "true"/"false" (or is absent).
        raw_flag = prefs.get('force_legacy')
        if raw_flag is None or raw_flag.lower() != "true":
            # Flag missing or disabled - nothing to migrate.
            return

        # Respect an 'activity_mode' the user has already chosen.
        if prefs.get('activity_mode') is not None:
            return

        self.set_preferences({'activity_mode': '1'})
class SelectiveSync(Migration):
    """Migrates the syncing task bool options to selective synchronize/push/pull enums"""

    # Boolean preferences to be converted to enum values.
    option_keys = [
        'sync_watched',
        'sync_ratings',
        'sync_collection'
    ]

    # Old boolean string -> new enum string.
    value_map = {
        'false': '0',
        'true': '1',
    }

    def run(self):
        self.upgrade()

    def upgrade(self):
        """Rewrite each recognised boolean option to its enum equivalent."""
        prefs = self.get_preferences()

        # Translate every known key whose value has an enum mapping.
        changes = {
            key: self.value_map[value]
            for key, value in prefs.items()
            if key in self.option_keys and value in self.value_map
        }

        if not changes:
            return

        Log.Debug('Updating preferences with changes: %s', changes)
        self.set_preferences(changes)
# Register migrations in the order FSMigrator.run() will execute them.
FSMigrator.register(Clean)
FSMigrator.register(ForceLegacy)
FSMigrator.register(SelectiveSync) | Trakttv.bundle/Contents/Code/fs_migrator.py | from plugin.core.constants import PLUGIN_VERSION_BASE
from plugin.core.helpers.variable import all
from lxml import etree
import shutil
import os
class FSMigrator(object):
migrations = []
@classmethod
def register(cls, migration):
cls.migrations.append(migration())
@classmethod
def run(cls):
for migration in cls.migrations:
Log.Debug('Running migration: %s', migration)
migration.run()
class Migration(object):
@property
def code_path(self):
return Core.code_path
@property
def lib_path(self):
return os.path.join(self.code_path, '..', 'Libraries')
@property
def tests_path(self):
return os.path.join(self.code_path, '..', 'Tests')
@property
def plex_path(self):
return os.path.abspath(os.path.join(self.code_path, '..', '..', '..', '..'))
@property
def preferences_path(self):
return os.path.join(self.plex_path, 'Plug-in Support', 'Preferences', 'com.plexapp.plugins.trakttv.xml')
def get_preferences(self):
if not os.path.exists(self.preferences_path):
Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
return {}
data = Core.storage.load(self.preferences_path)
doc = etree.fromstring(data)
return dict([(elem.tag, elem.text) for elem in doc])
def set_preferences(self, changes):
if not os.path.exists(self.preferences_path):
Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
return False
data = Core.storage.load(self.preferences_path)
doc = etree.fromstring(data)
for key, value in changes.items():
elem = doc.find(key)
# Ensure node exists
if elem is None:
elem = etree.SubElement(doc, key)
# Update node value, ensure it is a string
elem.text = str(value)
Log.Debug('Updated preference with key "%s" to value %s', key, repr(value))
# Write back new preferences
Core.storage.save(self.preferences_path, etree.tostring(doc, pretty_print=True))
@staticmethod
def delete_file(path, conditions=None):
if not all([c(path) for c in conditions]):
return False
try:
os.remove(path)
return True
except Exception as ex:
Log.Warn('Unable to remove file %r - %s', path, ex, exc_info=True)
return False
@staticmethod
def delete_directory(path, conditions=None):
if not all([c(path) for c in conditions]):
return False
try:
shutil.rmtree(path)
return True
except Exception as ex:
Log.Warn('Unable to remove directory %r - %s', path, ex, exc_info=True)
return False
class Clean(Migration):
tasks_code = [
(
'delete_file', [
# /core
'core/action.py',
'core/cache.py',
'core/configuration.py',
'core/environment.py',
'core/eventing.py',
'core/localization.py',
'core/logging_handler.py',
'core/logging_reporter.py',
'core/method_manager.py',
'core/migrator.py',
'core/model.py',
'core/network.py',
'core/numeric.py',
'core/plugin.py',
'core/task.py',
'core/trakt.py',
'core/trakt_objects.py',
'core/update_checker.py',
# /interface
'interface/main_menu.py',
'interface/sync_menu.py',
# /
'libraries.py',
'sync.py'
], os.path.isfile
),
(
'delete_directory', [
'data',
'plex',
'pts',
'sync'
], os.path.isdir
)
]
tasks_lib = [
(
'delete_file', [
# plugin
'Shared/plugin/api/account.py',
'Shared/plugin/core/event.py',
'Shared/plugin/core/helpers/database.py',
'Shared/plugin/core/io.py',
'Shared/plugin/core/jsonw.py',
'Shared/plugin/core/libraries/main.py',
'Shared/plugin/core/libraries/tests/pyopenssl_.py',
'Shared/plugin/core/logger/handlers/error_reporter.py',
'Shared/plugin/core/session_status.py',
'Shared/plugin/models/core/exceptions.py',
'Shared/plugin/modules/base.py',
'Shared/plugin/modules/manager.py',
'Shared/plugin/preferences/options/core/base.py',
'Shared/plugin/sync/modes/core/base.py',
'Shared/plugin/sync/modes/fast_pull.py',
'Shared/plugin/sync/modes/pull.py',
'Shared/plugin/sync/modes/push.py',
# native
'FreeBSD/i386/apsw.so',
'FreeBSD/i386/llist.so',
'FreeBSD/i386/ucs2/apsw.dependencies',
'FreeBSD/i386/ucs2/apsw.file',
'FreeBSD/i386/ucs2/llist.dependencies',
'FreeBSD/i386/ucs2/llist.file',
'FreeBSD/i386/ucs4/apsw.dependencies',
'FreeBSD/i386/ucs4/apsw.file',
'FreeBSD/i386/ucs4/llist.dependencies',
'FreeBSD/i386/ucs4/llist.file',
'FreeBSD/x86_64/ucs2/apsw.dependencies',
'FreeBSD/x86_64/ucs2/apsw.file',
'FreeBSD/x86_64/ucs2/llist.dependencies',
'FreeBSD/x86_64/ucs2/llist.file',
'FreeBSD/x86_64/ucs4/apsw.dependencies',
'FreeBSD/x86_64/ucs4/apsw.file',
'FreeBSD/x86_64/ucs4/llist.dependencies',
'FreeBSD/x86_64/ucs4/llist.file',
'Windows/i386/apsw.pyd',
'Windows/i386/llist.pyd',
'Linux/i386/apsw.so',
'Linux/i386/llist.so',
'Linux/x86_64/apsw.so',
'Linux/x86_64/llist.so',
'Linux/armv6_hf/ucs4/apsw.dependencies',
'Linux/armv6_hf/ucs4/apsw.file',
'Linux/armv6_hf/ucs4/apsw.header',
'Linux/armv6_hf/ucs4/llist.dependencies',
'Linux/armv6_hf/ucs4/llist.file',
'Linux/armv6_hf/ucs4/llist.header',
'Linux/armv7_hf/ucs4/apsw.dependencies',
'Linux/armv7_hf/ucs4/apsw.file',
'Linux/armv7_hf/ucs4/apsw.header',
'Linux/armv7_hf/ucs4/llist.dependencies',
'Linux/armv7_hf/ucs4/llist.file',
'Linux/armv7_hf/ucs4/llist.header',
'Linux/i386/ucs2/apsw.dependencies',
'Linux/i386/ucs2/apsw.file',
'Linux/i386/ucs2/llist.dependencies',
'Linux/i386/ucs2/llist.file',
'Linux/i386/ucs4/apsw.dependencies',
'Linux/i386/ucs4/apsw.file',
'Linux/i386/ucs4/llist.dependencies',
'Linux/i386/ucs4/llist.file',
'Linux/x86_64/ucs2/apsw.dependencies',
'Linux/x86_64/ucs2/apsw.file',
'Linux/x86_64/ucs2/llist.dependencies',
'Linux/x86_64/ucs2/llist.file',
'Linux/x86_64/ucs4/apsw.dependencies',
'Linux/x86_64/ucs4/apsw.file',
'Linux/x86_64/ucs4/llist.dependencies',
'Linux/x86_64/ucs4/llist.file',
'MacOSX/i386/ucs2/apsw.dependencies',
'MacOSX/i386/ucs2/apsw.file',
'MacOSX/i386/ucs2/llist.dependencies',
'MacOSX/i386/ucs2/llist.file',
'MacOSX/i386/ucs4/apsw.dependencies',
'MacOSX/i386/ucs4/apsw.file',
'MacOSX/i386/ucs4/llist.dependencies',
'MacOSX/i386/ucs4/llist.file',
'MacOSX/x86_64/ucs2/apsw.dependencies',
'MacOSX/x86_64/ucs2/apsw.file',
'MacOSX/x86_64/ucs2/llist.dependencies',
'MacOSX/x86_64/ucs2/llist.file',
'MacOSX/x86_64/ucs4/apsw.dependencies',
'MacOSX/x86_64/ucs4/apsw.file',
'MacOSX/x86_64/ucs4/llist.dependencies',
'MacOSX/x86_64/ucs4/llist.file',
'Windows/i386/ucs2/apsw.pyd',
'Windows/i386/ucs2/llist.pyd',
# asio
'Shared/asio.py',
'Shared/asio_base.py',
'Shared/asio_posix.py',
'Shared/asio_windows.py',
'Shared/asio_windows_interop.py',
# concurrent
'Shared/concurrent/futures/_compat.py',
# msgpack
'Shared/msgpack/_packer.pyx',
'Shared/msgpack/_unpacker.pyx',
'Shared/msgpack/pack.h',
'Shared/msgpack/pack_template.h',
'Shared/msgpack/sysdep.h',
'Shared/msgpack/unpack.h',
'Shared/msgpack/unpack_define.h',
'Shared/msgpack/unpack_template.h',
# playhouse
'Shared/playhouse/pskel',
# plex.py
'Shared/plex/core/compat.py',
'Shared/plex/core/event.py',
'Shared/plex/interfaces/library.py',
'Shared/plex/interfaces/plugin.py',
# plex.metadata.py
'Shared/plex_metadata/core/cache.py',
# raven
'Shared/raven/transport/aiohttp.py',
'Shared/raven/transport/udp.py',
'Shared/raven/utils/six.py',
# requests
'Shared/requests/packages/urllib3/util.py',
'Shared/requests/packages/README.rst',
# trakt.py
'Shared/trakt/core/context.py',
'Shared/trakt/interfaces/base/media.py',
'Shared/trakt/interfaces/account.py',
'Shared/trakt/interfaces/rate.py',
'Shared/trakt/interfaces/sync/base.py',
'Shared/trakt/media_mapper.py',
'Shared/trakt/objects.py',
'Shared/trakt/objects/list.py',
'Shared/trakt/request.py',
# tzlocal
'Shared/tzlocal/tests.py',
# websocket
'Shared/websocket.py'
], os.path.isfile
),
(
'delete_directory', [
# plugin
'Shared/plugin/core/collections',
'Shared/plugin/data',
'Shared/plugin/modules/backup',
'Shared/plugin/raven',
# native
'MacOSX/universal',
# pytz
'Shared/pytz/tests',
# raven
'Shared/raven',
# shove
'Shared/shove',
# stuf
'Shared/stuf',
# trakt.py
'Shared/trakt/interfaces/movie',
'Shared/trakt/interfaces/show',
'Shared/trakt/interfaces/user',
# tzlocal
'Shared/tzlocal/test_data'
], os.path.isdir
)
]
tasks_tests = [
(
'delete_file', [
], os.path.isfile
),
(
'delete_directory', [
'tests/core/mock',
'tests/scrobbler/engine_tests.py',
], os.path.isdir
)
]
def run(self):
if PLUGIN_VERSION_BASE >= (0, 8):
self.upgrade()
def upgrade(self):
self.execute(self.tasks_code, 'upgrade', self.code_path)
self.execute(self.tasks_lib, 'upgrade', self.lib_path)
self.execute(self.tasks_tests, 'upgrade', self.tests_path)
def execute(self, tasks, name, base_path):
for action, paths, conditions in tasks:
if type(paths) is not list:
paths = [paths]
if type(conditions) is not list:
conditions = [conditions]
if not hasattr(self, action):
Log.Error('Unknown migration action "%s"', action)
continue
m = getattr(self, action)
for path in paths:
path = os.path.join(base_path, path)
path = os.path.abspath(path)
# Remove file
if m(path, conditions):
Log.Info('(%s) %s: "%s"', name, action, path)
# Remove .pyc files as-well
if path.endswith('.py') and m(path + 'c', conditions):
Log.Info('(%s) %s: "%s"', name, action, path + 'c')
class ForceLegacy(Migration):
"""Migrates the 'force_legacy' option to the 'activity_mode' option."""
def run(self):
self.upgrade()
def upgrade(self):
if not os.path.exists(self.preferences_path):
Log.Error('Unable to find preferences file at "%s", unable to run migration', self.preferences_path)
return
preferences = self.get_preferences()
# Read 'force_legacy' option from raw preferences
force_legacy = preferences.get('force_legacy')
if force_legacy is None:
return
force_legacy = force_legacy.lower() == "true"
if not force_legacy:
return
# Read 'activity_mode' option from raw preferences
activity_mode = preferences.get('activity_mode')
# Activity mode has already been set, not changing it
if activity_mode is not None:
return
self.set_preferences({
'activity_mode': '1'
})
class SelectiveSync(Migration):
"""Migrates the syncing task bool options to selective synchronize/push/pull enums"""
option_keys = [
'sync_watched',
'sync_ratings',
'sync_collection'
]
value_map = {
'false': '0',
'true': '1',
}
def run(self):
self.upgrade()
def upgrade(self):
preferences = self.get_preferences()
# Filter to only relative preferences
preferences = dict([
(key, value)
for key, value in preferences.items()
if key in self.option_keys
])
changes = {}
for key, value in preferences.items():
if value not in self.value_map:
continue
changes[key] = self.value_map[value]
if not changes:
return
Log.Debug('Updating preferences with changes: %s', changes)
self.set_preferences(changes)
FSMigrator.register(Clean)
FSMigrator.register(ForceLegacy)
FSMigrator.register(SelectiveSync) | 0.500488 | 0.099996 |
from itertools import chain
from util import nub
import numpy as np
import string
from collections import OrderedDict
# Sentinel vocabulary entries; they are chained in front of every vocabulary
# below, so they always receive indices 0 (*UNK*), 1 (*START*) and 2 (*END*).
UNK_TOKEN = "*UNK*"
START_TOKEN = "*START*"
END_TOKEN = "*END*"
# Characters accepted when filtering words read from the embedding file.
PRINTABLE = set(string.printable)
def main():
    """Build vocabulary indices, filtered GloVe embeddings and padded
    train/dev token sequences for the soft-patterns model.
    Input paths and the pattern specification are hard-coded.
    """
    validation_data_file, validation_label_file, train_data_file, train_label_file = "./soft_patterns/data/dev.data", "./soft_patterns/data/dev.labels", "./soft_patterns/data/train.data", "./soft_patterns/data/train.labels"
    # Dev-set vocabulary: every token, prefixed by the three sentinels.
    dev_docs, dev_names, dev_index = [], [], {}
    with open(validation_data_file, encoding="ISO-8859-1") as input_file:
        for line in input_file: dev_docs.append(line.strip().split())
    for doc in dev_docs:
        for i in doc:
            dev_names.append(i)
    # nub() presumably de-duplicates while preserving order - TODO confirm.
    dev_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], dev_names)))
    for i, name in enumerate(dev_names): dev_index[name] = i
    # Same construction for the training vocabulary.
    train_docs, train_names, train_index = [], [], {}
    with open(train_data_file, encoding="ISO-8859-1") as input_file:
        for line in input_file: train_docs.append(line.strip().split())
    for doc in train_docs:
        for i in doc: train_names.append(i)
    train_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], train_names)))
    for i, name in enumerate(train_names): train_index[name] = i
    # Combined dev+train vocabulary, used to filter the embedding file.
    new_dev_names, new_dev_index = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], dev_names+train_names))), {}
    for i, name in enumerate(new_dev_names): new_dev_index[name] = i
    embedding_file = "./soft_patterns/glove.6B.50d.txt"
    dim = 50
    # Keep only printable, in-vocabulary words from the GloVe file.
    # NOTE(review): `word in new_dev_names` is an O(n) list scan per embedding
    # line; converting new_dev_names to a set would make this loop linear.
    embedding_names, embedding_index, word_vecs = [], {}, []
    with open(embedding_file, encoding="utf-8") as input_file:
        for line in input_file:
            word, vec_str = line.strip().split(' ', 1)
            if all(c in PRINTABLE for c in word) and word in new_dev_names:
                word_vecs.append((word, np.fromstring(vec_str, dtype=float, sep=' ')))
                embedding_names.append(word)
    embedding_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], embedding_names)))
    for i, name in enumerate(embedding_names): embedding_index[name] = i
    # Sentinels get zero vectors (indices 0-2); real words are L2-normalised.
    embedding_vectors = [np.zeros(dim), np.zeros(dim), np.zeros(dim)] + [vec/np.linalg.norm(vec) for word, vec in word_vecs]
    # Pattern spec: "length-count" pairs, ordered by pattern length.
    patterns = "5-50_4-50_3-50_2-50"
    pattern_specs = OrderedDict(sorted(([int(x) for x in pattern.split('-')] for pattern in patterns.split('_')), key = lambda t: t[0]))
    # Pad each document by (longest pattern - 1) tokens on both sides.
    num_padding_tokens = max(list(pattern_specs.keys())) - 1
    # Dev documents are re-read here even though dev_docs was built above.
    dev_docs = []
    with open(validation_data_file, encoding="ISO-8859-1") as input_file:
        for line in input_file:
            dev_docs.append(line.strip().split())
    # NOTE(review): the OOV fallback `UNK_TOKEN` is the *string* "*UNK*", not
    # its index 0, and the START/END padding entries are also strings while
    # known words map to int indices - confirm the consumer expects this mix.
    dev_input = []
    for doc in dev_docs:
        dev_input.append(([START_TOKEN]*num_padding_tokens) + [embedding_index.get(token, UNK_TOKEN) for token in doc] + ([END_TOKEN]*num_padding_tokens))
    dev_labels = []
    with open(validation_label_file) as input_file:
        for line in input_file:
            dev_labels.append(int(line.strip()))
    dev_data = list(zip(dev_input, dev_labels))
    train_input = []
    for doc in train_docs:
        train_input.append(([START_TOKEN]*num_padding_tokens) + [embedding_index.get(token, UNK_TOKEN) for token in doc] + ([END_TOKEN]*num_padding_tokens))
    train_labels = []
    with open(train_label_file) as input_file:
        for line in input_file:
            train_labels.append(int(line.strip()))
    # NOTE(review): dev_data/train_data (and embedding_vectors) are built but
    # never returned or stored - main() currently has no observable output.
    train_data = list(zip(train_input, train_labels))
if __name__ == "__main__":
main() | my_soft_pattern.py | from itertools import chain
from util import nub
import numpy as np
import string
from collections import OrderedDict
UNK_TOKEN = "*UNK*"
START_TOKEN = "*START*"
END_TOKEN = "*END*"
PRINTABLE = set(string.printable)
def main():
validation_data_file, validation_label_file, train_data_file, train_label_file = "./soft_patterns/data/dev.data", "./soft_patterns/data/dev.labels", "./soft_patterns/data/train.data", "./soft_patterns/data/train.labels"
dev_docs, dev_names, dev_index = [], [], {}
with open(validation_data_file, encoding="ISO-8859-1") as input_file:
for line in input_file: dev_docs.append(line.strip().split())
for doc in dev_docs:
for i in doc:
dev_names.append(i)
dev_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], dev_names)))
for i, name in enumerate(dev_names): dev_index[name] = i
train_docs, train_names, train_index = [], [], {}
with open(train_data_file, encoding="ISO-8859-1") as input_file:
for line in input_file: train_docs.append(line.strip().split())
for doc in train_docs:
for i in doc: train_names.append(i)
train_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], train_names)))
for i, name in enumerate(train_names): train_index[name] = i
new_dev_names, new_dev_index = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], dev_names+train_names))), {}
for i, name in enumerate(new_dev_names): new_dev_index[name] = i
embedding_file = "./soft_patterns/glove.6B.50d.txt"
dim = 50
embedding_names, embedding_index, word_vecs = [], {}, []
with open(embedding_file, encoding="utf-8") as input_file:
for line in input_file:
word, vec_str = line.strip().split(' ', 1)
if all(c in PRINTABLE for c in word) and word in new_dev_names:
word_vecs.append((word, np.fromstring(vec_str, dtype=float, sep=' ')))
embedding_names.append(word)
embedding_names = list(nub(chain([UNK_TOKEN, START_TOKEN, END_TOKEN], embedding_names)))
for i, name in enumerate(embedding_names): embedding_index[name] = i
embedding_vectors = [np.zeros(dim), np.zeros(dim), np.zeros(dim)] + [vec/np.linalg.norm(vec) for word, vec in word_vecs]
patterns = "5-50_4-50_3-50_2-50"
pattern_specs = OrderedDict(sorted(([int(x) for x in pattern.split('-')] for pattern in patterns.split('_')), key = lambda t: t[0]))
num_padding_tokens = max(list(pattern_specs.keys())) - 1
dev_docs = []
with open(validation_data_file, encoding="ISO-8859-1") as input_file:
for line in input_file:
dev_docs.append(line.strip().split())
dev_input = []
for doc in dev_docs:
dev_input.append(([START_TOKEN]*num_padding_tokens) + [embedding_index.get(token, UNK_TOKEN) for token in doc] + ([END_TOKEN]*num_padding_tokens))
dev_labels = []
with open(validation_label_file) as input_file:
for line in input_file:
dev_labels.append(int(line.strip()))
dev_data = list(zip(dev_input, dev_labels))
train_input = []
for doc in train_docs:
train_input.append(([START_TOKEN]*num_padding_tokens) + [embedding_index.get(token, UNK_TOKEN) for token in doc] + ([END_TOKEN]*num_padding_tokens))
train_labels = []
with open(train_label_file) as input_file:
for line in input_file:
train_labels.append(int(line.strip()))
train_data = list(zip(train_input, train_labels))
if __name__ == "__main__":
main() | 0.327346 | 0.244775 |
from pyradur import Dict
from pyradur.db import Sqlite3DB
from pyradur.server import SockServer
import tempfile
import threading
import unittest
import shutil
import os
import logging
import sys
class CommonTests(object):
    """Scenarios shared by the concrete cache/no-cache TestCase classes below."""
    # Whether Dict clients created via get_dict() use the client-side cache.
    use_cache = True
    # Whether get_dict() registers a cleanup that closes each Dict explicitly.
    close_on_cleanup = True
    def _server_thread(self, event):
        """Background server loop; sets *event* once the 'var' db is registered."""
        try:
            self.server.db.add_db('var', Sqlite3DB(':memory:'))
            event.set()
            self.server.serve_forever()
            # Process any outstanding events until the queue is empty
            while self.server.handle_request():
                pass
        except Exception as e:
            logging.exception('Server raised %s', e, exc_info=True)
        finally:
            # Close down the server. This prevents the main thread from being
            # stuck blocking on a response from the server in the event that it
            # has an exception
            self.server.close()
    def setUp(self):
        """Start a SockServer on a fresh temp socket and register teardown."""
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        self.handler = logging.StreamHandler(sys.stdout)
        self.handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.handler.setFormatter(formatter)
        root.addHandler(self.handler)
        self.addCleanup(root.removeHandler, self.handler)
        self.tempdir = tempfile.mkdtemp(prefix='pyradur-')
        self.addCleanup(shutil.rmtree, self.tempdir, ignore_errors=True)
        self.sock_path = os.path.join(self.tempdir, 'sock')
        self.server = SockServer(self.sock_path)
        # NOTE(review): likely a typo for 'server_suspended'; this attribute is
        # not read anywhere in this file.
        self.sever_suspended = False
        try:
            event = threading.Event()
            self.server_thread = threading.Thread(target=self._server_thread, args=[event])
            self.server_thread.start()
            # Block until the server thread has added the 'var' database.
            event.wait()
            # Cleanups run LIFO: shut the server down, join its thread, then
            # verify every client disconnected.
            self.addCleanup(self.check_server)
            self.addCleanup(self.server_thread.join)
            self.addCleanup(self.server.shutdown)
        except Exception as e:
            self.server.close()
            raise e
    def check_server(self):
        # Check that all clients have disconnected
        self.assertDictEqual(self.server.clients, {})
    def get_dict(self, name, share_connection=True):
        """Create a client Dict bound to this test's server socket."""
        d = Dict(self.sock_path, name, use_cache=self.use_cache, share_connection=share_connection)
        if self.close_on_cleanup:
            self.addCleanup(lambda: d.close())
        return d
    def test_basic_get_set(self):
        d = self.get_dict('var')
        d['foo'] = 'bar'
        self.assertEqual(d['foo'], 'bar')
        with self.assertRaises(KeyError):
            d['baz']
    def test_get_set_shared(self):
        # Two handles on one shared connection observe writes immediately.
        a = self.get_dict('var')
        b = self.get_dict('var')
        a['foo'] = 'bar'
        self.assertEqual(b['foo'], 'bar')
    def test_get_set_nonshared(self):
        # Separate connections need an explicit sync() to propagate writes.
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        a.sync()
        self.assertEqual(b['foo'], 'bar')
        self.assertEqual(a.get('bat', 'baz'), 'baz')
        a.sync()
        # NOTE(review): this asserts the default *value* 'baz' was not stored
        # as a key; asserting on the key 'bat' may have been intended.
        self.assertFalse('baz' in b)
        a.set('test', 'blah')
        a.sync()
        self.assertEqual(b['test'], 'blah')
    def test_del_nonshared(self):
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        a.sync()
        self.assertEqual(b['foo'], 'bar')
        # Deletions propagate after sync() just like writes.
        del a['foo']
        a.sync()
        with self.assertRaises(KeyError):
            b['foo']
    def test_setdefault(self):
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        self.assertEqual(a.setdefault('foo', 'bar'), 'bar')
        a.sync()
        self.assertEqual(b['foo'], 'bar')
    def test_server_suspend(self):
        a = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        # Writes made while the server is suspended are flushed by sync().
        with self.server.suspended():
            a['foo'] = 'test'
        a.sync()
        self.assertEqual(a['foo'], 'test')
    def test_contains(self):
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        a.sync()
        self.assertTrue('foo' in b)
        self.assertFalse('bar' in b)
    def test_cache_grow(self):
        import mmap
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        # Write enough entries to force the (page-sized) cache to grow.
        count = mmap.PAGESIZE * 2
        for i in range(count):
            key = 'foo%d' % i
            val = 'bar%d' % i
            a[key] = val
            self.assertEqual(a[key], val)
        a.sync()
        for i in range(count):
            key = 'foo%d' % i
            val = 'bar%d' % i
            self.assertEqual(a[key], val)
            self.assertEqual(b[key], val)
    def test_missing_var(self):
        # Without a db factory installed, unknown variable names are rejected.
        a = self.get_dict('var')
        with self.assertRaises(NameError):
            b = self.get_dict('does-not-exist', share_connection=False)
        with self.assertRaises(NameError):
            b = self.get_dict('does-not-exist')
    def test_var_factory(self):
        def factory(name):
            return Sqlite3DB(':memory:')
        a = self.get_dict('var')
        # With a factory installed, new variable names are created on demand.
        self.server.db.set_db_factory(factory)
        b = self.get_dict('test1', share_connection=False)
        c = self.get_dict('test2')
    def test_cross_var(self):
        def factory(name):
            return Sqlite3DB(':memory:')
        self.server.db.set_db_factory(factory)
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('test', share_connection=False)
        # Keys written to one variable must not leak into another.
        a['foo'] = 'bar'
        a.sync()
        with self.assertRaises(KeyError):
            b['foo']
        b['foo'] = 'baz'
        b.sync()
        self.assertEqual(a['foo'], 'bar')
        self.assertEqual(b['foo'], 'baz')
class NoCacheTests(CommonTests, unittest.TestCase):
    """Runs the shared scenarios with the client-side cache disabled."""
    use_cache = False
    def test_cached(self):
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        a.sync()
        self.assertEqual(b['foo'], 'bar')
        # With caching disabled nothing is ever reported as cached.
        self.assertFalse(b.is_cached('foo'))
        self.assertFalse(b.is_cached('not-present'))
        a['foo'] = 'test'
        # invalidate() must be a harmless no-op without a cache.
        b.invalidate('foo')
        self.assertFalse(b.is_cached('foo'))
        a.sync()
        self.assertEqual(b['foo'], 'test')
    def test_invalidate_all(self):
        a = self.get_dict('var', share_connection=False)
        b = self.get_dict('var', share_connection=False)
        a['foo'] = 'bar'
        a.sync()
        self.assertEqual(b['foo'], 'bar')
        self.assertFalse(b.is_cached('foo'))
        with self.server.suspended():
            a['foo'] = 'test'
            # invalidate_all() must be a harmless no-op without a cache.
            b.invalidate_all()
            self.assertFalse(b.is_cached('foo'))
        a.sync()
        self.assertEqual(b['foo'], 'test')
class CacheTests(CommonTests, unittest.TestCase):
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class ImplicitCloseTests(CacheTests):
close_on_cleanup = False
def test_close(self):
a = self.get_dict('var')
b = self.get_dict('var', share_connection=False)
c = self.get_dict('var')
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(c['foo'], 'bar')
a.close()
c['baz'] = 'bat'
c.sync()
self.assertEqual(b['baz'], 'bat')
del c
del a
b['test'] = 'blah' | pyradur/tests/test_pyradur.py |
from pyradur import Dict
from pyradur.db import Sqlite3DB
from pyradur.server import SockServer
import tempfile
import threading
import unittest
import shutil
import os
import logging
import sys
class CommonTests(object):
use_cache = True
close_on_cleanup = True
def _server_thread(self, event):
try:
self.server.db.add_db('var', Sqlite3DB(':memory:'))
event.set()
self.server.serve_forever()
# Process any outstanding events until the queue is empty
while self.server.handle_request():
pass
except Exception as e:
logging.exception('Server raised %s', e, exc_info=True)
finally:
# Close down the server. This prevents the main thread from being
# stuck blocking on a response from the server in the event that it
# has an exception
self.server.close()
def setUp(self):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
self.handler = logging.StreamHandler(sys.stdout)
self.handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.handler.setFormatter(formatter)
root.addHandler(self.handler)
self.addCleanup(root.removeHandler, self.handler)
self.tempdir = tempfile.mkdtemp(prefix='pyradur-')
self.addCleanup(shutil.rmtree, self.tempdir, ignore_errors=True)
self.sock_path = os.path.join(self.tempdir, 'sock')
self.server = SockServer(self.sock_path)
self.sever_suspended = False
try:
event = threading.Event()
self.server_thread = threading.Thread(target=self._server_thread, args=[event])
self.server_thread.start()
event.wait()
self.addCleanup(self.check_server)
self.addCleanup(self.server_thread.join)
self.addCleanup(self.server.shutdown)
except Exception as e:
self.server.close()
raise e
def check_server(self):
# Check that all clients have disconnected
self.assertDictEqual(self.server.clients, {})
def get_dict(self, name, share_connection=True):
d = Dict(self.sock_path, name, use_cache=self.use_cache, share_connection=share_connection)
if self.close_on_cleanup:
self.addCleanup(lambda: d.close())
return d
def test_basic_get_set(self):
d = self.get_dict('var')
d['foo'] = 'bar'
self.assertEqual(d['foo'], 'bar')
with self.assertRaises(KeyError):
d['baz']
def test_get_set_shared(self):
a = self.get_dict('var')
b = self.get_dict('var')
a['foo'] = 'bar'
self.assertEqual(b['foo'], 'bar')
def test_get_set_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(a.get('bat', 'baz'), 'baz')
a.sync()
self.assertFalse('baz' in b)
a.set('test', 'blah')
a.sync()
self.assertEqual(b['test'], 'blah')
def test_del_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
del a['foo']
a.sync()
with self.assertRaises(KeyError):
b['foo']
def test_setdefault(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
self.assertEqual(a.setdefault('foo', 'bar'), 'bar')
a.sync()
self.assertEqual(b['foo'], 'bar')
def test_server_suspend(self):
a = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
with self.server.suspended():
a['foo'] = 'test'
a.sync()
self.assertEqual(a['foo'], 'test')
def test_contains(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertTrue('foo' in b)
self.assertFalse('bar' in b)
def test_cache_grow(self):
import mmap
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
count = mmap.PAGESIZE * 2
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
a[key] = val
self.assertEqual(a[key], val)
a.sync()
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
self.assertEqual(a[key], val)
self.assertEqual(b[key], val)
def test_missing_var(self):
a = self.get_dict('var')
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist', share_connection=False)
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist')
def test_var_factory(self):
def factory(name):
return Sqlite3DB(':memory:')
a = self.get_dict('var')
self.server.db.set_db_factory(factory)
b = self.get_dict('test1', share_connection=False)
c = self.get_dict('test2')
def test_cross_var(self):
def factory(name):
return Sqlite3DB(':memory:')
self.server.db.set_db_factory(factory)
a = self.get_dict('var', share_connection=False)
b = self.get_dict('test', share_connection=False)
a['foo'] = 'bar'
a.sync()
with self.assertRaises(KeyError):
b['foo']
b['foo'] = 'baz'
b.sync()
self.assertEqual(a['foo'], 'bar')
self.assertEqual(b['foo'], 'baz')
class NoCacheTests(CommonTests, unittest.TestCase):
use_cache = False
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
a['foo'] = 'test'
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class CacheTests(CommonTests, unittest.TestCase):
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class ImplicitCloseTests(CacheTests):
close_on_cleanup = False
def test_close(self):
a = self.get_dict('var')
b = self.get_dict('var', share_connection=False)
c = self.get_dict('var')
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(c['foo'], 'bar')
a.close()
c['baz'] = 'bat'
c.sync()
self.assertEqual(b['baz'], 'bat')
del c
del a
b['test'] = 'blah' | 0.412648 | 0.152789 |
from linked_list import SinglyLinkedList, SinglyLinkedNode
def inner_step(n1, n2, n3, sum_ll, carry):
total = carry
if n1:
total += n1.value
n1 = n1.next
if n2:
total += n2.value
n2 = n2.next
result = total % 10
carry = total // 10
new_node = SinglyLinkedNode(result)
if not n3:
sum_ll.head = new_node
n3 = sum_ll.head
else:
n3.next = new_node
n3 = new_node
return n1, n2, n3, carry
def sum_reverse(self, ll2):
sum_ll = SinglyLinkedList()
carry = 0
n1, n2, n3 = self.head, ll2.head, sum_ll.head
while n1 and n2:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
while n1:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
while n2:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
if carry:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
return sum_ll
SinglyLinkedList.sum_reverse = sum_reverse
def add_zero_nodes(ll, count):
node = SinglyLinkedNode(0)
head = node
for i in range(count - 1):
node.next = SinglyLinkedNode(0)
node = node.next
node.next = ll.head
return head
def do_sum_forward(node1, node2):
if not node1:
return None, 0
elif not node1.next:
total = node1.value + node2.value
carry = total // 10
value = total % 10
return SinglyLinkedNode(value), carry
child_node, carry = do_sum_forward(node1.next, node2.next)
total = node1.value + node2.value + carry
carry = total // 10
value = total % 10
node = SinglyLinkedNode(value)
node.next = child_node
return node, carry
def sum_forward(self, ll2):
len1, len2 = len(self), len(ll2)
if len1 > len2:
head = add_zero_nodes(ll2, len1 - len2)
ll2.head = head
len2 = len1
elif len2 > len1:
head = add_zero_nodes(self, len2 - len1)
self.head = head
len1 = len2
if len1 == 0:
return None
node, carry = do_sum_forward(self.head, ll2.head)
if carry > 0:
head = SinglyLinkedNode(carry)
node, head.next = head, node
ll = SinglyLinkedList()
ll.head = node
return ll
SinglyLinkedList.sum_forward = sum_forward
if __name__ == "__main__":
import sys
for line in sys.stdin:
ll1, ll2 = line.strip().split("; ")
ll1 = SinglyLinkedList((int(val) for val in ll1.split(', ')))
ll2 = SinglyLinkedList((int(val) for val in ll2.split(', ')))
for node in ll1.sum_reverse(ll2):
print(node.value)
print("")
for node in ll1.sum_forward(ll2):
print(node.value) | ch02_linked_lists/q05_sum_lists.py | from linked_list import SinglyLinkedList, SinglyLinkedNode
def inner_step(n1, n2, n3, sum_ll, carry):
total = carry
if n1:
total += n1.value
n1 = n1.next
if n2:
total += n2.value
n2 = n2.next
result = total % 10
carry = total // 10
new_node = SinglyLinkedNode(result)
if not n3:
sum_ll.head = new_node
n3 = sum_ll.head
else:
n3.next = new_node
n3 = new_node
return n1, n2, n3, carry
def sum_reverse(self, ll2):
sum_ll = SinglyLinkedList()
carry = 0
n1, n2, n3 = self.head, ll2.head, sum_ll.head
while n1 and n2:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
while n1:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
while n2:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
if carry:
n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
return sum_ll
SinglyLinkedList.sum_reverse = sum_reverse
def add_zero_nodes(ll, count):
node = SinglyLinkedNode(0)
head = node
for i in range(count - 1):
node.next = SinglyLinkedNode(0)
node = node.next
node.next = ll.head
return head
def do_sum_forward(node1, node2):
if not node1:
return None, 0
elif not node1.next:
total = node1.value + node2.value
carry = total // 10
value = total % 10
return SinglyLinkedNode(value), carry
child_node, carry = do_sum_forward(node1.next, node2.next)
total = node1.value + node2.value + carry
carry = total // 10
value = total % 10
node = SinglyLinkedNode(value)
node.next = child_node
return node, carry
def sum_forward(self, ll2):
len1, len2 = len(self), len(ll2)
if len1 > len2:
head = add_zero_nodes(ll2, len1 - len2)
ll2.head = head
len2 = len1
elif len2 > len1:
head = add_zero_nodes(self, len2 - len1)
self.head = head
len1 = len2
if len1 == 0:
return None
node, carry = do_sum_forward(self.head, ll2.head)
if carry > 0:
head = SinglyLinkedNode(carry)
node, head.next = head, node
ll = SinglyLinkedList()
ll.head = node
return ll
SinglyLinkedList.sum_forward = sum_forward
if __name__ == "__main__":
import sys
for line in sys.stdin:
ll1, ll2 = line.strip().split("; ")
ll1 = SinglyLinkedList((int(val) for val in ll1.split(', ')))
ll2 = SinglyLinkedList((int(val) for val in ll2.split(', ')))
for node in ll1.sum_reverse(ll2):
print(node.value)
print("")
for node in ll1.sum_forward(ll2):
print(node.value) | 0.328314 | 0.369002 |
import os
import json
from typing import Dict, List, Optional, Union, cast
import requests
from requests import get
import bs4
from bs4 import BeautifulSoup
import pandas as pd
from env import github_token, github_username
#------------------------------------------------------------------------------------------------------------------------------------------------------
urls = ['https://github.com/search?p=1&q=spaceX&type=Repositories',
'https://github.com/search?p=2&q=spaceX&type=Repositories',
'https://github.com/search?p=3&q=spaceX&type=Repositories',
'https://github.com/search?p=4&q=spaceX&type=Repositories',
'https://github.com/search?p=5&q=spaceX&type=Repositories',
'https://github.com/search?p=6&q=spaceX&type=Repositories',
'https://github.com/search?p=7&q=spaceX&type=Repositories',
'https://github.com/search?p=8&q=spaceX&type=Repositories',
'https://github.com/search?p=9&q=spaceX&type=Repositories']
urls2 = ['https://github.com/search?p=10&q=spaceX&type=Repositories',
'https://github.com/search?p=11&q=spaceX&type=Repositories',
'https://github.com/search?p=12&q=spaceX&type=Repositories',
'https://github.com/search?p=13&q=spaceX&type=Repositories',
'https://github.com/search?p=14&q=spaceX&type=Repositories',
'https://github.com/search?p=15&q=spaceX&type=Repositories',
'https://github.com/search?p=16&q=spaceX&type=Repositories',
'https://github.com/search?p=17&q=spaceX&type=Repositories',
'https://github.com/search?p=18&q=spaceX&type=Repositories']
url_type = [urls, urls2]
def loop_url(url_type):
url_list= []
url_list2= []
if url_type == urls:
for url in urls:
page = requests.get(url)
# Create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
# get the repo list
repo = soup.find(class_="repo-list")
# find all instances of that class
repo_list = repo.find_all(class_='repo-list-item')
for repo in repo_list:
# find the first <a> tag and get the text. Split the text using '/' to get an array with developer name and repo name
full_repo_name = repo.find('a').text.split('/')
# extract the developer name at index 0
developer = full_repo_name[0].strip()
# extract the repo name at index 1
repo_name = full_repo_name[1].strip()
# strip() all to remove leading and traling white spaces
print("'" + developer + "/" + repo_name + "'" + ",")
url_list.extend(repo_list)
else:
for url in urls2:
page = requests.get(url)
# Create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
# get the repo list
repo = soup.find(class_="repo-list")
# find all instances of that class
repo_list = repo.find_all(class_='repo-list-item')
for repo in repo_list:
# find the first <a> tag and get the text. Split the text using '/' to get an array with developer name and repo name
full_repo_name = repo.find('a').text.split('/')
# extract the developer name at index 0
developer = full_repo_name[0].strip()
# extract the repo name at index 1
repo_name = full_repo_name[1].strip()
# strip() all to remove leading and traling white spaces
print("'" + developer + "/" + repo_name + "'" + ",")
url_list2.extend(repo_list)
return url_list, url_list2
#------------------------------------------------------------------------------------------------------------------------------------------------------
# TODO: Make a github personal access token.
# 1. Go here and generate a personal access token https://github.com/settings/tokens
# You do _not_ need select any scopes, i.e. leave all the checkboxes unchecked
# 2. Save it in your env.py file under the variable `github_token`
# TODO: Add your github username to your env.py file under the variable `github_username`
# TODO: Add more repositories to the `REPOS` list below.
REPOS = ['r-spacex/SpaceX-API',
'jesusrp98/spacex-go',
'bradtraversy/spacex_launch_stats',
'r-spacex/spacexstats-react',
'arjunyel/angular-spacex-graphql-codegen',
'llSourcell/Landing-a-SpaceX-Falcon-Heavy-Rocket',
'EmbersArc/gym-rocketlander',
'haroldadmin/MoonShot',
'shahar603/SpaceXtract',
'treyhuffine/graphql-react-typescript-spacex',
'rodolfobandeira/spacex',
'SpaceXLaunchBot/SpaceXLaunchBot',
'lukeify/spacex-reddit-css',
'NITJSR-OSS/My-SpaceX-Console',
'arex18/rocket-lander',
'joshuaferrara/SpaceX',
'lazywinadmin/SpaceX',
'r-spacex/launch-timeline',
'DaniruKun/spacex-iss-docking-sim-autopilot',
'mbertschler/dragon-iss-docking-autopilot',
'hyperloop/hyperloop',
'shahar603/Telemetry-Data',
'SpaceXLand/api',
'OMIsie11/SpaceXFollower',
'TheAlphamerc/flutter_spacexopedia',
'EduD/front-challenge-spacex',
'ItsCalebJones/SpaceLaunchNow-Android',
'SaidBySolo/SpaceXPy',
'vinayphadnis/SpaceX-Python',
'romebell/ga-spacex-frontend',
'BaderEddineOuaich/spacex_stellar',
'manhdv96/SpaceX-Kernel-Exynos7420',
'Alric/spacex',
'romebell/ga-spacex-api',
'rikkertkoppes/spacex-telemetry',
'HiKaylum/SpaceX-PY',
'tdrach/Sciview',
'RoryStolzenberg/spacexstats',
'Hyp-ed/hyped-2019',
'ghelobytes/mission-control',
'sparky8512/starlink-grpc-tools',
'candydasein/spacex-launches',
'SaraJo/SpaceXGMail',
'emersonlaurentino/spacex-qraphql-api',
'SpaceXLand/client',
'SophieDeBenedetto/spacex-apply',
'moesalih/spacex.moesalih.com',
'VGVentures/spacex_demo',
'codersgyan/spacex-redesign',
'JohnnySC/SpaceX',
'IJMacD/spacex-launches',
'brunolcarli/Ark',
'tipenehughes/space-x-app',
'pushpinderpalsingh/SpaceDash',
'orcaman/spacex',
'Illu/moonwalk',
'R4yGM/SpaceXTelemetry-Api',
'jesusrp98/space-curiosity',
'shahar603/Launch-Dashboard-API',
'sudharsan-selvaraj/selenium-spacex-docking',
'zlsa/f9r-v2',
'SteveSunTech/stardust',
'koxm/MMM-SpaceX',
'santiaguf/spacex-platzi',
'alshapton/SpacePY-X',
'looksocii/SpaceX_PSIT-Project',
'DirectMyFile/DiffuseSpace',
'badreddine-dlaila/spacex-app-demo',
'codexa/SpaceX-Rocket',
'ivanddm/spacexapp',
'andrnors/flutter-101-spaceX',
'djtimca/HASpaceX',
'lukacs-m/SpaceXMVVMSwiftUICombine',
'zwenza/spacexnow',
'danopstech/starlink_exporter',
'BrianIshii/git-falcon9',
'sdsubhajitdas/Rocket_Lander_Gym',
'imranhsayed/graphql-react-app',
'Eliminater74/SpaceX-Pure',
'asicguy/spacex_uart',
'samisharafeddine/SpaceXAPI-Swift',
'ayybradleyjh/kOS-Hoverslam',
'openland/spacex',
'Goldob/iss_docking_automation',
'jvsinghk/spacex',
'ugurkanates/SpaceXReinforcementLearning',
'HanSolo/touchjoystick',
'colbyfayock/my-spacex-launches',
'wilkerlucio/pathom-connect-spacex',
'Hyp-ed/hyped-2018',
'DanielRings/ReusableLaunchSystem',
'RomanSytnyk/SpaceX-App-unofficial',
'louisjc/spacexlaunches.com',
'AkiaCode/spacex-api.js',
'Ionic-SpaceX/SpaceX',
'JohannesFriedrich/SpaceX',
'R4yGM/SpaceXNews-api',
'akim3235/spacex-apollo-graphql',
'goncharom/SpaceXRocket',
'r-spacex/api-style-guide',
'ahmetakil/spacex_graphql',
'ElvinC/Dragon-docker',
'PiotrRut/SpaceX-Launches',
'schmidgallm/spaceXwatch',
'Thomas-Smyth/SpaceX-API-Wrapper',
'Elucidation/ThrustVectorControl',
'IainCole/SpaceXVideoApp',
'michaellyons/react-launch-gauge',
's-ai-kia/SpaceXland',
'gregv/meeting-timeline',
'sroaj/spacexfm',
'reidbuckingham48/spacex-nasa-flight-data',
'matdziu/SpaceXMVI',
'ryansan/SpaceX-Design',
'ergenekonyigit/spacex-cljs',
'JAQ-SpaceX/spaceX-brief',
'XiaoTeTech/spacex.xiaote.com',
'Tearth/Oddity',
'patrickyin/kotlin-coroutines-vs-rx',
'doflah/boostback',
'PatrykWojcieszak/X-Info',
'MITHyperloopTeam/software_core',
'airesvsg/spacex',
'Tearth/InElonWeTrust',
'ALuxios/SpaceX',
'leoge0113/SpaceX-Web',
'Patrykz94/kOS-RTLS-Landing',
'harisudhan7889/SpaceX',
'jamesgeorge007/spacex-launcher-stats',
'BekaAM/spaceX',
'499602D2/tg-launchbot',
'phch/ucdavis-hyperloop',
'syedsadiqali/sapient-spacex-app',
'shashidhark/Spacex-API-Frontend',
'peetck/spacex-explorer',
'SirKeplan/spacex-reddit-wiki',
'AndrewRLloyd88/mb-career-accelerator-spaceX',
'victorshinya/spacex-rockets',
'enciyo/SpaceX',
'crunchysoul/spacex_ex',
'bbutler522/SpaceX-Visualization',
'odziem/fetch-deno',
'ahmetmvural1/SpaceXProject',
'janipalsamaki/spacex-robot',
'alexgtn/spacex-api-wrapper',
'oplS16projects/SpaceXplore',
't-ozeren/SpaceXData',
'andrey-leshenko/ISSDockingBotGame',
'dongzerun/nice-spacex',
'mikkrieg/spaceXAPI',
'elricdog/SpaceX-StarShip',
'jiachengzhang1/spacex-and-mars',
'jesusrp98/bot-hackathon-spacex',
'developer-junaid/SpaceX-App',
'faiza203/SpaceX',
'richiemccoll/visualising-front-end-performance-demo',
'ozonni/SpotTheFire',
'svipatov/spacex-tracker',
'pmborg/SpaceX-RO-Falcons',
'me-aakash-online/spaceX-launch-program',
'ping-n/spaceX-js-app',
'zlsa/spacex-info',
'joshuadeguzman/spacex-land',
'ShinteiMai/next-spacex',
'BriantOliveira/SpaceX-Dataset',
'spacexksp/spacexksp.github.io',
'Sheldon1538/SpaceXApp',
'tejalkotkar/Mission_SpaceX',
'mattmillsxyz/x-watch',
'staszewski/spacex-api-app',
'ronal2do/Graphql-SpaceX-API',
'ayberkgerey/SpaceX_Data_Retrofit',
'cmoir97/SpaceX-App',
'rinoldm/SBURB',
'abh80/spacexapp',
'jor-dan/SpaceX-GraphQL',
'mcastorena0316/react-redux-capstone',
'jackkoppa/go-for-launch',
'Emmanuel1118/Crew-Dragon-Autopilot',
'AzuxDario/Marsy'
]
headers = {"Authorization": f"token {github_token}", "User-Agent": github_username}
if headers["Authorization"] == "token " or headers["User-Agent"] == "":
raise Exception(
"You need to follow the instructions marked TODO in this script before trying to use it"
)
def github_api_request(url: str) -> Union[List, Dict]:
response = requests.get(url, headers=headers)
response_data = response.json()
if response.status_code != 200:
raise Exception(
f"Error response from github api! status code: {response.status_code}, "
f"response: {json.dumps(response_data)}"
)
return response_data
def get_repo_language(repo: str) -> str:
url = f"https://api.github.com/repos/{repo}"
repo_info = github_api_request(url)
if type(repo_info) is dict:
repo_info = cast(Dict, repo_info)
if "language" not in repo_info:
raise Exception(
"'language' key not round in response\n{}".format(json.dumps(repo_info))
)
return repo_info["language"]
raise Exception(
f"Expecting a dictionary response from {url}, instead got {json.dumps(repo_info)}"
)
def get_repo_contents(repo: str) -> List[Dict[str, str]]:
url = f"https://api.github.com/repos/{repo}/contents/"
contents = github_api_request(url)
if type(contents) is list:
contents = cast(List, contents)
return contents
raise Exception(
f"Expecting a list response from {url}, instead got {json.dumps(contents)}"
)
def get_readme_download_url(files: List[Dict[str, str]]) -> str:
"""
Takes in a response from the github api that lists the files in a repo and
returns the url that can be used to download the repo's README file.
"""
for file in files:
if file["name"].lower().startswith("readme"):
return file["download_url"]
return ""
def process_repo(repo: str) -> Dict[str, str]:
"""
Takes a repo name like "gocodeup/codeup-setup-script" and returns a
dictionary with the language of the repo and the readme contents.
"""
contents = get_repo_contents(repo)
readme_download_url = get_readme_download_url(contents)
if readme_download_url == "":
readme_contents = ""
else:
readme_contents = requests.get(readme_download_url).text
return {
"repo": repo,
"language": get_repo_language(repo),
"readme_contents": readme_contents,
}
def scrape_github_data() -> List[Dict[str, str]]:
"""
Loop through all of the repos and process them. Returns the processed data.
"""
return [process_repo(repo) for repo in REPOS]
if __name__ == "__main__":
data = scrape_github_data()
json.dump(data, open("data.json", "w"), indent=1)
#------------------------------------------------------------------------------------------------------------------------------------------------------ | acquire.py | import os
import json
from typing import Dict, List, Optional, Union, cast
import requests
from requests import get
import bs4
from bs4 import BeautifulSoup
import pandas as pd
from env import github_token, github_username
#------------------------------------------------------------------------------------------------------------------------------------------------------
urls = ['https://github.com/search?p=1&q=spaceX&type=Repositories',
'https://github.com/search?p=2&q=spaceX&type=Repositories',
'https://github.com/search?p=3&q=spaceX&type=Repositories',
'https://github.com/search?p=4&q=spaceX&type=Repositories',
'https://github.com/search?p=5&q=spaceX&type=Repositories',
'https://github.com/search?p=6&q=spaceX&type=Repositories',
'https://github.com/search?p=7&q=spaceX&type=Repositories',
'https://github.com/search?p=8&q=spaceX&type=Repositories',
'https://github.com/search?p=9&q=spaceX&type=Repositories']
urls2 = ['https://github.com/search?p=10&q=spaceX&type=Repositories',
'https://github.com/search?p=11&q=spaceX&type=Repositories',
'https://github.com/search?p=12&q=spaceX&type=Repositories',
'https://github.com/search?p=13&q=spaceX&type=Repositories',
'https://github.com/search?p=14&q=spaceX&type=Repositories',
'https://github.com/search?p=15&q=spaceX&type=Repositories',
'https://github.com/search?p=16&q=spaceX&type=Repositories',
'https://github.com/search?p=17&q=spaceX&type=Repositories',
'https://github.com/search?p=18&q=spaceX&type=Repositories']
url_type = [urls, urls2]
def loop_url(url_type):
url_list= []
url_list2= []
if url_type == urls:
for url in urls:
page = requests.get(url)
# Create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
# get the repo list
repo = soup.find(class_="repo-list")
# find all instances of that class
repo_list = repo.find_all(class_='repo-list-item')
for repo in repo_list:
# find the first <a> tag and get the text. Split the text using '/' to get an array with developer name and repo name
full_repo_name = repo.find('a').text.split('/')
# extract the developer name at index 0
developer = full_repo_name[0].strip()
# extract the repo name at index 1
repo_name = full_repo_name[1].strip()
# strip() all to remove leading and traling white spaces
print("'" + developer + "/" + repo_name + "'" + ",")
url_list.extend(repo_list)
else:
for url in urls2:
page = requests.get(url)
# Create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
# get the repo list
repo = soup.find(class_="repo-list")
# find all instances of that class
repo_list = repo.find_all(class_='repo-list-item')
for repo in repo_list:
# find the first <a> tag and get the text. Split the text using '/' to get an array with developer name and repo name
full_repo_name = repo.find('a').text.split('/')
# extract the developer name at index 0
developer = full_repo_name[0].strip()
# extract the repo name at index 1
repo_name = full_repo_name[1].strip()
# strip() all to remove leading and traling white spaces
print("'" + developer + "/" + repo_name + "'" + ",")
url_list2.extend(repo_list)
return url_list, url_list2
#------------------------------------------------------------------------------------------------------------------------------------------------------
# TODO: Make a github personal access token.
# 1. Go here and generate a personal access token https://github.com/settings/tokens
# You do _not_ need select any scopes, i.e. leave all the checkboxes unchecked
# 2. Save it in your env.py file under the variable `github_token`
# TODO: Add your github username to your env.py file under the variable `github_username`
# TODO: Add more repositories to the `REPOS` list below.
REPOS = ['r-spacex/SpaceX-API',
'jesusrp98/spacex-go',
'bradtraversy/spacex_launch_stats',
'r-spacex/spacexstats-react',
'arjunyel/angular-spacex-graphql-codegen',
'llSourcell/Landing-a-SpaceX-Falcon-Heavy-Rocket',
'EmbersArc/gym-rocketlander',
'haroldadmin/MoonShot',
'shahar603/SpaceXtract',
'treyhuffine/graphql-react-typescript-spacex',
'rodolfobandeira/spacex',
'SpaceXLaunchBot/SpaceXLaunchBot',
'lukeify/spacex-reddit-css',
'NITJSR-OSS/My-SpaceX-Console',
'arex18/rocket-lander',
'joshuaferrara/SpaceX',
'lazywinadmin/SpaceX',
'r-spacex/launch-timeline',
'DaniruKun/spacex-iss-docking-sim-autopilot',
'mbertschler/dragon-iss-docking-autopilot',
'hyperloop/hyperloop',
'shahar603/Telemetry-Data',
'SpaceXLand/api',
'OMIsie11/SpaceXFollower',
'TheAlphamerc/flutter_spacexopedia',
'EduD/front-challenge-spacex',
'ItsCalebJones/SpaceLaunchNow-Android',
'SaidBySolo/SpaceXPy',
'vinayphadnis/SpaceX-Python',
'romebell/ga-spacex-frontend',
'BaderEddineOuaich/spacex_stellar',
'manhdv96/SpaceX-Kernel-Exynos7420',
'Alric/spacex',
'romebell/ga-spacex-api',
'rikkertkoppes/spacex-telemetry',
'HiKaylum/SpaceX-PY',
'tdrach/Sciview',
'RoryStolzenberg/spacexstats',
'Hyp-ed/hyped-2019',
'ghelobytes/mission-control',
'sparky8512/starlink-grpc-tools',
'candydasein/spacex-launches',
'SaraJo/SpaceXGMail',
'emersonlaurentino/spacex-qraphql-api',
'SpaceXLand/client',
'SophieDeBenedetto/spacex-apply',
'moesalih/spacex.moesalih.com',
'VGVentures/spacex_demo',
'codersgyan/spacex-redesign',
'JohnnySC/SpaceX',
'IJMacD/spacex-launches',
'brunolcarli/Ark',
'tipenehughes/space-x-app',
'pushpinderpalsingh/SpaceDash',
'orcaman/spacex',
'Illu/moonwalk',
'R4yGM/SpaceXTelemetry-Api',
'jesusrp98/space-curiosity',
'shahar603/Launch-Dashboard-API',
'sudharsan-selvaraj/selenium-spacex-docking',
'zlsa/f9r-v2',
'SteveSunTech/stardust',
'koxm/MMM-SpaceX',
'santiaguf/spacex-platzi',
'alshapton/SpacePY-X',
'looksocii/SpaceX_PSIT-Project',
'DirectMyFile/DiffuseSpace',
'badreddine-dlaila/spacex-app-demo',
'codexa/SpaceX-Rocket',
'ivanddm/spacexapp',
'andrnors/flutter-101-spaceX',
'djtimca/HASpaceX',
'lukacs-m/SpaceXMVVMSwiftUICombine',
'zwenza/spacexnow',
'danopstech/starlink_exporter',
'BrianIshii/git-falcon9',
'sdsubhajitdas/Rocket_Lander_Gym',
'imranhsayed/graphql-react-app',
'Eliminater74/SpaceX-Pure',
'asicguy/spacex_uart',
'samisharafeddine/SpaceXAPI-Swift',
'ayybradleyjh/kOS-Hoverslam',
'openland/spacex',
'Goldob/iss_docking_automation',
'jvsinghk/spacex',
'ugurkanates/SpaceXReinforcementLearning',
'HanSolo/touchjoystick',
'colbyfayock/my-spacex-launches',
'wilkerlucio/pathom-connect-spacex',
'Hyp-ed/hyped-2018',
'DanielRings/ReusableLaunchSystem',
'RomanSytnyk/SpaceX-App-unofficial',
'louisjc/spacexlaunches.com',
'AkiaCode/spacex-api.js',
'Ionic-SpaceX/SpaceX',
'JohannesFriedrich/SpaceX',
'R4yGM/SpaceXNews-api',
'akim3235/spacex-apollo-graphql',
'goncharom/SpaceXRocket',
'r-spacex/api-style-guide',
'ahmetakil/spacex_graphql',
'ElvinC/Dragon-docker',
'PiotrRut/SpaceX-Launches',
'schmidgallm/spaceXwatch',
'Thomas-Smyth/SpaceX-API-Wrapper',
'Elucidation/ThrustVectorControl',
'IainCole/SpaceXVideoApp',
'michaellyons/react-launch-gauge',
's-ai-kia/SpaceXland',
'gregv/meeting-timeline',
'sroaj/spacexfm',
'reidbuckingham48/spacex-nasa-flight-data',
'matdziu/SpaceXMVI',
'ryansan/SpaceX-Design',
'ergenekonyigit/spacex-cljs',
'JAQ-SpaceX/spaceX-brief',
'XiaoTeTech/spacex.xiaote.com',
'Tearth/Oddity',
'patrickyin/kotlin-coroutines-vs-rx',
'doflah/boostback',
'PatrykWojcieszak/X-Info',
'MITHyperloopTeam/software_core',
'airesvsg/spacex',
'Tearth/InElonWeTrust',
'ALuxios/SpaceX',
'leoge0113/SpaceX-Web',
'Patrykz94/kOS-RTLS-Landing',
'harisudhan7889/SpaceX',
'jamesgeorge007/spacex-launcher-stats',
'BekaAM/spaceX',
'499602D2/tg-launchbot',
'phch/ucdavis-hyperloop',
'syedsadiqali/sapient-spacex-app',
'shashidhark/Spacex-API-Frontend',
'peetck/spacex-explorer',
'SirKeplan/spacex-reddit-wiki',
'AndrewRLloyd88/mb-career-accelerator-spaceX',
'victorshinya/spacex-rockets',
'enciyo/SpaceX',
'crunchysoul/spacex_ex',
'bbutler522/SpaceX-Visualization',
'odziem/fetch-deno',
'ahmetmvural1/SpaceXProject',
'janipalsamaki/spacex-robot',
'alexgtn/spacex-api-wrapper',
'oplS16projects/SpaceXplore',
't-ozeren/SpaceXData',
'andrey-leshenko/ISSDockingBotGame',
'dongzerun/nice-spacex',
'mikkrieg/spaceXAPI',
'elricdog/SpaceX-StarShip',
'jiachengzhang1/spacex-and-mars',
'jesusrp98/bot-hackathon-spacex',
'developer-junaid/SpaceX-App',
'faiza203/SpaceX',
'richiemccoll/visualising-front-end-performance-demo',
'ozonni/SpotTheFire',
'svipatov/spacex-tracker',
'pmborg/SpaceX-RO-Falcons',
'me-aakash-online/spaceX-launch-program',
'ping-n/spaceX-js-app',
'zlsa/spacex-info',
'joshuadeguzman/spacex-land',
'ShinteiMai/next-spacex',
'BriantOliveira/SpaceX-Dataset',
'spacexksp/spacexksp.github.io',
'Sheldon1538/SpaceXApp',
'tejalkotkar/Mission_SpaceX',
'mattmillsxyz/x-watch',
'staszewski/spacex-api-app',
'ronal2do/Graphql-SpaceX-API',
'ayberkgerey/SpaceX_Data_Retrofit',
'cmoir97/SpaceX-App',
'rinoldm/SBURB',
'abh80/spacexapp',
'jor-dan/SpaceX-GraphQL',
'mcastorena0316/react-redux-capstone',
'jackkoppa/go-for-launch',
'Emmanuel1118/Crew-Dragon-Autopilot',
'AzuxDario/Marsy'
]
headers = {"Authorization": f"token {github_token}", "User-Agent": github_username}
if headers["Authorization"] == "token " or headers["User-Agent"] == "":
raise Exception(
"You need to follow the instructions marked TODO in this script before trying to use it"
)
def github_api_request(url: str) -> Union[List, Dict]:
    """GET *url* with the module-level auth headers and return the parsed JSON.

    Raises:
        Exception: if the response status is not 200. The raw response body is
            included in the message so that non-JSON error pages (e.g. HTML
            502/503 gateway pages) still produce this error instead of a
            JSONDecodeError, which the original code raised by parsing first.
    """
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        raise Exception(
            f"Error response from github api! status code: {response.status_code}, "
            f"response: {response.text}"
        )
    return response.json()
def get_repo_language(repo: str) -> str:
    """Return the primary language of *repo* ("owner/name") via the GitHub API.

    Raises:
        Exception: if the API response is not a dict or lacks a "language" key.
    """
    url = f"https://api.github.com/repos/{repo}"
    repo_info = github_api_request(url)
    if isinstance(repo_info, dict):
        if "language" not in repo_info:
            # Fix: the original message had a typo ("not round in response").
            raise Exception(
                "'language' key not found in response\n{}".format(json.dumps(repo_info))
            )
        return repo_info["language"]
    raise Exception(
        f"Expecting a dictionary response from {url}, instead got {json.dumps(repo_info)}"
    )
def get_repo_contents(repo: str) -> List[Dict[str, str]]:
    """Fetch the top-level file listing of *repo* from the GitHub API."""
    url = f"https://api.github.com/repos/{repo}/contents/"
    contents = github_api_request(url)
    # The contents endpoint must answer with a JSON array; anything else
    # (e.g. an error object) is a hard failure.
    if type(contents) is not list:
        raise Exception(
            f"Expecting a list response from {url}, instead got {json.dumps(contents)}"
        )
    return cast(List, contents)
def get_readme_download_url(files: List[Dict[str, str]]) -> str:
    """Return the download url of the first README-like file in *files*.

    *files* is a GitHub contents-listing response. An empty string is
    returned when no file whose name starts with "readme" is present.
    """
    match = next(
        (entry for entry in files if entry["name"].lower().startswith("readme")),
        None,
    )
    return "" if match is None else match["download_url"]
def process_repo(repo: str) -> Dict[str, str]:
    """
    Takes a repo name like "gocodeup/codeup-setup-script" and returns a
    dictionary with the language of the repo and the readme contents.
    """
    listing = get_repo_contents(repo)
    readme_url = get_readme_download_url(listing)
    # An empty url means the repo has no README; record empty contents.
    readme_contents = requests.get(readme_url).text if readme_url else ""
    return {
        "repo": repo,
        "language": get_repo_language(repo),
        "readme_contents": readme_contents,
    }
def scrape_github_data() -> List[Dict[str, str]]:
    """
    Loop through all of the repos and process them. Returns the processed data.
    """
    return list(map(process_repo, REPOS))
if __name__ == "__main__":
    # Scrape every repo in REPOS and dump the results as pretty-printed JSON.
    data = scrape_github_data()
    # Fix: use a context manager so the output file is flushed and closed
    # deterministically (the original left the handle open).
    with open("data.json", "w") as f:
        json.dump(data, f, indent=1)
#------------------------------------------------------------------------------------------------------------------------------------------------------ | 0.278061 | 0.239427 |
# Build/format the "Page_04" sheet of the daily-rounds workbook.
print('Start next file, \'page_04\'')
# imports
from openpyxl import load_workbook
from openpyxl.styles import Alignment, Border, Side, NamedStyle, Font, PatternFill
# Module-level workbook/sheet handles used by all pg04_* functions below.
wb = load_workbook(filename = 'Plymouth_Daily_Rounds.xlsx')
sheet = wb["Page_04"]
print('Active sheet is ', sheet)
print('04-01')  # progress marker
wb.save('Plymouth_Daily_Rounds.xlsx')  # checkpoint save
def pg04_headers():
    """Set print options, page margins, and header/footer text for Page_04."""
    # center = Alignment(horizontal='center', vertical='center')
    # right = Alignment(horizontal='right', vertical='bottom')
    # Print Options
    sheet.print_area = 'A1:I42' # TODO: set cell region
    sheet.print_options.horizontalCentered = True
    sheet.print_options.verticalCentered = True
    # Page margins
    sheet.page_margins.left = 0.25
    sheet.page_margins.right = 0.25
    sheet.page_margins.top = 0.55
    sheet.page_margins.bottom = 0.55
    sheet.page_margins.header = 0.25
    sheet.page_margins.footer = 0.25
    # Headers & Footers (openpyxl &-codes: &[File], &[Tab], &[Path])
    sheet.oddHeader.center.text = "&[File]"
    sheet.oddHeader.center.size = 20
    sheet.oddHeader.center.font = "Tahoma, Bold"
    sheet.oddHeader.center.color = "000000" #
    sheet.oddFooter.left.text = "&[Tab] of 11"
    sheet.oddFooter.left.size = 10
    sheet.oddFooter.left.font = "Tahoma, Bold"
    sheet.oddFooter.left.color = "000000" #
    sheet.oddFooter.right.text = "&[Path]&[File]"
    sheet.oddFooter.right.size = 6
    sheet.oddFooter.right.font = "Tahoma, Bold"
    sheet.oddFooter.right.color = "000000"
print('04-02')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_merge():
    """Merge cells and set column widths / row heights for Page_04."""
    # Merge A..I into one cell for the full-width banner rows.
    for row in (1, 5, 12, 13, 23, 24):
        sheet.merge_cells(start_row=row, start_column=1, end_row=row, end_column=9)
    # Merge the B/C, D/E, F/G, H/I column pairs on the data rows.
    columns = [(col, col+1) for col in range(2, 9, 2)]
    for row in [2, 3, 4, 6, 7, 8, 9, 10, 11, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]:
        for col1, col2 in columns:
            sheet.merge_cells(start_row=row, start_column=col1, end_row=row, end_column=col2)
    # Column width and Row height
    sheet.column_dimensions['A'].width = 30.00
    for col in ['B', 'D', 'F', 'H']:
        sheet.column_dimensions[col].width = 4.00
    for col in ['C', 'E', 'G', 'I']:
        sheet.column_dimensions[col].width = 10.00
    for row in range(1, 43):
        # Bug fix: the RowDimension attribute is lower-case 'height'.
        # '.Height' merely set an unused attribute, so the intended 15.00
        # row height was never applied to the sheet.
        sheet.row_dimensions[row].height = 15.00
    # Wrap text in Column A. (The original's inner loop over the merge
    # pairs was redundant -- every iteration touched the same cell (row, 1).)
    for row in range(1, 31):
        sheet.cell(row, 1).alignment = Alignment(wrap_text=True)
    sheet.merge_cells(start_row=30, start_column=1, end_row=32, end_column=1)
print('04-03')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_namedstyle():
    """Apply named styles, centering, and a thin border grid to Page_04."""
    center = Alignment(horizontal='center', vertical='center')
    thin_border = Border(left=Side(style='thin'),
                         right=Side(style='thin'),
                         top=Side(style='thin'),
                         bottom=Side(style='thin'))
    # NOTE(review): thick_border is defined but never used in this function.
    thick_border = Border(left=Side(style='thick'),
                          right=Side(style='thick'),
                          top=Side(style='thick'),
                          bottom=Side(style='thick'))
    # Styles
    # 'rooms' is a NamedStyle -- presumably registered on the workbook
    # elsewhere in this script; verify it exists before running standalone.
    sheet['A1'].style = 'rooms'
    sheet['A12'].style = 'rooms'
    sheet['A24'].style = 'rooms'
    sheet['A28'].style = 'rooms'
    ''' sheet['B21'].style = 'rightAlign' # Todo: Add into forLoop
    sheet['B24'].style = 'rightAlign'
    sheet['B25'].style = 'rightAlign'
    sheet['B27'].style = 'rightAlign' '''
    sheet.cell(row=30, column=1).alignment = center
    sheet['A5'].alignment = center
    # Borders: thin border on every cell in A1:I79.
    rows = range(1, 80)
    columns = range(1, 10)
    for row in rows:
        for col in columns:
            sheet.cell(row, col).border = thin_border
print('04-04')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_cell_values():
    """Write the row labels (column A) and zone headers for Page_04.

    NOTE(review): rows 43-78 are written here although the print area set in
    pg04_headers is A1:I42 -- confirm the intended page extent.
    """
    # Cell values
    sheet['A1'].value = 'CC3'
    sheet['A2'].value = 'CC3-B05 (MBB) Breaker is Open'
    sheet['A3'].value = 'CC3-B01 (MIB) Breaker is Closed'
    sheet['A4'].value = 'CC3-B99 (LBB) Breaker is Open'
    sheet['A5'].value = 'Ensure Key is in Locked position before touching STS screen'
    sheet['A6'].value = 'STS3A is on preferred source 1'
    sheet['A7'].value = 'STS3B is on preferred source 1'
    sheet['A8'].value = 'EF 4'
    sheet['A9'].value = 'EF 5'
    sheet['A10'].value = 'East Electrical Room Leak Detection'
    sheet['A11'].value = 'Tear off sticky mat for SR3 (East side)'
    sheet['A12'].value = 'Fire Pump/ Pre-Action Room' # Note: Room
    sheet['A13'].value = 'Only on the 20:00 rounds check pre action valves to make sure they’re open (if open put a check next to each zone):'
    # Row 14: one zone header per column A..I.
    sheet['A14'].value = 'Zone 1'
    sheet['B14'].value = 'Zone 2'
    sheet['C14'].value = 'Zone 3'
    sheet['D14'].value = 'Zone 4'
    sheet['E14'].value = 'Zone 5'
    sheet['F14'].value = 'Zone 6'
    sheet['G14'].value = 'Zone 7'
    sheet['H14'].value = 'Wet system level 1-4 '
    sheet['I14'].value = 'Wet system level 0 (Corridors)'
    sheet['A15'].value = 'Jockey pump controller in Auto '
    sheet['A16'].value = 'Fire pump controller in Auto '
    sheet['A17'].value = 'Fire pump is on Normal source power'
    sheet['A18'].value = 'System water pressure left side of controller (140 -150psi)'
    sheet['A19'].value = 'System Nitorgen PSI (inside the red cabinet)'
    sheet['A20'].value = 'At Nitrogen tank regulator: (Replace with Extra Dry Nitrogen at 200PSI)'
    sheet['A21'].value = 'Main building water meter (Total) readings (Top reading)'
    sheet['A22'].value = 'Is Building Main-Drain Water Leaking?'
    sheet['A23'].value = 'If drain pipe has water leaking, check the air-bleed-off-valve in the penthouse stairwell for leaks.'
    sheet['A24'].value = 'Loading Dock Area' # Note: Room
    sheet['A25'].value = 'Do we need to order salt? If yes let the Chief Engineer know.'
    sheet['A26'].value = 'Check brine level (should be at the indicating line).'
    sheet['A27'].value = 'HP LL- 5 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A28'].value = 'Mechanical / Chill Water Units Room' # Note: Room
    sheet['A29'].value = 'Cooling Twr. Supply water meter reading.'
    sheet['A30'].value = 'Write down the water softener gallon readings from each softener.'
    # sheet['A31'].value = '' # Todo: merge with line 29
    # sheet['A32'].value = '' # Todo: merge with line 29
    sheet['A33'].value = 'Well meter reading'
    sheet['A34'].value = 'HP LL- 4 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A35'].value = 'CHWP #3'
    sheet['A36'].value = 'CHWP #5'
    sheet['A37'].value = 'CHWP #2'
    sheet['A38'].value = 'CHWP #4'
    sheet['A39'].value = 'CHWP #1'
    sheet['A40'].value = 'CDW to CHW makeup' # Todo: two line 40's
    # sheet['A40'].value = 'CHW'
    sheet['A41'].value = 'CHW Filter PSI (23psi)'
    sheet['A42'].value = 'Bladder tank pressure (<30)'
    sheet['A43'].value = 'CHW Lakos Bag filter'
    sheet['A44'].value = 'Condenser Supply Temp. East Side (68 – 85)'
    sheet['A45'].value = 'CWP-6 VFD'
    sheet['A46'].value = 'CWP-1 VFD'
    sheet['A47'].value = 'CWP-4 VFD'
    sheet['A48'].value = 'CWP-3 VFD'
    sheet['A49'].value = 'CDWF VFD '
    sheet['A50'].value = 'CWP-2 VFD'
    sheet['A51'].value = 'CWP-5 VFD'
    sheet['A52'].value = 'TWR Fan- 6 VFD'
    sheet['A53'].value = 'TWR Fan- 5 VFD'
    sheet['A54'].value = 'CHWR Header Temp East'
    sheet['A55'].value = 'CHWR Temp (Bypass) East'
    sheet['A56'].value = 'Lakos Separator (6psi)'
    sheet['A57'].value = 'CHWS Temp East'
    sheet['A58'].value = 'CHWP #3 VFD'
    sheet['A59'].value = 'Well VFD'
    sheet['A60'].value = 'CHWP #2 VFD'
    sheet['A61'].value = 'CHWP #4 VFD'
    sheet['A62'].value = 'CHWP #1 VFD'
    sheet['A63'].value = 'CHWP #5 VFD'
    sheet['A64'].value = 'EF #6 VFD'
    sheet['A65'].value = 'Core Pump #1 VFD'
    sheet['A66'].value = 'Core Pump #2 VFD'
    sheet['A67'].value = 'HP LL- 3 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A68'].value = 'Core Pump #2 (15 - 20 PSID)'
    sheet['A69'].value = 'Core Pump #1 (15 - 20 PSID)'
    sheet['A70'].value = 'Condenser Supply Temp. West Side (68 – 85)'
    sheet['A71'].value = 'Chemical tanks level (above the order lines)'
    sheet['A72'].value = 'Nalco controller'
    sheet['A73'].value = 'Coupon Rack flow is between 4 – 6 GPM'
    sheet['A74'].value = 'Tower #4 VFD'
    sheet['A75'].value = 'Tower #3 VFD'
    sheet['A76'].value = 'Tower #2 VFD'
    sheet['A77'].value = 'Tower #1 VFD'
    sheet['A78'].value = 'Notes:' # StretchGoal: Increase row height, delete comment rows below
print('04-05')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_engineer_values():
    """Fill the answer cells with 'Yes / No' and checkmark placeholders."""
    # Engineering Values
    # Local Variables
    center = Alignment(horizontal='center', vertical='center')
    right = Alignment(horizontal='right', vertical='bottom')
    columnEven = [2, 4, 6, 8]
    columnOdd = [3, 5, 7, 9]
    # Yes or No values
    rows = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 22, 24]
    # cells = []
    for col in columnEven:
        for row in rows:
            sheet.cell(row=row, column=col).value = 'Yes / No'
            sheet.cell(row=row, column=col).alignment = center
            sheet.cell(row=row, column=col).font = Font(size = 8, i=True, color='000000')
    # ✓ X values
    # NOTE(review): rows 6-10 appear in both lists, so this loop overwrites
    # the 'Yes / No' placeholders written above -- confirm that is intended.
    rowsCheck = [6, 7, 8, 9, 10, 15, 16, 17, 25, 26]
    for col in columnEven:
        for row in rowsCheck:
            # print(col, row)
            sheet.cell(row=row, column=col).value = '✓ or X'
            sheet.cell(row=row, column=col).alignment = center
            sheet.cell(row=row, column=col).font = Font(size=9, color='DCDCDC')
    ''' # Hz
    rowsHZ = [18]
    for col in columnOdd:
        for row in rowsHZ:
            # print(col, row)
            sheet.cell(row=row, column=col).value = 'Hz'
            sheet.cell(row=row, column=col).alignment = right
            sheet.cell(row=row, column=col).font = Font(size=8, color='000000') '''
print('04-06')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_colored_cells():
    """Grey-fill the section banner rows (1, 12, 24, 28) across columns A..I."""
    grey = PatternFill(fgColor='DCDCDC', fill_type='solid')
    for row in (1, 12, 24, 28):
        for col in range(1, 10):
            sheet.cell(row=row, column=col).fill = grey
print('04-07')
wb.save('Plymouth_Daily_Rounds.xlsx') | archive/page_04_firepprm_docking - Copy.py | print('Start next file, \'page_04\'')
# imports
from openpyxl import load_workbook
from openpyxl.styles import Alignment, Border, Side, NamedStyle, Font, PatternFill
# Module-level workbook/sheet handles used by all pg04_* functions below.
wb = load_workbook(filename = 'Plymouth_Daily_Rounds.xlsx')
sheet = wb["Page_04"]
print('Active sheet is ', sheet)
print('04-01')  # progress marker
wb.save('Plymouth_Daily_Rounds.xlsx')  # checkpoint save
def pg04_headers():
    """Set print options, page margins, and header/footer text for Page_04."""
    # center = Alignment(horizontal='center', vertical='center')
    # right = Alignment(horizontal='right', vertical='bottom')
    # Print Options
    sheet.print_area = 'A1:I42' # TODO: set cell region
    sheet.print_options.horizontalCentered = True
    sheet.print_options.verticalCentered = True
    # Page margins
    sheet.page_margins.left = 0.25
    sheet.page_margins.right = 0.25
    sheet.page_margins.top = 0.55
    sheet.page_margins.bottom = 0.55
    sheet.page_margins.header = 0.25
    sheet.page_margins.footer = 0.25
    # Headers & Footers (openpyxl &-codes: &[File], &[Tab], &[Path])
    sheet.oddHeader.center.text = "&[File]"
    sheet.oddHeader.center.size = 20
    sheet.oddHeader.center.font = "Tahoma, Bold"
    sheet.oddHeader.center.color = "000000" #
    sheet.oddFooter.left.text = "&[Tab] of 11"
    sheet.oddFooter.left.size = 10
    sheet.oddFooter.left.font = "Tahoma, Bold"
    sheet.oddFooter.left.color = "000000" #
    sheet.oddFooter.right.text = "&[Path]&[File]"
    sheet.oddFooter.right.size = 6
    sheet.oddFooter.right.font = "Tahoma, Bold"
    sheet.oddFooter.right.color = "000000"
print('04-02')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_merge():
    """Merge cells and set column widths / row heights for Page_04."""
    # Merge A..I into one cell for the full-width banner rows.
    for row in (1, 5, 12, 13, 23, 24):
        sheet.merge_cells(start_row=row, start_column=1, end_row=row, end_column=9)
    # Merge the B/C, D/E, F/G, H/I column pairs on the data rows.
    columns = [(col, col+1) for col in range(2, 9, 2)]
    for row in [2, 3, 4, 6, 7, 8, 9, 10, 11, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]:
        for col1, col2 in columns:
            sheet.merge_cells(start_row=row, start_column=col1, end_row=row, end_column=col2)
    # Column width and Row height
    sheet.column_dimensions['A'].width = 30.00
    for col in ['B', 'D', 'F', 'H']:
        sheet.column_dimensions[col].width = 4.00
    for col in ['C', 'E', 'G', 'I']:
        sheet.column_dimensions[col].width = 10.00
    for row in range(1, 43):
        # Bug fix: the RowDimension attribute is lower-case 'height'.
        # '.Height' merely set an unused attribute, so the intended 15.00
        # row height was never applied to the sheet.
        sheet.row_dimensions[row].height = 15.00
    # Wrap text in Column A. (The original's inner loop over the merge
    # pairs was redundant -- every iteration touched the same cell (row, 1).)
    for row in range(1, 31):
        sheet.cell(row, 1).alignment = Alignment(wrap_text=True)
    sheet.merge_cells(start_row=30, start_column=1, end_row=32, end_column=1)
print('04-03')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_namedstyle():
    """Apply named styles, centering, and a thin border grid to Page_04."""
    center = Alignment(horizontal='center', vertical='center')
    thin_border = Border(left=Side(style='thin'),
                         right=Side(style='thin'),
                         top=Side(style='thin'),
                         bottom=Side(style='thin'))
    # NOTE(review): thick_border is defined but never used in this function.
    thick_border = Border(left=Side(style='thick'),
                          right=Side(style='thick'),
                          top=Side(style='thick'),
                          bottom=Side(style='thick'))
    # Styles
    # 'rooms' is a NamedStyle -- presumably registered on the workbook
    # elsewhere in this script; verify it exists before running standalone.
    sheet['A1'].style = 'rooms'
    sheet['A12'].style = 'rooms'
    sheet['A24'].style = 'rooms'
    sheet['A28'].style = 'rooms'
    ''' sheet['B21'].style = 'rightAlign' # Todo: Add into forLoop
    sheet['B24'].style = 'rightAlign'
    sheet['B25'].style = 'rightAlign'
    sheet['B27'].style = 'rightAlign' '''
    sheet.cell(row=30, column=1).alignment = center
    sheet['A5'].alignment = center
    # Borders: thin border on every cell in A1:I79.
    rows = range(1, 80)
    columns = range(1, 10)
    for row in rows:
        for col in columns:
            sheet.cell(row, col).border = thin_border
print('04-04')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_cell_values():
    """Write the row labels (column A) and zone headers for Page_04.

    NOTE(review): rows 43-78 are written here although the print area set in
    pg04_headers is A1:I42 -- confirm the intended page extent.
    """
    # Cell values
    sheet['A1'].value = 'CC3'
    sheet['A2'].value = 'CC3-B05 (MBB) Breaker is Open'
    sheet['A3'].value = 'CC3-B01 (MIB) Breaker is Closed'
    sheet['A4'].value = 'CC3-B99 (LBB) Breaker is Open'
    sheet['A5'].value = 'Ensure Key is in Locked position before touching STS screen'
    sheet['A6'].value = 'STS3A is on preferred source 1'
    sheet['A7'].value = 'STS3B is on preferred source 1'
    sheet['A8'].value = 'EF 4'
    sheet['A9'].value = 'EF 5'
    sheet['A10'].value = 'East Electrical Room Leak Detection'
    sheet['A11'].value = 'Tear off sticky mat for SR3 (East side)'
    sheet['A12'].value = 'Fire Pump/ Pre-Action Room' # Note: Room
    sheet['A13'].value = 'Only on the 20:00 rounds check pre action valves to make sure they’re open (if open put a check next to each zone):'
    # Row 14: one zone header per column A..I.
    sheet['A14'].value = 'Zone 1'
    sheet['B14'].value = 'Zone 2'
    sheet['C14'].value = 'Zone 3'
    sheet['D14'].value = 'Zone 4'
    sheet['E14'].value = 'Zone 5'
    sheet['F14'].value = 'Zone 6'
    sheet['G14'].value = 'Zone 7'
    sheet['H14'].value = 'Wet system level 1-4 '
    sheet['I14'].value = 'Wet system level 0 (Corridors)'
    sheet['A15'].value = 'Jockey pump controller in Auto '
    sheet['A16'].value = 'Fire pump controller in Auto '
    sheet['A17'].value = 'Fire pump is on Normal source power'
    sheet['A18'].value = 'System water pressure left side of controller (140 -150psi)'
    sheet['A19'].value = 'System Nitorgen PSI (inside the red cabinet)'
    sheet['A20'].value = 'At Nitrogen tank regulator: (Replace with Extra Dry Nitrogen at 200PSI)'
    sheet['A21'].value = 'Main building water meter (Total) readings (Top reading)'
    sheet['A22'].value = 'Is Building Main-Drain Water Leaking?'
    sheet['A23'].value = 'If drain pipe has water leaking, check the air-bleed-off-valve in the penthouse stairwell for leaks.'
    sheet['A24'].value = 'Loading Dock Area' # Note: Room
    sheet['A25'].value = 'Do we need to order salt? If yes let the Chief Engineer know.'
    sheet['A26'].value = 'Check brine level (should be at the indicating line).'
    sheet['A27'].value = 'HP LL- 5 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A28'].value = 'Mechanical / Chill Water Units Room' # Note: Room
    sheet['A29'].value = 'Cooling Twr. Supply water meter reading.'
    sheet['A30'].value = 'Write down the water softener gallon readings from each softener.'
    # sheet['A31'].value = '' # Todo: merge with line 29
    # sheet['A32'].value = '' # Todo: merge with line 29
    sheet['A33'].value = 'Well meter reading'
    sheet['A34'].value = 'HP LL- 4 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A35'].value = 'CHWP #3'
    sheet['A36'].value = 'CHWP #5'
    sheet['A37'].value = 'CHWP #2'
    sheet['A38'].value = 'CHWP #4'
    sheet['A39'].value = 'CHWP #1'
    sheet['A40'].value = 'CDW to CHW makeup' # Todo: two line 40's
    # sheet['A40'].value = 'CHW'
    sheet['A41'].value = 'CHW Filter PSI (23psi)'
    sheet['A42'].value = 'Bladder tank pressure (<30)'
    sheet['A43'].value = 'CHW Lakos Bag filter'
    sheet['A44'].value = 'Condenser Supply Temp. East Side (68 – 85)'
    sheet['A45'].value = 'CWP-6 VFD'
    sheet['A46'].value = 'CWP-1 VFD'
    sheet['A47'].value = 'CWP-4 VFD'
    sheet['A48'].value = 'CWP-3 VFD'
    sheet['A49'].value = 'CDWF VFD '
    sheet['A50'].value = 'CWP-2 VFD'
    sheet['A51'].value = 'CWP-5 VFD'
    sheet['A52'].value = 'TWR Fan- 6 VFD'
    sheet['A53'].value = 'TWR Fan- 5 VFD'
    sheet['A54'].value = 'CHWR Header Temp East'
    sheet['A55'].value = 'CHWR Temp (Bypass) East'
    sheet['A56'].value = 'Lakos Separator (6psi)'
    sheet['A57'].value = 'CHWS Temp East'
    sheet['A58'].value = 'CHWP #3 VFD'
    sheet['A59'].value = 'Well VFD'
    sheet['A60'].value = 'CHWP #2 VFD'
    sheet['A61'].value = 'CHWP #4 VFD'
    sheet['A62'].value = 'CHWP #1 VFD'
    sheet['A63'].value = 'CHWP #5 VFD'
    sheet['A64'].value = 'EF #6 VFD'
    sheet['A65'].value = 'Core Pump #1 VFD'
    sheet['A66'].value = 'Core Pump #2 VFD'
    sheet['A67'].value = 'HP LL- 3 Ok (Fan is ok, If there\'s sweating of pipes check operation of HP)'
    sheet['A68'].value = 'Core Pump #2 (15 - 20 PSID)'
    sheet['A69'].value = 'Core Pump #1 (15 - 20 PSID)'
    sheet['A70'].value = 'Condenser Supply Temp. West Side (68 – 85)'
    sheet['A71'].value = 'Chemical tanks level (above the order lines)'
    sheet['A72'].value = 'Nalco controller'
    sheet['A73'].value = 'Coupon Rack flow is between 4 – 6 GPM'
    sheet['A74'].value = 'Tower #4 VFD'
    sheet['A75'].value = 'Tower #3 VFD'
    sheet['A76'].value = 'Tower #2 VFD'
    sheet['A77'].value = 'Tower #1 VFD'
    sheet['A78'].value = 'Notes:' # StretchGoal: Increase row height, delete comment rows below
print('04-05')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_engineer_values():
    """Fill the answer cells with 'Yes / No' and checkmark placeholders."""
    # Engineering Values
    # Local Variables
    center = Alignment(horizontal='center', vertical='center')
    right = Alignment(horizontal='right', vertical='bottom')
    columnEven = [2, 4, 6, 8]
    columnOdd = [3, 5, 7, 9]
    # Yes or No values
    rows = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 22, 24]
    # cells = []
    for col in columnEven:
        for row in rows:
            sheet.cell(row=row, column=col).value = 'Yes / No'
            sheet.cell(row=row, column=col).alignment = center
            sheet.cell(row=row, column=col).font = Font(size = 8, i=True, color='000000')
    # ✓ X values
    # NOTE(review): rows 6-10 appear in both lists, so this loop overwrites
    # the 'Yes / No' placeholders written above -- confirm that is intended.
    rowsCheck = [6, 7, 8, 9, 10, 15, 16, 17, 25, 26]
    for col in columnEven:
        for row in rowsCheck:
            # print(col, row)
            sheet.cell(row=row, column=col).value = '✓ or X'
            sheet.cell(row=row, column=col).alignment = center
            sheet.cell(row=row, column=col).font = Font(size=9, color='DCDCDC')
    ''' # Hz
    rowsHZ = [18]
    for col in columnOdd:
        for row in rowsHZ:
            # print(col, row)
            sheet.cell(row=row, column=col).value = 'Hz'
            sheet.cell(row=row, column=col).alignment = right
            sheet.cell(row=row, column=col).font = Font(size=8, color='000000') '''
print('04-06')
wb.save('Plymouth_Daily_Rounds.xlsx')
def pg04_colored_cells():
    """Grey-fill the section banner rows (1, 12, 24, 28) across columns A..I."""
    grey = PatternFill(fgColor='DCDCDC', fill_type='solid')
    for row in (1, 12, 24, 28):
        for col in range(1, 10):
            sheet.cell(row=row, column=col).fill = grey
print('04-07')
wb.save('Plymouth_Daily_Rounds.xlsx') | 0.261897 | 0.259204 |
import random
import string
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import slugify
from .utils import upload_track_to, upload_image_to
class Genre(models.Model):
    """A music genre that a Track can be tagged with."""
    name = models.CharField(max_length=35)
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile, created automatically by the post_save signal below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(upload_to=upload_image_to, blank=True)
    name = models.CharField(max_length=35)
    # NOTE(review): slug is derived from name on every save and is not
    # unique -- two profiles with the same name share a slug; confirm intended.
    slug = models.SlugField()
    bio = models.TextField(max_length=2000, blank=True)
    # Asymmetric follow relation: A following B does not imply B following A.
    following = models.ManyToManyField('self', related_name='followers', symmetrical=False)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the display name on every save.
        self.slug = slugify(self.name)
        super(Profile, self).save(*args, **kwargs)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """On first save of a User, attach a Profile with a random placeholder name."""
    if not created:
        return
    digits = [random.choice(string.digits) for _ in range(9)]
    Profile.objects.create(user=instance, name='user' + ''.join(digits))
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist profile edits whenever the related User object is saved.
    instance.profile.save()
class Track(models.Model):
    """An uploaded audio track with genre, artwork, and uploader."""
    track = models.FileField(upload_to=upload_track_to)
    title = models.CharField(max_length=100)
    # NOTE(review): slug derives from title on save and is not unique.
    slug = models.SlugField()
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
    description = models.TextField(max_length=2000, blank=True)
    image = models.ImageField(upload_to=upload_image_to, blank=True)
    uploader = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the title on every save.
        self.slug = slugify(self.title)
        super(Track, self).save(*args, **kwargs)
class Like(models.Model):
    """A user's like of a track; at most one per (user, track) pair."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    track = models.ForeignKey(Track, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Enforce one like per user per track at the database level.
        unique_together = (('user', 'track'), )
class Comment(models.Model):
    """A comment on a track; parent_comment enables threaded replies."""
    text = models.CharField(max_length=255)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    track = models.ForeignKey(Track, on_delete=models.CASCADE)
    # Self-referential FK: null/blank means a top-level comment.
    parent_comment = models.ForeignKey('Comment', on_delete=models.CASCADE, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.text
import string
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import slugify
from .utils import upload_track_to, upload_image_to
class Genre(models.Model):
    """A music genre that a Track can be tagged with."""
    name = models.CharField(max_length=35)
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile, created automatically by the post_save signal below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(upload_to=upload_image_to, blank=True)
    name = models.CharField(max_length=35)
    # NOTE(review): slug is derived from name on every save and is not
    # unique -- two profiles with the same name share a slug; confirm intended.
    slug = models.SlugField()
    bio = models.TextField(max_length=2000, blank=True)
    # Asymmetric follow relation: A following B does not imply B following A.
    following = models.ManyToManyField('self', related_name='followers', symmetrical=False)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the display name on every save.
        self.slug = slugify(self.name)
        super(Profile, self).save(*args, **kwargs)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """On first save of a User, attach a Profile with a random placeholder name."""
    if not created:
        return
    digits = [random.choice(string.digits) for _ in range(9)]
    Profile.objects.create(user=instance, name='user' + ''.join(digits))
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist profile edits whenever the related User object is saved.
    instance.profile.save()
class Track(models.Model):
    """An uploaded audio track with genre, artwork, and uploader."""
    track = models.FileField(upload_to=upload_track_to)
    title = models.CharField(max_length=100)
    # NOTE(review): slug derives from title on save and is not unique.
    slug = models.SlugField()
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
    description = models.TextField(max_length=2000, blank=True)
    image = models.ImageField(upload_to=upload_image_to, blank=True)
    uploader = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the title on every save.
        self.slug = slugify(self.title)
        super(Track, self).save(*args, **kwargs)
class Like(models.Model):
    """A user's like of a track; at most one per (user, track) pair."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    track = models.ForeignKey(Track, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Enforce one like per user per track at the database level.
        unique_together = (('user', 'track'), )
class Comment(models.Model):
    """A comment on a track; parent_comment enables threaded replies."""
    text = models.CharField(max_length=255)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    track = models.ForeignKey(Track, on_delete=models.CASCADE)
    # Self-referential FK: null/blank means a top-level comment.
    parent_comment = models.ForeignKey('Comment', on_delete=models.CASCADE, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.text
import getopt
import os
import subprocess
import sys
import toml
# Set the path to the configuration file
CONFIG_PATH = ""
def decrypt(config, key):
    """Mount the gocryptfs cipher dir of *key* onto its plain dir."""
    entry = config[key]
    subprocess.run(["gocryptfs", entry["cipher"], entry["plain"]])
def encrypt(config, key):
    """Unmount the plain dir of *key*, leaving only the cipher dir on disk."""
    subprocess.run(["umount", config[key]["plain"]])
def mount():
    # Get all mounted or decrypted folder paths
    # Returns the raw bytes output of the system `mount` command.
    return subprocess.check_output(["mount"])
def auto(config, key):
    """Toggle *key*: unmount it if currently mounted, otherwise decrypt it."""
    is_mounted = config[key]["plain"].encode() in mount()
    action = encrypt if is_mounted else decrypt
    action(config, key)
def summary(config):
    """Print an indexed table of every configured folder and its mount state.

    The system mount table is read once up front instead of re-running the
    `mount` subprocess for every row, and enumerate() replaces the manual
    index counter.
    """
    row_fmt = "{0:<8} {1:<14} {2:<10} {3:<26}"
    print(row_fmt.format("Index", "Key", "Mount", "Path"))
    print("----------------------------------------------------------------")
    mounted = mount()
    for index, (key, value) in enumerate(config.items(), start=1):
        active = "on" if value["plain"].encode() in mounted else "off"
        print(row_fmt.format(index, key, active, value["plain"]))
def main():
    """Entry point: parse CLI options and dispatch encrypt/decrypt/summary.

    With no arguments, a summary table is printed; bare positional arguments
    are toggled automatically via auto().
    """
    try:
        if not CONFIG_PATH:
            raise ValueError("No path to configuration file set")
        config = toml.load(CONFIG_PATH)
        if len(sys.argv) == 1:
            summary(config)
        else:
            # Bug fix: "decrypt"/"encrypt" long options previously lacked the
            # trailing '=' required by getopt for options that take an
            # argument, so `--encrypt KEY` never consumed KEY (it fell into
            # args and the option got key='', raising KeyError '').
            opts, args = getopt.getopt(sys.argv[1:], "d:e:s", ["decrypt=", "encrypt=", "summary"])
            for opt, key in opts:
                if opt in ("-e", "--encrypt"):
                    encrypt(config, key)
                elif opt in ("-d", "--decrypt"):
                    decrypt(config, key)
                elif opt in ("-s", "--summary"):
                    summary(config)
            for key in args:
                auto(config, key)
    except KeyError as e:
        print("Error@main: '" + e.args[0] + "' is not an encrypted folder!")
    except ValueError as e:
        print("Error@main: " + e.args[0] + "!")
    except getopt.GetoptError as e:
        print("Error@main: " + e.args[0] + "!")
if __name__ == "__main__":
    # Run the CLI only when executed directly, not on import.
    main()
import os
import subprocess
import sys
import toml
# Set the path to the configuration file
CONFIG_PATH = ""
def decrypt(config, key):
    """Mount the gocryptfs cipher dir of *key* onto its plain dir."""
    entry = config[key]
    subprocess.run(["gocryptfs", entry["cipher"], entry["plain"]])
def encrypt(config, key):
    """Unmount the plain dir of *key*, leaving only the cipher dir on disk."""
    subprocess.run(["umount", config[key]["plain"]])
def mount():
    # Get all mounted or decrypted folder paths
    # Returns the raw bytes output of the system `mount` command.
    return subprocess.check_output(["mount"])
def auto(config, key):
    """Toggle *key*: unmount it if currently mounted, otherwise decrypt it."""
    is_mounted = config[key]["plain"].encode() in mount()
    action = encrypt if is_mounted else decrypt
    action(config, key)
def summary(config):
    """Print an indexed table of every configured folder and its mount state.

    The system mount table is read once up front instead of re-running the
    `mount` subprocess for every row, and enumerate() replaces the manual
    index counter.
    """
    row_fmt = "{0:<8} {1:<14} {2:<10} {3:<26}"
    print(row_fmt.format("Index", "Key", "Mount", "Path"))
    print("----------------------------------------------------------------")
    mounted = mount()
    for index, (key, value) in enumerate(config.items(), start=1):
        active = "on" if value["plain"].encode() in mounted else "off"
        print(row_fmt.format(index, key, active, value["plain"]))
def main():
    """Entry point: parse CLI options and dispatch encrypt/decrypt/summary.

    With no arguments, a summary table is printed; bare positional arguments
    are toggled automatically via auto().
    """
    try:
        if not CONFIG_PATH:
            raise ValueError("No path to configuration file set")
        config = toml.load(CONFIG_PATH)
        if len(sys.argv) == 1:
            summary(config)
        else:
            # Bug fix: "decrypt"/"encrypt" long options previously lacked the
            # trailing '=' required by getopt for options that take an
            # argument, so `--encrypt KEY` never consumed KEY (it fell into
            # args and the option got key='', raising KeyError '').
            opts, args = getopt.getopt(sys.argv[1:], "d:e:s", ["decrypt=", "encrypt=", "summary"])
            for opt, key in opts:
                if opt in ("-e", "--encrypt"):
                    encrypt(config, key)
                elif opt in ("-d", "--decrypt"):
                    decrypt(config, key)
                elif opt in ("-s", "--summary"):
                    summary(config)
            for key in args:
                auto(config, key)
    except KeyError as e:
        print("Error@main: '" + e.args[0] + "' is not an encrypted folder!")
    except ValueError as e:
        print("Error@main: " + e.args[0] + "!")
    except getopt.GetoptError as e:
        print("Error@main: " + e.args[0] + "!")
if __name__ == "__main__":
    # Run the CLI only when executed directly, not on import.
    main()
description = 'Vacuum gauges in the neutron guide'
# NICOS setup: seven simulated vacuum sensors (vac1..vac7). All entries are
# identical except for the initial simulated reading (curvalue, in mbar).
devices = dict(
    vac1 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 1 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac2 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 2 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.2e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac3 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 3 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.5e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac4 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 4 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac5 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 5 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.3e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac6 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 6 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.2e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac7 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 7 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
)
# NICOS setup: seven simulated vacuum sensors (vac1..vac7). All entries are
# identical except for the initial simulated reading (curvalue, in mbar).
devices = dict(
    vac1 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 1 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac2 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 2 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.2e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac3 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 3 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.5e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac4 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 4 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac5 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 5 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.3e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac6 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 6 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.2e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
    vac7 = device('nicos.devices.generic.VirtualMotor',
        description = 'Vacuum sensor 7 in neutron guide',
        abslimits = (0, 1000),
        pollinterval = 10,
        maxage = 12,
        unit = 'mbar',
        curvalue = 1.1e-4,
        fmtstr = '%.2e',
        jitter = 1.e-5,
    ),
)
import numpy as np
from scipy import ndimage
from time import clock
from pygeonet_rasterio import *
from pygeonet_vectorio import *
from pygeonet_plot import *
def Channel_Head_Definition(skeletonFromFlowAndCurvatureArray, geodesicDistanceArray):
# Locating end points
print 'Locating skeleton end points'
structure = np.ones((3, 3))
skeletonLabeledArray, skeletonNumConnectedComponentsList =\
ndimage.label(skeletonFromFlowAndCurvatureArray,
structure=structure)
"""
Through the histogram of skeletonNumElementsSortedList
(skeletonNumElementsList minus the maximum value which
corresponds to the largest connected element of the skeleton) we get the
size of the smallest elements of the skeleton, which will likely
correspond to small isolated convergent areas. These elements will be
excluded from the search of end points.
"""
print 'Counting the number of elements of each connected component'
lbls = np.arange(1, skeletonNumConnectedComponentsList+1)
skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
skeletonLabeledArray,\
lbls,np.count_nonzero,\
int,0)
skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
histarray,skeletonNumElementsHistogramX=np.histogram(\
skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
int(np.floor(np.sqrt(len(skeletonNumElementsSortedList)))))
if defaults.doPlot == 1:
raster_plot(skeletonLabeledArray, 'Skeleton Labeled Array elements Array')
# Create skeleton gridded array
skeleton_label_set, label_indices = np.unique(skeletonLabeledArray, return_inverse=True)
skeletonNumElementsGriddedArray = np.array([skeletonLabeledArrayNumtuple[x-1] for x in skeleton_label_set])[label_indices].reshape(skeletonLabeledArray.shape)
if defaults.doPlot == 1:
raster_plot(skeletonNumElementsGriddedArray,
'Skeleton Num elements Array')
# Elements smaller than skeletonNumElementsThreshold are not considered in the
# skeletonEndPointsList detection
skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
print 'skeletonNumElementsThreshold',str(skeletonNumElementsThreshold)
# Scan the array for finding the channel heads
print 'Continuing to locate skeleton endpoints'
skeletonEndPointsList = []
nrows = skeletonFromFlowAndCurvatureArray.shape[0]
ncols = skeletonFromFlowAndCurvatureArray.shape[1]
for i in range(nrows):
for j in range(ncols):
if skeletonLabeledArray[i,j]!=0 \
and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
# Define search box and ensure it fits within the DTM bounds
my = i-1
py = nrows-i
mx = j-1
px = ncols-j
xMinus = np.min([defaults.endPointSearchBoxSize, mx])
xPlus = np.min([defaults.endPointSearchBoxSize, px])
yMinus = np.min([defaults.endPointSearchBoxSize, my])
yPlus = np.min([defaults.endPointSearchBoxSize, py])
# Extract the geodesic distances geodesicDistanceArray for pixels within the search box
searchGeodesicDistanceBox = geodesicDistanceArray[i-yMinus:i+yPlus, j-xMinus:j+xPlus]
# Extract the skeleton labels for pixels within the search box
searchLabeledSkeletonBox = skeletonLabeledArray[i-yMinus:i+yPlus, j-xMinus:j+xPlus]
# Look in the search box for skeleton points with the same label
# and greater geodesic distance than the current pixel at (i,j)
# - if there are none, then add the current point as a channel head
v = searchLabeledSkeletonBox==skeletonLabeledArray[i,j]
v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i,j]
v3 = np.where(np.any(v1==True,axis=0))
if len(v3[0])==0:
skeletonEndPointsList.append([i,j])
# For loop ends here
skeletonEndPointsListArray = np.transpose(skeletonEndPointsList)
if defaults.doPlot == 1:
raster_point_plot(skeletonFromFlowAndCurvatureArray, skeletonEndPointsListArray,
'Skeleton Num elements Array with channel heads', cm.binary, 'ro')
if defaults.doPlot == 1:
raster_point_plot(geodesicDistanceArray, skeletonEndPointsListArray,
'Geodesic distance Array with channel heads', cm.coolwarm, 'ro')
xx = skeletonEndPointsListArray[1]
yy = skeletonEndPointsListArray[0]
# Write shapefiles of channel heads
write_drainage_nodes(xx,yy,"ChannelHead",
Parameters.pointFileName,Parameters.pointshapefileName)
# Write raster of channel heads
channelheadArray = np.zeros((geodesicDistanceArray.shape))
channelheadArray[skeletonEndPointsListArray[0],
skeletonEndPointsListArray[1]] = 1
outfilepath = Parameters.geonetResultsDir
demName = Parameters.demFileName
outfilename = demName.split('.')[0]+'_channelHeads.tif'
write_geotif_generic(channelheadArray,\
outfilepath,outfilename)
return xx, yy
def main():
outfilepath = Parameters.geonetResultsDir
demName = Parameters.demFileName.split('.')[0]
skeleton_filename = demName+'_skeleton.tif'
skeletonFromFlowAndCurvatureArray = read_geotif_generic(outfilepath, skeleton_filename)
geodesic_filename = demName+'_geodesicDistance.tif'
geodesicDistanceArray = read_geotif_generic(outfilepath, geodesic_filename)
Channel_Head_Definition(skeletonFromFlowAndCurvatureArray, geodesicDistanceArray)
if __name__ == '__main__':
t0 = clock()
main()
t1 = clock()
print "time taken to complete channel head definition:", t1-t0, " seconds" | pygeonet_channel_head_definition.py | import numpy as np
from scipy import ndimage
from time import clock
from pygeonet_rasterio import *
from pygeonet_vectorio import *
from pygeonet_plot import *
def Channel_Head_Definition(skeletonFromFlowAndCurvatureArray, geodesicDistanceArray):
# Locating end points
print 'Locating skeleton end points'
structure = np.ones((3, 3))
skeletonLabeledArray, skeletonNumConnectedComponentsList =\
ndimage.label(skeletonFromFlowAndCurvatureArray,
structure=structure)
"""
Through the histogram of skeletonNumElementsSortedList
(skeletonNumElementsList minus the maximum value which
corresponds to the largest connected element of the skeleton) we get the
size of the smallest elements of the skeleton, which will likely
correspond to small isolated convergent areas. These elements will be
excluded from the search of end points.
"""
print 'Counting the number of elements of each connected component'
lbls = np.arange(1, skeletonNumConnectedComponentsList+1)
skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
skeletonLabeledArray,\
lbls,np.count_nonzero,\
int,0)
skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
histarray,skeletonNumElementsHistogramX=np.histogram(\
skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
int(np.floor(np.sqrt(len(skeletonNumElementsSortedList)))))
if defaults.doPlot == 1:
raster_plot(skeletonLabeledArray, 'Skeleton Labeled Array elements Array')
# Create skeleton gridded array
skeleton_label_set, label_indices = np.unique(skeletonLabeledArray, return_inverse=True)
skeletonNumElementsGriddedArray = np.array([skeletonLabeledArrayNumtuple[x-1] for x in skeleton_label_set])[label_indices].reshape(skeletonLabeledArray.shape)
if defaults.doPlot == 1:
raster_plot(skeletonNumElementsGriddedArray,
'Skeleton Num elements Array')
# Elements smaller than skeletonNumElementsThreshold are not considered in the
# skeletonEndPointsList detection
skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
print 'skeletonNumElementsThreshold',str(skeletonNumElementsThreshold)
# Scan the array for finding the channel heads
print 'Continuing to locate skeleton endpoints'
skeletonEndPointsList = []
nrows = skeletonFromFlowAndCurvatureArray.shape[0]
ncols = skeletonFromFlowAndCurvatureArray.shape[1]
for i in range(nrows):
for j in range(ncols):
if skeletonLabeledArray[i,j]!=0 \
and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
# Define search box and ensure it fits within the DTM bounds
my = i-1
py = nrows-i
mx = j-1
px = ncols-j
xMinus = np.min([defaults.endPointSearchBoxSize, mx])
xPlus = np.min([defaults.endPointSearchBoxSize, px])
yMinus = np.min([defaults.endPointSearchBoxSize, my])
yPlus = np.min([defaults.endPointSearchBoxSize, py])
# Extract the geodesic distances geodesicDistanceArray for pixels within the search box
searchGeodesicDistanceBox = geodesicDistanceArray[i-yMinus:i+yPlus, j-xMinus:j+xPlus]
# Extract the skeleton labels for pixels within the search box
searchLabeledSkeletonBox = skeletonLabeledArray[i-yMinus:i+yPlus, j-xMinus:j+xPlus]
# Look in the search box for skeleton points with the same label
# and greater geodesic distance than the current pixel at (i,j)
# - if there are none, then add the current point as a channel head
v = searchLabeledSkeletonBox==skeletonLabeledArray[i,j]
v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i,j]
v3 = np.where(np.any(v1==True,axis=0))
if len(v3[0])==0:
skeletonEndPointsList.append([i,j])
# For loop ends here
skeletonEndPointsListArray = np.transpose(skeletonEndPointsList)
if defaults.doPlot == 1:
raster_point_plot(skeletonFromFlowAndCurvatureArray, skeletonEndPointsListArray,
'Skeleton Num elements Array with channel heads', cm.binary, 'ro')
if defaults.doPlot == 1:
raster_point_plot(geodesicDistanceArray, skeletonEndPointsListArray,
'Geodesic distance Array with channel heads', cm.coolwarm, 'ro')
xx = skeletonEndPointsListArray[1]
yy = skeletonEndPointsListArray[0]
# Write shapefiles of channel heads
write_drainage_nodes(xx,yy,"ChannelHead",
Parameters.pointFileName,Parameters.pointshapefileName)
# Write raster of channel heads
channelheadArray = np.zeros((geodesicDistanceArray.shape))
channelheadArray[skeletonEndPointsListArray[0],
skeletonEndPointsListArray[1]] = 1
outfilepath = Parameters.geonetResultsDir
demName = Parameters.demFileName
outfilename = demName.split('.')[0]+'_channelHeads.tif'
write_geotif_generic(channelheadArray,\
outfilepath,outfilename)
return xx, yy
def main():
outfilepath = Parameters.geonetResultsDir
demName = Parameters.demFileName.split('.')[0]
skeleton_filename = demName+'_skeleton.tif'
skeletonFromFlowAndCurvatureArray = read_geotif_generic(outfilepath, skeleton_filename)
geodesic_filename = demName+'_geodesicDistance.tif'
geodesicDistanceArray = read_geotif_generic(outfilepath, geodesic_filename)
Channel_Head_Definition(skeletonFromFlowAndCurvatureArray, geodesicDistanceArray)
if __name__ == '__main__':
t0 = clock()
main()
t1 = clock()
print "time taken to complete channel head definition:", t1-t0, " seconds" | 0.479504 | 0.521167 |
import json
from itertools import combinations
from math import log
import scipy.interpolate
from pymatgen.entries.computed_entries import ComputedEntry
from s4.data import open_data
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__maintainer__ = '<NAME>'
__all__ = [
'finite_dg_correction',
]
with open_data('Element_mass.json') as _f:
element_masses = json.load(_f)
with open_data('Element_G.json') as _f:
element_g = json.load(_f)
_interp_x, _interp_y = zip(*element_g.items())
_interp_x = list(map(float, _interp_x))
element_g_interp = {
el: scipy.interpolate.interp1d(_interp_x, [_t[el] for _t in _interp_y], kind='quadratic')
for el in _interp_y[0]
}
def finite_dg_correction(mp_entry: ComputedEntry, temperature: float, dhf: float) -> float:
"""
Compute finite-temperature :math:`dG(T)` correction using Chris Bartel's method,
see [Chris2018]_.
:param mp_entry: The entry to a Materials Project entry, which must contain the
volume of the structure.
:param temperature: Finite temperature for which :math:`dG(T)` is approximated.
:param dhf: Zero-temperature formation enthalpy.
:returns: Interpolated gibbs energy of formation at finite temperature.
.. [Chris2018] Bartel, <NAME>., et al. "Physical descriptor for the Gibbs energy
of inorganic crystalline solids and temperature-dependent materials chemistry."
Nature communications 9.1 (2018): 1-10.
"""
comp = mp_entry.composition
natom = sum(comp.values())
reduced_mass_sum = 0
for element_a, element_b in combinations(comp.keys(), 2):
element_a, element_b = element_a.symbol, element_b.symbol
reduced_mass = element_masses[element_a] * element_masses[element_b] / \
(element_masses[element_a] + element_masses[element_b])
weight = comp[element_a] + comp[element_b]
reduced_mass_sum += weight * reduced_mass
reduced_mass_sum /= (len(comp) - 1) * natom
vol = mp_entry.data['volume'] / natom
gdelta = (
(-2.48e-4 * log(vol) - 8.94e-5 * reduced_mass_sum / vol) * temperature
+ 0.181 * log(temperature) - 0.882
)
refs = 0
for element, fraction in comp.items():
refs += element_g_interp[element.symbol](temperature) * fraction / natom
return dhf + gdelta - refs | s4/thermo/calc/finite_g.py | import json
from itertools import combinations
from math import log
import scipy.interpolate
from pymatgen.entries.computed_entries import ComputedEntry
from s4.data import open_data
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__maintainer__ = '<NAME>'
__all__ = [
'finite_dg_correction',
]
with open_data('Element_mass.json') as _f:
element_masses = json.load(_f)
with open_data('Element_G.json') as _f:
element_g = json.load(_f)
_interp_x, _interp_y = zip(*element_g.items())
_interp_x = list(map(float, _interp_x))
element_g_interp = {
el: scipy.interpolate.interp1d(_interp_x, [_t[el] for _t in _interp_y], kind='quadratic')
for el in _interp_y[0]
}
def finite_dg_correction(mp_entry: ComputedEntry, temperature: float, dhf: float) -> float:
"""
Compute finite-temperature :math:`dG(T)` correction using Chris Bartel's method,
see [Chris2018]_.
:param mp_entry: The entry to a Materials Project entry, which must contain the
volume of the structure.
:param temperature: Finite temperature for which :math:`dG(T)` is approximated.
:param dhf: Zero-temperature formation enthalpy.
:returns: Interpolated gibbs energy of formation at finite temperature.
.. [Chris2018] Bartel, <NAME>., et al. "Physical descriptor for the Gibbs energy
of inorganic crystalline solids and temperature-dependent materials chemistry."
Nature communications 9.1 (2018): 1-10.
"""
comp = mp_entry.composition
natom = sum(comp.values())
reduced_mass_sum = 0
for element_a, element_b in combinations(comp.keys(), 2):
element_a, element_b = element_a.symbol, element_b.symbol
reduced_mass = element_masses[element_a] * element_masses[element_b] / \
(element_masses[element_a] + element_masses[element_b])
weight = comp[element_a] + comp[element_b]
reduced_mass_sum += weight * reduced_mass
reduced_mass_sum /= (len(comp) - 1) * natom
vol = mp_entry.data['volume'] / natom
gdelta = (
(-2.48e-4 * log(vol) - 8.94e-5 * reduced_mass_sum / vol) * temperature
+ 0.181 * log(temperature) - 0.882
)
refs = 0
for element, fraction in comp.items():
refs += element_g_interp[element.symbol](temperature) * fraction / natom
return dhf + gdelta - refs | 0.744471 | 0.263671 |
import unicodedata
combining = set()
col_widths = [7, 54, 20]
rows = [['MacRom', 'UTF-8 NFC', 'UTF-8 NFD']]
for i in range(256):
rows.append(['[%02X]' % i])
for form in ('NFC', 'NFD'):
unistr = bytes([i]).decode('mac_roman')
unistr = unicodedata.normalize(form, unistr)
codepoints = []
if len(unistr) > 1: combining.add(unistr)
for cp in unistr:
utf8hex = cp.encode('utf-8').hex().upper()
name = unicodedata.name(cp, 'U+%04X' % ord(cp))
codepoints.append(f'[{utf8hex}] {name}')
rows[-1].append(' + '.join(codepoints))
for row in rows:
accum = ''
for wid, col in zip(col_widths, row):
accum += (col + ' ').ljust(wid)
accum = accum.rstrip()
print(accum)
thelist = {}
for pair in combining:
thelist.setdefault(pair[1], []).append(pair[0])
for combining, bases in thelist.items():
print(f'case {hex(ord(combining))}: // {unicodedata.name(combining)}')
print(' switch mac[-1] {')
for base in sorted(bases, key=ord):
better = unicodedata.normalize('NFC', base + combining).encode('mac_roman')[0]
print(f' case \'{base}\':')
print(f' mac [-1] = {hex(better)}')
print(' default:')
print(' goto fail')
print(' }')
print(' continue')
transtable = [
0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700,
0x0800, 0x0900, 0x0a00, 0x0b00, 0x0c00, 0x0d00, 0x0e00, 0x0f00,
0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700,
0x1800, 0x1900, 0x1a00, 0x1b00, 0x1c00, 0x1d00, 0x1e00, 0x1f00,
0x2000, 0x2100, 0x2200, 0x2300, 0x2400, 0x2500, 0x2600, 0x2700,
0x2800, 0x2900, 0x2a00, 0x2b00, 0x2c00, 0x2d00, 0x2e00, 0x2f00,
0x3000, 0x3100, 0x3200, 0x3300, 0x3400, 0x3500, 0x3600, 0x3700,
0x3800, 0x3900, 0x3a00, 0x3b00, 0x3c00, 0x3d00, 0x3e00, 0x3f00,
0x4000, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700,
0x4800, 0x4900, 0x4a00, 0x4b00, 0x4c00, 0x4d00, 0x4e00, 0x4f00,
0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700,
0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, 0x5e00, 0x5f00,
0x6100, 0x4180, 0x4280, 0x4380, 0x4480, 0x4580, 0x4680, 0x4780,
0x4880, 0x4980, 0x4a80, 0x4b80, 0x4c80, 0x4d80, 0x4e80, 0x4f80,
0x5080, 0x5180, 0x5280, 0x5380, 0x5480, 0x5580, 0x5680, 0x5780,
0x5880, 0x5980, 0x5a80, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00,
0x4108, 0x410c, 0x4310, 0x4502, 0x4e0a, 0x4f08, 0x5508, 0x4182,
0x4184, 0x4186, 0x4188, 0x418a, 0x418c, 0x4390, 0x4582, 0x4584,
0x4586, 0x4588, 0x4982, 0x4984, 0x4986, 0x4988, 0x4e8a, 0x4f82,
0x4f84, 0x4f86, 0x4f88, 0x4f8a, 0x5582, 0x5584, 0x5586, 0x5588,
0xa000, 0xa100, 0xa200, 0xa300, 0xa400, 0xa500, 0xa600, 0x5382,
0xa800, 0xa900, 0xaa00, 0xab00, 0xac00, 0xad00, 0x4114, 0x4f0e,
0xb000, 0xb100, 0xb200, 0xb300, 0xb400, 0xb500, 0xb600, 0xb700,
0xb800, 0xb900, 0xba00, 0x4192, 0x4f92, 0xbd00, 0x4194, 0x4f8e,
0xc000, 0xc100, 0xc200, 0xc300, 0xc400, 0xc500, 0xc600, 0x2206,
0x2208, 0xc900, 0x2000, 0x4104, 0x410a, 0x4f0a, 0x4f14, 0x4f94,
0xd000, 0xd100, 0x2202, 0x2204, 0x2702, 0x2704, 0xd600, 0xd700,
0x5988, 0xd900, 0xda00, 0xdb00, 0xdc00, 0xdd00, 0xde00, 0xdf00,
0xe000, 0xe100, 0xe200, 0xe300, 0xe400, 0xe500, 0xe600, 0xe700,
0xe800, 0xe900, 0xea00, 0xeb00, 0xec00, 0xed00, 0xee00, 0xef00,
0xf000, 0xf100, 0xf200, 0xf300, 0xf400, 0xf500, 0xf600, 0xf700,
0xf800, 0xf900, 0xfa00, 0xfb00, 0xfc00, 0xfd00, 0xfe00, 0xff00,
]
idxlist = sorted(transtable)
print(['0x%02x' % (idxlist.index(n)) for n in transtable]) | MacRomanExploration.py |
import unicodedata
combining = set()
col_widths = [7, 54, 20]
rows = [['MacRom', 'UTF-8 NFC', 'UTF-8 NFD']]
for i in range(256):
rows.append(['[%02X]' % i])
for form in ('NFC', 'NFD'):
unistr = bytes([i]).decode('mac_roman')
unistr = unicodedata.normalize(form, unistr)
codepoints = []
if len(unistr) > 1: combining.add(unistr)
for cp in unistr:
utf8hex = cp.encode('utf-8').hex().upper()
name = unicodedata.name(cp, 'U+%04X' % ord(cp))
codepoints.append(f'[{utf8hex}] {name}')
rows[-1].append(' + '.join(codepoints))
for row in rows:
accum = ''
for wid, col in zip(col_widths, row):
accum += (col + ' ').ljust(wid)
accum = accum.rstrip()
print(accum)
thelist = {}
for pair in combining:
thelist.setdefault(pair[1], []).append(pair[0])
for combining, bases in thelist.items():
print(f'case {hex(ord(combining))}: // {unicodedata.name(combining)}')
print(' switch mac[-1] {')
for base in sorted(bases, key=ord):
better = unicodedata.normalize('NFC', base + combining).encode('mac_roman')[0]
print(f' case \'{base}\':')
print(f' mac [-1] = {hex(better)}')
print(' default:')
print(' goto fail')
print(' }')
print(' continue')
transtable = [
0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700,
0x0800, 0x0900, 0x0a00, 0x0b00, 0x0c00, 0x0d00, 0x0e00, 0x0f00,
0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700,
0x1800, 0x1900, 0x1a00, 0x1b00, 0x1c00, 0x1d00, 0x1e00, 0x1f00,
0x2000, 0x2100, 0x2200, 0x2300, 0x2400, 0x2500, 0x2600, 0x2700,
0x2800, 0x2900, 0x2a00, 0x2b00, 0x2c00, 0x2d00, 0x2e00, 0x2f00,
0x3000, 0x3100, 0x3200, 0x3300, 0x3400, 0x3500, 0x3600, 0x3700,
0x3800, 0x3900, 0x3a00, 0x3b00, 0x3c00, 0x3d00, 0x3e00, 0x3f00,
0x4000, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700,
0x4800, 0x4900, 0x4a00, 0x4b00, 0x4c00, 0x4d00, 0x4e00, 0x4f00,
0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700,
0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, 0x5e00, 0x5f00,
0x6100, 0x4180, 0x4280, 0x4380, 0x4480, 0x4580, 0x4680, 0x4780,
0x4880, 0x4980, 0x4a80, 0x4b80, 0x4c80, 0x4d80, 0x4e80, 0x4f80,
0x5080, 0x5180, 0x5280, 0x5380, 0x5480, 0x5580, 0x5680, 0x5780,
0x5880, 0x5980, 0x5a80, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00,
0x4108, 0x410c, 0x4310, 0x4502, 0x4e0a, 0x4f08, 0x5508, 0x4182,
0x4184, 0x4186, 0x4188, 0x418a, 0x418c, 0x4390, 0x4582, 0x4584,
0x4586, 0x4588, 0x4982, 0x4984, 0x4986, 0x4988, 0x4e8a, 0x4f82,
0x4f84, 0x4f86, 0x4f88, 0x4f8a, 0x5582, 0x5584, 0x5586, 0x5588,
0xa000, 0xa100, 0xa200, 0xa300, 0xa400, 0xa500, 0xa600, 0x5382,
0xa800, 0xa900, 0xaa00, 0xab00, 0xac00, 0xad00, 0x4114, 0x4f0e,
0xb000, 0xb100, 0xb200, 0xb300, 0xb400, 0xb500, 0xb600, 0xb700,
0xb800, 0xb900, 0xba00, 0x4192, 0x4f92, 0xbd00, 0x4194, 0x4f8e,
0xc000, 0xc100, 0xc200, 0xc300, 0xc400, 0xc500, 0xc600, 0x2206,
0x2208, 0xc900, 0x2000, 0x4104, 0x410a, 0x4f0a, 0x4f14, 0x4f94,
0xd000, 0xd100, 0x2202, 0x2204, 0x2702, 0x2704, 0xd600, 0xd700,
0x5988, 0xd900, 0xda00, 0xdb00, 0xdc00, 0xdd00, 0xde00, 0xdf00,
0xe000, 0xe100, 0xe200, 0xe300, 0xe400, 0xe500, 0xe600, 0xe700,
0xe800, 0xe900, 0xea00, 0xeb00, 0xec00, 0xed00, 0xee00, 0xef00,
0xf000, 0xf100, 0xf200, 0xf300, 0xf400, 0xf500, 0xf600, 0xf700,
0xf800, 0xf900, 0xfa00, 0xfb00, 0xfc00, 0xfd00, 0xfe00, 0xff00,
]
idxlist = sorted(transtable)
print(['0x%02x' % (idxlist.index(n)) for n in transtable]) | 0.094278 | 0.503113 |
import math
import time
t1 = time.time()
size = 2000
sizet = size*size
s = [0]*sizet
for k in range(1,56):
s[k-1] = (100003-200003*k+300007*k*k*k)%1000000-500000
for k in range(56,4000001):
s[k-1] = (s[k-1-24]+s[k-1-55]+1000000)%1000000-500000
#print(s[10-1],s[100-1])
'''
# test case
s = [-2,5,3,2,9,-6,5,1,3,2,7,3,-1,8,-4,8]
'''
def getrc(n):
return [n//size,n%size]
def ton(r,c):
return r*size+c
# 1-dimension solution
def getla(tset):
maxSoFar = 0
maxToHere = 0
for i in tset:
maxToHere = max(maxToHere+i,0)
maxSoFar = max(maxToHere,maxSoFar)
return maxSoFar
la = 0
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(size):
maxToHere = max(maxToHere+s[ton(i,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for j in range(size):
maxSoFar = 0
maxToHere = 0
for i in range(size):
maxToHere = max(maxToHere+s[ton(i,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(i+1):
maxToHere = max(maxToHere+s[ton(i-j,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(1,size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(size-1-j,i+j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(j,i+j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(1,size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(i+j,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
print(la)
print("time:",time.time()-t1) | Problem 001-150 Python/pb149.py | import math
import time
t1 = time.time()
size = 2000
sizet = size*size
s = [0]*sizet
for k in range(1,56):
s[k-1] = (100003-200003*k+300007*k*k*k)%1000000-500000
for k in range(56,4000001):
s[k-1] = (s[k-1-24]+s[k-1-55]+1000000)%1000000-500000
#print(s[10-1],s[100-1])
'''
# test case
s = [-2,5,3,2,9,-6,5,1,3,2,7,3,-1,8,-4,8]
'''
def getrc(n):
return [n//size,n%size]
def ton(r,c):
return r*size+c
# 1-dimension solution
def getla(tset):
maxSoFar = 0
maxToHere = 0
for i in tset:
maxToHere = max(maxToHere+i,0)
maxSoFar = max(maxToHere,maxSoFar)
return maxSoFar
la = 0
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(size):
maxToHere = max(maxToHere+s[ton(i,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for j in range(size):
maxSoFar = 0
maxToHere = 0
for i in range(size):
maxToHere = max(maxToHere+s[ton(i,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(i+1):
maxToHere = max(maxToHere+s[ton(i-j,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(1,size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(size-1-j,i+j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(j,i+j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
for i in range(1,size):
maxSoFar = 0
maxToHere = 0
for j in range(size-i):
maxToHere = max(maxToHere+s[ton(i+j,j)],0)
maxSoFar = max(maxToHere,maxSoFar)
if maxSoFar > la:
la = maxSoFar
print(la)
print("time:",time.time()-t1) | 0.07107 | 0.239161 |
from datetime import datetime
from flask import request
from flask_restx import Resource
import json
from io import StringIO
import boto3
import pandas as pd
import numpy as np
from .security import require_auth
from . import api_rest
class SecureResource(Resource):
""" Calls require_auth decorator on all requests """
method_decorators = [require_auth]
@api_rest.route('/resource/<string:resource_id>')
class ResourceOne(Resource):
""" Unsecure Resource Class: Inherit from Resource """
def get(self, resource_id):
timestamp = datetime.utcnow().isoformat()
return {'timestamp': timestamp}
def post(self, resource_id):
json_payload = request.json
return {'timestamp': json_payload}, 201
@api_rest.route('/secure-resource/<string:resource_id>')
class SecureResourceOne(Resource):
""" Unsecure Resource Class: Inherit from Resource """
def get(self, resource_id):
timestamp = datetime.utcnow().isoformat()
return {'timestamp': timestamp}
@api_rest.route('/price-elasticity/roots', defaults={'ticket_type': None, 'season': None, 'workday': None, 'intercept': 0})
@api_rest.route('/price-elasticity/roots/<string:ticket_type>/<string:season>/<string:workday>/<int:intercept>/<string:pc>/<string:qt>')
class PriceElasticiyRoots(Resource):
def get(self, ticket_type, season, workday, intercept, pc, qt):
from app.price_elasticity import price
from app.model.table import Config
data = price.get_data(ticket_type=ticket_type,
season=season, workday=workday)
res = []
for t in data.ticket_type.unique():
for s in data.season.unique():
for w in data.workday.unique():
print([t, s, w, intercept])
df = data[data.ticket_type == t]
df = df[df.workday == w]
df = df[df.season == s]
if not df.empty:
try:
bins = int(Config.query.filter_by(
config_name='pe_bins').first().config_value)
df = price.prep_data(df, bins)
model = price.get_model(df, bool(int(intercept)))
print(model.summary())
if pc == 'all' and qt == 'all':
p, q = price.get_extrenum(model)
else:
if qt != 'all':
try:
q = float(qt)
print(qt)
a = model.params.get(
'np.square(average_price)')
b = model.params.get('average_price')
c = model.params.get('Intercept')
if c is None:
c = 0
c = c - np.log(q)
x1, x2 = price.get_roots(a, b, c)
if np.isnan(x1) or np.isnan(x2):
p = 'Количество больше экстренума модели'
else:
p = f'{round(x1, 2)} - {round(x2,2 )}'
except Exception as e:
print(e)
q = qt
p = 'Ошибка'
elif pc != 'all':
try:
pc = float(pc)
p = pc
q = round(
float(np.e ** model.predict({'average_price': p})), 2)
except:
p = pc
q = 'Ошибка'
except Exception:
p, q = ('Ошибка', 'Ошибка')
res.append({'type': t, 'season': s, 'workday': str(w),
'p': str(p), 'q': str(q), 'adj_r': str(getattr(model, 'rsquared_adj', 'Ошибка'))})
else:
res.append({'type': t, 'season': s, 'workday': str(w),
'p': 'Недостаточно данных',
'q': 'Недостатчно данных',
'adj_r': 'Недостаточно данных'})
return {'status': 'OK', 'message': res}
@api_rest.route('/price-elasticity/data')
class PriceElasticityData(Resource):
def post(self):
print('Uploading the file to s3')
from app.model.table import Config
from app import db
f = request.files['file']
df = pd.read_excel(f)
# get config values (prob should do single table read, but the table is not big
# enough to see significant performance increase)
high = Config.query.filter_by(
config_name='pe_season_high').first().config_value
weak = Config.query.filter_by(
config_name='pe_season_weak').first().config_value
price_col = Config.query.filter_by(
config_name='pe_price_column').first().config_value
type_col = Config.query.filter_by(
config_name='pe_ticket_type_column').first().config_value
quantity_col = Config.query.filter_by(
config_name='pe_quantity_column').first().config_value
date_col = Config.query.filter_by(
config_name='pe_date_column').first().config_value
bucket_name = Config.query.filter_by(
config_name='bucket_name').first().config_value
df = df[[type_col, date_col, price_col, quantity_col]]
pe_season_high = json.loads(high)
pe_season_weak = json.loads(weak)
def check_season(x):
if x.month in pe_season_high:
return 'high'
elif x.month in pe_season_weak:
return 'mid'
else:
return 'low'
df['season'] = df[date_col].apply(check_season)
df['day_week'] = df[date_col].apply(lambda x: x.weekday())
df['workday'] = df.day_week.apply(lambda x: 1 if x < 5 else 0)
df['year'] = df[date_col].apply(lambda x: x.year)
df.columns = ['ticket_type', 'date', 'price',
'qt', 'season', 'day_week', 'workday', 'year']
# save data on s3 in csv format
csv_buffer = StringIO()
df.to_csv(csv_buffer, index=False)
s3_resource = boto3.resource('s3')
# deleting file
try:
s3_resource.Object(bucket_name, 'pe_data.csv').delete()
except:
print('File did not exist')
s3_bucket = s3_resource.Bucket(bucket_name)
s3_bucket.put_object(
Key='pe_data.csv', Body=csv_buffer.getvalue(), ACL='public-read', )
print('DONE!')
return {'status': 'OK', 'message': 'OK'}
@api_rest.route('/price-elasticity/ticket-types')
class PriceElasticityTicketTypes(Resource):
def get(self):
from app.price_elasticity import price
df = price.get_data('all', 'all', 'all')
ticket_types = df.ticket_type.unique()
return {'status': 'OK', 'message': list(ticket_types)}
@api_rest.route('/price-elasticity/config')
class PriceElasticityConfig(Resource):
def get(self):
from app.model.table import Config
configs = Config.query.all()
config_values = {}
for c in configs:
config_values[c.config_name] = c.config_value
return {'status': 'OK', 'message': config_values}
def post(self):
from app.model.table import Config
print(request.get_json())
payload = request.json
for key, value in payload.items():
c = Config.query.filter_by(config_name=key).first()
c.config_value = value
c.update()
return {'status': 'OK', 'message': {}} | app/api/resources.py | from datetime import datetime
from flask import request
from flask_restx import Resource
import json
from io import StringIO
import boto3
import pandas as pd
import numpy as np
from .security import require_auth
from . import api_rest
class SecureResource(Resource):
""" Calls require_auth decorator on all requests """
method_decorators = [require_auth]
@api_rest.route('/resource/<string:resource_id>')
class ResourceOne(Resource):
""" Unsecure Resource Class: Inherit from Resource """
def get(self, resource_id):
timestamp = datetime.utcnow().isoformat()
return {'timestamp': timestamp}
def post(self, resource_id):
json_payload = request.json
return {'timestamp': json_payload}, 201
@api_rest.route('/secure-resource/<string:resource_id>')
class SecureResourceOne(Resource):
""" Unsecure Resource Class: Inherit from Resource """
def get(self, resource_id):
timestamp = datetime.utcnow().isoformat()
return {'timestamp': timestamp}
@api_rest.route('/price-elasticity/roots', defaults={'ticket_type': None, 'season': None, 'workday': None, 'intercept': 0, 'pc': 'all', 'qt': 'all'})
@api_rest.route('/price-elasticity/roots/<string:ticket_type>/<string:season>/<string:workday>/<int:intercept>/<string:pc>/<string:qt>')
class PriceElasticiyRoots(Resource):
    """Evaluate the price-elasticity model for every (type, season, workday) slice.

    ``pc``/``qt`` equal to 'all' mean "not fixed": with both 'all' the model
    extremum is reported; a numeric ``qt`` yields the price roots matching that
    quantity; a numeric ``pc`` yields the predicted quantity for that price.

    Bug fixes: the defaults route previously omitted ``pc``/``qt`` so the
    short URL raised TypeError on dispatch; ``model`` is now predefined so the
    ``adj_r`` lookup cannot raise NameError when fitting fails early.
    """

    def get(self, ticket_type, season, workday, intercept, pc, qt):
        from app.price_elasticity import price
        from app.model.table import Config
        # Pull the (optionally pre-filtered) observations once, then slice.
        data = price.get_data(ticket_type=ticket_type,
                              season=season, workday=workday)
        res = []
        for t in data.ticket_type.unique():
            for s in data.season.unique():
                for w in data.workday.unique():
                    print([t, s, w, intercept])
                    df = data[data.ticket_type == t]
                    df = df[df.workday == w]
                    df = df[df.season == s]
                    if not df.empty:
                        # Defined up-front so the getattr below is safe even
                        # when fitting raises before assignment.
                        model = None
                        try:
                            bins = int(Config.query.filter_by(
                                config_name='pe_bins').first().config_value)
                            df = price.prep_data(df, bins)
                            model = price.get_model(df, bool(int(intercept)))
                            print(model.summary())
                            if pc == 'all' and qt == 'all':
                                # Neither side fixed: report the model extremum.
                                p, q = price.get_extrenum(model)
                            else:
                                if qt != 'all':
                                    try:
                                        q = float(qt)
                                        print(qt)
                                        # Quadratic log-demand: solve
                                        # a*p^2 + b*p + (c - ln q) = 0 for price p.
                                        a = model.params.get(
                                            'np.square(average_price)')
                                        b = model.params.get('average_price')
                                        c = model.params.get('Intercept')
                                        if c is None:
                                            c = 0
                                        c = c - np.log(q)
                                        x1, x2 = price.get_roots(a, b, c)
                                        if np.isnan(x1) or np.isnan(x2):
                                            p = 'Количество больше экстренума модели'
                                        else:
                                            p = f'{round(x1, 2)} - {round(x2,2 )}'
                                    except Exception as e:
                                        print(e)
                                        q = qt
                                        p = 'Ошибка'
                                elif pc != 'all':
                                    try:
                                        pc = float(pc)
                                        p = pc
                                        # Model predicts log-quantity; exponentiate back.
                                        q = round(
                                            float(np.e ** model.predict({'average_price': p})), 2)
                                    except Exception:
                                        p = pc
                                        q = 'Ошибка'
                        except Exception:
                            p, q = ('Ошибка', 'Ошибка')
                        res.append({'type': t, 'season': s, 'workday': str(w),
                                    'p': str(p), 'q': str(q), 'adj_r': str(getattr(model, 'rsquared_adj', 'Ошибка'))})
                    else:
                        res.append({'type': t, 'season': s, 'workday': str(w),
                                    'p': 'Недостаточно данных',
                                    'q': 'Недостатчно данных',
                                    'adj_r': 'Недостаточно данных'})
        return {'status': 'OK', 'message': res}
@api_rest.route('/price-elasticity/data')
class PriceElasticityData(Resource):
    """Accept an Excel upload, normalise its columns, and publish it as CSV on S3."""

    def post(self):
        print('Uploading the file to s3')
        from app.model.table import Config
        from app import db  # NOTE(review): appears unused in this method — confirm
        f = request.files['file']
        df = pd.read_excel(f)
        # get config values (prob should do single table read, but the table is not big
        # enough to see significant performance increase)
        high = Config.query.filter_by(
            config_name='pe_season_high').first().config_value
        weak = Config.query.filter_by(
            config_name='pe_season_weak').first().config_value
        price_col = Config.query.filter_by(
            config_name='pe_price_column').first().config_value
        type_col = Config.query.filter_by(
            config_name='pe_ticket_type_column').first().config_value
        quantity_col = Config.query.filter_by(
            config_name='pe_quantity_column').first().config_value
        date_col = Config.query.filter_by(
            config_name='pe_date_column').first().config_value
        bucket_name = Config.query.filter_by(
            config_name='bucket_name').first().config_value
        # Keep only the configured columns, in a fixed canonical order.
        df = df[[type_col, date_col, price_col, quantity_col]]
        # Config stores the season month lists as JSON arrays of month numbers.
        pe_season_high = json.loads(high)
        pe_season_weak = json.loads(weak)

        def check_season(x):
            # NOTE(review): months in pe_season_weak map to 'mid' and all
            # others to 'low' — confirm the high/weak/mid naming is intended.
            if x.month in pe_season_high:
                return 'high'
            elif x.month in pe_season_weak:
                return 'mid'
            else:
                return 'low'
        df['season'] = df[date_col].apply(check_season)
        df['day_week'] = df[date_col].apply(lambda x: x.weekday())
        # Monday..Friday (weekday() < 5) count as workdays.
        df['workday'] = df.day_week.apply(lambda x: 1 if x < 5 else 0)
        df['year'] = df[date_col].apply(lambda x: x.year)
        df.columns = ['ticket_type', 'date', 'price',
                      'qt', 'season', 'day_week', 'workday', 'year']
        # save data on s3 in csv format
        csv_buffer = StringIO()
        df.to_csv(csv_buffer, index=False)
        s3_resource = boto3.resource('s3')
        # deleting file
        try:
            s3_resource.Object(bucket_name, 'pe_data.csv').delete()
        except:
            # Best-effort delete; the object may simply not exist yet.
            print('File did not exist')
        s3_bucket = s3_resource.Bucket(bucket_name)
        s3_bucket.put_object(
            Key='pe_data.csv', Body=csv_buffer.getvalue(), ACL='public-read', )
        print('DONE!')
        return {'status': 'OK', 'message': 'OK'}
@api_rest.route('/price-elasticity/ticket-types')
class PriceElasticityTicketTypes(Resource):
    """Expose the distinct ticket types present in the uploaded dataset."""

    def get(self):
        from app.price_elasticity import price
        # 'all'/'all'/'all' means no filtering on type, season or workday.
        frame = price.get_data('all', 'all', 'all')
        distinct_types = frame.ticket_type.unique()
        return {'status': 'OK', 'message': list(distinct_types)}
@api_rest.route('/price-elasticity/config')
class PriceElasticityConfig(Resource):
    """Read and update the key/value configuration rows used by the app."""

    def get(self):
        """Return every config row as a {config_name: config_value} mapping."""
        from app.model.table import Config
        configs = Config.query.all()
        config_values = {}
        for c in configs:
            config_values[c.config_name] = c.config_value
        return {'status': 'OK', 'message': config_values}

    def post(self):
        """Update existing config rows from the JSON payload.

        Bug fix: ``first()`` returns None for an unknown config name, which
        previously raised AttributeError (HTTP 500); unknown keys are now
        skipped.
        """
        from app.model.table import Config
        print(request.get_json())
        payload = request.json
        for key, value in payload.items():
            c = Config.query.filter_by(config_name=key).first()
            if c is None:
                continue  # no such config row; ignore instead of crashing
            c.config_value = value
            c.update()
        return {'status': 'OK', 'message': {}}
"""Create a new CA pool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.api_lib.privateca import request_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.privateca import flags_v1
from googlecloudsdk.command_lib.privateca import operations
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
  # The docstring below is rendered verbatim as gcloud help text; left unchanged.
  r"""Create a new CA Pool.

  ## EXAMPLES

  To create a CA pool in the dev ops tier:

      $ {command} my-pool --location=us-west1 \
          --tier=devops

  To create a CA pool and restrict what it can issue:

      $ {command} my-pool --location=us-west1 \
          --issuance-policy=policy.yaml

  To create a CA pool that doesn't publicly publish CA certificates and CRLs:

      $ {command} my-pool --location=us-west1 \
          --issuance-policy=policy.yaml \
          --no-publish-ca-cert \
          --no-publish-crl
  """

  @staticmethod
  def Args(parser):
    # Positional CA-pool resource plus the v1 pool-shaping flags and labels.
    resource_args.AddCaPoolPositionalResourceArg(parser, 'to create')
    flags_v1.AddTierFlag(parser)
    flags_v1.AddPublishCrlFlag(parser, use_update_help_text=True)
    flags_v1.AddPublishCaCertFlag(parser, use_update_help_text=True)
    flags_v1.AddCaPoolIssuancePolicyFlag(parser)
    labels_util.AddCreateLabelsFlags(parser)

  def Run(self, args):
    client = privateca_base.GetClientInstance('v1')
    messages = privateca_base.GetMessagesModule('v1')
    ca_pool_ref = args.CONCEPTS.ca_pool.Parse()
    # Translate the CLI flags into CaPool message fields.
    issuance_policy = flags_v1.ParseIssuancePolicy(args)
    publishing_options = flags_v1.ParsePublishingOptions(args)
    tier = flags_v1.ParseTierFlag(args)
    labels = labels_util.ParseCreateArgs(args, messages.CaPool.LabelsValue)
    new_ca_pool = messages.CaPool(
        issuancePolicy=issuance_policy,
        publishingOptions=publishing_options,
        tier=tier,
        labels=labels)
    # Create is a long-running operation; the request id makes retries
    # idempotent server-side.
    operation = client.projects_locations_caPools.Create(
        messages.PrivatecaProjectsLocationsCaPoolsCreateRequest(
            caPool=new_ca_pool,
            caPoolId=ca_pool_ref.Name(),
            parent=ca_pool_ref.Parent().RelativeName(),
            requestId=request_utils.GenerateRequestId()))
    # Block until the operation finishes, then unpack the resulting CaPool.
    ca_pool_response = operations.Await(
        operation, 'Creating CA Pool.', api_version='v1')
    ca_pool = operations.GetMessageFromResponse(ca_pool_response,
                                                messages.CaPool)
    log.status.Print('Created CA Pool [{}].'.format(ca_pool.name))
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.api_lib.privateca import request_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.privateca import flags_v1
from googlecloudsdk.command_lib.privateca import operations
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
  # The docstring below is rendered verbatim as gcloud help text; left unchanged.
  r"""Create a new CA Pool.

  ## EXAMPLES

  To create a CA pool in the dev ops tier:

      $ {command} my-pool --location=us-west1 \
          --tier=devops

  To create a CA pool and restrict what it can issue:

      $ {command} my-pool --location=us-west1 \
          --issuance-policy=policy.yaml

  To create a CA pool that doesn't publicly publish CA certificates and CRLs:

      $ {command} my-pool --location=us-west1 \
          --issuance-policy=policy.yaml \
          --no-publish-ca-cert \
          --no-publish-crl
  """

  @staticmethod
  def Args(parser):
    # Positional CA-pool resource plus the v1 pool-shaping flags and labels.
    resource_args.AddCaPoolPositionalResourceArg(parser, 'to create')
    flags_v1.AddTierFlag(parser)
    flags_v1.AddPublishCrlFlag(parser, use_update_help_text=True)
    flags_v1.AddPublishCaCertFlag(parser, use_update_help_text=True)
    flags_v1.AddCaPoolIssuancePolicyFlag(parser)
    labels_util.AddCreateLabelsFlags(parser)

  def Run(self, args):
    client = privateca_base.GetClientInstance('v1')
    messages = privateca_base.GetMessagesModule('v1')
    ca_pool_ref = args.CONCEPTS.ca_pool.Parse()
    # Translate the CLI flags into CaPool message fields.
    issuance_policy = flags_v1.ParseIssuancePolicy(args)
    publishing_options = flags_v1.ParsePublishingOptions(args)
    tier = flags_v1.ParseTierFlag(args)
    labels = labels_util.ParseCreateArgs(args, messages.CaPool.LabelsValue)
    new_ca_pool = messages.CaPool(
        issuancePolicy=issuance_policy,
        publishingOptions=publishing_options,
        tier=tier,
        labels=labels)
    # Create is a long-running operation; the request id makes retries
    # idempotent server-side.
    operation = client.projects_locations_caPools.Create(
        messages.PrivatecaProjectsLocationsCaPoolsCreateRequest(
            caPool=new_ca_pool,
            caPoolId=ca_pool_ref.Name(),
            parent=ca_pool_ref.Parent().RelativeName(),
            requestId=request_utils.GenerateRequestId()))
    # Block until the operation finishes, then unpack the resulting CaPool.
    ca_pool_response = operations.Await(
        operation, 'Creating CA Pool.', api_version='v1')
    ca_pool = operations.GetMessageFromResponse(ca_pool_response,
                                                messages.CaPool)
    log.status.Print('Created CA Pool [{}].'.format(ca_pool.name))
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...core.models import TimeStampedModel
from ...core.utils.slug import slugify_uniquely_for_queryset
from ..choices import RANK_OPTIONS
from ..mixins import DueDateMixin
from .. import models as proj_models
class IssueStatus(models.Model):
    """ Available status options for :model:Issue """
    name = models.CharField(
        max_length=100, null=False, blank=False,
        verbose_name=_("name"))
    # Auto-generated from name on save; unique within the project.
    slug = models.SlugField(
        max_length=100, null=False, blank=True,
        verbose_name=_("slug"))
    # Display ordering of statuses within a project (lower sorts first).
    order = models.IntegerField(
        default=10, null=False, blank=False,
        verbose_name=_("order"))
    # Whether issues in this status count as closed.
    is_closed = models.BooleanField(
        default=False, null=False, blank=True,
        verbose_name=_("is closed"))
    # Display colour for the status (hex string).
    color = models.CharField(
        max_length=20, null=False, blank=False, default="#999999",
        verbose_name=_("color"))
    project = models.ForeignKey(
        "Project", on_delete=models.CASCADE,
        null=False, blank=False,
        related_name="issue_statuses", verbose_name=_("project"))

    class Meta:
        verbose_name = "issue status"
        verbose_name_plural = "issue statuses"
        ordering = ["project", "order", "name"]
        unique_together = (("project", "name"), ("project", "slug"))

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Regenerate the slug from name, unique among this project's statuses."""
        qs = self.project.issue_statuses
        if self.id:
            # Exclude self so re-saving does not collide with its own slug.
            qs = qs.exclude(id=self.id)
        self.slug = slugify_uniquely_for_queryset(self.name, qs)
        return super().save(*args, **kwargs)
class IssueProgress(TimeStampedModel, models.Model):
    """ Model containing updates on :model:Issue resolution """
    issue = models.ForeignKey(
        'Issue', on_delete=models.CASCADE,
        related_name='progress_notes', )
    progress = models.TextField(
        null=True, blank=True,
        help_text=_('Update on issue resolution (may not be edited later).')
    )

    class Meta:
        # Newest notes first; `created` comes from TimeStampedModel.
        ordering = ['-created']
class Issue(TimeStampedModel, DueDateMixin, models.Model):
    """ Stores information about an Issue """
    project = models.ForeignKey(
        proj_models.Project, on_delete=models.CASCADE,
        related_name='issues')
    name = models.CharField(
        _('name'), max_length=250, )
    slug = models.SlugField(
        max_length=250, unique=True, blank=True,
        help_text=_('Used to create the Issue URL.'))
    description = models.TextField(
        _('description'),
        help_text=_('Detailed description of issue including effects on project.')
    )
    task = models.ManyToManyField(
        proj_models.Task, related_name='issues',
        help_text=_('Task(s) this issue affects or is related to.')
    )
    category = models.ManyToManyField(
        proj_models.Category, related_name='issues',
        verbose_name=_('categories'))
    impact = models.TextField(
        null=True, blank=True,
        verbose_name=_('project impact'),
        help_text=_('How will the issue impact the scope, schedule and/or cost of the project?')
    )
    # Rank value drawn from RANK_OPTIONS choices.
    importance = models.IntegerField(
        choices=RANK_OPTIONS, default=1,
        help_text=_('How CRITICAL is it to the project?'))
    urgency = models.IntegerField(
        choices=RANK_OPTIONS, default=1,
        help_text=_('How IMMEDIATELY is it needed?'))
    # NOTE(review): presumably derived from importance/urgency somewhere
    # outside this model — not calculated here; confirm.
    priority = models.IntegerField(
        null=True, help_text=_('(CALCULATED)'))
    status = models.ForeignKey(
        IssueStatus, on_delete=models.CASCADE,
        null=True, blank=True,
        related_name="issues", verbose_name=_("status"))
    owner = models.ForeignKey(
        proj_models.Stakeholder, on_delete=models.SET_NULL,
        null=True, blank=True, default=None,
        related_name='owned_issues', verbose_name=_('business owner'),
        help_text=_('Stakeholder affected by / knowledgeable about this issue.'))
    assigned_to = models.ForeignKey(
        proj_models.Stakeholder, on_delete=models.SET_NULL,
        blank=True, null=True, default=None,
        related_name="issues_assigned_to_me",
        verbose_name=_("assigned to"),
        help_text=_('Person accountable for resolution.'))
    resolution_plan = models.TextField(
        null=True, blank=True,
        help_text=_('Plan to resolve issue.'))
    resolved = models.DateField(
        null=True, blank=True, verbose_name=_('date completed'))

    class Meta:
        """ Issue model Meta """
        ordering = ['priority', 'project', 'name']
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...core.models import TimeStampedModel
from ...core.utils.slug import slugify_uniquely_for_queryset
from ..choices import RANK_OPTIONS
from ..mixins import DueDateMixin
from .. import models as proj_models
class IssueStatus(models.Model):
    """ Available status options for :model:Issue """
    name = models.CharField(
        max_length=100, null=False, blank=False,
        verbose_name=_("name"))
    # Auto-generated from name on save; unique within the project.
    slug = models.SlugField(
        max_length=100, null=False, blank=True,
        verbose_name=_("slug"))
    # Display ordering of statuses within a project (lower sorts first).
    order = models.IntegerField(
        default=10, null=False, blank=False,
        verbose_name=_("order"))
    # Whether issues in this status count as closed.
    is_closed = models.BooleanField(
        default=False, null=False, blank=True,
        verbose_name=_("is closed"))
    # Display colour for the status (hex string).
    color = models.CharField(
        max_length=20, null=False, blank=False, default="#999999",
        verbose_name=_("color"))
    project = models.ForeignKey(
        "Project", on_delete=models.CASCADE,
        null=False, blank=False,
        related_name="issue_statuses", verbose_name=_("project"))

    class Meta:
        verbose_name = "issue status"
        verbose_name_plural = "issue statuses"
        ordering = ["project", "order", "name"]
        unique_together = (("project", "name"), ("project", "slug"))

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Regenerate the slug from name, unique among this project's statuses."""
        qs = self.project.issue_statuses
        if self.id:
            # Exclude self so re-saving does not collide with its own slug.
            qs = qs.exclude(id=self.id)
        self.slug = slugify_uniquely_for_queryset(self.name, qs)
        return super().save(*args, **kwargs)
class IssueProgress(TimeStampedModel, models.Model):
    """ Model containing updates on :model:Issue resolution """
    issue = models.ForeignKey(
        'Issue', on_delete=models.CASCADE,
        related_name='progress_notes', )
    progress = models.TextField(
        null=True, blank=True,
        help_text=_('Update on issue resolution (may not be edited later).')
    )

    class Meta:
        # Newest notes first; `created` comes from TimeStampedModel.
        ordering = ['-created']
class Issue(TimeStampedModel, DueDateMixin, models.Model):
    """ Stores information about an Issue """
    project = models.ForeignKey(
        proj_models.Project, on_delete=models.CASCADE,
        related_name='issues')
    name = models.CharField(
        _('name'), max_length=250, )
    slug = models.SlugField(
        max_length=250, unique=True, blank=True,
        help_text=_('Used to create the Issue URL.'))
    description = models.TextField(
        _('description'),
        help_text=_('Detailed description of issue including effects on project.')
    )
    task = models.ManyToManyField(
        proj_models.Task, related_name='issues',
        help_text=_('Task(s) this issue affects or is related to.')
    )
    category = models.ManyToManyField(
        proj_models.Category, related_name='issues',
        verbose_name=_('categories'))
    impact = models.TextField(
        null=True, blank=True,
        verbose_name=_('project impact'),
        help_text=_('How will the issue impact the scope, schedule and/or cost of the project?')
    )
    # Rank value drawn from RANK_OPTIONS choices.
    importance = models.IntegerField(
        choices=RANK_OPTIONS, default=1,
        help_text=_('How CRITICAL is it to the project?'))
    urgency = models.IntegerField(
        choices=RANK_OPTIONS, default=1,
        help_text=_('How IMMEDIATELY is it needed?'))
    # NOTE(review): presumably derived from importance/urgency somewhere
    # outside this model — not calculated here; confirm.
    priority = models.IntegerField(
        null=True, help_text=_('(CALCULATED)'))
    status = models.ForeignKey(
        IssueStatus, on_delete=models.CASCADE,
        null=True, blank=True,
        related_name="issues", verbose_name=_("status"))
    owner = models.ForeignKey(
        proj_models.Stakeholder, on_delete=models.SET_NULL,
        null=True, blank=True, default=None,
        related_name='owned_issues', verbose_name=_('business owner'),
        help_text=_('Stakeholder affected by / knowledgeable about this issue.'))
    assigned_to = models.ForeignKey(
        proj_models.Stakeholder, on_delete=models.SET_NULL,
        blank=True, null=True, default=None,
        related_name="issues_assigned_to_me",
        verbose_name=_("assigned to"),
        help_text=_('Person accountable for resolution.'))
    resolution_plan = models.TextField(
        null=True, blank=True,
        help_text=_('Plan to resolve issue.'))
    resolved = models.DateField(
        null=True, blank=True, verbose_name=_('date completed'))

    class Meta:
        """ Issue model Meta """
        ordering = ['priority', 'project', 'name']
import time
import os
import mido
from mido import Message, MidiFile, MidiTrack, tempo2bpm
from pynput import keyboard
# Note-name -> MIDI note number.  Lower-case names with a digit suffix are
# octaves BELOW middle c ("c" == MIDI 60, so "c1" == 48 ... "c3" == 24);
# upper-case "C1".."C4" are octaves ABOVE it.  '+' = sharp, '-' = flat, so
# enharmonic pairs (e.g. "c+"/"d-") share one MIDI value.
key_dict = {
    # c4 (partial: only the top of the lowest octave is mapped)
    "a4+": 22, "b4-": 22, "b4": 23,
    # c3
    "c3": 24, "c3+": 25, "d3-": 25, "d3": 26, "d3+": 27, "e3-": 27, "e3": 28,
    "f3": 29, "f3+": 30, "g3-": 30, "g3": 31, "g3+": 32, "a3-": 32, "a3": 33,
    "a3+": 34, "b3-": 34, "b3": 35,
    # c2
    "c2": 36, "c2+": 37, "d2-": 37, "d2": 38, "d2+": 39, "e2-": 39, "e2": 40,
    "f2": 41, "f2+": 42, "g2-": 42, "g2": 43, "g2+": 44, "a2-": 44, "a2": 45,
    "a2+": 46, "b2-": 46, "b2": 47,
    # c1
    "c1": 48, "c1+": 49, "d1-": 49, "d1": 50, "d1+": 51, "e1-": 51, "e1": 52,
    "f1": 53, "f1+": 54, "g1-": 54, "g1": 55, "g1+": 56, "a1-": 56, "a1": 57,
    "a1+": 58, "b1-": 58, "b1": 59,
    # c (middle octave)
    "c": 60, "c+": 61, "d-": 61, "d": 62, "d+": 63, "e-": 63, "e": 64,
    "f": 65, "f+": 66, "g-": 66, "g": 67, "g+": 68, "a-": 68, "a": 69,
    "a+": 70, "b-": 70, "b": 71,
    # C1
    "C1": 72, "C1+": 73, "D1-": 73, "D1": 74, "D1+": 75, "E1-": 75, "E1": 76,
    "F1": 77, "F1+": 78, "G1-": 78, "G1": 79, "G1+": 80, "A1-": 80, "A1": 81,
    "A1+": 82, "B1-": 82, "B1": 83,
    # C2
    "C2": 84, "C2+": 85, "D2-": 85, "D2": 86, "D2+": 87, "E2-": 87, "E2": 88,
    "F2": 89, "F2+": 90, "G2-": 90, "G2": 91, "G2+": 92, "A2-": 92, "A2": 93,
    "A2+": 94, "B2-": 94, "B2": 95,
    # C3 (bug fix: this octave previously duplicated the C2 values 84-95,
    # sounding C3 notes an octave low and colliding with C4)
    "C3": 96, "C3+": 97, "D3-": 97, "D3": 98, "D3+": 99, "E3-": 99, "E3": 100,
    "F3": 101, "F3+": 102, "G3-": 102, "G3": 103, "G3+": 104, "A3-": 104, "A3": 105,
    "A3+": 106, "B3-": 106, "B3": 107,
    # C4 (shifted up accordingly so the scale continues without collision)
    "C4": 108, "C4+": 109, "D4-": 109
}
class Composer:
    """Thin wrapper around mido for assembling and saving a MIDI file."""

    def __init__(self, tempo=720, midi_type=1):
        self.tempo = tempo
        self.midi_file = MidiFile(type=midi_type)

    def track(self, program=1):
        """Append a new track set to the given program and return it."""
        new_track = MidiTrack()
        self.midi_file.tracks.append(new_track)
        new_track.append(Message('program_change', program=program))
        return new_track

    def program(self, track, program):
        """Switch the instrument program on an existing track."""
        track.append(Message('program_change', program=program))

    def note(self, track, note):
        """Append one (note name, beat fraction) pair as note_on/note_off."""
        pitch = key_dict[note[0]]
        duration_ticks = int(self.tempo * note[1])
        track.append(Message('note_on', note=pitch, velocity=100, time=0))
        track.append(Message('note_off', note=pitch, velocity=0, time=duration_ticks))

    def save(self, name):
        """Write the accumulated MIDI file to disk."""
        self.midi_file.save(name)
# "Ode to Joy" melody as (note name, duration) pairs; durations are fractions
# of a whole note, later scaled by Composer.tempo or the player's BPM.
joy_notes = [
    ["e", 1./4.], ["e", 1./4.], ["f", 1./4.], ["g", 1./4.],
    ["g", 1./4.], ["f", 1./4.], ["e", 1./4.], ["d", 1./4.],
    ["c", 1./4.], ["c", 1./4.], ["d", 1./4.], ["e", 1./4.],
    ["e", 1./2.], ["d", 1./4.], ["d", 1./4.],
    ["e", 1./4.], ["e", 1./4.], ["f", 1./4.], ["g", 1./4.],
    ["g", 1./4.], ["f", 1./4.], ["e", 1./4.], ["d", 1./4.],
    ["c", 1./4.], ["c", 1./4.], ["d", 1./4.], ["e", 1./4.],
    ["d", 1./2.], ["c", 1./4.], ["c", 1./4.],
]
def demo1():
    """Render the Ode to Joy melody into 'joy.mid'."""
    composer = Composer()
    melody_track = composer.track()
    for entry in joy_notes:
        composer.note(melody_track, entry)
    composer.save('joy.mid')
def play_note():
    """Play an E-major triad ten times, one second each, on the FLUID synth port."""
    midi_note = 64
    midi_time = 640
    for i in range(10):
        # NOTE(review): the output port is re-opened on every iteration;
        # hoisting the with-block above the loop looks intended — confirm.
        with mido.open_output('FLUID Synth (25699):Synth input port (25699:0) 129:0') as output:
            output.send(Message('program_change', program=1))
            # Root, major third, fifth sounded together.
            output.send(Message('note_on', note=midi_note, velocity=100, time=0))
            output.send(Message('note_on', note=midi_note + 4, velocity=100, time=0))
            output.send(Message('note_on', note=midi_note + 7, velocity=100, time=0))
            time.sleep(1)
            # NOTE(review): note_off velocity is 0 for the root but 100 for
            # the other two tones — probably an unintended inconsistency.
            output.send(Message('note_off', note=midi_note, velocity=0, time=midi_time))
            output.send(Message('note_off', note=midi_note + 4, velocity=100, time=midi_time))
            output.send(Message('note_off', note=midi_note + 7, velocity=100, time=midi_time))
def play_note2():
    """Turn the computer keyboard into a live piano (pynput -> FLUID synth).

    Home-row keys a..l map to the C-major scale from middle c upward; each
    key sends note_on on press and note_off on release.  Esc exits.
    """
    fluid_port = 'FLUID Synth (2552):Synth input port (2552:0) 129:0'
    # key -> [note name, currently-sounding flag].  The flag suppresses the
    # repeated note_on events the OS auto-repeat emits while a key is held.
    midi_keymap = {
        "a": ["c", False],
        "s": ["d", False],
        "d": ["e", False],
        "f": ["f", False],
        "g": ["g", False],
        "h": ["a", False],
        "j": ["b", False],
        "k": ["C1", False],
        "l": ["D1", False],
    }

    def send_note_on(key, midi_port):
        # Ignore unmapped keys; fire only if the note is not already sounding.
        attr = midi_keymap.get(key)
        if not attr:
            return
        midi_note = key_dict.get(attr[0])
        if attr[1] == False:
            midi_port.send(Message('note_on', note=midi_note, velocity=100, time=0))
            attr[1] = True

    def send_note_off(key, midi_port):
        attr = midi_keymap.get(key)
        if not attr:
            return
        midi_note = key_dict.get(attr[0])
        if attr[1] == True:
            midi_port.send(Message('note_off', note=midi_note, velocity=0, time=0))
            attr[1] = False

    def on_press(key):
        try:
            send_note_on(key.char, midi_port)
        except AttributeError:
            # Special keys (shift, arrows, ...) have no .char attribute.
            print('special key {0} pressed'.format(
                key))

    def on_release(key):
        if key == keyboard.Key.esc:
            # Stop listener
            return False
        # str(key) renders as "'x'"; index 1 extracts the bare character.
        k = list(str(key))[1]
        send_note_off(k, midi_port)

    # Collect events until released.  Terminal echo is disabled so the
    # keystrokes do not spill into the shell while playing.
    with mido.open_output(fluid_port) as midi_port:
        midi_port.send(Message('program_change', program=1))
        os.system("stty -echo")
        with keyboard.Listener(
                on_press=on_press,
                on_release=on_release) as listener:
            listener.join()
        os.system("stty echo")
def play_note3():
    """Play the joy_notes melody live at 100 BPM on the FLUID synth port."""
    fluid_port = 'FLUID Synth (2552):Synth input port (2552:0) 129:0'
    tempo = 100.
    with mido.open_output(fluid_port) as midi_port:
        midi_port.send(Message('program_change', program=1))
        for n in joy_notes:
            midi_note = key_dict.get(n[0])
            # Seconds per beat (60/BPM) scaled by the note's beat fraction.
            midi_time = (60. / tempo) * n[1]
            midi_port.send(Message('note_on', note=midi_note, velocity=100, time=0))
            # Poll-wait the note duration (1 ms resolution), then release.
            pre = time.time()
            while True:
                cur = time.time()
                if (cur - pre) > midi_time:
                    midi_port.send(Message('note_off', note=midi_note, velocity=100, time=0))
                    break
                time.sleep(0.001)
if __name__ == "__main__":
play_note2() | vimusic.py |
import time
import os
import mido
from mido import Message, MidiFile, MidiTrack, tempo2bpm
from pynput import keyboard
# Note-name -> MIDI note number.  Lower-case names with a digit suffix are
# octaves BELOW middle c ("c" == MIDI 60, so "c1" == 48 ... "c3" == 24);
# upper-case "C1".."C4" are octaves ABOVE it.  '+' = sharp, '-' = flat, so
# enharmonic pairs (e.g. "c+"/"d-") share one MIDI value.
key_dict = {
    # c4 (partial: only the top of the lowest octave is mapped)
    "a4+": 22, "b4-": 22, "b4": 23,
    # c3
    "c3": 24, "c3+": 25, "d3-": 25, "d3": 26, "d3+": 27, "e3-": 27, "e3": 28,
    "f3": 29, "f3+": 30, "g3-": 30, "g3": 31, "g3+": 32, "a3-": 32, "a3": 33,
    "a3+": 34, "b3-": 34, "b3": 35,
    # c2
    "c2": 36, "c2+": 37, "d2-": 37, "d2": 38, "d2+": 39, "e2-": 39, "e2": 40,
    "f2": 41, "f2+": 42, "g2-": 42, "g2": 43, "g2+": 44, "a2-": 44, "a2": 45,
    "a2+": 46, "b2-": 46, "b2": 47,
    # c1
    "c1": 48, "c1+": 49, "d1-": 49, "d1": 50, "d1+": 51, "e1-": 51, "e1": 52,
    "f1": 53, "f1+": 54, "g1-": 54, "g1": 55, "g1+": 56, "a1-": 56, "a1": 57,
    "a1+": 58, "b1-": 58, "b1": 59,
    # c (middle octave)
    "c": 60, "c+": 61, "d-": 61, "d": 62, "d+": 63, "e-": 63, "e": 64,
    "f": 65, "f+": 66, "g-": 66, "g": 67, "g+": 68, "a-": 68, "a": 69,
    "a+": 70, "b-": 70, "b": 71,
    # C1
    "C1": 72, "C1+": 73, "D1-": 73, "D1": 74, "D1+": 75, "E1-": 75, "E1": 76,
    "F1": 77, "F1+": 78, "G1-": 78, "G1": 79, "G1+": 80, "A1-": 80, "A1": 81,
    "A1+": 82, "B1-": 82, "B1": 83,
    # C2
    "C2": 84, "C2+": 85, "D2-": 85, "D2": 86, "D2+": 87, "E2-": 87, "E2": 88,
    "F2": 89, "F2+": 90, "G2-": 90, "G2": 91, "G2+": 92, "A2-": 92, "A2": 93,
    "A2+": 94, "B2-": 94, "B2": 95,
    # C3 (bug fix: this octave previously duplicated the C2 values 84-95,
    # sounding C3 notes an octave low and colliding with C4)
    "C3": 96, "C3+": 97, "D3-": 97, "D3": 98, "D3+": 99, "E3-": 99, "E3": 100,
    "F3": 101, "F3+": 102, "G3-": 102, "G3": 103, "G3+": 104, "A3-": 104, "A3": 105,
    "A3+": 106, "B3-": 106, "B3": 107,
    # C4 (shifted up accordingly so the scale continues without collision)
    "C4": 108, "C4+": 109, "D4-": 109
}
class Composer:
    """Thin wrapper around mido for assembling and saving a MIDI file."""

    def __init__(self, tempo=720, midi_type=1):
        self.tempo = tempo
        self.midi_file = MidiFile(type=midi_type)

    def track(self, program=1):
        """Append a new track set to the given program and return it."""
        new_track = MidiTrack()
        self.midi_file.tracks.append(new_track)
        new_track.append(Message('program_change', program=program))
        return new_track

    def program(self, track, program):
        """Switch the instrument program on an existing track."""
        track.append(Message('program_change', program=program))

    def note(self, track, note):
        """Append one (note name, beat fraction) pair as note_on/note_off."""
        pitch = key_dict[note[0]]
        duration_ticks = int(self.tempo * note[1])
        track.append(Message('note_on', note=pitch, velocity=100, time=0))
        track.append(Message('note_off', note=pitch, velocity=0, time=duration_ticks))

    def save(self, name):
        """Write the accumulated MIDI file to disk."""
        self.midi_file.save(name)
# "Ode to Joy" melody as (note name, duration) pairs; durations are fractions
# of a whole note, later scaled by Composer.tempo or the player's BPM.
joy_notes = [
    ["e", 1./4.], ["e", 1./4.], ["f", 1./4.], ["g", 1./4.],
    ["g", 1./4.], ["f", 1./4.], ["e", 1./4.], ["d", 1./4.],
    ["c", 1./4.], ["c", 1./4.], ["d", 1./4.], ["e", 1./4.],
    ["e", 1./2.], ["d", 1./4.], ["d", 1./4.],
    ["e", 1./4.], ["e", 1./4.], ["f", 1./4.], ["g", 1./4.],
    ["g", 1./4.], ["f", 1./4.], ["e", 1./4.], ["d", 1./4.],
    ["c", 1./4.], ["c", 1./4.], ["d", 1./4.], ["e", 1./4.],
    ["d", 1./2.], ["c", 1./4.], ["c", 1./4.],
]
def demo1():
    """Render the Ode to Joy melody into 'joy.mid'."""
    composer = Composer()
    melody_track = composer.track()
    for entry in joy_notes:
        composer.note(melody_track, entry)
    composer.save('joy.mid')
def play_note():
    """Play an E-major triad ten times, one second each, on the FLUID synth port."""
    midi_note = 64
    midi_time = 640
    for i in range(10):
        # NOTE(review): the output port is re-opened on every iteration;
        # hoisting the with-block above the loop looks intended — confirm.
        with mido.open_output('FLUID Synth (25699):Synth input port (25699:0) 129:0') as output:
            output.send(Message('program_change', program=1))
            # Root, major third, fifth sounded together.
            output.send(Message('note_on', note=midi_note, velocity=100, time=0))
            output.send(Message('note_on', note=midi_note + 4, velocity=100, time=0))
            output.send(Message('note_on', note=midi_note + 7, velocity=100, time=0))
            time.sleep(1)
            # NOTE(review): note_off velocity is 0 for the root but 100 for
            # the other two tones — probably an unintended inconsistency.
            output.send(Message('note_off', note=midi_note, velocity=0, time=midi_time))
            output.send(Message('note_off', note=midi_note + 4, velocity=100, time=midi_time))
            output.send(Message('note_off', note=midi_note + 7, velocity=100, time=midi_time))
def play_note2():
    """Turn the computer keyboard into a live piano (pynput -> FLUID synth).

    Home-row keys a..l map to the C-major scale from middle c upward; each
    key sends note_on on press and note_off on release.  Esc exits.
    """
    fluid_port = 'FLUID Synth (2552):Synth input port (2552:0) 129:0'
    # key -> [note name, currently-sounding flag].  The flag suppresses the
    # repeated note_on events the OS auto-repeat emits while a key is held.
    midi_keymap = {
        "a": ["c", False],
        "s": ["d", False],
        "d": ["e", False],
        "f": ["f", False],
        "g": ["g", False],
        "h": ["a", False],
        "j": ["b", False],
        "k": ["C1", False],
        "l": ["D1", False],
    }

    def send_note_on(key, midi_port):
        # Ignore unmapped keys; fire only if the note is not already sounding.
        attr = midi_keymap.get(key)
        if not attr:
            return
        midi_note = key_dict.get(attr[0])
        if attr[1] == False:
            midi_port.send(Message('note_on', note=midi_note, velocity=100, time=0))
            attr[1] = True

    def send_note_off(key, midi_port):
        attr = midi_keymap.get(key)
        if not attr:
            return
        midi_note = key_dict.get(attr[0])
        if attr[1] == True:
            midi_port.send(Message('note_off', note=midi_note, velocity=0, time=0))
            attr[1] = False

    def on_press(key):
        try:
            send_note_on(key.char, midi_port)
        except AttributeError:
            # Special keys (shift, arrows, ...) have no .char attribute.
            print('special key {0} pressed'.format(
                key))

    def on_release(key):
        if key == keyboard.Key.esc:
            # Stop listener
            return False
        # str(key) renders as "'x'"; index 1 extracts the bare character.
        k = list(str(key))[1]
        send_note_off(k, midi_port)

    # Collect events until released.  Terminal echo is disabled so the
    # keystrokes do not spill into the shell while playing.
    with mido.open_output(fluid_port) as midi_port:
        midi_port.send(Message('program_change', program=1))
        os.system("stty -echo")
        with keyboard.Listener(
                on_press=on_press,
                on_release=on_release) as listener:
            listener.join()
        os.system("stty echo")
def play_note3():
    """Play the joy_notes melody live at 100 BPM on the FLUID synth port."""
    fluid_port = 'FLUID Synth (2552):Synth input port (2552:0) 129:0'
    tempo = 100.
    with mido.open_output(fluid_port) as midi_port:
        midi_port.send(Message('program_change', program=1))
        for n in joy_notes:
            midi_note = key_dict.get(n[0])
            # Seconds per beat (60/BPM) scaled by the note's beat fraction.
            midi_time = (60. / tempo) * n[1]
            midi_port.send(Message('note_on', note=midi_note, velocity=100, time=0))
            # Poll-wait the note duration (1 ms resolution), then release.
            pre = time.time()
            while True:
                cur = time.time()
                if (cur - pre) > midi_time:
                    midi_port.send(Message('note_off', note=midi_note, velocity=100, time=0))
                    break
                time.sleep(0.001)
if __name__ == "__main__":
play_note2() | 0.406862 | 0.403861 |
from numpy.core.arrayprint import BoolFormat
from game import *
from encoder import *
from arena import *
from dataManager import *
from network import *
class Program:
    """Facade wiring the game instance to the saved and trainable networks."""

    def __init__(self, the_game):
        self.the_game = the_game
        # Current champion network, used for live move selection.
        self.best_network = readNeuralNetwork("networks/best_network")
        # Fresh network (264 inputs, four hidden layers of 30, scalar output)
        # with learning rate 0.6514, to be trained via train_network().
        self.new_network = NeuralNetwork([264,30,30,30,30,1],0.6514)

    def select_move(self):
        """Choose a move for the current game with the best saved network."""
        return ARENA.select_move_net(self.best_network, self.the_game)

    def train_network(self, it = 1, EPOCHS = 1000, both_datasets = True, coupled_dataset = False, dataset = "classic", write = True):
        """Supervised-train self.new_network on the exported datasets.

        dataset "classic" uses the raw exports, anything else the *_filtered_
        files.  coupled_dataset pairs the win/loss sets; both_datasets stacks
        the coupled set with the unfinished-games set.  lenTest rows (10% of
        each set) are held out for evaluation.
        """
        lenTest = 0
        if not(both_datasets):
            if coupled_dataset:
                if dataset == "classic":
                    x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_","loss_",720000)
                else:
                    x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_","loss_filtered_",720000)
                lenTest = 72000
            else:
                if dataset == "classic":
                    x,y = DATA_MANAGER.import_x_y("unfinished_",135000)
                else:
                    x,y = DATA_MANAGER.import_x_y("unfinished_filtered_",135000)
                lenTest = 13500
        else:
            if dataset == "classic":
                x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_","loss_",720000)
                x_2,y_2 = DATA_MANAGER.import_x_y("unfinished_",135000)
                x = np.vstack((x,x_2))
                y = np.vstack((y,y_2))
            else:
                x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_","loss_filtered_",720000)
                x_2,y_2 = DATA_MANAGER.import_x_y("unfinished_filtered_",135000)
                x = np.vstack((x,x_2))
                y = np.vstack((y,y_2))
            lenTest = 72000 + 13500
        x_train,y_train,x_test,y_test = DATA_MANAGER.create_train_test_sets(x,y,lenTest)
        self.new_network.supervised_learning(x_train,y_train,x_test,y_test,lenTest,it=it,EPOCH=EPOCHS,batch_size=100,dataset=dataset,write=write)

    def set_network_structure(self,sizes,learning_rate):
        """Replace self.new_network with a freshly initialised architecture."""
        self.new_network = NeuralNetwork(sizes,learning_rate)

    def study_against_random(self, dataset = 1, classic = False):
        """Win score of a saved filtered network over 1000 games vs. random.

        NOTE(review): `game` is not defined in this scope — presumably it
        should be self.the_game; confirm with callers.
        NOTE(review): `score` is unbound (NameError on return) when classic
        is True or dataset is not in {1, 2, 3}; no classic branches exist.
        """
        if dataset == 1 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        elif dataset == 2 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_2_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        elif dataset == 3 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1&2_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        return score
from game import *
from encoder import *
from arena import *
from dataManager import *
from network import *
class Program:
    """Facade wiring the game instance to the saved and trainable networks."""

    def __init__(self, the_game):
        self.the_game = the_game
        # Current champion network, used for live move selection.
        self.best_network = readNeuralNetwork("networks/best_network")
        # Fresh network (264 inputs, four hidden layers of 30, scalar output)
        # with learning rate 0.6514, to be trained via train_network().
        self.new_network = NeuralNetwork([264,30,30,30,30,1],0.6514)

    def select_move(self):
        """Choose a move for the current game with the best saved network."""
        return ARENA.select_move_net(self.best_network, self.the_game)

    def train_network(self, it = 1, EPOCHS = 1000, both_datasets = True, coupled_dataset = False, dataset = "classic", write = True):
        """Supervised-train self.new_network on the exported datasets.

        dataset "classic" uses the raw exports, anything else the *_filtered_
        files.  coupled_dataset pairs the win/loss sets; both_datasets stacks
        the coupled set with the unfinished-games set.  lenTest rows (10% of
        each set) are held out for evaluation.
        """
        lenTest = 0
        if not(both_datasets):
            if coupled_dataset:
                if dataset == "classic":
                    x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_","loss_",720000)
                else:
                    x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_","loss_filtered_",720000)
                lenTest = 72000
            else:
                if dataset == "classic":
                    x,y = DATA_MANAGER.import_x_y("unfinished_",135000)
                else:
                    x,y = DATA_MANAGER.import_x_y("unfinished_filtered_",135000)
                lenTest = 13500
        else:
            if dataset == "classic":
                x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_","loss_",720000)
                x_2,y_2 = DATA_MANAGER.import_x_y("unfinished_",135000)
                x = np.vstack((x,x_2))
                y = np.vstack((y,y_2))
            else:
                x,y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_","loss_filtered_",720000)
                x_2,y_2 = DATA_MANAGER.import_x_y("unfinished_filtered_",135000)
                x = np.vstack((x,x_2))
                y = np.vstack((y,y_2))
            lenTest = 72000 + 13500
        x_train,y_train,x_test,y_test = DATA_MANAGER.create_train_test_sets(x,y,lenTest)
        self.new_network.supervised_learning(x_train,y_train,x_test,y_test,lenTest,it=it,EPOCH=EPOCHS,batch_size=100,dataset=dataset,write=write)

    def set_network_structure(self,sizes,learning_rate):
        """Replace self.new_network with a freshly initialised architecture."""
        self.new_network = NeuralNetwork(sizes,learning_rate)

    def study_against_random(self, dataset = 1, classic = False):
        """Win score of a saved filtered network over 1000 games vs. random.

        NOTE(review): `game` is not defined in this scope — presumably it
        should be self.the_game; confirm with callers.
        NOTE(review): `score` is unbound (NameError on return) when classic
        is True or dataset is not in {1, 2, 3}; no classic branches exist.
        """
        if dataset == 1 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        elif dataset == 2 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_2_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        elif dataset == 3 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1&2_filters")
            score = ARENA.games_net_VS_random(net,game,nb_games=1000)[0]
        return score
import flask
import glob
import json
import os
import pandas as pd
import sys
import webbrowser
from datetime import datetime
from flask import Flask, request
from flask_cors import CORS
app = Flask(__name__, static_url_path='')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 # TODO remove in prod
CORS(app)
project_dir = os.path.abspath(os.path.join(app.root_path, '..'))
"""
DATA LOADING
"""
def date_format(date):
day, month, year = date.split('.')
return "{}.{}.{}".format(day, month, "20" + year)
def danger_format(danger):
if len(danger) > 1 and danger[0] == '-':
danger = danger[1:]
return danger
accidents_file = os.path.join(project_dir, "data/accidents/accidents.csv")
accidents_data = pd.read_csv(accidents_file)
accidents_data.Date = accidents_data.Date.apply(date_format)
accidents_data['Danger level'] = accidents_data['Danger level'].apply(danger_format)
accidents_json = accidents_data.to_json(orient='index')
with open('accidents.json', 'w') as f:
f.write(accidents_json)
maps_dirs = [os.path.join(project_dir, dir_) for dir_ in ["json-maps", "json-snowmaps"]]
maps_files = [f for dir_ in maps_dirs for f in glob.glob(os.path.join(dir_, "*.json"))]
maps_files_with_date = [(datetime.strptime(os.path.basename(f)[:8], "%Y%m%d"), f) for f in maps_files]
"""
ROUTING
"""
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/accidents')
def accident_data():
response = app.response_class(
response=accidents_json,
status=200,
mimetype='application/json'
)
return response
@app.route('/maps', methods=['GET'])
def maps():
"""Serves maps JSON files
Expect url of the form
localhost:5000/maps?from=2012-10-08&to=2012-10-20
"""
from_date = request.args.get('from')
to_date = request.args.get('to')
assert from_date and to_date, 'Unable to serve request: missing from or to date'
from_date = datetime.strptime(from_date, "%Y-%m-%d")
to_date = datetime.strptime(to_date, "%Y-%m-%d")
selected_files = [(date, file) for date, file in maps_files_with_date if date >= from_date and date <= to_date]
json_to_send = {datetime.strftime(date, "%Y-%m-%d"): json.load(open(f, 'r')) for (date, f) in selected_files}
return flask.jsonify(json_to_send)
webbrowser.open('http://localhost:5000/') | tools/server.py | import flask
import glob
import json
import os
import pandas as pd
import sys
import webbrowser
from datetime import datetime
from flask import Flask, request
from flask_cors import CORS
app = Flask(__name__, static_url_path='')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 # TODO remove in prod
CORS(app)
project_dir = os.path.abspath(os.path.join(app.root_path, '..'))
"""
DATA LOADING
"""
def date_format(date):
day, month, year = date.split('.')
return "{}.{}.{}".format(day, month, "20" + year)
def danger_format(danger):
if len(danger) > 1 and danger[0] == '-':
danger = danger[1:]
return danger
accidents_file = os.path.join(project_dir, "data/accidents/accidents.csv")
accidents_data = pd.read_csv(accidents_file)
accidents_data.Date = accidents_data.Date.apply(date_format)
accidents_data['Danger level'] = accidents_data['Danger level'].apply(danger_format)
accidents_json = accidents_data.to_json(orient='index')
with open('accidents.json', 'w') as f:
f.write(accidents_json)
maps_dirs = [os.path.join(project_dir, dir_) for dir_ in ["json-maps", "json-snowmaps"]]
maps_files = [f for dir_ in maps_dirs for f in glob.glob(os.path.join(dir_, "*.json"))]
maps_files_with_date = [(datetime.strptime(os.path.basename(f)[:8], "%Y%m%d"), f) for f in maps_files]
"""
ROUTING
"""
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/accidents')
def accident_data():
response = app.response_class(
response=accidents_json,
status=200,
mimetype='application/json'
)
return response
@app.route('/maps', methods=['GET'])
def maps():
"""Serves maps JSON files
Expect url of the form
localhost:5000/maps?from=2012-10-08&to=2012-10-20
"""
from_date = request.args.get('from')
to_date = request.args.get('to')
assert from_date and to_date, 'Unable to serve request: missing from or to date'
from_date = datetime.strptime(from_date, "%Y-%m-%d")
to_date = datetime.strptime(to_date, "%Y-%m-%d")
selected_files = [(date, file) for date, file in maps_files_with_date if date >= from_date and date <= to_date]
json_to_send = {datetime.strftime(date, "%Y-%m-%d"): json.load(open(f, 'r')) for (date, f) in selected_files}
return flask.jsonify(json_to_send)
webbrowser.open('http://localhost:5000/') | 0.186391 | 0.109634 |
import logging
import os
import re
import pandas as pd
import gamechangerml.src.text_classif.utils.entity_mentions as em
from gamechangerml.src.text_classif.utils.predict_glob import predict_glob
from gamechangerml.src.text_classif.utils.top_k_entities import top_k_entities
logger = logging.getLogger(__name__)
class EntityLink(object):
def __init__(
self, entity_csv=None, mentions_json=None, use_na=True, topk=3
):
"""
Links a statement to an entity using a type of 'nearest entity' method.
If such linking is not possible, the top k most frequently occurring
entities is used.
Args:
entity_csv (str): csv containing entity,abbreviation if
an abbreviation exists
mentions_json (str): name of the entity mentions json produced by
`entity_mentions.py`
use_na (bool): if True, use self.NA instead of the top k mentions
when entity linking fails
topk (int): top k mentions to use when an entity has failed
"""
if not os.path.isfile(entity_csv):
raise FileExistsError("no entity file, got {}".format(entity_csv))
if not os.path.isfile(mentions_json):
raise FileExistsError(
"no mentions file {}, got".format(mentions_json)
)
topk = max(1, topk)
logger.info("top k : {}".format(topk))
self.top_k_in_doc = top_k_entities(mentions_json, top_k=topk)
self.abbrv_re, self.entity_re = em.make_entity_re(entity_csv)
self.use_na = use_na
self.RESP = "RESPONSIBILITIES"
self.SENT = "sentence"
# NB: KW can be any valid regex like "shall|will"
self.KW = "shall"
self.KW_RE = re.compile("\\b" + self.KW + "\\b[:,]?")
self.NA = "Unable to connect Responsibility to Entity"
self.TOPCLASS = "top_class"
self.ENT = "entity"
self.SRC = "src"
self.USC_DOT = "U.S.C."
self.USC = "USC"
self.USC_RE = "\\b" + self.USC + "\\b"
self.PL = "P.L."
self.PL_DOT = "P. L."
self.PL_RE = "\\b" + self.PL_DOT + "\\b"
self.EO = "E.O."
self.EO_DOT = "E. O."
self.EO_RE = "\\b" + self.EO_DOT + "\\b"
self.dotted = [self.USC_DOT, self.PL, self.EO]
self.subs = [self.USC, self.PL, self.EO]
self.sub_back = [self.USC_DOT, self.PL_DOT, self.EO_DOT]
self.unsub_re = [self.USC_RE, self.PL_RE, self.EO_RE]
self.pop_entities = None
self.failed = list()
def _new_edict(self, value=None):
if value is None:
value = self.NA
return {self.ENT: value}
def _re_sub(self, sentence):
for regex, sub in zip(self.dotted, self.subs):
sentence = re.sub(regex, sub, sentence)
return sentence
def _unsub_df(self, df, regex, sub):
df[self.SENT] = [re.sub(regex, sub, str(x)) for x in df[self.SENT]]
def _resolve_na(self, doc_name):
if self.use_na:
return self.NA
if doc_name in self.top_k_in_doc:
ent = ";".join(self.top_k_in_doc[doc_name])
logger.debug("entity : {}".format(self.top_k_in_doc[doc_name]))
return ent
else:
logger.warning("can't find {} for lookup".format(doc_name))
return self.NA
def _link_entity(self, output_list, entity_list, default_ent):
curr_entity = default_ent
for prediction in output_list:
sentence = prediction[self.SENT]
sentence = self._re_sub(sentence)
new_entry = self._new_edict(value=curr_entity)
new_entry.update(prediction)
if prediction[self.TOPCLASS] == 0:
new_entry[self.ENT] = default_ent
match_obj = re.search(self.KW, sentence)
if match_obj is not None:
cand_entity = re.split(self.KW_RE, sentence, maxsplit=1)[
0
].strip()
ent_list = em.contains_entity(
cand_entity, self.entity_re, self.abbrv_re
)
if ent_list:
curr_entity = cand_entity
elif prediction[self.TOPCLASS] == 1:
new_entry[self.ENT] = curr_entity
else:
msg = "unknown prediction for '{}', ".format(sentence)
msg += "got {}".format(prediction[self.TOPCLASS])
logger.warning(msg)
entity_list.append(new_entry)
def _populate_entity(self, output_list):
entity_list = list()
for idx, entry in enumerate(output_list):
doc_name = entry[self.SRC]
default_ent = self._resolve_na(doc_name)
e_dict = self._new_edict(value=self._resolve_na(doc_name))
e_dict.update(entry)
if e_dict[self.TOPCLASS] == 0 and self.RESP in entry[self.SENT]:
entity_list.append(e_dict)
self._link_entity(
output_list[idx + 1 :], entity_list, default_ent
)
return entity_list
else:
entity_list.append(e_dict)
return entity_list
def make_table(self, model_path, data_path, glob, max_seq_len, batch_size):
"""
Loop through the documents, predict each piece of text and attach
an entity.
The arguments are shown below in `args`.
A list entry looks like:
{'top_class': 0,
'prob': 0.997,
'src': 'DoDD 5105.21.json',
'label': 0,
'sentence': 'Department of...'}
--> `top_class` is the predicted label
Returns:
None
"""
self.pop_entities = list()
for output_list, file_name in predict_glob(
model_path, data_path, glob, max_seq_len, batch_size
):
logger.info("num input : {:>4,d}".format(len(output_list)))
pop_list = self._populate_entity(output_list)
logger.info(
"processed : {:>4,d} {}".format(len(pop_list), file_name)
)
self.pop_entities.extend(pop_list)
def _to_df(self):
if not self.pop_entities:
raise ValueError("no data to convert; please run `make_table()`?")
else:
return pd.DataFrame(self.pop_entities)
def to_df(self):
"""
Creates a pandas data frame from the populated entities list
Returns:
pd.DataFrame
"""
df = self._to_df()
for regex, sub in zip(self.unsub_re, self.sub_back):
self._unsub_df(df, regex, sub)
return df
def to_csv(self, output_csv):
df = self._to_df()
df.to_csv(output_csv, index=False) | gamechangerml/src/text_classif/utils/entity_link.py | import logging
import os
import re
import pandas as pd
import gamechangerml.src.text_classif.utils.entity_mentions as em
from gamechangerml.src.text_classif.utils.predict_glob import predict_glob
from gamechangerml.src.text_classif.utils.top_k_entities import top_k_entities
logger = logging.getLogger(__name__)
class EntityLink(object):
def __init__(
self, entity_csv=None, mentions_json=None, use_na=True, topk=3
):
"""
Links a statement to an entity using a type of 'nearest entity' method.
If such linking is not possible, the top k most frequently occurring
entities is used.
Args:
entity_csv (str): csv containing entity,abbreviation if
an abbreviation exists
mentions_json (str): name of the entity mentions json produced by
`entity_mentions.py`
use_na (bool): if True, use self.NA instead of the top k mentions
when entity linking fails
topk (int): top k mentions to use when an entity has failed
"""
if not os.path.isfile(entity_csv):
raise FileExistsError("no entity file, got {}".format(entity_csv))
if not os.path.isfile(mentions_json):
raise FileExistsError(
"no mentions file {}, got".format(mentions_json)
)
topk = max(1, topk)
logger.info("top k : {}".format(topk))
self.top_k_in_doc = top_k_entities(mentions_json, top_k=topk)
self.abbrv_re, self.entity_re = em.make_entity_re(entity_csv)
self.use_na = use_na
self.RESP = "RESPONSIBILITIES"
self.SENT = "sentence"
# NB: KW can be any valid regex like "shall|will"
self.KW = "shall"
self.KW_RE = re.compile("\\b" + self.KW + "\\b[:,]?")
self.NA = "Unable to connect Responsibility to Entity"
self.TOPCLASS = "top_class"
self.ENT = "entity"
self.SRC = "src"
self.USC_DOT = "U.S.C."
self.USC = "USC"
self.USC_RE = "\\b" + self.USC + "\\b"
self.PL = "P.L."
self.PL_DOT = "P. L."
self.PL_RE = "\\b" + self.PL_DOT + "\\b"
self.EO = "E.O."
self.EO_DOT = "E. O."
self.EO_RE = "\\b" + self.EO_DOT + "\\b"
self.dotted = [self.USC_DOT, self.PL, self.EO]
self.subs = [self.USC, self.PL, self.EO]
self.sub_back = [self.USC_DOT, self.PL_DOT, self.EO_DOT]
self.unsub_re = [self.USC_RE, self.PL_RE, self.EO_RE]
self.pop_entities = None
self.failed = list()
def _new_edict(self, value=None):
if value is None:
value = self.NA
return {self.ENT: value}
def _re_sub(self, sentence):
for regex, sub in zip(self.dotted, self.subs):
sentence = re.sub(regex, sub, sentence)
return sentence
def _unsub_df(self, df, regex, sub):
df[self.SENT] = [re.sub(regex, sub, str(x)) for x in df[self.SENT]]
def _resolve_na(self, doc_name):
if self.use_na:
return self.NA
if doc_name in self.top_k_in_doc:
ent = ";".join(self.top_k_in_doc[doc_name])
logger.debug("entity : {}".format(self.top_k_in_doc[doc_name]))
return ent
else:
logger.warning("can't find {} for lookup".format(doc_name))
return self.NA
def _link_entity(self, output_list, entity_list, default_ent):
curr_entity = default_ent
for prediction in output_list:
sentence = prediction[self.SENT]
sentence = self._re_sub(sentence)
new_entry = self._new_edict(value=curr_entity)
new_entry.update(prediction)
if prediction[self.TOPCLASS] == 0:
new_entry[self.ENT] = default_ent
match_obj = re.search(self.KW, sentence)
if match_obj is not None:
cand_entity = re.split(self.KW_RE, sentence, maxsplit=1)[
0
].strip()
ent_list = em.contains_entity(
cand_entity, self.entity_re, self.abbrv_re
)
if ent_list:
curr_entity = cand_entity
elif prediction[self.TOPCLASS] == 1:
new_entry[self.ENT] = curr_entity
else:
msg = "unknown prediction for '{}', ".format(sentence)
msg += "got {}".format(prediction[self.TOPCLASS])
logger.warning(msg)
entity_list.append(new_entry)
def _populate_entity(self, output_list):
entity_list = list()
for idx, entry in enumerate(output_list):
doc_name = entry[self.SRC]
default_ent = self._resolve_na(doc_name)
e_dict = self._new_edict(value=self._resolve_na(doc_name))
e_dict.update(entry)
if e_dict[self.TOPCLASS] == 0 and self.RESP in entry[self.SENT]:
entity_list.append(e_dict)
self._link_entity(
output_list[idx + 1 :], entity_list, default_ent
)
return entity_list
else:
entity_list.append(e_dict)
return entity_list
def make_table(self, model_path, data_path, glob, max_seq_len, batch_size):
"""
Loop through the documents, predict each piece of text and attach
an entity.
The arguments are shown below in `args`.
A list entry looks like:
{'top_class': 0,
'prob': 0.997,
'src': 'DoDD 5105.21.json',
'label': 0,
'sentence': 'Department of...'}
--> `top_class` is the predicted label
Returns:
None
"""
self.pop_entities = list()
for output_list, file_name in predict_glob(
model_path, data_path, glob, max_seq_len, batch_size
):
logger.info("num input : {:>4,d}".format(len(output_list)))
pop_list = self._populate_entity(output_list)
logger.info(
"processed : {:>4,d} {}".format(len(pop_list), file_name)
)
self.pop_entities.extend(pop_list)
def _to_df(self):
if not self.pop_entities:
raise ValueError("no data to convert; please run `make_table()`?")
else:
return pd.DataFrame(self.pop_entities)
def to_df(self):
"""
Creates a pandas data frame from the populated entities list
Returns:
pd.DataFrame
"""
df = self._to_df()
for regex, sub in zip(self.unsub_re, self.sub_back):
self._unsub_df(df, regex, sub)
return df
def to_csv(self, output_csv):
df = self._to_df()
df.to_csv(output_csv, index=False) | 0.66072 | 0.150778 |
import urllib
import urllib2
import requests
import threading
import json
from time import sleep
url = 'http://localhost:8545/'
import os.path
def get_result(json_content):
content = json.loads(json_content)
try:
return content["result"]
except Exception as e:
print e
print json_content
class MyThread(threading.Thread):
def __init__(self, index):
threading.Thread.__init__(self)
self.data_getCode = {'jsonrpc': '2.0',
'method': 'eth_getCode',
'params': ["0x9bA082240DBa3F9ef90038b9357649Fa569fd763", 'latest'],
'id': 1 + index * 100}
self.data_TX_count = {'jsonrpc': '2.0',
'method': 'eth_getBlockTransactionCountByNumber',
'params': [],
'id': 2 + index * 100}
self.data_blockNumber = {'jsonrpc': '2.0',
'method': 'eth_blockNumber',
'params': [],
'id': 3 + index * 100}
self.data_get_TX_by_index = {'jsonrpc': '2.0',
'method': 'eth_getTransactionByBlockNumberAndIndex',
'params': [],
'id': 4 + index * 100}
self.data_get_TX = {'jsonrpc': '2.0',
'method': 'eth_getTransactionByHash',
'params': [],
'id': 5 + index * 100}
self.data_get_TX_receipt = {'jsonrpc': '2.0',
'method': 'eth_getTransactionReceipt',
'params': [],
'id': 6 + index * 100}
self.list_address = []
self.list_contract = {}
self.index = index
self.low = index*10000
self.high = (index + 1)*10000
# print self.low, self.high
self.sess = requests.Session()
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
self.sess.mount('http://', self.adapter)
def run(self):
for i in range(self.low, self.high):
if i%1000 == 0:
print 'Thread ' + str(self.index) + ' is processing block: ' + str(i)
print "Number of contracts in Thread " + str(self.index) + " so far: " + str(len(self.list_contract))
# with open('contract_' + str(i) + '.json', 'w') as outfile:
# json.dump(self.list_contract, outfile)
# self.list_contract.clear()
self.data_TX_count['params'] = [str(hex(i))]
r = self.sess.get(url, data=json.dumps(self.data_TX_count), allow_redirects=True)
tx_count = int(get_result(r.content), 16)
r.close()
for tx_id in range(tx_count):
self.data_get_TX_by_index['params'] = [str(hex(i)), str(hex(tx_id))]
r = self.sess.get(url, data=json.dumps(self.data_get_TX_by_index), allow_redirects=True)
tx = get_result(r.content)
r.close()
if (tx['to'] == None): # this TX creates a contract
self.data_get_TX_receipt['params'] = [tx['hash']]
r = self.sess.get(url, data=json.dumps(self.data_get_TX_receipt), allow_redirects=True)
tx_receipt = get_result(r.content)
r.close()
if tx_receipt['contractAddress'] == None:
continue
self.data_getCode['params'][0] = tx_receipt['contractAddress']
r = self.sess.get(url, data=json.dumps(self.data_getCode), allow_redirects=True)
code = get_result(r.content)
r.close()
if len(code) > 2:
self.data_get_TX['params'] = [tx['hash']]
r = self.sess.get(url, data=json.dumps(self.data_get_TX), allow_redirects=True)
tx_detail = get_result(r.content)
r.close()
tx_input = tx_detail['input']
# init_data = tx_input[:len(tx_input)-len(code)+2]
self.list_contract[tx_receipt['contractAddress']] = [tx_input, code, tx['hash']]
# Print the last run
print 'Thread ' + str(self.index) + ' is processing block: ' + str(i)
print "Number of contracts in Thread " + str(self.index) + " so far: " + str(len(self.list_contract))
with open('contract_' + str(self.high) + '.json', 'w') as outfile:
json.dump(self.list_contract, outfile)
self.list_contract.clear()
list_threads = []
try:
for i in range(0, 4):
new_thread = MyThread(i)
list_threads.append(new_thread)
for my_thread in list_threads:
my_thread.start()
except Exception as e:
print e
print "Error: unable to start thread" | contract_data/contracts_collector.py | import urllib
import urllib2
import requests
import threading
import json
from time import sleep
url = 'http://localhost:8545/'
import os.path
def get_result(json_content):
content = json.loads(json_content)
try:
return content["result"]
except Exception as e:
print e
print json_content
class MyThread(threading.Thread):
def __init__(self, index):
threading.Thread.__init__(self)
self.data_getCode = {'jsonrpc': '2.0',
'method': 'eth_getCode',
'params': ["0x9bA082240DBa3F9ef90038b9357649Fa569fd763", 'latest'],
'id': 1 + index * 100}
self.data_TX_count = {'jsonrpc': '2.0',
'method': 'eth_getBlockTransactionCountByNumber',
'params': [],
'id': 2 + index * 100}
self.data_blockNumber = {'jsonrpc': '2.0',
'method': 'eth_blockNumber',
'params': [],
'id': 3 + index * 100}
self.data_get_TX_by_index = {'jsonrpc': '2.0',
'method': 'eth_getTransactionByBlockNumberAndIndex',
'params': [],
'id': 4 + index * 100}
self.data_get_TX = {'jsonrpc': '2.0',
'method': 'eth_getTransactionByHash',
'params': [],
'id': 5 + index * 100}
self.data_get_TX_receipt = {'jsonrpc': '2.0',
'method': 'eth_getTransactionReceipt',
'params': [],
'id': 6 + index * 100}
self.list_address = []
self.list_contract = {}
self.index = index
self.low = index*10000
self.high = (index + 1)*10000
# print self.low, self.high
self.sess = requests.Session()
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
self.sess.mount('http://', self.adapter)
def run(self):
for i in range(self.low, self.high):
if i%1000 == 0:
print 'Thread ' + str(self.index) + ' is processing block: ' + str(i)
print "Number of contracts in Thread " + str(self.index) + " so far: " + str(len(self.list_contract))
# with open('contract_' + str(i) + '.json', 'w') as outfile:
# json.dump(self.list_contract, outfile)
# self.list_contract.clear()
self.data_TX_count['params'] = [str(hex(i))]
r = self.sess.get(url, data=json.dumps(self.data_TX_count), allow_redirects=True)
tx_count = int(get_result(r.content), 16)
r.close()
for tx_id in range(tx_count):
self.data_get_TX_by_index['params'] = [str(hex(i)), str(hex(tx_id))]
r = self.sess.get(url, data=json.dumps(self.data_get_TX_by_index), allow_redirects=True)
tx = get_result(r.content)
r.close()
if (tx['to'] == None): # this TX creates a contract
self.data_get_TX_receipt['params'] = [tx['hash']]
r = self.sess.get(url, data=json.dumps(self.data_get_TX_receipt), allow_redirects=True)
tx_receipt = get_result(r.content)
r.close()
if tx_receipt['contractAddress'] == None:
continue
self.data_getCode['params'][0] = tx_receipt['contractAddress']
r = self.sess.get(url, data=json.dumps(self.data_getCode), allow_redirects=True)
code = get_result(r.content)
r.close()
if len(code) > 2:
self.data_get_TX['params'] = [tx['hash']]
r = self.sess.get(url, data=json.dumps(self.data_get_TX), allow_redirects=True)
tx_detail = get_result(r.content)
r.close()
tx_input = tx_detail['input']
# init_data = tx_input[:len(tx_input)-len(code)+2]
self.list_contract[tx_receipt['contractAddress']] = [tx_input, code, tx['hash']]
# Print the last run
print 'Thread ' + str(self.index) + ' is processing block: ' + str(i)
print "Number of contracts in Thread " + str(self.index) + " so far: " + str(len(self.list_contract))
with open('contract_' + str(self.high) + '.json', 'w') as outfile:
json.dump(self.list_contract, outfile)
self.list_contract.clear()
list_threads = []
try:
for i in range(0, 4):
new_thread = MyThread(i)
list_threads.append(new_thread)
for my_thread in list_threads:
my_thread.start()
except Exception as e:
print e
print "Error: unable to start thread" | 0.080936 | 0.089097 |
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from pyspark import sql
from pyspark.sql import functions
from cape_privacy.spark import dtypes
from cape_privacy.spark.transformations import base
from cape_privacy.utils import typecheck
# Maps a frequency name to a callable turning integer noise into a pandas
# Timedelta at that granularity. YEAR and MONTH are approximated as 365 and
# 30 days respectively.
# NOTE(review): key casing is inconsistent ("YEAR".."HOUR" upper-case vs
# "minutes"/"seconds" lower-case); callers must pass these exact strings —
# confirm whether this is intentional before normalizing.
_FREQUENCY_TO_DELTA_FN = {
    "YEAR": lambda noise: pd.to_timedelta(noise * 365, unit="days"),
    "MONTH": lambda noise: pd.to_timedelta(noise * 30, unit="days"),
    "DAY": lambda noise: pd.to_timedelta(noise, unit="days"),
    "HOUR": lambda noise: pd.to_timedelta(noise, unit="hours"),
    "minutes": lambda noise: pd.to_timedelta(noise, unit="minutes"),
    "seconds": lambda noise: pd.to_timedelta(noise, unit="seconds"),
}
# Convenience aliases: APIs below accept a scalar or a flat tuple of scalars.
IntTuple = Union[int, Tuple[int, ...]]
StrTuple = Union[str, Tuple[str, ...]]
class NumericPerturbation(base.Transformation):
    """Add uniform random noise to a numeric series.

    Mask a numeric series by adding uniform random noise to each value.
    The amount of noise is drawn from the interval [min, max).

    Attributes:
        dtype (dtypes.Numerics): series type
        min (int, float): the values generated will be greater or equal to min
        max (int, float): the values generated will be less than max
        seed (int), optional: a seed to initialize the random generator
    """

    identifier = "numeric-perturbation"
    type_signature = "col->col"

    def __init__(
        self,
        dtype: dtypes.DType,
        # Fix: `(int, float)` tuple literals are not valid PEP 484
        # annotations; `Union[int, float]` expresses the intended contract.
        min: Union[int, float],
        max: Union[int, float],
        seed: Optional[int] = None,
    ):
        # NOTE: `assert` is stripped under `python -O`; kept as-is since
        # callers may rely on AssertionError here.
        assert dtype in dtypes.Numerics
        typecheck.check_arg(min, (int, float))
        typecheck.check_arg(max, (int, float))
        typecheck.check_arg(seed, (int, type(None)))
        super().__init__(dtype)
        self._min = min
        self._max = max
        self._seed = seed

    def __call__(self, x: sql.Column):
        """Return ``x`` plus noise drawn uniformly from [min, max).

        Args:
            x: Spark column to perturb.

        Returns:
            A Spark column expression: ``x`` with additive noise, cast back
            to the configured dtype unless that dtype is Double.
        """
        uniform_noise = functions.rand(seed=self._seed)
        # Advance the seed so repeated calls do not reuse the same noise.
        if self._seed is not None:
            self._seed += 1
        affine_noise = self._min + uniform_noise * (self._max - self._min)
        if self._dtype is not dtypes.Double:
            affine_noise = affine_noise.astype(self._dtype)
        return x + affine_noise
class DatePerturbation(base.Transformation):
    """Add uniform random noise to a Pandas series of timestamps

    Mask a series by adding uniform random noise to the specified
    frequencies of timestamps. The amount of noise for each frequency
    is drawn from the interval [min_freq, max_freq).

    Note that seeds are currently not supported.

    Attributes:
        frequency (str, str list): one or more frequencies to perturbate
        min (int, int list): the frequency value will be greater or equal to min
        max (int, int list): the frequency value will be less than max
    """
    identifier = "date-perturbation"
    type_signature = "col->col"
    def __init__(
        self, frequency: StrTuple, min: IntTuple, max: IntTuple,
    ):
        super().__init__(dtypes.Date)
        # Normalize scalar args to lists so the zip() in the UDF walks
        # frequency/min/max in lockstep.
        self._frequency = _check_freq_arg(frequency)
        self._min = _check_minmax_arg(min)
        self._max = _check_minmax_arg(max)
        # Built lazily on first __call__ (see below).
        self._perturb_date = None
    def __call__(self, x: sql.Column):
        # Create the pandas UDF on first use only, so constructing the
        # transformation stays cheap and UDF registration happens lazily.
        if self._perturb_date is None:
            self._perturb_date = self._make_perturb_udf()
        return self._perturb_date(x)
    def _make_perturb_udf(self):
        # Returns a pandas UDF adding integer noise per configured frequency.
        # A fresh, unseeded generator is created per batch — this is why
        # seeds are unsupported for date perturbation.
        @functions.pandas_udf(dtypes.Date)
        def perturb_date(x: pd.Series) -> pd.Series:
            rng = np.random.default_rng()
            for f, mn, mx in zip(self._frequency, self._min, self._max):
                # TODO can we switch to a lower dtype than np.int64?
                noise = rng.integers(mn, mx, size=x.shape)
                delta_fn = _FREQUENCY_TO_DELTA_FN.get(f, None)
                if delta_fn is None:
                    raise ValueError(
                        "Frequency {} must be one of {}.".format(
                            f, list(_FREQUENCY_TO_DELTA_FN.keys())
                        )
                    )
                x += delta_fn(noise)
            return x
        return perturb_date
def _check_minmax_arg(arg):
"""Checks that arg is an integer or a flat collection of integers."""
if not isinstance(arg, (tuple, list)):
if not isinstance(arg, int):
raise ValueError
return [arg]
else:
for a in arg:
if not isinstance(a, int):
raise ValueError
return arg
def _check_freq_arg(arg):
    """Checks that arg is string or a flat collection of strings."""
    # A bare string is normalized to a one-element list so callers can
    # always iterate; tuples/lists are validated element-wise and returned
    # unchanged. Raises a bare ValueError (no message) on any non-string.
    if not isinstance(arg, (tuple, list)):
        if not isinstance(arg, str):
            raise ValueError
        return [arg]
    else:
        for a in arg:
            if not isinstance(a, str):
                raise ValueError
return arg | cape_privacy/spark/transformations/perturbation.py | from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from pyspark import sql
from pyspark.sql import functions
from cape_privacy.spark import dtypes
from cape_privacy.spark.transformations import base
from cape_privacy.utils import typecheck
# Frequency name -> noise-to-Timedelta converter (YEAR≈365 days,
# MONTH≈30 days). NOTE(review): mixed key casing ("YEAR" vs "minutes")
# is load-bearing — lookups use these exact strings; confirm intended.
_FREQUENCY_TO_DELTA_FN = {
    "YEAR": lambda noise: pd.to_timedelta(noise * 365, unit="days"),
    "MONTH": lambda noise: pd.to_timedelta(noise * 30, unit="days"),
    "DAY": lambda noise: pd.to_timedelta(noise, unit="days"),
    "HOUR": lambda noise: pd.to_timedelta(noise, unit="hours"),
    "minutes": lambda noise: pd.to_timedelta(noise, unit="minutes"),
    "seconds": lambda noise: pd.to_timedelta(noise, unit="seconds"),
}
# Scalar-or-flat-tuple argument aliases used by the classes below.
IntTuple = Union[int, Tuple[int, ...]]
StrTuple = Union[str, Tuple[str, ...]]
class NumericPerturbation(base.Transformation):
    """Additive uniform-noise mask for a numeric column.

    Each value of the series gets an independent noise term drawn
    uniformly from the half-open interval [min, max).

    Attributes:
        dtype (dtypes.Numerics): series type
        min (int, float): inclusive lower bound of the noise
        max (int, float): exclusive upper bound of the noise
        seed (int), optional: a seed to initialize the random generator
    """

    identifier = "numeric-perturbation"
    type_signature = "col->col"

    def __init__(
        self,
        dtype: dtypes.DType,
        min: (int, float),
        max: (int, float),
        seed: Optional[int] = None,
    ):
        assert dtype in dtypes.Numerics
        # Validate each constructor argument against its accepted types.
        for value, accepted in (
            (min, (int, float)),
            (max, (int, float)),
            (seed, (int, type(None))),
        ):
            typecheck.check_arg(value, accepted)
        super().__init__(dtype)
        self._min = min
        self._max = max
        self._seed = seed

    def __call__(self, x: sql.Column):
        """Return ``x`` with uniform noise from [min, max) added."""
        noise = functions.rand(seed=self._seed)
        if self._seed is not None:
            # Bump the seed so each invocation draws a fresh noise column.
            self._seed += 1
        scaled = self._min + noise * (self._max - self._min)
        if self._dtype is not dtypes.Double:
            scaled = scaled.astype(self._dtype)
        return x + scaled
class DatePerturbation(base.Transformation):
    """Add uniform random noise to a Pandas series of timestamps

    Mask a series by adding uniform random noise to the specified
    frequencies of timestamps. The amount of noise for each frequency
    is drawn from the interval [min_freq, max_freq).

    Note that seeds are currently not supported.

    Attributes:
        frequency (str, str list): one or more frequencies to perturbate
        min (int, int list): the frequency value will be greater or equal to min
        max (int, int list): the frequency value will be less than max
    """
    identifier = "date-perturbation"
    type_signature = "col->col"
    def __init__(
        self, frequency: StrTuple, min: IntTuple, max: IntTuple,
    ):
        super().__init__(dtypes.Date)
        # Scalars become one-element lists; the three lists are walked in
        # lockstep by zip() inside the UDF.
        self._frequency = _check_freq_arg(frequency)
        self._min = _check_minmax_arg(min)
        self._max = _check_minmax_arg(max)
        # Deferred: the pandas UDF is created on first __call__.
        self._perturb_date = None
    def __call__(self, x: sql.Column):
        # Lazy one-time UDF construction, then delegate to it.
        if self._perturb_date is None:
            self._perturb_date = self._make_perturb_udf()
        return self._perturb_date(x)
    def _make_perturb_udf(self):
        # Builds the pandas UDF; each batch gets a fresh unseeded RNG,
        # which is why seeding is unsupported for this transformation.
        @functions.pandas_udf(dtypes.Date)
        def perturb_date(x: pd.Series) -> pd.Series:
            rng = np.random.default_rng()
            for f, mn, mx in zip(self._frequency, self._min, self._max):
                # TODO can we switch to a lower dtype than np.int64?
                noise = rng.integers(mn, mx, size=x.shape)
                delta_fn = _FREQUENCY_TO_DELTA_FN.get(f, None)
                if delta_fn is None:
                    raise ValueError(
                        "Frequency {} must be one of {}.".format(
                            f, list(_FREQUENCY_TO_DELTA_FN.keys())
                        )
                    )
                x += delta_fn(noise)
            return x
        return perturb_date
def _check_minmax_arg(arg):
"""Checks that arg is an integer or a flat collection of integers."""
if not isinstance(arg, (tuple, list)):
if not isinstance(arg, int):
raise ValueError
return [arg]
else:
for a in arg:
if not isinstance(a, int):
raise ValueError
return arg
def _check_freq_arg(arg):
    """Checks that arg is string or a flat collection of strings."""
    # Wrap a bare string into a one-element list; otherwise verify every
    # element of the collection is a string. Raises a bare ValueError
    # (no message) on the first non-string encountered.
    if not isinstance(arg, (tuple, list)):
        if not isinstance(arg, str):
            raise ValueError
        return [arg]
    else:
        for a in arg:
            if not isinstance(a, str):
                raise ValueError
return arg | 0.897741 | 0.624637 |
import random
# Register operand names shared by every instruction generator below.
regs = ['ra', 'rb', 'rc', 'rd', 're']
def generate_imm():
    """Return a uniformly random 64-bit immediate as a 0x-prefixed hex string."""
    value = random.randint(0, 0xFFFFFFFFFFFFFFFF)
    return hex(value)
def generate_mpc():
    """Emit `mpc <reg>` or, half the time, `mpc <reg> #<imm>`."""
    with_imm = random.randint(0, 1) != 0
    reg = random.choice(regs)
    if with_imm:
        return f"mpc {reg} #{generate_imm()}"
    return f"mpc {reg}"
def generate_enq():
    """Emit `enq <reg>` or, half the time, `enq <reg> #<imm>`."""
    with_imm = random.randint(0, 1) != 0
    reg = random.choice(regs)
    if with_imm:
        return f"enq {reg} #{generate_imm()}"
    return f"enq {reg}"
def generate_deq():
    """Emit `deq`, `deq <reg>`, or `deq <reg> #<imm>` (uniform choice)."""
    variant = random.randint(0, 2)
    if variant == 0:
        return "deq"
    reg = random.choice(regs)
    if variant == 1:
        return f"deq {reg}"
    return f"deq {reg} #{generate_imm()}"
def generate_jsz():
    """Emit `jsz <reg> <reg> <reg>` with three independently drawn registers."""
    a, b, c = (random.choice(regs) for _ in range(3))
    return f"jsz {a} {b} {c}"
def generate_allrmprcivri():
    """Emit `allrmprcivri` with three registers, or one register plus two immediates."""
    three_regs = random.randint(0, 1) == 0
    if three_regs:
        a, b, c = (random.choice(regs) for _ in range(3))
        return f"allrmprcivri {a} {b} {c}"
    return f"allrmprcivri {random.choice(regs)} #{generate_imm()} #{generate_imm()}"
def generate_mooq():
    """Emit the argument-less `mooq` instruction."""
    return "mooq"
def generate_rv():
if random.randint(0, 1) == 0:
return 'rv {} {}'.format(random.choice(regs), random.choice(regs))
else:
return 'rv {} {} #{}'.format(random.choice(regs), random.choice(regs), generate_imm())
def generate_lar():
return 'lar {} #{}'.format(random.choice(regs), generate_imm())
def generate_aml():
ch = random.randint(0, 2)
if ch == 0:
return 'aml'
elif ch == 1:
return 'aml {}'.format(random.choice(regs))
else:
return 'aml #{}'.format(generate_imm())
def generate_gml():
if random.randint(0, 1) == 0:
return 'gml {}'.format(random.choice(regs))
else:
return 'gml #{}'.format(generate_imm())
def generate_sq():
if random.randint(0, 1) == 0:
return 'sq {}'.format(random.choice(regs))
else:
return 'sq #{}'.format(generate_imm())
funcs = [generate_mpc, generate_enq, generate_deq, generate_jsz, generate_allrmprcivri, generate_mooq, generate_rv, generate_lar, generate_aml, generate_gml, generate_sq]
for i in range(0x1337):
print(random.choice(funcs)() + ";") | b01lers-ctf-2020/300_railed/src/generate_random_instructions.py | import random
regs = ['ra', 'rb', 'rc', 'rd', 're']
def generate_imm():
return hex(random.randint(0, 0xffffffffffffffff))
def generate_mpc():
if random.randint(0, 1) == 0:
return 'mpc {}'.format(random.choice(regs))
else:
return 'mpc {} #{}'.format(random.choice(regs), generate_imm())
def generate_enq():
if random.randint(0, 1) == 0:
return 'enq {}'.format(random.choice(regs))
else:
return 'enq {} #{}'.format(random.choice(regs), generate_imm())
def generate_deq():
ch = random.randint(0, 2)
if ch == 0:
return 'deq'
elif ch == 1:
return 'deq {}'.format(random.choice(regs))
else:
return 'deq {} #{}'.format(random.choice(regs), generate_imm())
def generate_jsz():
return 'jsz {} {} {}'.format(random.choice(regs), random.choice(regs), random.choice(regs))
def generate_allrmprcivri():
if random.randint(0, 1) == 0:
return 'allrmprcivri {} {} {}'.format(random.choice(regs), random.choice(regs), random.choice(regs))
else:
return 'allrmprcivri {} #{} #{}'.format(random.choice(regs), generate_imm(), generate_imm())
def generate_mooq():
return 'mooq'
def generate_rv():
if random.randint(0, 1) == 0:
return 'rv {} {}'.format(random.choice(regs), random.choice(regs))
else:
return 'rv {} {} #{}'.format(random.choice(regs), random.choice(regs), generate_imm())
def generate_lar():
return 'lar {} #{}'.format(random.choice(regs), generate_imm())
def generate_aml():
ch = random.randint(0, 2)
if ch == 0:
return 'aml'
elif ch == 1:
return 'aml {}'.format(random.choice(regs))
else:
return 'aml #{}'.format(generate_imm())
def generate_gml():
if random.randint(0, 1) == 0:
return 'gml {}'.format(random.choice(regs))
else:
return 'gml #{}'.format(generate_imm())
def generate_sq():
if random.randint(0, 1) == 0:
return 'sq {}'.format(random.choice(regs))
else:
return 'sq #{}'.format(generate_imm())
funcs = [generate_mpc, generate_enq, generate_deq, generate_jsz, generate_allrmprcivri, generate_mooq, generate_rv, generate_lar, generate_aml, generate_gml, generate_sq]
for i in range(0x1337):
print(random.choice(funcs)() + ";") | 0.115025 | 0.200969 |
import torch
import torchvision
import torchvision.transforms as transforms
class BinaryDataset(torch.utils.data.Dataset):
def __init__(self, root, transform=None, return_idx=False):
x, y = torch.load(root)
self.data = x
self.labels = y
self.root = root
self.transform = transform
self.return_idx = return_idx
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x_t = self.data[idx].type(torch.float)
y_t = self.labels[idx]
if self.transform:
x_t = self.transform(x_t)
if self.return_idx:
return (x_t, y_t, idx)
else:
return (x_t, y_t)
class IndexedDataset(torch.utils.data.Dataset):
"""
Wraps another dataset to sample from. Returns the sampled indices during iteration.
In other words, instead of producing (X, y) it produces (X, y, idx)
source: https://github.com/tneumann/minimal_glo/blob/master/glo.py
"""
def __init__(self, base_dataset):
self.base = base_dataset
def __len__(self):
return len(self.base)
def __getitem__(self, idx):
img, label = self.base[idx]
return (img, label, idx)
def get_dataset(name, batch_size, test_batch=10000, embedding=False, return_idx=False):
if name == 'mnist':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.MNIST(root='./data',train=True,download=True,transform=transform)
testset = torchvision.datasets.MNIST(root='./data',train=False,download=True,transform=transform)
if return_idx:
trainset = IndexedDataset(trainset)
testset = IndexedDataset(testset)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 60000
test_size = 10000
num_of_classes = 10
elif name == 'emnist':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.EMNIST(root='./data',train=True,split='balanced',download=True,transform=transform)
testset = torchvision.datasets.EMNIST(root='./data',train=False,split='balanced',download=True,transform=transform)
trainset.train_data = trainset.train_data.permute(0, 2, 1)
testset.test_data = testset.test_data.permute(0, 2, 1)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 112800
test_size = 10000
num_of_classes = 47
elif name == 'fashion':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 60000
test_size = 10000
num_of_classes = 10
elif name == 'cifar':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
trainset = torchvision.datasets.CIFAR10(root='./data',train=True,download=True,transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data',train=False,download=True,transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 50000
test_size = 10000
num_of_classes = 10
elif name == 'stl':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
trainset = torchvision.datasets.STL10(root='./data', split='train', download=True,transform=transform)
testset = torchvision.datasets.STL10(root='./data', split='test', download=True,transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 5000
test_size = 8000
num_of_classes = 10
else:
if embedding:
X = torch.load(name)
dev = torch.device('cuda:0')
X = X.to(dev)
mu = X.mean(dim=0)
std = X.std(dim=0)
X = ((X-mu)/std).cpu()
dataset_size = X.shape[0]
Y = torch.zeros(dataset_size, dtype=torch.int)
dataset = torch.utils.data.TensorDataset(X, Y)
else:
dataset = BinaryDataset(name, transform=transforms.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), return_idx=return_idx)
dataset_size = dataset.__len__()
R = torch.randperm(dataset_size)
train_indices = torch.utils.data.SubsetRandomSampler(R[test_batch:])
test_indices = torch.utils.data.SubsetRandomSampler(R[:test_batch])
trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_indices)
testloader = torch.utils.data.DataLoader(dataset, batch_size=100, sampler=test_indices)
num_of_classes = 1
train_size = dataset_size-test_batch
test_size = test_batch
if embedding:
return trainloader, testloader, train_size, test_size, num_of_classes, mu, std
else:
return trainloader, testloader, train_size, test_size, num_of_classes | dataset.py | import torch
import torchvision
import torchvision.transforms as transforms
class BinaryDataset(torch.utils.data.Dataset):
def __init__(self, root, transform=None, return_idx=False):
x, y = torch.load(root)
self.data = x
self.labels = y
self.root = root
self.transform = transform
self.return_idx = return_idx
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x_t = self.data[idx].type(torch.float)
y_t = self.labels[idx]
if self.transform:
x_t = self.transform(x_t)
if self.return_idx:
return (x_t, y_t, idx)
else:
return (x_t, y_t)
class IndexedDataset(torch.utils.data.Dataset):
"""
Wraps another dataset to sample from. Returns the sampled indices during iteration.
In other words, instead of producing (X, y) it produces (X, y, idx)
source: https://github.com/tneumann/minimal_glo/blob/master/glo.py
"""
def __init__(self, base_dataset):
self.base = base_dataset
def __len__(self):
return len(self.base)
def __getitem__(self, idx):
img, label = self.base[idx]
return (img, label, idx)
def get_dataset(name, batch_size, test_batch=10000, embedding=False, return_idx=False):
if name == 'mnist':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.MNIST(root='./data',train=True,download=True,transform=transform)
testset = torchvision.datasets.MNIST(root='./data',train=False,download=True,transform=transform)
if return_idx:
trainset = IndexedDataset(trainset)
testset = IndexedDataset(testset)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 60000
test_size = 10000
num_of_classes = 10
elif name == 'emnist':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.EMNIST(root='./data',train=True,split='balanced',download=True,transform=transform)
testset = torchvision.datasets.EMNIST(root='./data',train=False,split='balanced',download=True,transform=transform)
trainset.train_data = trainset.train_data.permute(0, 2, 1)
testset.test_data = testset.test_data.permute(0, 2, 1)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 112800
test_size = 10000
num_of_classes = 47
elif name == 'fashion':
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 60000
test_size = 10000
num_of_classes = 10
elif name == 'cifar':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
trainset = torchvision.datasets.CIFAR10(root='./data',train=True,download=True,transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data',train=False,download=True,transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 50000
test_size = 10000
num_of_classes = 10
elif name == 'stl':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
trainset = torchvision.datasets.STL10(root='./data', split='train', download=True,transform=transform)
testset = torchvision.datasets.STL10(root='./data', split='test', download=True,transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, num_workers=4)
train_size = 5000
test_size = 8000
num_of_classes = 10
else:
if embedding:
X = torch.load(name)
dev = torch.device('cuda:0')
X = X.to(dev)
mu = X.mean(dim=0)
std = X.std(dim=0)
X = ((X-mu)/std).cpu()
dataset_size = X.shape[0]
Y = torch.zeros(dataset_size, dtype=torch.int)
dataset = torch.utils.data.TensorDataset(X, Y)
else:
dataset = BinaryDataset(name, transform=transforms.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), return_idx=return_idx)
dataset_size = dataset.__len__()
R = torch.randperm(dataset_size)
train_indices = torch.utils.data.SubsetRandomSampler(R[test_batch:])
test_indices = torch.utils.data.SubsetRandomSampler(R[:test_batch])
trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_indices)
testloader = torch.utils.data.DataLoader(dataset, batch_size=100, sampler=test_indices)
num_of_classes = 1
train_size = dataset_size-test_batch
test_size = test_batch
if embedding:
return trainloader, testloader, train_size, test_size, num_of_classes, mu, std
else:
return trainloader, testloader, train_size, test_size, num_of_classes | 0.912801 | 0.675737 |
import json
__author__ = '<NAME>'
class SiteInfo:
def __init__(self,dataname='SiteData',sitedatafile=[]):
"""
__init__: initialization
"""
self.sitedatafile = sitedatafile
self.nCase = 0
self.nameCase = []
self.SiteCase = {}
self.__load_data()
def __load_data(self):
"""
__loadata: loading site data
"""
print("Loading site data.")
# Site data
if len(self.sitedatafile):
with open(self.sitedatafile) as f:
data = json.load(f)
self.nCase = data['Number of cases']
self.nameCase = data['Case name']
for tagcase in self.nameCase:
self.SiteCase[tagcase] = data[tagcase]
print("Site data loaded.")
def add_case(self,sitedatafile=[]):
"""
add_case: adding cases into the current site data
"""
print("Adding case(s).")
# Site data
if len(self.sitedatafile):
with open(self.sitedatafile) as f:
data = json.load(f)
for tagcase in data['Case name']:
# checking any duplication
if tagcase in self.nameCase:
print("Case name already existed: "+tagcase+".")
return
else:
self.nameCase.append(tagcase)
self.SiteCase[tagcase] = data[tagcase]
self.nCase = self.nCase+1
print("Case: "+tagcase+" added.")
def remove_case(self,casename=[]):
"""
remove_case: removing cases from the current site data
"""
print("Removing case(s).")
# Site data
for tagcase in casename:
# checking any duplication
if tagcase in self.nameCase:
self.nameCase.remove(tagcase)
del self.SiteCase[tagcase]
self.nCase = self.nCase-1
print("Case: "+tagcase+" removed.")
else:
print("Case does not exist: "+tagcase)
return | pyhca/SiteSpecificInformation.py |
import json
__author__ = '<NAME>'
class SiteInfo:
def __init__(self,dataname='SiteData',sitedatafile=[]):
"""
__init__: initialization
"""
self.sitedatafile = sitedatafile
self.nCase = 0
self.nameCase = []
self.SiteCase = {}
self.__load_data()
def __load_data(self):
"""
__loadata: loading site data
"""
print("Loading site data.")
# Site data
if len(self.sitedatafile):
with open(self.sitedatafile) as f:
data = json.load(f)
self.nCase = data['Number of cases']
self.nameCase = data['Case name']
for tagcase in self.nameCase:
self.SiteCase[tagcase] = data[tagcase]
print("Site data loaded.")
def add_case(self,sitedatafile=[]):
"""
add_case: adding cases into the current site data
"""
print("Adding case(s).")
# Site data
if len(self.sitedatafile):
with open(self.sitedatafile) as f:
data = json.load(f)
for tagcase in data['Case name']:
# checking any duplication
if tagcase in self.nameCase:
print("Case name already existed: "+tagcase+".")
return
else:
self.nameCase.append(tagcase)
self.SiteCase[tagcase] = data[tagcase]
self.nCase = self.nCase+1
print("Case: "+tagcase+" added.")
def remove_case(self,casename=[]):
"""
remove_case: removing cases from the current site data
"""
print("Removing case(s).")
# Site data
for tagcase in casename:
# checking any duplication
if tagcase in self.nameCase:
self.nameCase.remove(tagcase)
del self.SiteCase[tagcase]
self.nCase = self.nCase-1
print("Case: "+tagcase+" removed.")
else:
print("Case does not exist: "+tagcase)
return | 0.089318 | 0.166404 |
from mvnc import mvncapi as mvnc
import NeuralNetwork
import cv2
import argparse
import time
import threading
#Argument parser
arg = argparse.ArgumentParser()
arg.add_argument("-m", "--mode", required=True, type=str, default="image", help="Mode of Neural Network, options: image, video")
arg.add_argument("-n", "--num", required=False, type=int, default=1, help="Number of NCS you want to use")
arg.add_argument("-i", "--image", required=False, type=str, help="The path to the image you want to process")
arg.add_argument("-v", "--video", required=False, help="The path to the video you want to process or enter a integer if you want to use your webcam")
args = vars( arg.parse_args() )
#Neural network
video_mode = True if args["mode"] == "video" else False
NN = NeuralNetwork.Net( video = video_mode )
#Intel's Neural Compute Stick
mvnc.global_set_option( mvnc.GlobalOption.RW_LOG_LEVEL, 2 )
devices = mvnc.enumerate_devices()
if len(devices) == 0:
print( "No devices found..." )
quit()
elif args["num"] > len(devices):
print( "There aren't that many NCS's available..." )
quit()
elif args["num"] == 0:
print( "One NCS is required to run..." )
quit()
with open( './model/graph', mode='rb' ) as f:
graphfile = f.read()
graph = mvnc.Graph( 'graph' )
class feed_forward_thread( threading.Thread ):
def __init__( self, device, args, NN, graph, delay=0, video=False ):
threading.Thread.__init__( self )
self.device = None
self.fifoIn = None
self.fifoOut = None
self.video_mode = video
self.args = args
self.NN = NN
self.graph = graph
self.delay = delay
self.open_device_load_graph( device )
def open_device_load_graph( self, device ):
self.device = mvnc.Device( device )
self.device.open()
self.fifoIn, self.fifoOut = self.graph.allocate_with_fifos( self.device, graphfile )
def run( self ):
if self.delay > 0:
time.sleep( self.delay )
if self.video_mode:
fps = 0.0
#Webcam mode, else video file mode
if self.args["video"].isdigit():
self.args["video"] = int( self.args["video"])
cap = cv2.VideoCapture( self.args["video"] )
while True:
start = time.time()
ret, display_image = cap.read()
if not ret:
print( "No image found from source, exiting" )
break
output_image, boxes = self.run_interference( display_image )
fps = ( fps + ( 1 / (time.time() - start) ) ) / 2
output_image = cv2.putText( output_image, "fps: {:.1f}".format(fps), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, 4 )
cv2.imshow( self.NN.cv_window_name, output_image )
if cv2.getWindowProperty( self.NN.cv_window_name, cv2.WND_PROP_ASPECT_RATIO ) < 0.0:
print( "Window closed" )
break
elif cv2.waitKey( 1 ) & 0xFF == ord( 'q' ):
print( "Q pressed" )
break
cap.release()
cv2.destroyAllWindows()
else:
start = time.time()
image = cv2.imread( self.args["image"] )
output_image, boxes = self.run_interference( image )
print( "Time took: {:.1f} sec".format(time.time() - start) )
cv2.imshow( self.NN.cv_window_name, output_image )
cv2.waitKey( 0 )
#Close device and with it the thread
self.graph.destroy()
self.fifoIn.destroy()
self.fifoOut.destroy()
self.device.close()
def run_interference( self, image ):
resize_image, inputs = self.NN.preproces_image( image )
self.graph.queue_inference_with_fifo_elem( self.fifoIn, self.fifoOut, inputs, 'user object' )
prediction, _ = self.fifoOut.read_elem()
return self.NN.postprocess( prediction, resize_image, 0.3, 0.3 )
#Run script
threads = []
delay = 0
for i in range(args["num"]):
threads.append( feed_forward_thread( devices[i], args, NN, graph, delay=delay, video=video_mode) )
delay += (170/(args["num"]*(i+1)))
#run thread
for thread in threads:
thread.start()
#wait until threads are done
for thread in threads:
thread.join()
#Done!!
print('Finished') | run.py | from mvnc import mvncapi as mvnc
import NeuralNetwork
import cv2
import argparse
import time
import threading
#Argument parser
arg = argparse.ArgumentParser()
arg.add_argument("-m", "--mode", required=True, type=str, default="image", help="Mode of Neural Network, options: image, video")
arg.add_argument("-n", "--num", required=False, type=int, default=1, help="Number of NCS you want to use")
arg.add_argument("-i", "--image", required=False, type=str, help="The path to the image you want to process")
arg.add_argument("-v", "--video", required=False, help="The path to the video you want to process or enter a integer if you want to use your webcam")
args = vars( arg.parse_args() )
#Neural network
video_mode = True if args["mode"] == "video" else False
NN = NeuralNetwork.Net( video = video_mode )
#Intel's Neural Compute Stick
mvnc.global_set_option( mvnc.GlobalOption.RW_LOG_LEVEL, 2 )
devices = mvnc.enumerate_devices()
if len(devices) == 0:
print( "No devices found..." )
quit()
elif args["num"] > len(devices):
print( "There aren't that many NCS's available..." )
quit()
elif args["num"] == 0:
print( "One NCS is required to run..." )
quit()
with open( './model/graph', mode='rb' ) as f:
graphfile = f.read()
graph = mvnc.Graph( 'graph' )
class feed_forward_thread( threading.Thread ):
def __init__( self, device, args, NN, graph, delay=0, video=False ):
threading.Thread.__init__( self )
self.device = None
self.fifoIn = None
self.fifoOut = None
self.video_mode = video
self.args = args
self.NN = NN
self.graph = graph
self.delay = delay
self.open_device_load_graph( device )
def open_device_load_graph( self, device ):
self.device = mvnc.Device( device )
self.device.open()
self.fifoIn, self.fifoOut = self.graph.allocate_with_fifos( self.device, graphfile )
def run( self ):
if self.delay > 0:
time.sleep( self.delay )
if self.video_mode:
fps = 0.0
#Webcam mode, else video file mode
if self.args["video"].isdigit():
self.args["video"] = int( self.args["video"])
cap = cv2.VideoCapture( self.args["video"] )
while True:
start = time.time()
ret, display_image = cap.read()
if not ret:
print( "No image found from source, exiting" )
break
output_image, boxes = self.run_interference( display_image )
fps = ( fps + ( 1 / (time.time() - start) ) ) / 2
output_image = cv2.putText( output_image, "fps: {:.1f}".format(fps), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, 4 )
cv2.imshow( self.NN.cv_window_name, output_image )
if cv2.getWindowProperty( self.NN.cv_window_name, cv2.WND_PROP_ASPECT_RATIO ) < 0.0:
print( "Window closed" )
break
elif cv2.waitKey( 1 ) & 0xFF == ord( 'q' ):
print( "Q pressed" )
break
cap.release()
cv2.destroyAllWindows()
else:
start = time.time()
image = cv2.imread( self.args["image"] )
output_image, boxes = self.run_interference( image )
print( "Time took: {:.1f} sec".format(time.time() - start) )
cv2.imshow( self.NN.cv_window_name, output_image )
cv2.waitKey( 0 )
#Close device and with it the thread
self.graph.destroy()
self.fifoIn.destroy()
self.fifoOut.destroy()
self.device.close()
def run_interference( self, image ):
resize_image, inputs = self.NN.preproces_image( image )
self.graph.queue_inference_with_fifo_elem( self.fifoIn, self.fifoOut, inputs, 'user object' )
prediction, _ = self.fifoOut.read_elem()
return self.NN.postprocess( prediction, resize_image, 0.3, 0.3 )
#Run script
threads = []
delay = 0
for i in range(args["num"]):
threads.append( feed_forward_thread( devices[i], args, NN, graph, delay=delay, video=video_mode) )
delay += (170/(args["num"]*(i+1)))
#run thread
for thread in threads:
thread.start()
#wait until threads are done
for thread in threads:
thread.join()
#Done!!
print('Finished') | 0.143023 | 0.138695 |
import os
import re
from .ply import lex, yacc
from collections import OrderedDict
import sublime
class Parser:
"""
Base class for a lexer/parser that has the rules defined as methods
"""
tokens = ()
precedence = ()
def __init__(self, **kw):
self.debug = kw.get('debug', 0)
self.names = {}
try:
modname = os.path.split(os.path.splitext(__file__)[0])[
1] + "_" + self.__class__.__name__
except:
modname = "parser" + "_" + self.__class__.__name__
self.debugfile = modname + ".dbg"
# print self.debugfile
# Build the lexer and parser
lex.lex(module=self, debug=self.debug)
yacc.yacc(module=self,
debug=self.debug,
debugfile=self.debugfile)
def parse(self, s):
return yacc.parse(s)
class ProtoParser(Parser):
tokens = (
'BOOL', 'NAME', 'FLOAT', 'INTEGER', 'STRING'
)
literals = ['{', '}', '[', ']', ':']
# Tokens
t_BOOL = r'true|false'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))?)|((\d+)e(\+|-)?(\d+))([lL]|[fF])'
t_INTEGER = r'-?([0-9]+)(\.[0-9]+)?([eE][-+]?[0-9]+)?'
def t_STRING(self, t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
def xint(s):
is_hex = False
if s[0] in ('x', 'X'):
s = s[1:]
is_hex = True
return int(s, 16 if is_hex else 8)
def byterepl(m):
s = m.group(0).split('\\')[1:]
b = bytearray()
b.extend(map(xint, s))
try:
return b.decode()
except UnicodeError as err:
print(f'{m.group(0) = }\n{err = }')
return m.group(0)
# Transform octal '\nnn' or hex '\xnn' byte sequences to string object
t.value = re.sub(r'((\\[0-7]{3})|(\\x[\da-fA-F]{2}))+', byterepl, t.value)
return t
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
print("Illegal character '%s'" % t.value[0])
# Parsing rules
def p_statement_expr(self, p):
"""statement : pair_list
| object"""
p[0] = p[1]
def p_expression_key(self, p):
"""key : NAME
| INTEGER"""
p[0] = p[1]
def p_expression_literal(self, p):
"""literal : NAME
| BOOL
| FLOAT
| INTEGER
| STRING"""
# NAME support enum
p[0] = p[1]
def p_expression_pair(self, p):
"""pair : key ':' literal
| key object"""
if p[2] == ':':
p[0] = OrderedDict({p[1]: p[3]})
else:
p[0] = OrderedDict({p[1]: p[2]})
def p_expression_pair_list(self, p):
"""pair_list : pair
| pair_list pair"""
p[0] = p[1]
if len(p) <= 2:
return
for k, v in p[2].items():
if k not in p[0]:
p[0][k] = v
elif isinstance(p[0][k], list):
p[0][k].append(v)
else:
p[0][k] = [p[0][k], v]
def p_expression_object(self, p):
"""object : '{' '}'
| '{' pair_list '}'"""
if p[2] == '}':
p[0] = OrderedDict()
else:
p[0] = p[2]
def p_error(self, p):
if p:
print("Syntax error at '%s'" % p.value)
else:
print("Syntax error at EOF")
class ProtoSettings:
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super().__new__(cls)
return cls.__instance
def __init__(self):
self.__settings = sublime.load_settings('Pretty Protobuf.sublime-settings')
self.__spaces = self.__settings.get('indent', 4)
self.__sort_keys = self.__settings.get('sort_keys', False)
self.__use_entire_file = self.__settings.get('use_entire_file_if_no_selection', True)
self.__clang_format_path = self.__settings.get('clang_format_path', '')
@property
def spaces(self):
return self.__spaces
@property
def sort_keys(self):
return self.__sort_keys
@property
def use_entire_file(self):
return self.__use_entire_file
@property
def clang_format_path(self):
return self.__clang_format_path or 'clang-format'
class DictFormatter:
def __init__(self, obj):
self.__settings = ProtoSettings()
self.__obj = obj
self.__lst = []
self.__seperator = ' '
def format(self):
self.__format('', self.__obj)
return '\n'.join(self.__lst)
def __format(self, name, obj, times=0):
if isinstance(obj, dict):
spaces = self.__seperator * times
self.__append(f'{spaces}{name} {{' if name else f'{spaces}{{')
if self.__settings.sort_keys:
obj = dict(sorted(obj.items(), key=lambda x: x[0]))
for k, v in obj.items():
self.__format(k, v, times + self.__settings.spaces)
self.__append(f'{spaces}}}')
elif isinstance(obj, list):
for item in obj:
self.__format(name, item, times)
elif isinstance(obj, str):
self.__append(f'{self.__seperator * times}{name}: {obj}')
else:
pass
def __append(self, s):
self.__lst.append(s)
class ProtoFormatter:
parser = ProtoParser()
def __init__(self, debug_str):
# Keep original debug string
self.__debug_string = debug_str
def format(self):
try:
obj = self.parser.parse(self.__debug_string)
return DictFormatter(obj).format()
except lex.LexError as err:
print(f'{self.__debug_string = }\n{err = }')
return '' | proto_formatter.py |
import os
import re
from .ply import lex, yacc
from collections import OrderedDict
import sublime
class Parser:
"""
Base class for a lexer/parser that has the rules defined as methods
"""
tokens = ()
precedence = ()
def __init__(self, **kw):
self.debug = kw.get('debug', 0)
self.names = {}
try:
modname = os.path.split(os.path.splitext(__file__)[0])[
1] + "_" + self.__class__.__name__
except:
modname = "parser" + "_" + self.__class__.__name__
self.debugfile = modname + ".dbg"
# print self.debugfile
# Build the lexer and parser
lex.lex(module=self, debug=self.debug)
yacc.yacc(module=self,
debug=self.debug,
debugfile=self.debugfile)
def parse(self, s):
return yacc.parse(s)
class ProtoParser(Parser):
tokens = (
'BOOL', 'NAME', 'FLOAT', 'INTEGER', 'STRING'
)
literals = ['{', '}', '[', ']', ':']
# Tokens
t_BOOL = r'true|false'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))?)|((\d+)e(\+|-)?(\d+))([lL]|[fF])'
t_INTEGER = r'-?([0-9]+)(\.[0-9]+)?([eE][-+]?[0-9]+)?'
def t_STRING(self, t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
def xint(s):
is_hex = False
if s[0] in ('x', 'X'):
s = s[1:]
is_hex = True
return int(s, 16 if is_hex else 8)
def byterepl(m):
s = m.group(0).split('\\')[1:]
b = bytearray()
b.extend(map(xint, s))
try:
return b.decode()
except UnicodeError as err:
print(f'{m.group(0) = }\n{err = }')
return m.group(0)
# Transform octal '\nnn' or hex '\xnn' byte sequences to string object
t.value = re.sub(r'((\\[0-7]{3})|(\\x[\da-fA-F]{2}))+', byterepl, t.value)
return t
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
print("Illegal character '%s'" % t.value[0])
# Parsing rules
def p_statement_expr(self, p):
"""statement : pair_list
| object"""
p[0] = p[1]
def p_expression_key(self, p):
"""key : NAME
| INTEGER"""
p[0] = p[1]
def p_expression_literal(self, p):
"""literal : NAME
| BOOL
| FLOAT
| INTEGER
| STRING"""
# NAME support enum
p[0] = p[1]
def p_expression_pair(self, p):
"""pair : key ':' literal
| key object"""
if p[2] == ':':
p[0] = OrderedDict({p[1]: p[3]})
else:
p[0] = OrderedDict({p[1]: p[2]})
def p_expression_pair_list(self, p):
"""pair_list : pair
| pair_list pair"""
p[0] = p[1]
if len(p) <= 2:
return
for k, v in p[2].items():
if k not in p[0]:
p[0][k] = v
elif isinstance(p[0][k], list):
p[0][k].append(v)
else:
p[0][k] = [p[0][k], v]
def p_expression_object(self, p):
"""object : '{' '}'
| '{' pair_list '}'"""
if p[2] == '}':
p[0] = OrderedDict()
else:
p[0] = p[2]
def p_error(self, p):
if p:
print("Syntax error at '%s'" % p.value)
else:
print("Syntax error at EOF")
class ProtoSettings:
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super().__new__(cls)
return cls.__instance
def __init__(self):
self.__settings = sublime.load_settings('Pretty Protobuf.sublime-settings')
self.__spaces = self.__settings.get('indent', 4)
self.__sort_keys = self.__settings.get('sort_keys', False)
self.__use_entire_file = self.__settings.get('use_entire_file_if_no_selection', True)
self.__clang_format_path = self.__settings.get('clang_format_path', '')
@property
def spaces(self):
return self.__spaces
@property
def sort_keys(self):
return self.__sort_keys
@property
def use_entire_file(self):
return self.__use_entire_file
@property
def clang_format_path(self):
return self.__clang_format_path or 'clang-format'
class DictFormatter:
def __init__(self, obj):
self.__settings = ProtoSettings()
self.__obj = obj
self.__lst = []
self.__seperator = ' '
def format(self):
self.__format('', self.__obj)
return '\n'.join(self.__lst)
def __format(self, name, obj, times=0):
if isinstance(obj, dict):
spaces = self.__seperator * times
self.__append(f'{spaces}{name} {{' if name else f'{spaces}{{')
if self.__settings.sort_keys:
obj = dict(sorted(obj.items(), key=lambda x: x[0]))
for k, v in obj.items():
self.__format(k, v, times + self.__settings.spaces)
self.__append(f'{spaces}}}')
elif isinstance(obj, list):
for item in obj:
self.__format(name, item, times)
elif isinstance(obj, str):
self.__append(f'{self.__seperator * times}{name}: {obj}')
else:
pass
def __append(self, s):
self.__lst.append(s)
class ProtoFormatter:
parser = ProtoParser()
def __init__(self, debug_str):
# Keep original debug string
self.__debug_string = debug_str
def format(self):
try:
obj = self.parser.parse(self.__debug_string)
return DictFormatter(obj).format()
except lex.LexError as err:
print(f'{self.__debug_string = }\n{err = }')
return '' | 0.430267 | 0.172677 |
import os
import re
import logging
from unidecode import unidecode
from onecodex.exceptions import OneCodexException, UploadException
R1_FILENAME_RE = re.compile(".*[._][Rr]?[1][_.].*")
R2_FILENAME_RE = re.compile(".*[._][Rr]?[2][_.].*")
log = logging.getLogger("onecodex")
def _check_for_ascii_filename(filename, coerce_ascii):
"""Check that the filename is ASCII.
If it isn't, convert it to ASCII & return it if the ascii flag
has been set otherwise raise an exception.
"""
try:
# python2
ascii_fname = unidecode(unicode(filename))
except NameError:
ascii_fname = unidecode(filename)
if filename != ascii_fname:
if coerce_ascii:
# TODO: Consider warnings.warn here instead
log.warning(
"Renaming {} to {}, must be ASCII\n".format(filename.encode("utf-8"), ascii_fname)
)
filename = ascii_fname
else:
raise OneCodexException("Filenames must be ascii. Try using --coerce-ascii")
return filename
def get_fastx_format(file_path):
"""Return format of given file: fasta or fastq.
Assumes Illumina-style naming conventions where each file has _R1_ or _R2_ in its name.
If the file is not fasta or fastq, raises an exception
"""
new_filename, ext = os.path.splitext(os.path.basename(file_path))
if ext in {".gz", ".gzip", ".bz", ".bz2", ".bzip"}:
new_filename, ext = os.path.splitext(new_filename)
if ext in {".fa", ".fna", ".fasta"}:
return "fasta"
elif ext in {".fq", ".fastq"}:
return "fastq"
else:
raise UploadException(
"{}: extension must be one of .fa, .fna, .fasta, .fq, .fastq".format(file_path)
)
class FilePassthru(object):
"""Wrapper around `file` object that updates a progress bar and guesses mime-type.
Parameters
----------
file_path : `string`
Path to file.
progressbar : `click.progressbar`, optional
The progress bar to update.
"""
def __init__(self, file_path, progressbar=None):
self._fp = open(file_path, mode="rb")
self._fsize = os.path.getsize(file_path)
self.progressbar = progressbar
_, ext = os.path.splitext(file_path)
self.filename = os.path.basename(file_path)
if self._fsize == 0:
raise UploadException("{}: empty files can not be uploaded".format(self.filename))
if ext in {".gz", ".gzip"}:
self.mime_type = "application/x-gzip"
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
self.mime_type = "application/x-bzip2"
else:
self.mime_type = "text/plain"
def read(self, size=-1):
bytes_read = self._fp.read(size)
if self.progressbar:
self.progressbar.update(len(bytes_read))
return bytes_read
def size(self):
return self._fsize
@property
def len(self):
"""Size of data left to be read."""
return self._fsize - self._fp.tell()
def seek(self, loc):
"""Seek to a position in the file.
Notes
-----
This is called if an upload fails and must be retried.
"""
assert loc == 0
# rewind progress bar
if self.progressbar:
self.progressbar.update(-self._fp.tell())
self._fp.seek(loc)
def close(self):
self._fp.close()
def enforce_ascii_filename(self, coerce_ascii):
"""Update the filename to be ASCII. Raises an exception if `coerce_ascii` is `False` and the filename is not ASCII."""
self.filename = _check_for_ascii_filename(self.filename, coerce_ascii)
class PairedEndFiles(object):
def __init__(self, files, progressbar=None):
if len(files) != 2:
raise OneCodexException("Paired files uploading can only take 2 files")
for f in files:
if get_fastx_format(f) != "fastq":
raise OneCodexException("Interleaving FASTA files is currently unsupported")
if R1_FILENAME_RE.match(files[0]) and R2_FILENAME_RE.match(files[1]):
file1 = files[0]
file2 = files[1]
elif R2_FILENAME_RE.match(files[0]) and R1_FILENAME_RE.match(files[1]):
file1 = files[1]
file2 = files[0]
else:
raise OneCodexException("Paired files need to have _R1/_1 and _R2/_2 in their name")
self.r1 = FilePassthru(file1, progressbar)
self.r2 = FilePassthru(file2, progressbar)
def enforce_ascii_filename(self, coerce_ascii):
self.r1.enforce_ascii_filename(coerce_ascii)
self.r2.enforce_ascii_filename(coerce_ascii)
def get_file_wrapper(file, coerce_ascii, bar):
"""Take a str or tuple (str) and return the corresponding file wrapper object.
If there is more than one file, it must be a paired end uploads and the filenames will be validated.
"""
if isinstance(file, tuple):
fobj = PairedEndFiles(file, bar)
fobj.enforce_ascii_filename(coerce_ascii)
return fobj
fobj = FilePassthru(file, bar)
fobj.enforce_ascii_filename(coerce_ascii)
return fobj | onecodex/lib/files.py | import os
import re
import logging
from unidecode import unidecode
from onecodex.exceptions import OneCodexException, UploadException
R1_FILENAME_RE = re.compile(".*[._][Rr]?[1][_.].*")
R2_FILENAME_RE = re.compile(".*[._][Rr]?[2][_.].*")
log = logging.getLogger("onecodex")
def _check_for_ascii_filename(filename, coerce_ascii):
"""Check that the filename is ASCII.
If it isn't, convert it to ASCII & return it if the ascii flag
has been set otherwise raise an exception.
"""
try:
# python2
ascii_fname = unidecode(unicode(filename))
except NameError:
ascii_fname = unidecode(filename)
if filename != ascii_fname:
if coerce_ascii:
# TODO: Consider warnings.warn here instead
log.warning(
"Renaming {} to {}, must be ASCII\n".format(filename.encode("utf-8"), ascii_fname)
)
filename = ascii_fname
else:
raise OneCodexException("Filenames must be ascii. Try using --coerce-ascii")
return filename
def get_fastx_format(file_path):
"""Return format of given file: fasta or fastq.
Assumes Illumina-style naming conventions where each file has _R1_ or _R2_ in its name.
If the file is not fasta or fastq, raises an exception
"""
new_filename, ext = os.path.splitext(os.path.basename(file_path))
if ext in {".gz", ".gzip", ".bz", ".bz2", ".bzip"}:
new_filename, ext = os.path.splitext(new_filename)
if ext in {".fa", ".fna", ".fasta"}:
return "fasta"
elif ext in {".fq", ".fastq"}:
return "fastq"
else:
raise UploadException(
"{}: extension must be one of .fa, .fna, .fasta, .fq, .fastq".format(file_path)
)
class FilePassthru(object):
"""Wrapper around `file` object that updates a progress bar and guesses mime-type.
Parameters
----------
file_path : `string`
Path to file.
progressbar : `click.progressbar`, optional
The progress bar to update.
"""
def __init__(self, file_path, progressbar=None):
self._fp = open(file_path, mode="rb")
self._fsize = os.path.getsize(file_path)
self.progressbar = progressbar
_, ext = os.path.splitext(file_path)
self.filename = os.path.basename(file_path)
if self._fsize == 0:
raise UploadException("{}: empty files can not be uploaded".format(self.filename))
if ext in {".gz", ".gzip"}:
self.mime_type = "application/x-gzip"
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
self.mime_type = "application/x-bzip2"
else:
self.mime_type = "text/plain"
def read(self, size=-1):
bytes_read = self._fp.read(size)
if self.progressbar:
self.progressbar.update(len(bytes_read))
return bytes_read
def size(self):
return self._fsize
@property
def len(self):
"""Size of data left to be read."""
return self._fsize - self._fp.tell()
def seek(self, loc):
"""Seek to a position in the file.
Notes
-----
This is called if an upload fails and must be retried.
"""
assert loc == 0
# rewind progress bar
if self.progressbar:
self.progressbar.update(-self._fp.tell())
self._fp.seek(loc)
def close(self):
self._fp.close()
def enforce_ascii_filename(self, coerce_ascii):
"""Update the filename to be ASCII. Raises an exception if `coerce_ascii` is `False` and the filename is not ASCII."""
self.filename = _check_for_ascii_filename(self.filename, coerce_ascii)
class PairedEndFiles(object):
def __init__(self, files, progressbar=None):
if len(files) != 2:
raise OneCodexException("Paired files uploading can only take 2 files")
for f in files:
if get_fastx_format(f) != "fastq":
raise OneCodexException("Interleaving FASTA files is currently unsupported")
if R1_FILENAME_RE.match(files[0]) and R2_FILENAME_RE.match(files[1]):
file1 = files[0]
file2 = files[1]
elif R2_FILENAME_RE.match(files[0]) and R1_FILENAME_RE.match(files[1]):
file1 = files[1]
file2 = files[0]
else:
raise OneCodexException("Paired files need to have _R1/_1 and _R2/_2 in their name")
self.r1 = FilePassthru(file1, progressbar)
self.r2 = FilePassthru(file2, progressbar)
def enforce_ascii_filename(self, coerce_ascii):
self.r1.enforce_ascii_filename(coerce_ascii)
self.r2.enforce_ascii_filename(coerce_ascii)
def get_file_wrapper(file, coerce_ascii, bar):
"""Take a str or tuple (str) and return the corresponding file wrapper object.
If there is more than one file, it must be a paired end uploads and the filenames will be validated.
"""
if isinstance(file, tuple):
fobj = PairedEndFiles(file, bar)
fobj.enforce_ascii_filename(coerce_ascii)
return fobj
fobj = FilePassthru(file, bar)
fobj.enforce_ascii_filename(coerce_ascii)
return fobj | 0.4206 | 0.176601 |
import time
import board
import neopixel
import threading
from datetime import datetime
from gpiozero import Button
from signal import pause
#Setup the pin
#GPIO.setmode(GPIO.BOARD)
buttonPin = 16 # board.D23
button = Button(23)
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 50
led_pattern = 0
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.5, auto_write=False, pixel_order=ORDER
)
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos * 3)
g = int(255 - pos * 3)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos * 3)
g = 0
b = int(pos * 3)
else:
pos -= 170
r = 0
g = int(pos * 3)
b = int(255 - pos * 3)
return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)
def rainbow_cycle(wait):
for j in range(255):
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
time.sleep(wait)
# alternating between rainbow and red
def setRunningLightTrack(count, rgb):
for i in range(num_pixels):
interval = count % 3
if (i + interval) % 3 == 0:
pixels[i] = rgb
else:
pixels[i] = (0, 0, 0)
pixels.show()
# alternating between rainbow and red
def redLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (255, 0, 0))
pixels.show()
time.sleep(0.125)
def blueLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (0, 0, 255))
pixels.show()
time.sleep(0.125)
def greenLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (0, 255, 0))
pixels.show()
time.sleep(0.125)
# track lighting move the leds blank 2 one on moving.
def setPixelDecimal(countingNumber, pixelOffset, numberOfPixels, rgb):
for secondsIndex in range(pixelOffset, pixelOffset + numberOfPixels):
if(countingNumber > (secondsIndex - pixelOffset)):
pixels[secondsIndex] = rgb
else:
pixels[secondsIndex] = (0, 0, 0)
def setTimeInPixels(totalSeconds):
totalSeconds = totalSeconds // 1
print('printing the current time.')
secondsOffset = 0 # 0 through 9
tensOfSecondsOffset = 11 # 11 through 16
minutesOffset = 18 # 18 through 29
tensOfMinutesOffset = 31 # 31 through 36
hoursOffset = 38 # 38 through 47
tensOfHoursOffset = 48 # 48 through 50
# number of seconds in a day.
#hours
hours = totalSeconds // 3600
minutes = totalSeconds // 60
seconds = totalSeconds % 10
tensOfSeconds = (totalSeconds % 60) // 10
tensOfMinutes = (minutes % 60) // 10
minutes = minutes % 10
tensOfHours = hours // 10
hours = hours % 10
print(f'{tensOfHours}{hours}:{tensOfMinutes}{minutes}:{tensOfSeconds}{seconds}')
# seconds
setPixelDecimal(seconds, secondsOffset, 10, (255, 255, 0))
setPixelDecimal(tensOfSeconds, tensOfSecondsOffset, 6, (0, 255, 0))
# Minutes
setPixelDecimal(minutes, minutesOffset, 10, (0, 255, 255))
setPixelDecimal(tensOfMinutes, tensOfMinutesOffset, 6, (0, 0, 255))
# hours
setPixelDecimal(hours, hoursOffset, 10, (255, 0, 255))
setPixelDecimal(tensOfHours, tensOfHoursOffset, 2, (255, 0, 0))
pixels.show()
def currentTime():
global endLedEffect
print('Current time ')
currentTime = datetime.now()
# number of seconds in a day.
while endLedEffect != True:
currentTime = datetime.now()
seconds_since_midnight = (currentTime - currentTime.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()
setTimeInPixels(seconds_since_midnight)
time.sleep(1)
pixels.fill((0,0,0))
pixels.show()
endLedEffect = False
def countingLed():
global endLedEffect
print('Counting LEDs')
# number of seconds in a day.
for totalSeconds in range(86400):
if endLedEffect == True:
print('cancel the clock Effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setTimeInPixels(totalSeconds)
time.sleep(1)
pixels.fill((0, 0, 0))
pixels.show()
ledThread = threading.Thread(name='LedThread')
endLedEffect = False
def ledRainbow():
global endLedEffect
for x in range(3600):
# check to see if we have aborted the thread
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
rainbow_cycle(0.001)
print(x)
pixels.show()
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def slowLedRainbow():
global endLedEffect
for x in range(3600):
# check to see if we have aborted the thread
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
rainbow_cycle(0.01)
print(x)
pixels.show()
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def ledFadeRed():
global endLedEffect
print('LED Fade up and down')
for numberOfFades in range(10):
for x in range(510):
if endLedEffect == True:
print('cancel the LED Fade effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
redIn = x
if x > 255:
redIn = 510 - x
print('red: ')
print(redIn)
pixels.fill((redIn, 0, 0))
pixels.show()
time.sleep(0.01)
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def handle_button_press():
global led_pattern
global endLedEffect
global ledThread
print('button was pressed')
# TODO update the led pattern to be an array/dictionary
led_pattern = led_pattern + 1
if led_pattern > 8:
print('Restarting led count')
led_pattern = 0
endLedEffect = True
ledThread.join()
return
print('Current Led Pattern:')
print(led_pattern)
if led_pattern == 1:
tempThread = threading.Thread(target=ledRainbow)
elif led_pattern == 2:
tempThread = threading.Thread(target=ledFadeRed)
elif led_pattern == 3:
tempThread = threading.Thread(target=countingLed)
elif led_pattern == 4:
tempThread = threading.Thread(target=currentTime)
elif led_pattern == 5:
tempThread = threading.Thread(target=blueLightTrack)
elif led_pattern == 6:
tempThread = threading.Thread(target=redLightTrack)
elif led_pattern == 7:
tempThread = threading.Thread(target=greenLightTrack)
elif led_pattern == 8:
tempThread = threading.Thread(target=slowLedRainbow)
try:
print (ledThread)
except UnboundLocalError:
ledThread = tempThread
while ledThread.is_alive():
endLedEffect = True
ledThread.join()
time.sleep(1)
ledThread = tempThread
ledThread.start()
def handle_button_held():
global led_pattern
global endLedEffect
print('button was Held')
endLedEffect = True
ledThread.join()
# led_pattern = 0
# The amount of time you have to hold the button before it
button.hold_time = 2
button.when_held = handle_button_held
button.when_pressed = handle_button_press
print('WELCOME to LED CHRISTMAS SHOW')
print('press the button to start')
#blueLightTrack()
button.wait_for_press()
while True:
time.sleep(0.1) | main.py | import time
import board
import neopixel
import threading
from datetime import datetime
from gpiozero import Button
from signal import pause
#Setup the pin
#GPIO.setmode(GPIO.BOARD)
buttonPin = 16 # board.D23
button = Button(23)
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 50
led_pattern = 0
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.5, auto_write=False, pixel_order=ORDER
)
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos * 3)
g = int(255 - pos * 3)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos * 3)
g = 0
b = int(pos * 3)
else:
pos -= 170
r = 0
g = int(pos * 3)
b = int(255 - pos * 3)
return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)
def rainbow_cycle(wait):
for j in range(255):
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
time.sleep(wait)
# alternating between rainbow and red
def setRunningLightTrack(count, rgb):
for i in range(num_pixels):
interval = count % 3
if (i + interval) % 3 == 0:
pixels[i] = rgb
else:
pixels[i] = (0, 0, 0)
pixels.show()
# alternating between rainbow and red
def redLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (255, 0, 0))
pixels.show()
time.sleep(0.125)
def blueLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (0, 0, 255))
pixels.show()
time.sleep(0.125)
def greenLightTrack():
global endLedEffect
for seconds in range(3600):
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setRunningLightTrack(seconds, (0, 255, 0))
pixels.show()
time.sleep(0.125)
# track lighting move the leds blank 2 one on moving.
def setPixelDecimal(countingNumber, pixelOffset, numberOfPixels, rgb):
for secondsIndex in range(pixelOffset, pixelOffset + numberOfPixels):
if(countingNumber > (secondsIndex - pixelOffset)):
pixels[secondsIndex] = rgb
else:
pixels[secondsIndex] = (0, 0, 0)
def setTimeInPixels(totalSeconds):
totalSeconds = totalSeconds // 1
print('printing the current time.')
secondsOffset = 0 # 0 through 9
tensOfSecondsOffset = 11 # 11 through 16
minutesOffset = 18 # 18 through 29
tensOfMinutesOffset = 31 # 31 through 36
hoursOffset = 38 # 38 through 47
tensOfHoursOffset = 48 # 48 through 50
# number of seconds in a day.
#hours
hours = totalSeconds // 3600
minutes = totalSeconds // 60
seconds = totalSeconds % 10
tensOfSeconds = (totalSeconds % 60) // 10
tensOfMinutes = (minutes % 60) // 10
minutes = minutes % 10
tensOfHours = hours // 10
hours = hours % 10
print(f'{tensOfHours}{hours}:{tensOfMinutes}{minutes}:{tensOfSeconds}{seconds}')
# seconds
setPixelDecimal(seconds, secondsOffset, 10, (255, 255, 0))
setPixelDecimal(tensOfSeconds, tensOfSecondsOffset, 6, (0, 255, 0))
# Minutes
setPixelDecimal(minutes, minutesOffset, 10, (0, 255, 255))
setPixelDecimal(tensOfMinutes, tensOfMinutesOffset, 6, (0, 0, 255))
# hours
setPixelDecimal(hours, hoursOffset, 10, (255, 0, 255))
setPixelDecimal(tensOfHours, tensOfHoursOffset, 2, (255, 0, 0))
pixels.show()
def currentTime():
global endLedEffect
print('Current time ')
currentTime = datetime.now()
# number of seconds in a day.
while endLedEffect != True:
currentTime = datetime.now()
seconds_since_midnight = (currentTime - currentTime.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()
setTimeInPixels(seconds_since_midnight)
time.sleep(1)
pixels.fill((0,0,0))
pixels.show()
endLedEffect = False
def countingLed():
global endLedEffect
print('Counting LEDs')
# number of seconds in a day.
for totalSeconds in range(86400):
if endLedEffect == True:
print('cancel the clock Effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
setTimeInPixels(totalSeconds)
time.sleep(1)
pixels.fill((0, 0, 0))
pixels.show()
ledThread = threading.Thread(name='LedThread')
endLedEffect = False
def ledRainbow():
global endLedEffect
for x in range(3600):
# check to see if we have aborted the thread
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
rainbow_cycle(0.001)
print(x)
pixels.show()
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def slowLedRainbow():
global endLedEffect
for x in range(3600):
# check to see if we have aborted the thread
if endLedEffect == True:
print('cancel the LED effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
rainbow_cycle(0.01)
print(x)
pixels.show()
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def ledFadeRed():
global endLedEffect
print('LED Fade up and down')
for numberOfFades in range(10):
for x in range(510):
if endLedEffect == True:
print('cancel the LED Fade effect early')
pixels.fill((0, 0, 0))
pixels.show()
endLedEffect = False
return
redIn = x
if x > 255:
redIn = 510 - x
print('red: ')
print(redIn)
pixels.fill((redIn, 0, 0))
pixels.show()
time.sleep(0.01)
# turn off the LEDs
pixels.fill((0, 0, 0))
pixels.show()
def handle_button_press():
global led_pattern
global endLedEffect
global ledThread
print('button was pressed')
# TODO update the led pattern to be an array/dictionary
led_pattern = led_pattern + 1
if led_pattern > 8:
print('Restarting led count')
led_pattern = 0
endLedEffect = True
ledThread.join()
return
print('Current Led Pattern:')
print(led_pattern)
if led_pattern == 1:
tempThread = threading.Thread(target=ledRainbow)
elif led_pattern == 2:
tempThread = threading.Thread(target=ledFadeRed)
elif led_pattern == 3:
tempThread = threading.Thread(target=countingLed)
elif led_pattern == 4:
tempThread = threading.Thread(target=currentTime)
elif led_pattern == 5:
tempThread = threading.Thread(target=blueLightTrack)
elif led_pattern == 6:
tempThread = threading.Thread(target=redLightTrack)
elif led_pattern == 7:
tempThread = threading.Thread(target=greenLightTrack)
elif led_pattern == 8:
tempThread = threading.Thread(target=slowLedRainbow)
try:
print (ledThread)
except UnboundLocalError:
ledThread = tempThread
while ledThread.is_alive():
endLedEffect = True
ledThread.join()
time.sleep(1)
ledThread = tempThread
ledThread.start()
def handle_button_held():
global led_pattern
global endLedEffect
print('button was Held')
endLedEffect = True
ledThread.join()
# led_pattern = 0
# The amount of time you have to hold the button before it
button.hold_time = 2
button.when_held = handle_button_held
button.when_pressed = handle_button_press
print('WELCOME to LED CHRISTMAS SHOW')
print('press the button to start')
#blueLightTrack()
button.wait_for_press()
while True:
time.sleep(0.1) | 0.487795 | 0.418697 |
import os
import sqlite3
import pandas as pd
import pymongo
from dotenv import load_dotenv
'''
"How was working with MongoDB different from working with PostgreSQL?
What was easier, and what was harder?"
I would say that my biggest hurdle was simply figuring out how to get
data into each system, once I was past that and knew the steps, they're
kind of the same in terms of ease of use.
Kind of.
One thing I noticed about Mongo is that it really doesn't give a damn
about the rules. You can access (and even create if it doesn't exist
already) collections fairly easily, and it doesn't seem to mind what
data you feed into it all that much.
That said, that can also be a hinderance when you accidentally throw,
say, a cleric's data into the fighters collection and it won't raise an
error about how you messed up. Definitely gives me javascript vibes,
and not in a good way.
That said, not having to learn SQL is a bonus, as trying to suss out
how proper querying works and taking the time to learn the language can
be cumbersome. Thankfully, pandas does have some stuff which makes data
flow a bit simpler.
All in all, I can't really gauge which one is easier or harder,
but I will say that I like SQLite, and by association postgres, better.
I've gotten used to SQL and its querying, and while it can be a bit
finnicky sometimes, I like how it's structured and easily readable.
And how you have to yell it.
SELECT!! FROM!!! WHILE!!!!
'''
# Set up .env variables to connect to postgres later
envpath = os.path.join(os.getcwd(),'..', '.env')
# print(envpath)
load_dotenv(envpath)
# grab .env data for mongo database for later
CLUSTER_NAME = os.getenv('MONGO_CLUSTER_NAME')
DB_USER = os.getenv('MONGO_USER')
DB_PASSWORD = os.getenv('MONGO_PASSWORD')
# grab filepath for rpg sqlite db
RPG_FILEPATH = os.path.join(os.getcwd(),'..',
'module1-introduction-to-sql','rpg_db.sqlite3')
# print(RPG_FILEPATH)
# Connect first to the RPG database and pull data from it to
# transfer it to postgres
conn = sqlite3.connect(RPG_FILEPATH)
print('CONNECTION IN:', conn)
def q(q_query, q_conn):
return pd.read_sql(q_query, q_conn)
# Get all tables in the sqlite database
q_in = 'SELECT name FROM sqlite_master WHERE type = "table";'
rpg_names = q(
q_in, conn
)
# put them into a list
rpg_names = list(rpg_names['name'].values)
rpg_tables = {}
print(rpg_names)
# get each table as a dataframe to post to nosqr
for table in rpg_names:
q_in = 'SELECT * FROM ' + table
rpg_tables[table] = q(q_in, conn)
print(rpg_tables[table].head())
conn.commit()
conn.close()
print('CONNECTION CLOSED:',conn)
# All data grabbed from sqlite, now to post to MongoDB
print('CONNECTING TO MONGODB...')
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}-siyeu.mongodb.net/test?retryWrites=true&w=majority"
print('------------------------')
print('URI:',connection_uri)
client = pymongo.MongoClient(connection_uri)
print('------------------------')
print('CLIENT:', type(client), client)
db = client.rpg_db
print('------------------------')
print('DB:', type(db), db,'\n\n')
print(f'List of collections in database:\n{db.list_collection_names()}')
# put the tables from sqlite into collections in Mongo
for table in rpg_names:
# MongoDB usually creates a collection when it's first referenced
# but I don't think I can say collection = db.table
# because it won't use the value of table, it'll try to create a
# collection called table, So I need to use
# db.create_collection.-
# check if the collection with that name already exists and
# replace it if need be
if table in db.list_collection_names():
print(f'Collection {table} already exists: {type(db[table])}')
db[table].drop()
# print(f'Does collection {table} exist? {table in db.list_collection_names()}')
collection = db.create_collection(table)
# I need to de-dataframe-ize the stored dataframes I have into
# dict entries stored in lists
# print(rpg_tables[table].head())
# Reset to_mongo beforehand to avoid bad stuff happening
to_mongo = None
# check if the dataframe is empty and don't transfer it if it is
if not rpg_tables[table].empty:
# my reasoning behind this is that mongo doesn't really care
# that much about documents not existing beforehand
# so whatever functions designed to add info to documents
# don't really fail if it's not there; so we don't need empty
# dataframes because we'll just assume that they're set up the
# way we need them to be when the time comes to add data.
to_mongo = rpg_tables[table].to_dict('records')
print(to_mongo)
if to_mongo is not None:
collection.insert_many(to_mongo)
# That should create collections that are equal to the tables
# of the sqlite database containing documents which are equal to
# the values of the tables. | module3-nosql-and-document-oriented-databases/rpg_nosql.py | import os
import sqlite3
import pandas as pd
import pymongo
from dotenv import load_dotenv
'''
"How was working with MongoDB different from working with PostgreSQL?
What was easier, and what was harder?"
I would say that my biggest hurdle was simply figuring out how to get
data into each system, once I was past that and knew the steps, they're
kind of the same in terms of ease of use.
Kind of.
One thing I noticed about Mongo is that it really doesn't give a damn
about the rules. You can access (and even create if it doesn't exist
already) collections fairly easily, and it doesn't seem to mind what
data you feed into it all that much.
That said, that can also be a hinderance when you accidentally throw,
say, a cleric's data into the fighters collection and it won't raise an
error about how you messed up. Definitely gives me javascript vibes,
and not in a good way.
That said, not having to learn SQL is a bonus, as trying to suss out
how proper querying works and taking the time to learn the language can
be cumbersome. Thankfully, pandas does have some stuff which makes data
flow a bit simpler.
All in all, I can't really gauge which one is easier or harder,
but I will say that I like SQLite, and by association postgres, better.
I've gotten used to SQL and its querying, and while it can be a bit
finnicky sometimes, I like how it's structured and easily readable.
And how you have to yell it.
SELECT!! FROM!!! WHILE!!!!
'''
# Set up .env variables to connect to postgres later
envpath = os.path.join(os.getcwd(),'..', '.env')
# print(envpath)
load_dotenv(envpath)
# grab .env data for mongo database for later
CLUSTER_NAME = os.getenv('MONGO_CLUSTER_NAME')
DB_USER = os.getenv('MONGO_USER')
DB_PASSWORD = os.getenv('MONGO_PASSWORD')
# grab filepath for rpg sqlite db
RPG_FILEPATH = os.path.join(os.getcwd(),'..',
'module1-introduction-to-sql','rpg_db.sqlite3')
# print(RPG_FILEPATH)
# Connect first to the RPG database and pull data from it to
# transfer it to postgres
conn = sqlite3.connect(RPG_FILEPATH)
print('CONNECTION IN:', conn)
def q(q_query, q_conn):
    """Run a SQL query on the given connection and return the rows as a DataFrame."""
    frame = pd.read_sql(q_query, q_conn)
    return frame
# Get all tables in the sqlite database.
# FIX: use single quotes for the SQL string literal — double-quoted "table"
# only worked via SQLite's nonstandard double-quoted-string fallback.
q_in = "SELECT name FROM sqlite_master WHERE type = 'table';"
rpg_names = q(
    q_in, conn
)
# put them into a list
rpg_names = list(rpg_names['name'].values)
rpg_tables = {}
print(rpg_names)
# get each table as a dataframe to post to nosql
for table in rpg_names:
    # Table names come from sqlite_master itself, so concatenation is safe here.
    q_in = 'SELECT * FROM ' + table
    rpg_tables[table] = q(q_in, conn)
    print(rpg_tables[table].head())
# Read-only session: nothing to commit before closing.
conn.close()
print('CONNECTION CLOSED:',conn)
# All data grabbed from sqlite, now to post to MongoDB
print('CONNECTING TO MONGODB...')
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}-siyeu.mongodb.net/test?retryWrites=true&w=majority"
print('------------------------')
# SECURITY FIX: never echo credentials — mask the password when logging the URI.
print('URI:', connection_uri.replace(DB_PASSWORD, '*****') if DB_PASSWORD else connection_uri)
client = pymongo.MongoClient(connection_uri)
print('------------------------')
print('CLIENT:', type(client), client)
db = client.rpg_db
print('------------------------')
print('DB:', type(db), db,'\n\n')
print(f'List of collections in database:\n{db.list_collection_names()}')
# put the tables from sqlite into collections in Mongo
for table in rpg_names:
    # db[table] resolves the collection by the *value* of `table`;
    # attribute access (db.table) would address a literal "table" collection.
    # Drop any stale copy so reruns start from a clean collection.
    if table in db.list_collection_names():
        print(f'Collection {table} already exists: {type(db[table])}')
        db[table].drop()
    collection = db.create_collection(table)
    # De-dataframe-ize into a list of per-row dicts for insert_many.
    # Skip empty tables: insert_many raises on an empty document list, and
    # Mongo creates collections/documents lazily when data arrives anyway.
    if not rpg_tables[table].empty:
        to_mongo = rpg_tables[table].to_dict('records')
        print(to_mongo)
        collection.insert_many(to_mongo)
# That should create collections that are equal to the tables
# of the sqlite database containing documents which are equal to
# the values of the tables.
import abc
import tensorflow as tf
from tensor_annotations import tensorflow as ttf
from tensor_annotations import axes
from src.channelcoding.dataclasses import FixedPermuteInterleaverSettings, RandomPermuteInterleaverSettings
from .codes import Code
from .types import Batch, Time, Channels
class Interleaver(Code):
    """Base class for interleaver codes.

    An interleaver permutes symbols along the time axis; subclasses must
    supply the inverse operation via ``deinterleave``.
    """

    @abc.abstractmethod
    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        # Inverse of the forward permutation applied by the subclass's call().
        pass

    def reset(self):
        # Stateless by default; stateful subclasses override to clear caches.
        pass
class FixedPermuteInterleaver(Interleaver):
    """Interleaver applying one fixed permutation along the time axis.

    If no permutation is supplied, a random one of length ``block_len`` is
    drawn once at construction and reused for every call.
    """

    def __init__(self, block_len: int, permutation=None, depermutation=None, name: str = 'FixedPermuteInterleaver'):
        super().__init__(name)
        self.block_len = block_len
        if permutation is None:
            self.permutation = tf.random.shuffle(tf.range(block_len))
        else:
            self.permutation = permutation
        if depermutation is None:
            self.depermutation = tf.math.invert_permutation(self.permutation)
        else:
            # BUG FIX: previously assigned `permutation` here, so a caller
            # supplying an explicit depermutation silently got the forward
            # permutation instead of its inverse. No validation is done.
            self.depermutation = depermutation

    @property
    def num_input_channels(self):
        # Channel count is unconstrained by an interleaver.
        return None

    @property
    def num_output_channels(self):
        return None

    def __len__(self):
        return self.block_len

    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        # Permute along the time axis (axis=1).
        return tf.gather(msg, self.permutation, axis=1)

    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        return tf.gather(msg, self.depermutation, axis=1)

    def settings(self) -> FixedPermuteInterleaverSettings:
        return FixedPermuteInterleaverSettings(permutation=self.permutation, block_len=self.block_len, name=self.name)
class RandomPermuteInterleaver(Interleaver):
    """Interleaver drawing a fresh permutation per batch element.

    Permutations are generated lazily on the first call() and cached until
    reset(); deinterleave() relies on the cached inverse, so it must be used
    within the same call()/reset() cycle.
    """

    def __init__(self, block_len: int, name: str = 'RandomPermuteInterleaver'):
        super().__init__(name)
        self.block_len = block_len
        # Lazily populated (batch, block_len) int32 index tensors.
        self._permutation = None
        self._depermutation = None

    @property
    def num_input_channels(self):
        return None

    @property
    def num_output_channels(self):
        return None

    def __len__(self):
        return self.block_len

    def generate_permutations(self, batch_size):
        # TensorArray writes keep this loop graph-compatible (e.g. under
        # tf.function), where a Python list of tensors would not be.
        ta_perm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        ta_deperm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        for i in tf.range(batch_size):
            permutation = tf.random.shuffle(tf.range(self.block_len))
            ta_perm = ta_perm.write(i, permutation)
            ta_deperm = ta_deperm.write(i, tf.math.invert_permutation(permutation))
        return ta_perm.stack(), ta_deperm.stack()

    def set(self, msg: ttf.Tensor3[Batch, Time, Channels]):
        # Draw per-sample permutations once per batch; reuse until reset().
        if self._permutation is None:
            batch_size = tf.shape(msg)[0]
            self._permutation, self._depermutation = self.generate_permutations(batch_size)

    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        self.set(msg)
        # batch_dims=1: each batch element is gathered with its own row of
        # the permutation tensor.
        return tf.gather(msg, self._permutation, axis=1, batch_dims=1)

    def reset(self):
        # Drop cached permutations so the next call() draws fresh ones.
        self._permutation = None
        self._depermutation = None

    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        return tf.gather(msg, self._depermutation, axis=1, batch_dims=1)
    def settings(self) -> RandomPermuteInterleaverSettings:
        return RandomPermuteInterleaverSettings(block_len=self.block_len, name=self.name)
import tensorflow as tf
from tensor_annotations import tensorflow as ttf
from tensor_annotations import axes
from src.channelcoding.dataclasses import FixedPermuteInterleaverSettings, RandomPermuteInterleaverSettings
from .codes import Code
from .types import Batch, Time, Channels
class Interleaver(Code):
    """Base class for interleaver codes.

    An interleaver permutes symbols along the time axis; subclasses must
    supply the inverse operation via ``deinterleave``.
    """

    @abc.abstractmethod
    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        # Inverse of the forward permutation applied by the subclass's call().
        pass

    def reset(self):
        # Stateless by default; stateful subclasses override to clear caches.
        pass
class FixedPermuteInterleaver(Interleaver):
    """Interleaver applying one fixed permutation along the time axis.

    If no permutation is supplied, a random one of length ``block_len`` is
    drawn once at construction and reused for every call.
    """

    def __init__(self, block_len: int, permutation=None, depermutation=None, name: str = 'FixedPermuteInterleaver'):
        super().__init__(name)
        self.block_len = block_len
        if permutation is None:
            self.permutation = tf.random.shuffle(tf.range(block_len))
        else:
            self.permutation = permutation
        if depermutation is None:
            self.depermutation = tf.math.invert_permutation(self.permutation)
        else:
            # BUG FIX: previously assigned `permutation` here, so a caller
            # supplying an explicit depermutation silently got the forward
            # permutation instead of its inverse. No validation is done.
            self.depermutation = depermutation

    @property
    def num_input_channels(self):
        # Channel count is unconstrained by an interleaver.
        return None

    @property
    def num_output_channels(self):
        return None

    def __len__(self):
        return self.block_len

    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        # Permute along the time axis (axis=1).
        return tf.gather(msg, self.permutation, axis=1)

    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        return tf.gather(msg, self.depermutation, axis=1)

    def settings(self) -> FixedPermuteInterleaverSettings:
        return FixedPermuteInterleaverSettings(permutation=self.permutation, block_len=self.block_len, name=self.name)
class RandomPermuteInterleaver(Interleaver):
    """Interleaver drawing a fresh permutation per batch element.

    Permutations are generated lazily on the first call() and cached until
    reset(); deinterleave() relies on the cached inverse, so it must be used
    within the same call()/reset() cycle.
    """

    def __init__(self, block_len: int, name: str = 'RandomPermuteInterleaver'):
        super().__init__(name)
        self.block_len = block_len
        # Lazily populated (batch, block_len) int32 index tensors.
        self._permutation = None
        self._depermutation = None

    @property
    def num_input_channels(self):
        return None

    @property
    def num_output_channels(self):
        return None

    def __len__(self):
        return self.block_len

    def generate_permutations(self, batch_size):
        # TensorArray writes keep this loop graph-compatible (e.g. under
        # tf.function), where a Python list of tensors would not be.
        ta_perm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        ta_deperm = tf.TensorArray(tf.int32, size=batch_size, clear_after_read=True, element_shape=tf.TensorShape([self.block_len]))
        for i in tf.range(batch_size):
            permutation = tf.random.shuffle(tf.range(self.block_len))
            ta_perm = ta_perm.write(i, permutation)
            ta_deperm = ta_deperm.write(i, tf.math.invert_permutation(permutation))
        return ta_perm.stack(), ta_deperm.stack()

    def set(self, msg: ttf.Tensor3[Batch, Time, Channels]):
        # Draw per-sample permutations once per batch; reuse until reset().
        if self._permutation is None:
            batch_size = tf.shape(msg)[0]
            self._permutation, self._depermutation = self.generate_permutations(batch_size)

    def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        self.set(msg)
        # batch_dims=1: each batch element is gathered with its own row of
        # the permutation tensor.
        return tf.gather(msg, self._permutation, axis=1, batch_dims=1)

    def reset(self):
        # Drop cached permutations so the next call() draws fresh ones.
        self._permutation = None
        self._depermutation = None

    def deinterleave(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]:
        return tf.gather(msg, self._depermutation, axis=1, batch_dims=1)
    def settings(self) -> RandomPermuteInterleaverSettings:
        return RandomPermuteInterleaverSettings(block_len=self.block_len, name=self.name)
import getopt
import os
from os import path
import sys
import acg
INDENT = ' '
def declare_namespaces(namespaces, source):
    """Wrap *source* in nested ``namespace X { ... }`` declarations."""
    openers = '\n'.join('namespace %s {' % ns for ns in namespaces)
    closers = '\n'.join('}' for _ in namespaces)
    return openers + '\n' + source + '\n' + closers
def output_tofile(content, filename, outputdir):
    """Write *content* to outputdir/filename (creating parent directories),
    or dump it to stdout under a heading when no output directory is given."""
    if not outputdir:
        print(filename + ':')
        print(content)
        return
    target = path.join(outputdir, filename)
    parent = path.dirname(target)
    if not path.isdir(parent):
        os.makedirs(parent)
    with open(target, 'wt') as out:
        out.write(content)
def char_to_ord(c):
    """Return the character code of *c* as a decimal string (ints pass through)."""
    code = ord(c) if type(c) is str else int(c)
    return str(code)

def text_to_chararray(text, indent, colcount):
    """Render *text* as comma-separated char codes, starting a new indented
    line before every *colcount*-th value."""
    pieces = []
    for pos, ch in enumerate(text):
        code = char_to_ord(ch)
        if pos % colcount == colcount - 1:
            pieces.append('\n' + indent + code)
        else:
            pieces.append(code)
    return ', '.join(pieces)
if __name__ == '__main__':
    # Usage: -p namespace -n name [-o output_dir] dir_paths...
    # Packs every file under the given directories into generated C++ char
    # arrays and emits a StringBundle subclass exposing them by file name.
    opts, args = getopt.getopt(sys.argv[1:], 'p:n:o:')
    params = dict((i.lstrip('-'), j) for i, j in opts)
    if any(i not in params for i in ['p', 'n']) or len(args) == 0:
        print('Usage: %s -p namespace -n name [-o output_dir] dir_paths...' % sys.argv[0])
        sys.exit(0)
    # With no -o, generated sources are printed to stdout instead of written.
    outputdir = params['o'] if 'o' in params else None
    stringtablename = params['n']
    # Generated unit is named "<name>_string_table".
    stringtablenames = stringtablename.split('_') + ['string', 'table']
    unit_name = '_'.join(stringtablenames)
    # Accept both "a::b" and "a:b" as namespace separators.
    namespaces = params['p'].replace('::', ':').split(':')
    stringtables = []
    stringitems = []
    classname = acg.toCamelName(stringtablenames)
    for i in args:
        # Each input directory becomes one nested namespace of char arrays.
        category = path.splitext(path.basename(i))[0]
        stringtables.append('\nnamespace %s {\n' % category)
        for j in os.listdir(i):
            filepath = path.join(i, j)
            # Skip subdirectories and hidden files.
            if path.isfile(filepath) and not j.startswith('.'):
                basename = path.basename(j)
                strname = basename.replace('.', '_')
                with open(path.join(i, j), 'rt') as fp:
                    text = fp.read()
                # NUL-terminated char array, 16 byte values per line.
                stringtables.append('static const char %s[] = {\n%s%s, 0};\n' % (strname, INDENT, text_to_chararray(text, INDENT , 16)))
                stringitems.append((category, strname, basename))
        stringtables.append('}\n')
    # Include-guard macro, e.g. NS_MY_STRING_TABLE_H_.
    macro = '_'.join([i.upper() for i in namespaces] + ([i.upper() for i in stringtablenames]) + ['H_'])
    classdeclare = acg.format('''
class ${classname} : public ark::StringBundle {
public:
    ${classname}();
    virtual sp<String> getString(const String& name) override;
    virtual std::vector<String> getStringArray(const String& name) override;
private:
    std::unordered_map<String, sp<String>> _items;
};
''', classname=classname)
    header = acg.format('''#ifndef ${macro}
#define ${macro}
#include <unordered_map>
#include "core/inf/string_bundle.h"
${classdeclare}
#endif''', macro=macro, classdeclare=declare_namespaces(namespaces, classdeclare))
    classdefinition = acg.format('''
${classname}::${classname}()
{
    ${0};
}
sp<String> ${classname}::getString(const String& name)
{
    auto iter = _items.find(name);
    return iter != _items.end() ? iter->second : nullptr;
}
std::vector<String> ${classname}::getStringArray(const String&)
{
    return {};
}
''', ';\n    '.join(['_items["%s"] = sp<String>::make(%s::%s)' % (i[2], i[0], i[1]) for i in stringitems]), classname=classname)
    # Registration hook called by the engine's bootstrap code.
    bootstrap_func = '__ark_bootstrap_%s__' % '_'.join(stringtablenames)
    source = acg.format('''#include "core/base/string_table.h"
#include "core/types/global.h"
#include "${unit_name}.h"
${body}
using namespace ark;
void ${bootstrap_func}()
{
    const Global<StringTable> string_table;
    string_table->addStringBundle("${stringtablename}", sp<${classname}>::make());
}''', unit_name=unit_name, body=declare_namespaces(namespaces, '\n'.join(stringtables) + '\n' + classdefinition),
          bootstrap_func=bootstrap_func, stringtablename=stringtablename,
          classname='::'.join(namespaces + ['']) + classname)
    output_tofile(header, unit_name + '.h', outputdir)
    if not outputdir:
        # Separator between the printed header and source.
        print('\n----------------------------------------------------------------\n')
    output_tofile(source, unit_name + '.cpp', outputdir)
import getopt
import os
from os import path
import sys
import acg
INDENT = ' '
def declare_namespaces(namespaces, source):
    """Wrap *source* in nested ``namespace X { ... }`` declarations."""
    openers = '\n'.join('namespace %s {' % ns for ns in namespaces)
    closers = '\n'.join('}' for _ in namespaces)
    return openers + '\n' + source + '\n' + closers
def output_tofile(content, filename, outputdir):
    """Write *content* to outputdir/filename (creating parent directories),
    or dump it to stdout under a heading when no output directory is given."""
    if not outputdir:
        print(filename + ':')
        print(content)
        return
    target = path.join(outputdir, filename)
    parent = path.dirname(target)
    if not path.isdir(parent):
        os.makedirs(parent)
    with open(target, 'wt') as out:
        out.write(content)
def char_to_ord(c):
    """Return the character code of *c* as a decimal string (ints pass through)."""
    code = ord(c) if type(c) is str else int(c)
    return str(code)

def text_to_chararray(text, indent, colcount):
    """Render *text* as comma-separated char codes, starting a new indented
    line before every *colcount*-th value."""
    pieces = []
    for pos, ch in enumerate(text):
        code = char_to_ord(ch)
        if pos % colcount == colcount - 1:
            pieces.append('\n' + indent + code)
        else:
            pieces.append(code)
    return ', '.join(pieces)
if __name__ == '__main__':
    # Usage: -p namespace -n name [-o output_dir] dir_paths...
    # Packs every file under the given directories into generated C++ char
    # arrays and emits a StringBundle subclass exposing them by file name.
    opts, args = getopt.getopt(sys.argv[1:], 'p:n:o:')
    params = dict((i.lstrip('-'), j) for i, j in opts)
    if any(i not in params for i in ['p', 'n']) or len(args) == 0:
        print('Usage: %s -p namespace -n name [-o output_dir] dir_paths...' % sys.argv[0])
        sys.exit(0)
    # With no -o, generated sources are printed to stdout instead of written.
    outputdir = params['o'] if 'o' in params else None
    stringtablename = params['n']
    # Generated unit is named "<name>_string_table".
    stringtablenames = stringtablename.split('_') + ['string', 'table']
    unit_name = '_'.join(stringtablenames)
    # Accept both "a::b" and "a:b" as namespace separators.
    namespaces = params['p'].replace('::', ':').split(':')
    stringtables = []
    stringitems = []
    classname = acg.toCamelName(stringtablenames)
    for i in args:
        # Each input directory becomes one nested namespace of char arrays.
        category = path.splitext(path.basename(i))[0]
        stringtables.append('\nnamespace %s {\n' % category)
        for j in os.listdir(i):
            filepath = path.join(i, j)
            # Skip subdirectories and hidden files.
            if path.isfile(filepath) and not j.startswith('.'):
                basename = path.basename(j)
                strname = basename.replace('.', '_')
                with open(path.join(i, j), 'rt') as fp:
                    text = fp.read()
                # NUL-terminated char array, 16 byte values per line.
                stringtables.append('static const char %s[] = {\n%s%s, 0};\n' % (strname, INDENT, text_to_chararray(text, INDENT , 16)))
                stringitems.append((category, strname, basename))
        stringtables.append('}\n')
    # Include-guard macro, e.g. NS_MY_STRING_TABLE_H_.
    macro = '_'.join([i.upper() for i in namespaces] + ([i.upper() for i in stringtablenames]) + ['H_'])
    classdeclare = acg.format('''
class ${classname} : public ark::StringBundle {
public:
    ${classname}();
    virtual sp<String> getString(const String& name) override;
    virtual std::vector<String> getStringArray(const String& name) override;
private:
    std::unordered_map<String, sp<String>> _items;
};
''', classname=classname)
    header = acg.format('''#ifndef ${macro}
#define ${macro}
#include <unordered_map>
#include "core/inf/string_bundle.h"
${classdeclare}
#endif''', macro=macro, classdeclare=declare_namespaces(namespaces, classdeclare))
    classdefinition = acg.format('''
${classname}::${classname}()
{
    ${0};
}
sp<String> ${classname}::getString(const String& name)
{
    auto iter = _items.find(name);
    return iter != _items.end() ? iter->second : nullptr;
}
std::vector<String> ${classname}::getStringArray(const String&)
{
    return {};
}
''', ';\n    '.join(['_items["%s"] = sp<String>::make(%s::%s)' % (i[2], i[0], i[1]) for i in stringitems]), classname=classname)
    # Registration hook called by the engine's bootstrap code.
    bootstrap_func = '__ark_bootstrap_%s__' % '_'.join(stringtablenames)
    source = acg.format('''#include "core/base/string_table.h"
#include "core/types/global.h"
#include "${unit_name}.h"
${body}
using namespace ark;
void ${bootstrap_func}()
{
    const Global<StringTable> string_table;
    string_table->addStringBundle("${stringtablename}", sp<${classname}>::make());
}''', unit_name=unit_name, body=declare_namespaces(namespaces, '\n'.join(stringtables) + '\n' + classdefinition),
          bootstrap_func=bootstrap_func, stringtablename=stringtablename,
          classname='::'.join(namespaces + ['']) + classname)
    output_tofile(header, unit_name + '.h', outputdir)
    if not outputdir:
        # Separator between the printed header and source.
        print('\n----------------------------------------------------------------\n')
    output_tofile(source, unit_name + '.cpp', outputdir)
import numpy as np
import pandas as pd
from multiprocessing import Pool
from scipy.special import expit
from scipy.stats import beta
from scipy.stats import powerlaw
from opaque.betabinomial_regression import BetaBinomialRegressor
from opaque.stats import equal_tailed_interval, KL_beta
class EndtoEndSimulator:
    """End-to-end simulation of beta-binomial sensitivity/specificity modeling.

    Covariates are drawn from a multivariate normal; per-sample sensitivity
    and specificity are drawn from beta distributions whose mean (logit link)
    and dispersion (log link) depend linearly on the covariates. ``run`` fits
    a BetaBinomialRegressor on simulated training data and evaluates interval
    estimates on simulated test data.
    """

    def __init__(
        self,
        sens_coefs_mean,
        sens_coefs_disp,
        spec_coefs_mean,
        spec_coefs_disp,
        sens_noise_mean=0.0,
        sens_noise_disp=0.0,
        spec_noise_mean=0.0,
        spec_noise_disp=0.0,
        cov=None,
        n_shape=0.2,
        n_loc=30,
        n_scale=1000,
        random_state=None,
        n_jobs=1,
    ):
        """
        Parameters
        ----------
        sens_coefs_mean, sens_coefs_disp : sequence of float
            Intercept + covariate coefficients for sensitivity mean (logit
            link) and dispersion (log link).
        spec_coefs_mean, spec_coefs_disp : sequence of float
            Same for specificity.
        sens_noise_mean, sens_noise_disp, spec_noise_mean, spec_noise_disp : float
            Std-dev of Gaussian noise added to the linear predictors.
        cov : array-like, optional
            Covariance matrix of the covariates; identity by default.
        n_shape, n_loc, n_scale : float
            Parameters of the power-law distribution of per-problem counts.
        random_state : None, int, or np.random.RandomState
            Source of randomness; pass an int for reproducible runs.
        n_jobs : int
            Worker processes used to compute intervals in ``run``.
        """
        if cov is None:
            # One independent, unit-variance covariate per non-intercept coef.
            cov = np.diag(np.full(len(sens_coefs_mean) - 1, 1.0))
        else:
            cov = np.array(cov)
        if random_state is None:
            self.random_state = np.random.RandomState()
        elif isinstance(random_state, int):
            self.random_state = np.random.RandomState(random_state)
        else:
            self.random_state = random_state
        # Coefficient vectors carry an intercept term, hence cov.shape[0] + 1.
        assert len(sens_coefs_mean) == len(sens_coefs_disp) == cov.shape[0] + 1
        assert len(spec_coefs_mean) == len(spec_coefs_disp) == cov.shape[0] + 1
        self.sens_coefs_mean = np.array(sens_coefs_mean)
        self.sens_coefs_disp = np.array(sens_coefs_disp)
        self.spec_coefs_mean = np.array(spec_coefs_mean)
        self.spec_coefs_disp = np.array(spec_coefs_disp)
        self.sens_noise_mean = sens_noise_mean
        self.sens_noise_disp = sens_noise_disp
        self.spec_noise_mean = spec_noise_mean
        self.spec_noise_disp = spec_noise_disp
        self.cov = cov
        self.num_covariates = cov.shape[0]
        self.n_shape = n_shape
        self.n_loc = n_loc
        self.n_scale = n_scale
        self.n_jobs = n_jobs

    def generate_data(self, size):
        """Simulate ``size`` diagnostic problems and return them as a DataFrame."""
        X = self.random_state.multivariate_normal(
            np.zeros(self.cov.shape[0]), self.cov, size=size
        )
        # Prepend the intercept column.
        X = np.hstack([np.full((X.shape[0], 1), 1), X])
        # BUG FIX: all noise draws now come from self.random_state rather than
        # the global np.random, so a fixed random_state gives reproducible data.
        sens_mu = expit(
            X.dot(self.sens_coefs_mean)
            + self.random_state.normal(0, self.sens_noise_mean, size=size)
        )
        sens_nu = np.exp(
            X.dot(self.sens_coefs_disp)
            + self.random_state.normal(0, self.sens_noise_disp, size=size)
        )
        # Beta prior parameterized by mean/precision: a = mu*nu, b = (1-mu)*nu.
        sens_prior = beta(sens_mu * sens_nu, (1 - sens_mu) * sens_nu)
        sens_prior.random_state = self.random_state
        sens = sens_prior.rvs()
        spec_mu = expit(
            X.dot(self.spec_coefs_mean)
            + self.random_state.normal(0, self.spec_noise_mean, size=size)
        )
        spec_nu = np.exp(
            X.dot(self.spec_coefs_disp)
            + self.random_state.normal(0, self.spec_noise_disp, size=size)
        )
        spec_prior = beta(spec_mu * spec_nu, (1 - spec_mu) * spec_nu)
        spec_prior.random_state = self.random_state
        spec = spec_prior.rvs()
        sens.shape = sens_mu.shape = sens_nu.shape = (size, 1)
        spec.shape = spec_mu.shape = spec_nu.shape = (size, 1)
        # Per-problem sample counts follow a shifted/scaled power law.
        N_dist = powerlaw(a=self.n_shape, loc=self.n_loc, scale=self.n_scale)
        N_dist.random_state = self.random_state
        N_inlier = np.floor(N_dist.rvs(size=sens.shape)).astype(int)
        N_outlier = np.floor(N_dist.rvs(size=sens.shape)).astype(int)
        K_inlier = self.random_state.binomial(N_inlier, p=spec)
        K_outlier = self.random_state.binomial(N_outlier, p=sens)
        # True outlier prevalence.
        theta = N_outlier / (N_inlier + N_outlier)
        data = np.hstack(
            [
                X[:, 1:],
                sens,
                spec,
                N_inlier,
                K_inlier,
                N_outlier,
                K_outlier,
                theta,
                sens_mu,
                sens_nu,
                spec_mu,
                spec_nu,
                sens_mu * sens_nu,
                (1 - sens_mu) * sens_nu,
                spec_mu * spec_nu,
                (1 - spec_mu) * spec_nu,
            ]
        )
        data = pd.DataFrame(
            data,
            columns=[f"X{i}" for i in range(self.num_covariates)]
            + [
                "sens",
                "spec",
                "N_inlier",
                "K_inlier",
                "N_outlier",
                "K_outlier",
                "theta",
                "sens_mu",
                "sens_nu",
                "spec_mu",
                "spec_nu",
                "sens_a",
                "sens_b",
                "spec_a",
                "spec_b",
            ],
        )
        return data

    def run(self, size_train=1000, size_test=200):
        """Fit regressors on simulated training data and compute equal-tailed
        prevalence intervals on simulated test data; return one row per test
        problem with estimated/true shape params, KL divergences and interval.
        """
        data_train = self.generate_data(size=size_train)
        data_test = self.generate_data(size=size_test)
        X_train = data_train.iloc[:, : self.num_covariates].values
        X_test = data_test.iloc[:, : self.num_covariates].values
        sens_train = data_train[['N_outlier', 'K_outlier']].values
        spec_train = data_train[['N_inlier', 'K_inlier']].values
        br = BetaBinomialRegressor()
        br.fit(X_train, sens_train)
        sens_shape, _ = br.predict_shape_params(X_test)
        # Reuse the same regressor object for specificity.
        br.fit(X_train, spec_train)
        spec_shape, _ = br.predict_shape_params(X_test)
        points = []
        rows = []
        for i, row in data_test.iterrows():
            # n: total trials; t: total "positive" outcomes (outlier hits plus
            # inlier misses), the statistic the interval is computed from.
            n = row['N_outlier'] + row['N_inlier']
            t = row['K_outlier'] + row['N_inlier'] - row['K_inlier']
            theta = row['theta']
            sens_a_est, sens_b_est = sens_shape[i, :]
            spec_a_est, spec_b_est = spec_shape[i, :]
            # True shape params occupy the last four generated columns.
            sens_a, sens_b = data_test.iloc[i, -4], data_test.iloc[i, -3]
            spec_a, spec_b = data_test.iloc[i, -2], data_test.iloc[i, -1]
            point = [n, t, sens_a_est, sens_b_est, spec_a_est, spec_b_est]
            points.append(point)
            rows.append(
                point
                + [
                    sens_a,
                    sens_b,
                    spec_a,
                    spec_b,
                    KL_beta(sens_a, sens_b, sens_a_est, sens_b_est),
                    KL_beta(spec_a, spec_b, spec_a_est, spec_b_est),
                    theta,
                ]
            )
        # Interval computation is embarrassingly parallel across problems.
        with Pool(self.n_jobs) as pool:
            intervals = pool.starmap(equal_tailed_interval, points)
        data = np.array(rows)
        intervals = np.array(intervals)
        data = np.hstack([data, intervals])
        data = pd.DataFrame(
            data,
            columns=[
                "n",
                "t",
                "sens_a_est",
                "sens_b_est",
                "spec_a_est",
                "spec_b_est",
                "sens_a",
                "sens_b",
                "spec_a",
                "spec_b",
                "KL_sens",
                "KL_spec",
                "theta",
                "left",
                "right",
            ],
        )
        return data
import pandas as pd
from multiprocessing import Pool
from scipy.special import expit
from scipy.stats import beta
from scipy.stats import powerlaw
from opaque.betabinomial_regression import BetaBinomialRegressor
from opaque.stats import equal_tailed_interval, KL_beta
class EndtoEndSimulator:
    """End-to-end simulation of beta-binomial sensitivity/specificity modeling.

    Covariates are drawn from a multivariate normal; per-sample sensitivity
    and specificity are drawn from beta distributions whose mean (logit link)
    and dispersion (log link) depend linearly on the covariates. ``run`` fits
    a BetaBinomialRegressor on simulated training data and evaluates interval
    estimates on simulated test data.
    """

    def __init__(
        self,
        sens_coefs_mean,
        sens_coefs_disp,
        spec_coefs_mean,
        spec_coefs_disp,
        sens_noise_mean=0.0,
        sens_noise_disp=0.0,
        spec_noise_mean=0.0,
        spec_noise_disp=0.0,
        cov=None,
        n_shape=0.2,
        n_loc=30,
        n_scale=1000,
        random_state=None,
        n_jobs=1,
    ):
        """
        Parameters
        ----------
        sens_coefs_mean, sens_coefs_disp : sequence of float
            Intercept + covariate coefficients for sensitivity mean (logit
            link) and dispersion (log link).
        spec_coefs_mean, spec_coefs_disp : sequence of float
            Same for specificity.
        sens_noise_mean, sens_noise_disp, spec_noise_mean, spec_noise_disp : float
            Std-dev of Gaussian noise added to the linear predictors.
        cov : array-like, optional
            Covariance matrix of the covariates; identity by default.
        n_shape, n_loc, n_scale : float
            Parameters of the power-law distribution of per-problem counts.
        random_state : None, int, or np.random.RandomState
            Source of randomness; pass an int for reproducible runs.
        n_jobs : int
            Worker processes used to compute intervals in ``run``.
        """
        if cov is None:
            # One independent, unit-variance covariate per non-intercept coef.
            cov = np.diag(np.full(len(sens_coefs_mean) - 1, 1.0))
        else:
            cov = np.array(cov)
        if random_state is None:
            self.random_state = np.random.RandomState()
        elif isinstance(random_state, int):
            self.random_state = np.random.RandomState(random_state)
        else:
            self.random_state = random_state
        # Coefficient vectors carry an intercept term, hence cov.shape[0] + 1.
        assert len(sens_coefs_mean) == len(sens_coefs_disp) == cov.shape[0] + 1
        assert len(spec_coefs_mean) == len(spec_coefs_disp) == cov.shape[0] + 1
        self.sens_coefs_mean = np.array(sens_coefs_mean)
        self.sens_coefs_disp = np.array(sens_coefs_disp)
        self.spec_coefs_mean = np.array(spec_coefs_mean)
        self.spec_coefs_disp = np.array(spec_coefs_disp)
        self.sens_noise_mean = sens_noise_mean
        self.sens_noise_disp = sens_noise_disp
        self.spec_noise_mean = spec_noise_mean
        self.spec_noise_disp = spec_noise_disp
        self.cov = cov
        self.num_covariates = cov.shape[0]
        self.n_shape = n_shape
        self.n_loc = n_loc
        self.n_scale = n_scale
        self.n_jobs = n_jobs

    def generate_data(self, size):
        """Simulate ``size`` diagnostic problems and return them as a DataFrame."""
        X = self.random_state.multivariate_normal(
            np.zeros(self.cov.shape[0]), self.cov, size=size
        )
        # Prepend the intercept column.
        X = np.hstack([np.full((X.shape[0], 1), 1), X])
        # BUG FIX: all noise draws now come from self.random_state rather than
        # the global np.random, so a fixed random_state gives reproducible data.
        sens_mu = expit(
            X.dot(self.sens_coefs_mean)
            + self.random_state.normal(0, self.sens_noise_mean, size=size)
        )
        sens_nu = np.exp(
            X.dot(self.sens_coefs_disp)
            + self.random_state.normal(0, self.sens_noise_disp, size=size)
        )
        # Beta prior parameterized by mean/precision: a = mu*nu, b = (1-mu)*nu.
        sens_prior = beta(sens_mu * sens_nu, (1 - sens_mu) * sens_nu)
        sens_prior.random_state = self.random_state
        sens = sens_prior.rvs()
        spec_mu = expit(
            X.dot(self.spec_coefs_mean)
            + self.random_state.normal(0, self.spec_noise_mean, size=size)
        )
        spec_nu = np.exp(
            X.dot(self.spec_coefs_disp)
            + self.random_state.normal(0, self.spec_noise_disp, size=size)
        )
        spec_prior = beta(spec_mu * spec_nu, (1 - spec_mu) * spec_nu)
        spec_prior.random_state = self.random_state
        spec = spec_prior.rvs()
        sens.shape = sens_mu.shape = sens_nu.shape = (size, 1)
        spec.shape = spec_mu.shape = spec_nu.shape = (size, 1)
        # Per-problem sample counts follow a shifted/scaled power law.
        N_dist = powerlaw(a=self.n_shape, loc=self.n_loc, scale=self.n_scale)
        N_dist.random_state = self.random_state
        N_inlier = np.floor(N_dist.rvs(size=sens.shape)).astype(int)
        N_outlier = np.floor(N_dist.rvs(size=sens.shape)).astype(int)
        K_inlier = self.random_state.binomial(N_inlier, p=spec)
        K_outlier = self.random_state.binomial(N_outlier, p=sens)
        # True outlier prevalence.
        theta = N_outlier / (N_inlier + N_outlier)
        data = np.hstack(
            [
                X[:, 1:],
                sens,
                spec,
                N_inlier,
                K_inlier,
                N_outlier,
                K_outlier,
                theta,
                sens_mu,
                sens_nu,
                spec_mu,
                spec_nu,
                sens_mu * sens_nu,
                (1 - sens_mu) * sens_nu,
                spec_mu * spec_nu,
                (1 - spec_mu) * spec_nu,
            ]
        )
        data = pd.DataFrame(
            data,
            columns=[f"X{i}" for i in range(self.num_covariates)]
            + [
                "sens",
                "spec",
                "N_inlier",
                "K_inlier",
                "N_outlier",
                "K_outlier",
                "theta",
                "sens_mu",
                "sens_nu",
                "spec_mu",
                "spec_nu",
                "sens_a",
                "sens_b",
                "spec_a",
                "spec_b",
            ],
        )
        return data

    def run(self, size_train=1000, size_test=200):
        """Fit regressors on simulated training data and compute equal-tailed
        prevalence intervals on simulated test data; return one row per test
        problem with estimated/true shape params, KL divergences and interval.
        """
        data_train = self.generate_data(size=size_train)
        data_test = self.generate_data(size=size_test)
        X_train = data_train.iloc[:, : self.num_covariates].values
        X_test = data_test.iloc[:, : self.num_covariates].values
        sens_train = data_train[['N_outlier', 'K_outlier']].values
        spec_train = data_train[['N_inlier', 'K_inlier']].values
        br = BetaBinomialRegressor()
        br.fit(X_train, sens_train)
        sens_shape, _ = br.predict_shape_params(X_test)
        # Reuse the same regressor object for specificity.
        br.fit(X_train, spec_train)
        spec_shape, _ = br.predict_shape_params(X_test)
        points = []
        rows = []
        for i, row in data_test.iterrows():
            # n: total trials; t: total "positive" outcomes (outlier hits plus
            # inlier misses), the statistic the interval is computed from.
            n = row['N_outlier'] + row['N_inlier']
            t = row['K_outlier'] + row['N_inlier'] - row['K_inlier']
            theta = row['theta']
            sens_a_est, sens_b_est = sens_shape[i, :]
            spec_a_est, spec_b_est = spec_shape[i, :]
            # True shape params occupy the last four generated columns.
            sens_a, sens_b = data_test.iloc[i, -4], data_test.iloc[i, -3]
            spec_a, spec_b = data_test.iloc[i, -2], data_test.iloc[i, -1]
            point = [n, t, sens_a_est, sens_b_est, spec_a_est, spec_b_est]
            points.append(point)
            rows.append(
                point
                + [
                    sens_a,
                    sens_b,
                    spec_a,
                    spec_b,
                    KL_beta(sens_a, sens_b, sens_a_est, sens_b_est),
                    KL_beta(spec_a, spec_b, spec_a_est, spec_b_est),
                    theta,
                ]
            )
        # Interval computation is embarrassingly parallel across problems.
        with Pool(self.n_jobs) as pool:
            intervals = pool.starmap(equal_tailed_interval, points)
        data = np.array(rows)
        intervals = np.array(intervals)
        data = np.hstack([data, intervals])
        data = pd.DataFrame(
            data,
            columns=[
                "n",
                "t",
                "sens_a_est",
                "sens_b_est",
                "spec_a_est",
                "spec_b_est",
                "sens_a",
                "sens_b",
                "spec_a",
                "spec_b",
                "KL_sens",
                "KL_spec",
                "theta",
                "left",
                "right",
            ],
        )
        return data
import json
import string
import sys
from geopy.geocoders import Nominatim
# Open files of tweets; keep English tweets with usable coordinates,
# reverse-geocode each one, and write a GeoJSON-style feature file.
geolocator = Nominatim()
file_list = ['stream_Alice.json', 'stream_Clank_2105.json', 'stream_deadpool0803.json', 'stream_deadpool1103.json', 'stream_deadpool.json', 'stream_Deadpool.json', 'stream_Finding_Dory.json', 'stream_Huntsman_2105.json', 'stream_Huntsman.json', 'stream_jungle_book.json', 'stream_Mogli_2105.json', 'stream_Mogli.json', 'stream_Ratchet_2105.json', 'stream_revenant_begin.json', 'stream_revenant.json', 'stream_War_2105.json', 'stream_Warcraft.json', 'stream_Zootopia_2105.json']
cont = 1
for fname in file_list:
    end_fname = fname.split('.')[0]
    infile = 'data/' + fname
    outfile = 'workflow/nLabel/info/' + end_fname + '.json'
    print("Processando file: " + infile)
    with open(infile, 'r') as f, open(outfile, 'w') as out:
        nmbr_lines = 0
        out.write('{\n\t"type": "FeatureCollection",\n\t"features": [\n')
        for line in f:
            tweet = json.loads(line)
            if ("coordinates" in tweet.keys() and tweet.get('coordinates') != None and tweet['lang'] == 'en'):
                coord = tweet.get('coordinates')['coordinates']
                # GeoJSON position order is [longitude, latitude].
                # BUG FIX: the bounds were swapped (longitude tested against
                # +-90); longitude spans +-180 and latitude +-90.
                if -180 <= coord[0] <= 180 and -90 <= coord[1] <= 90:
                    # Nominatim's reverse lookup expects "lat, lon".
                    c = str(coord[1]) + ", " + str(coord[0])
                    location = geolocator.reverse(c, timeout=100)
                    # BUG FIX: reset per tweet — previously these leaked from
                    # the prior iteration when the lookup lacked a country.
                    state = None
                    country = None
                    address = None
                    raw_address = location.raw.get('address')
                    if raw_address:
                        country = raw_address.get('country')
                        state = raw_address.get('state')
                        if country and state:
                            address = state + ", " + country
                        elif country:
                            address = country
                    info = {
                        "geometry": tweet['coordinates'],
                        "tweet_id": str(tweet['id']),
                        "tweet": tweet['text'],
                        "user_location": tweet['user']['location'],
                        "user_id": tweet['user']['id'],
                        "user_name": tweet['user']['name']
                    }
                    if state is not None:
                        info['state'] = state
                    if country is not None:
                        info['country'] = country
                    info['address'] = address
                    # BUG FIX: count and write the separator only when a
                    # feature is actually emitted; previously a comma was
                    # written before the bounds check, leaving dangling
                    # commas (invalid JSON) for out-of-bounds tweets.
                    nmbr_lines += 1
                    if nmbr_lines > 1:
                        out.write(',\n')
                    out.write(json.dumps(info, indent=4, separators=(',', ': ')))
        out.write('\n\t]\n}')
    print(str(cont) + " files concluídos")
    cont += 1
import string
import sys
from geopy.geocoders import Nominatim
# Open files of tweets; keep English tweets with usable coordinates,
# reverse-geocode each one, and write a GeoJSON-style feature file.
geolocator = Nominatim()
file_list = ['stream_Alice.json', 'stream_Clank_2105.json', 'stream_deadpool0803.json', 'stream_deadpool1103.json', 'stream_deadpool.json', 'stream_Deadpool.json', 'stream_Finding_Dory.json', 'stream_Huntsman_2105.json', 'stream_Huntsman.json', 'stream_jungle_book.json', 'stream_Mogli_2105.json', 'stream_Mogli.json', 'stream_Ratchet_2105.json', 'stream_revenant_begin.json', 'stream_revenant.json', 'stream_War_2105.json', 'stream_Warcraft.json', 'stream_Zootopia_2105.json']
cont = 1
for fname in file_list:
    end_fname = fname.split('.')[0]
    infile = 'data/' + fname
    outfile = 'workflow/nLabel/info/' + end_fname + '.json'
    print("Processando file: " + infile)
    with open(infile, 'r') as f, open(outfile, 'w') as out:
        nmbr_lines = 0
        out.write('{\n\t"type": "FeatureCollection",\n\t"features": [\n')
        for line in f:
            tweet = json.loads(line)
            if ("coordinates" in tweet.keys() and tweet.get('coordinates') != None and tweet['lang'] == 'en'):
                coord = tweet.get('coordinates')['coordinates']
                # GeoJSON position order is [longitude, latitude].
                # BUG FIX: the bounds were swapped (longitude tested against
                # +-90); longitude spans +-180 and latitude +-90.
                if -180 <= coord[0] <= 180 and -90 <= coord[1] <= 90:
                    # Nominatim's reverse lookup expects "lat, lon".
                    c = str(coord[1]) + ", " + str(coord[0])
                    location = geolocator.reverse(c, timeout=100)
                    # BUG FIX: reset per tweet — previously these leaked from
                    # the prior iteration when the lookup lacked a country.
                    state = None
                    country = None
                    address = None
                    raw_address = location.raw.get('address')
                    if raw_address:
                        country = raw_address.get('country')
                        state = raw_address.get('state')
                        if country and state:
                            address = state + ", " + country
                        elif country:
                            address = country
                    info = {
                        "geometry": tweet['coordinates'],
                        "tweet_id": str(tweet['id']),
                        "tweet": tweet['text'],
                        "user_location": tweet['user']['location'],
                        "user_id": tweet['user']['id'],
                        "user_name": tweet['user']['name']
                    }
                    if state is not None:
                        info['state'] = state
                    if country is not None:
                        info['country'] = country
                    info['address'] = address
                    # BUG FIX: count and write the separator only when a
                    # feature is actually emitted; previously a comma was
                    # written before the bounds check, leaving dangling
                    # commas (invalid JSON) for out-of-bounds tweets.
                    nmbr_lines += 1
                    if nmbr_lines > 1:
                        out.write(',\n')
                    out.write(json.dumps(info, indent=4, separators=(',', ': ')))
        out.write('\n\t]\n}')
    print(str(cont) + " files concluídos")
    cont += 1
from PIL import Image, ImageOps
from pathlib import Path
import os
import json
import re
# RGBA source-image colour -> tile id for miscellaneous foreground markers.
# NOTE(review): ids appear to index into the Tiled tilesets declared later in
# this file (cf. firstgid 198) — confirm against the tileset definitions.
# Several distinct colours intentionally map to the same id.
MISC_IDS = {
    (220, 255, 166, 255): 200, #Invisible Wall (Boundary)
    (128, 128, 128, 255): 206, #Surface 0
    (100, 100, 100, 255): 206, #Surface 0
    (204, 186, 143, 255): 206, #Surface 0
    (204, 176, 143, 255): 206, #Surface 0
    (143, 186, 204, 255): 207, #Surface 1
    (143, 176, 204, 255): 207, #Surface 1
    (177, 204, 143, 255): 208, #Surface 2
    (177, 194, 143, 255): 208 #Surface 2
    # Entries below have no source colour assigned yet.
    #"": 253, #Invisible Wall (Structure)
    #"": 254, #Underwater Boundary
    #"": 255, #Zero G
    #"": 256, #Zero G (protected)
    #"": 257, #World Gen Must Contain Ocean Liquid
    #"": 258, #World Gen Must Not Contain Ocean Liquid
    #"": 240, #World Gen Must Contain Solid
}
# RGBA colour -> tile id for background-layer markers.
MISC_BACKGROUND_IDS = {
    (255, 0, 220, 255): 199, #Magic Pink Brush
    (200, 200, 200, 255): 206, #Surface 0
    (255, 232, 178, 255): 206, #Surface 0
    (255, 222, 178, 255): 206, #Surface 0
    (178, 232, 255, 255): 207, #Surface 1
    (178, 222, 255, 255): 207, #Surface 1
    (222, 255, 178, 255): 208, #Surface 2
    (222, 245, 178, 255): 208, #Surface 2
    (32, 32, 32, 255): 198, #Fill with air
    (48, 48, 48, 255): 209 #Fill with air (Overwritable)
}
# RGBA colour -> id for spawn/anchor/connector markers.
ANCHOR_IDS = {
    (85, 255, 0, 255): 201, #Player Start
    (120, 120, 120, 255): 202, #World Gen Must Contain Air
    (0, 0, 0, 255): 214, #World Gen Must Contain Air Background
    (255, 255, 255, 255): 215, #World Gen Must Contain Solid Background
    (255, 168, 0, 255): 210, #Red Connector
    (0, 255, 186, 255): 211, #Yellow Connector
    (168, 255, 0, 255): 212, #Green Connector
    (0, 38, 255, 255): 213 #Blue Connector
}
# RGBA colour -> id for biome-placed objects.
BIOME_OBJECT_IDS = {
    (34, 102, 0, 255): 204, #Biome Item
    (26, 77, 0, 255): 205 #Biome Tree
}
class MapData:
    """Skeleton of a Tiled (1.3.x) JSON map for one converted dungeon image.

    The constructor fills ``self.map_data`` with the same structure the
    original hand-written ~280-line literal produced: two tile layers
    ("back", "front"), eight object-group layers, and the fixed table of
    Starbound tileset references.  The repetitive parts are generated
    from small spec tables instead of one giant literal.
    """

    # Common root of all tileset references, 7 directory levels up from the
    # map file.  The original literal wrote these paths with "\/" pairs;
    # "\/" is an invalid Python escape sequence (SyntaxWarning since 3.12),
    # so the identical characters (backslash + slash) are produced here via
    # an explicit replace("/", "\\/") instead.
    _TILESET_ROOT = ("../" * 7 +
                     "Program Files (x86)/Steam/steamapps/common/Starbound/"
                     "assets/_unpacked/tilesets/packed/")

    # (firstgid, path relative to _TILESET_ROOT), ascending by gid.
    _TILESETS = [
        (1, "materials.json"),
        (198, "miscellaneous.json"),
        (222, "liquids.json"),
        (250, "supports.json"),
        (287, "objects-by-race/generic.json"),
        (2271, "objects-by-race/ancient.json"),
        (2434, "objects-by-race/apex.json"),
        (2805, "objects-by-race/avian.json"),
        (3110, "objects-by-race/floran.json"),
        (3305, "objects-by-race/glitch.json"),
        (3531, "objects-by-race/human.json"),
        (3819, "objects-by-race/hylotl.json"),
        (4051, "objects-by-race/novakid.json"),
        (4115, "objects-by-category/crafting.json"),
        (4195, "objects-by-category/decorative.json"),
        (5636, "objects-by-category/door.json"),
        (5768, "objects-by-category/farmable.json"),
        (5843, "objects-by-category/furniture.json"),
        (6197, "objects-by-category/light.json"),
        (6655, "objects-by-category/other.json"),
        (6967, "objects-by-category/pot.json"),
        (7264, "objects-by-category/sapling.json"),
        (7265, "objects-by-category/spawner.json"),
        (7281, "objects-by-category/storage.json"),
        (7515, "objects-by-category/teleporter.json"),
        (7557, "objects-by-category/tools.json"),
        (7562, "objects-by-category/trap.json"),
        (7766, "objects-by-category/wire.json"),
        (7988, "objects-by-type/container.json"),
        (8273, "objects-by-type/farmable.json"),
        (8351, "objects-by-type/loungeable.json"),
        (8632, "objects-by-type/noisy.json"),
        (8673, "objects-by-type/teleporter.json"),
        (8700, "huge-objects.json"),
    ]

    # (layer id, name, opacity) for the two tile layers.
    _TILE_LAYERS = [(1, "back", 0.5), (2, "front", 1)]

    # (layer id, name, colour-or-None) for the eight object-group layers.
    _OBJECT_GROUPS = [
        (3, "mods", "#5555ff"),
        (4, "objects", "#ff0000"),
        (5, "wiring - lights & guns", "#ffff00"),
        (6, "monsters & npcs", "#ff0000"),
        (7, "wiring - locked door", "#00ffff"),
        (8, "outside the map", None),
        (9, "anchors etc", None),
        (10, "items", None),
    ]

    def __init__(self, height, width):
        """Build the empty map dict for a *width* x *height* tile image."""
        layers = [
            {
                "data": [],
                "height": height,
                "id": layer_id,
                "name": name,
                "opacity": opacity,
                "type": "tilelayer",
                "visible": True,
                "width": width,
                "x": 0,
                "y": 0,
            }
            for layer_id, name, opacity in self._TILE_LAYERS
        ]
        for layer_id, name, color in self._OBJECT_GROUPS:
            group = {}
            if color is not None:
                # "color" inserted first to keep the JSON key order
                # identical to the original literal.
                group["color"] = color
            group.update({
                "draworder": "topdown",
                "id": layer_id,
                "name": name,
                "objects": [],
                "opacity": 1,
                "type": "objectgroup",
                "visible": True,
                "x": 0,
                "y": 0,
            })
            layers.append(group)
        self.map_data = {
            "backgroundcolor": "#000000",
            "compressionlevel": -1,
            "editorsettings": {"export": {"target": "."}},
            "height": height,
            "infinite": False,
            "layers": layers,
            "nextlayerid": 11,
            # Starting object id, carried over verbatim from the original
            # template; the converter increments it per placed object.
            "nextobjectid": 679,
            "orientation": "orthogonal",
            "renderorder": "right-down",
            "tiledversion": "1.3.5",
            "tileheight": 8,
            "tilesets": [
                {"firstgid": gid,
                 "source": (self._TILESET_ROOT + rel).replace("/", "\\/")}
                for gid, rel in self._TILESETS
            ],
            "tilewidth": 8,
            "type": "map",
            "version": 1.2,
            "width": width,
        }
class StarboundObject:
    """One Tiled object entry; tile coordinates are converted to pixels."""

    # One map tile is 8x8 pixels (tilewidth/tileheight in MapData).
    _TILE_PX = 8

    def __init__(self, gid, height, id, width, x, y, offset_x, offset_y):
        px = x * self._TILE_PX + offset_x
        # +8 shifts the anchor down one tile; NOTE(review): presumably
        # because Tiled anchors tile objects at their bottom edge — confirm.
        py = y * self._TILE_PX - offset_y + self._TILE_PX
        self.object_data = {
            "gid": gid,
            "height": height,
            "id": id,
            "name": "",
            "rotation": 0,
            "type": "",
            "visible": True,
            "width": width,
            "x": px,
            "y": py,
        }
# All resource folders/files are resolved relative to this script's own
# directory, so the converter works regardless of the current working dir.
input_folder = os.path.dirname(os.path.realpath(__file__)) + "/input/"
output_folder = os.path.dirname(os.path.realpath(__file__)) + "/output/"
object_dir_path = os.path.dirname(os.path.realpath(__file__)) + "/objects/"
# Pre-generated lookup tables shipped next to the script.
# object_sizes.json: object name -> [width, height] (see add_data's usage).
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_sizes.json", 'r') as read_file:
    OBJECT_SIZES = json.load(read_file)
# object_offsets.json: object name -> [offset_x, offset_y] pixel offsets.
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_offsets.json", 'r') as read_file:
    OBJECT_OFFSETS = json.load(read_file)
# tile_ids.json: material name -> tile gid.
with open(os.path.dirname(os.path.realpath(__file__)) + "/tile_ids.json", 'r') as read_file:
    TILE_IDS = json.load(read_file)
# object_ids.json / object_flipped_ids.json: gid -> object-name tables
# (flipped = left-facing variants).  NOTE(review): key direction inferred
# from add_data, which scans values and uses the matching key as gid.
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_ids.json", 'r') as read_file:
    OBJECT_IDS = json.load(read_file)
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_flipped_ids.json", 'r') as read_file:
    OBJECT_FLIPPED_IDS = json.load(read_file)
def add_data(ids, tiles, new_map, x, y):
    """Place the object named *tiles* on the map at tile (x, y).

    *ids* is scanned for the first key whose mapped value equals *tiles*;
    that key is used as the object's gid.  On a match the object is added
    to the "objects" layer and both tile layers receive filler values
    (0 front, 199 back); without a match nothing is appended at all.
    """
    gid = next((key for key, name in ids.items() if name == tiles), None)
    if gid is None:
        return
    # NOTE(review): assumes OBJECT_SIZES/OBJECT_OFFSETS both contain
    # *tiles* — a missing entry raises UnboundLocalError, same as before.
    if tiles in OBJECT_SIZES:
        size = OBJECT_SIZES[tiles]
    if tiles in OBJECT_OFFSETS:
        offset = OBJECT_OFFSETS[tiles]
    layers = new_map.map_data["layers"]
    placed = StarboundObject(gid, size[1], new_map.map_data["nextobjectid"],
                             size[0], x, y, offset[0], offset[1])
    layers[3]["objects"].append(placed.object_data)
    new_map.map_data["nextobjectid"] += 1
    layers[1]["data"].append(0)
    layers[0]["data"].append(199)
def convert():
    """Convert every image in ``input_folder`` into a Tiled JSON map.

    Each pixel is resolved, in order, against MISC_IDS (foreground tile),
    MISC_BACKGROUND_IDS (background tile) and ANCHOR_IDS (anchor object);
    any other colour is looked up in the "tiles" brush list of a
    user-supplied .dungeon file.  One <image-name>.json is written to
    ``output_folder`` per input image.

    Fixes over the original:
      * the .dungeon file is parsed at most once (it used to be re-opened
        and re-parsed for every single unmatched pixel);
      * images are opened via a context manager so handles are closed;
      * pixels matching no brush at all now pad both tile layers, keeping
        the layer data aligned with the map dimensions (the original
        silently skipped them, corrupting the layer arrays).
    """
    dungeon_file = Path(input('Please input the path to your .dungeon file: '))
    dungeon_data = None  # parsed lazily, on the first pixel that needs it
    for file in os.listdir(input_folder):
        with Image.open(input_folder + file) as current_image:
            new_map = MapData(current_image.height, current_image.width)
            layers = new_map.map_data["layers"]
            back = layers[0]["data"]   # background tile layer
            front = layers[1]["data"]  # foreground tile layer
            for y in range(current_image.height):
                for x in range(current_image.width):
                    pixel = current_image.getpixel((x, y))
                    if pixel in MISC_IDS:
                        front.append(MISC_IDS[pixel])
                        back.append(199)  # 199 = Magic Pink Brush filler
                        continue
                    if pixel in MISC_BACKGROUND_IDS:
                        back.append(MISC_BACKGROUND_IDS[pixel])
                        front.append(0)
                        continue
                    if pixel in ANCHOR_IDS:
                        anchor = StarboundObject(ANCHOR_IDS[pixel], 8,
                                                 new_map.map_data["nextobjectid"],
                                                 8, x, y, 0, 0)
                        # Anchors go into the "anchors etc" layer (index 8).
                        layers[8]["objects"].append(anchor.object_data)
                        new_map.map_data["nextobjectid"] += 1
                        front.append(0)
                        back.append(199)
                        continue
                    # Unknown colour: consult the .dungeon brush definitions.
                    if dungeon_data is None:
                        with open(dungeon_file, 'r') as read_file:
                            # .dungeon files may contain " //" comment lines
                            # that a strict JSON parser rejects.
                            fixed_json = ''.join(
                                line for line in read_file
                                if not line.startswith(" //"))
                        dungeon_data = json.loads(fixed_json)
                    for tiles in dungeon_data["tiles"]:
                        if "brush" not in tiles:
                            continue
                        value = tiles["value"]
                        if pixel != (value[0], value[1], value[2], value[3]):
                            continue
                        brush = tiles["brush"]
                        if "npc" in str(brush) or "stagehand" in str(brush):
                            # NPC/stagehand brushes are not placed as tiles.
                            front.append(0)
                            back.append(199)
                        else:
                            material = brush[1][1]
                            if material in TILE_IDS:
                                if "foreground" in tiles["comment"]:
                                    front.append(TILE_IDS[material])
                                    back.append(199)
                                elif "background" in tiles["comment"]:
                                    back.append(TILE_IDS[material])
                                    front.append(0)
                                else:
                                    # No placement hint: default to foreground.
                                    front.append(TILE_IDS[material])
                                    back.append(199)
                            elif "right" in tiles["comment"]:
                                add_data(OBJECT_IDS, material, new_map, x, y)
                            elif "left" in tiles["comment"]:
                                # Left-facing objects use the flipped gid table.
                                add_data(OBJECT_FLIPPED_IDS, material, new_map, x, y)
                            else:
                                add_data(OBJECT_IDS, material, new_map, x, y)
                        break
                    else:
                        # No brush matched: pad both layers so the data
                        # arrays stay in sync with the map dimensions.
                        front.append(0)
                        back.append(199)
            base_name = os.path.splitext(file)[0]
            with open(output_folder + base_name + ".json", "w") as write_file:
                json.dump(new_map.map_data, write_file, indent=4)
if __name__ == '__main__':
convert() | Starbound Dungeon Converter v2/SDVv2.py | from PIL import Image, ImageOps
from pathlib import Path
import os
import json
import re
# RGBA image colour -> foreground tile gid for "special" tiles.
# Values 200-208 fall in the miscellaneous tileset's gid range
# (firstgid 198 in MapData's tileset table); several colours alias
# the same surface id.
MISC_IDS = {
    (220, 255, 166, 255): 200,  # Invisible Wall (Boundary)
    (128, 128, 128, 255): 206,  # Surface 0
    (100, 100, 100, 255): 206,  # Surface 0
    (204, 186, 143, 255): 206,  # Surface 0
    (204, 176, 143, 255): 206,  # Surface 0
    (143, 186, 204, 255): 207,  # Surface 1
    (143, 176, 204, 255): 207,  # Surface 1
    (177, 204, 143, 255): 208,  # Surface 2
    (177, 194, 143, 255): 208   # Surface 2
    # Placeholder ids with no colour assigned yet:
    #"": 253, #Invisible Wall (Structure)
    #"": 254, #Underwater Boundary
    #"": 255, #Zero G
    #"": 256, #Zero G (protected)
    #"": 257, #World Gen Must Contain Ocean Liquid
    #"": 258, #World Gen Must Not Contain Ocean Liquid
    #"": 240, #World Gen Must Contain Solid
}
# RGBA image colour -> background tile gid.  Surface ids 206-208 are the
# same ones used in MISC_IDS; 198/209 are the two "fill with air" variants.
MISC_BACKGROUND_IDS = {
    (255, 0, 220, 255): 199,    # Magic Pink Brush
    (200, 200, 200, 255): 206,  # Surface 0
    (255, 232, 178, 255): 206,  # Surface 0
    (255, 222, 178, 255): 206,  # Surface 0
    (178, 232, 255, 255): 207,  # Surface 1
    (178, 222, 255, 255): 207,  # Surface 1
    (222, 255, 178, 255): 208,  # Surface 2
    (222, 245, 178, 255): 208,  # Surface 2
    (32, 32, 32, 255): 198,     # Fill with air
    (48, 48, 48, 255): 209      # Fill with air (Overwritable)
}
# RGBA image colour -> gid for 8x8 anchor/connector objects.  Matching
# pixels become entries in the "anchors etc" object layer (layer index 8)
# rather than tiles — see convert().
ANCHOR_IDS = {
    (85, 255, 0, 255): 201,      # Player Start
    (120, 120, 120, 255): 202,   # World Gen Must Contain Air
    (0, 0, 0, 255): 214,         # World Gen Must Contain Air Background
    (255, 255, 255, 255): 215,   # World Gen Must Contain Solid Background
    (255, 168, 0, 255): 210,     # Red Connector
    (0, 255, 186, 255): 211,     # Yellow Connector
    (168, 255, 0, 255): 212,     # Green Connector
    (0, 38, 255, 255): 213       # Blue Connector
}
# RGBA image colour -> gid for biome-placed items/trees.
# NOTE(review): defined but not referenced anywhere in this script.
BIOME_OBJECT_IDS = {
    (34, 102, 0, 255): 204,  # Biome Item
    (26, 77, 0, 255): 205    # Biome Tree
}
class MapData:
    """Skeleton of a Tiled (1.3.x) JSON map for one converted dungeon image.

    The constructor fills ``self.map_data`` with the same structure the
    original hand-written ~280-line literal produced: two tile layers
    ("back", "front"), eight object-group layers, and the fixed table of
    Starbound tileset references.  The repetitive parts are generated
    from small spec tables instead of one giant literal.
    """

    # Common root of all tileset references, 7 directory levels up from the
    # map file.  The original literal wrote these paths with "\/" pairs;
    # "\/" is an invalid Python escape sequence (SyntaxWarning since 3.12),
    # so the identical characters (backslash + slash) are produced here via
    # an explicit replace("/", "\\/") instead.
    _TILESET_ROOT = ("../" * 7 +
                     "Program Files (x86)/Steam/steamapps/common/Starbound/"
                     "assets/_unpacked/tilesets/packed/")

    # (firstgid, path relative to _TILESET_ROOT), ascending by gid.
    _TILESETS = [
        (1, "materials.json"),
        (198, "miscellaneous.json"),
        (222, "liquids.json"),
        (250, "supports.json"),
        (287, "objects-by-race/generic.json"),
        (2271, "objects-by-race/ancient.json"),
        (2434, "objects-by-race/apex.json"),
        (2805, "objects-by-race/avian.json"),
        (3110, "objects-by-race/floran.json"),
        (3305, "objects-by-race/glitch.json"),
        (3531, "objects-by-race/human.json"),
        (3819, "objects-by-race/hylotl.json"),
        (4051, "objects-by-race/novakid.json"),
        (4115, "objects-by-category/crafting.json"),
        (4195, "objects-by-category/decorative.json"),
        (5636, "objects-by-category/door.json"),
        (5768, "objects-by-category/farmable.json"),
        (5843, "objects-by-category/furniture.json"),
        (6197, "objects-by-category/light.json"),
        (6655, "objects-by-category/other.json"),
        (6967, "objects-by-category/pot.json"),
        (7264, "objects-by-category/sapling.json"),
        (7265, "objects-by-category/spawner.json"),
        (7281, "objects-by-category/storage.json"),
        (7515, "objects-by-category/teleporter.json"),
        (7557, "objects-by-category/tools.json"),
        (7562, "objects-by-category/trap.json"),
        (7766, "objects-by-category/wire.json"),
        (7988, "objects-by-type/container.json"),
        (8273, "objects-by-type/farmable.json"),
        (8351, "objects-by-type/loungeable.json"),
        (8632, "objects-by-type/noisy.json"),
        (8673, "objects-by-type/teleporter.json"),
        (8700, "huge-objects.json"),
    ]

    # (layer id, name, opacity) for the two tile layers.
    _TILE_LAYERS = [(1, "back", 0.5), (2, "front", 1)]

    # (layer id, name, colour-or-None) for the eight object-group layers.
    _OBJECT_GROUPS = [
        (3, "mods", "#5555ff"),
        (4, "objects", "#ff0000"),
        (5, "wiring - lights & guns", "#ffff00"),
        (6, "monsters & npcs", "#ff0000"),
        (7, "wiring - locked door", "#00ffff"),
        (8, "outside the map", None),
        (9, "anchors etc", None),
        (10, "items", None),
    ]

    def __init__(self, height, width):
        """Build the empty map dict for a *width* x *height* tile image."""
        layers = [
            {
                "data": [],
                "height": height,
                "id": layer_id,
                "name": name,
                "opacity": opacity,
                "type": "tilelayer",
                "visible": True,
                "width": width,
                "x": 0,
                "y": 0,
            }
            for layer_id, name, opacity in self._TILE_LAYERS
        ]
        for layer_id, name, color in self._OBJECT_GROUPS:
            group = {}
            if color is not None:
                # "color" inserted first to keep the JSON key order
                # identical to the original literal.
                group["color"] = color
            group.update({
                "draworder": "topdown",
                "id": layer_id,
                "name": name,
                "objects": [],
                "opacity": 1,
                "type": "objectgroup",
                "visible": True,
                "x": 0,
                "y": 0,
            })
            layers.append(group)
        self.map_data = {
            "backgroundcolor": "#000000",
            "compressionlevel": -1,
            "editorsettings": {"export": {"target": "."}},
            "height": height,
            "infinite": False,
            "layers": layers,
            "nextlayerid": 11,
            # Starting object id, carried over verbatim from the original
            # template; the converter increments it per placed object.
            "nextobjectid": 679,
            "orientation": "orthogonal",
            "renderorder": "right-down",
            "tiledversion": "1.3.5",
            "tileheight": 8,
            "tilesets": [
                {"firstgid": gid,
                 "source": (self._TILESET_ROOT + rel).replace("/", "\\/")}
                for gid, rel in self._TILESETS
            ],
            "tilewidth": 8,
            "type": "map",
            "version": 1.2,
            "width": width,
        }
class StarboundObject:
    """One Tiled object entry; tile coordinates are converted to pixels."""

    # One map tile is 8x8 pixels (tilewidth/tileheight in MapData).
    _TILE_PX = 8

    def __init__(self, gid, height, id, width, x, y, offset_x, offset_y):
        px = x * self._TILE_PX + offset_x
        # +8 shifts the anchor down one tile; NOTE(review): presumably
        # because Tiled anchors tile objects at their bottom edge — confirm.
        py = y * self._TILE_PX - offset_y + self._TILE_PX
        self.object_data = {
            "gid": gid,
            "height": height,
            "id": id,
            "name": "",
            "rotation": 0,
            "type": "",
            "visible": True,
            "width": width,
            "x": px,
            "y": py,
        }
# All resource folders/files are resolved relative to this script's own
# directory, so the converter works regardless of the current working dir.
input_folder = os.path.dirname(os.path.realpath(__file__)) + "/input/"
output_folder = os.path.dirname(os.path.realpath(__file__)) + "/output/"
object_dir_path = os.path.dirname(os.path.realpath(__file__)) + "/objects/"
# Pre-generated lookup tables shipped next to the script.
# object_sizes.json: object name -> [width, height] (see add_data's usage).
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_sizes.json", 'r') as read_file:
    OBJECT_SIZES = json.load(read_file)
# object_offsets.json: object name -> [offset_x, offset_y] pixel offsets.
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_offsets.json", 'r') as read_file:
    OBJECT_OFFSETS = json.load(read_file)
# tile_ids.json: material name -> tile gid.
with open(os.path.dirname(os.path.realpath(__file__)) + "/tile_ids.json", 'r') as read_file:
    TILE_IDS = json.load(read_file)
# object_ids.json / object_flipped_ids.json: gid -> object-name tables
# (flipped = left-facing variants).  NOTE(review): key direction inferred
# from add_data, which scans values and uses the matching key as gid.
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_ids.json", 'r') as read_file:
    OBJECT_IDS = json.load(read_file)
with open(os.path.dirname(os.path.realpath(__file__)) + "/object_flipped_ids.json", 'r') as read_file:
    OBJECT_FLIPPED_IDS = json.load(read_file)
def add_data(ids, tiles, new_map, x, y):
    """Place the object named *tiles* on the map at tile (x, y).

    *ids* is scanned for the first key whose mapped value equals *tiles*;
    that key is used as the object's gid.  On a match the object is added
    to the "objects" layer and both tile layers receive filler values
    (0 front, 199 back); without a match nothing is appended at all.
    """
    gid = next((key for key, name in ids.items() if name == tiles), None)
    if gid is None:
        return
    # NOTE(review): assumes OBJECT_SIZES/OBJECT_OFFSETS both contain
    # *tiles* — a missing entry raises UnboundLocalError, same as before.
    if tiles in OBJECT_SIZES:
        size = OBJECT_SIZES[tiles]
    if tiles in OBJECT_OFFSETS:
        offset = OBJECT_OFFSETS[tiles]
    layers = new_map.map_data["layers"]
    placed = StarboundObject(gid, size[1], new_map.map_data["nextobjectid"],
                             size[0], x, y, offset[0], offset[1])
    layers[3]["objects"].append(placed.object_data)
    new_map.map_data["nextobjectid"] += 1
    layers[1]["data"].append(0)
    layers[0]["data"].append(199)
def convert():
    """Convert every image in ``input_folder`` into a Tiled JSON map.

    Each pixel is resolved, in order, against MISC_IDS (foreground tile),
    MISC_BACKGROUND_IDS (background tile) and ANCHOR_IDS (anchor object);
    any other colour is looked up in the "tiles" brush list of a
    user-supplied .dungeon file.  One <image-name>.json is written to
    ``output_folder`` per input image.

    Fixes over the original:
      * the .dungeon file is parsed at most once (it used to be re-opened
        and re-parsed for every single unmatched pixel);
      * images are opened via a context manager so handles are closed;
      * pixels matching no brush at all now pad both tile layers, keeping
        the layer data aligned with the map dimensions (the original
        silently skipped them, corrupting the layer arrays).
    """
    dungeon_file = Path(input('Please input the path to your .dungeon file: '))
    dungeon_data = None  # parsed lazily, on the first pixel that needs it
    for file in os.listdir(input_folder):
        with Image.open(input_folder + file) as current_image:
            new_map = MapData(current_image.height, current_image.width)
            layers = new_map.map_data["layers"]
            back = layers[0]["data"]   # background tile layer
            front = layers[1]["data"]  # foreground tile layer
            for y in range(current_image.height):
                for x in range(current_image.width):
                    pixel = current_image.getpixel((x, y))
                    if pixel in MISC_IDS:
                        front.append(MISC_IDS[pixel])
                        back.append(199)  # 199 = Magic Pink Brush filler
                        continue
                    if pixel in MISC_BACKGROUND_IDS:
                        back.append(MISC_BACKGROUND_IDS[pixel])
                        front.append(0)
                        continue
                    if pixel in ANCHOR_IDS:
                        anchor = StarboundObject(ANCHOR_IDS[pixel], 8,
                                                 new_map.map_data["nextobjectid"],
                                                 8, x, y, 0, 0)
                        # Anchors go into the "anchors etc" layer (index 8).
                        layers[8]["objects"].append(anchor.object_data)
                        new_map.map_data["nextobjectid"] += 1
                        front.append(0)
                        back.append(199)
                        continue
                    # Unknown colour: consult the .dungeon brush definitions.
                    if dungeon_data is None:
                        with open(dungeon_file, 'r') as read_file:
                            # .dungeon files may contain " //" comment lines
                            # that a strict JSON parser rejects.
                            fixed_json = ''.join(
                                line for line in read_file
                                if not line.startswith(" //"))
                        dungeon_data = json.loads(fixed_json)
                    for tiles in dungeon_data["tiles"]:
                        if "brush" not in tiles:
                            continue
                        value = tiles["value"]
                        if pixel != (value[0], value[1], value[2], value[3]):
                            continue
                        brush = tiles["brush"]
                        if "npc" in str(brush) or "stagehand" in str(brush):
                            # NPC/stagehand brushes are not placed as tiles.
                            front.append(0)
                            back.append(199)
                        else:
                            material = brush[1][1]
                            if material in TILE_IDS:
                                if "foreground" in tiles["comment"]:
                                    front.append(TILE_IDS[material])
                                    back.append(199)
                                elif "background" in tiles["comment"]:
                                    back.append(TILE_IDS[material])
                                    front.append(0)
                                else:
                                    # No placement hint: default to foreground.
                                    front.append(TILE_IDS[material])
                                    back.append(199)
                            elif "right" in tiles["comment"]:
                                add_data(OBJECT_IDS, material, new_map, x, y)
                            elif "left" in tiles["comment"]:
                                # Left-facing objects use the flipped gid table.
                                add_data(OBJECT_FLIPPED_IDS, material, new_map, x, y)
                            else:
                                add_data(OBJECT_IDS, material, new_map, x, y)
                        break
                    else:
                        # No brush matched: pad both layers so the data
                        # arrays stay in sync with the map dimensions.
                        front.append(0)
                        back.append(199)
            base_name = os.path.splitext(file)[0]
            with open(output_folder + base_name + ".json", "w") as write_file:
                json.dump(new_map.map_data, write_file, indent=4)
# Run the converter only when executed as a script (not on import).
if __name__ == '__main__':
    convert()
__author__ = 'carlos.diaz'
# Autoencoder for the context data based on residual networks
import numpy as np
import matplotlib.pyplot as plt
from torch import nn
import torch
import time
import os
import nde_utils
import nde_ae
from tqdm import tqdm
import sys
# Global precision switch: when True, torch defaults to float64 tensors
# and the training/sampling paths cast their inputs with .double().
mdouble = False
if mdouble is True:
    print('[INFO] Using float64!')
    torch.set_default_tensor_type(torch.DoubleTensor)
class bayes_inversion(object):
# =========================================================================
def __init__(self, directory = 'dataset_ae_final/', device = 'cpu'):
# Configuration
self.args = nde_utils.dotdict()
self.args.kwargs = {'num_workers': 1, 'pin_memory': True} if device=="cuda" else {}
self.args.directory = directory
self.device = device
if not os.path.exists(self.args.directory): os.makedirs(self.args.directory)
# =========================================================================
    def create_database(self, batch_size=100, tauvalues=15, spectral_range=0, noise=5e-4):
        """Load the Gaussian-model data and build train/vali/test loaders.

        Reads pre-computed .npy arrays from ../gaussian_model/ and wraps
        them in nde_utils.basicLoader objects stored on self as
        train_loader / vali_loader / test_loader; the loaders add noise
        of amplitude *noise*.

        NOTE(review): `import sparsetools` and the *tauvalues* parameter
        are unused in this method — confirm before removing.
        """
        import sparsetools as sp
        print('[INFO] Using spectral range '+str(spectral_range))
        print('[INFO] Reading database')
        mdir = '../gaussian_model/'
        lines = np.load(mdir+'trainfixe_lines.npy')[:,:]
        values = np.load(mdir+'trainfixe_values.npy')[:,:]
        # Wavelength and optical-depth axes describing the profiles.
        self.waves_info = np.load(mdir+'train_waves_info.npy')
        self.waves = np.load(mdir+'train_waves.npy')
        self.lenwave = len(self.waves)
        self.ltau = np.load(mdir+'train_ltau.npy')
        self.mltau = np.load(mdir+'train_mltau.npy')
        self.lentau = len(self.mltau)
        self.spectral_range = spectral_range
        self.spectral_idx = np.load(mdir+'train_spectral_idx.npy')
        # 90/10 random train/validation split over shuffled sample indices.
        split = 0.9
        train_split = int(lines.shape[0]*split)
        wholedataset = np.arange(lines.shape[0])
        np.random.shuffle(wholedataset)
        self.args.batch_size = batch_size
        self.train_loader = nde_utils.basicLoader(values[wholedataset[:train_split],:], lines[wholedataset[:train_split],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
        self.vali_loader = nde_utils.basicLoader(values[wholedataset[train_split:],:], lines[wholedataset[train_split:],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
        print("[INFO] len(ltau):", self.lentau)
        print("[INFO] len(waves):", self.lenwave)
        print('[INFO] Datasize obsdata: ',lines.shape)
        print('[INFO] Train/valid split: ',train_split,int(lines.shape[0]*(1.0-split)))
        #vali cube:
        # Held-out test set loaded from separate files (no splitting).
        print('[INFO] Reading test database')
        mdir = '../gaussian_model/'
        lines = np.load(mdir+'test_lines.npy')
        values = np.load(mdir+'test_values.npy')
        self.test_loader = nde_utils.basicLoader(values, lines, noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
# =========================================================================
    def train_network(self, num_epochs=2000, learning_rate=1e-6, log_interval=1, continueTraining=True, name_posterior='posterior', num_blocks=5, mhidden_features=64, modeltype=None, l_size=15):
        """Train the autoencoder with Adam and validation-based early stopping.

        Builds an AE (default) or RAE model from nde_ae, optionally resumes
        from '<name_posterior>_best.pth', and trains for up to *num_epochs*.
        Every epoch it refreshes a loss plot and a reconstruction plot, and
        saves the model whenever the validation loss improves.  After
        `maxiloop` epochs without improvement the process exits via
        sys.exit().

        NOTE(review): `log_interval` and `self.args.y_size` are stored but
        never used here; with the default `continueTraining=True` the
        freshly built model is immediately replaced by saved weights.
        """
        name_posterior = name_posterior+'_sp'+str(self.spectral_range)
        self.args.y_size = self.lentau*3
        self.args.x_size = self.lenwave
        self.args.l_size = l_size  # latent dimension passed to the AE — TODO confirm
        if modeltype is None:
            self.model = nde_ae.AE(self.args.x_size, self.args.l_size, train_loader=self.train_loader, hidden_size=[128, 128, 128, 128, 128])
        elif modeltype == 'RAE':
            self.model = nde_ae.RAE(self.args.x_size, self.args.l_size, train_loader=self.train_loader, hidden_size=mhidden_features, num_blocks=num_blocks)
        else:
            print('no type')
        nde_utils.get_params(self.model)
        self.args.learning_rate = learning_rate
        self.args.num_epochs = num_epochs
        self.args.log_interval = log_interval
        self.args.name_posterior = name_posterior
        print('[INFO] name_posterior: ', name_posterior)
        if continueTraining: self.model = torch.load(self.args.directory+name_posterior+'_best.pth'); print('Loading previous weigths...')
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        train_loss_avg = []
        vali_loss_avg = []
        time0 = time.time()
        # Per-wavelength loss weights (currently all ones).
        ww = torch.ones(self.args.x_size)
        # print(ww[:])
        from tqdm import trange
        t = trange(num_epochs, desc='', leave=True)
        # Early-stopping state: best validation loss and epochs since improvement.
        self.valimin = 1e3
        self.count = 0
        self.maxiloop = 100
        for epoch in t:
            # ---- training pass ----
            self.model.train()
            avgloss = 0
            for batch_idx, (params, data) in enumerate(tqdm(self.train_loader, desc='', leave=False)):
                data = data.to(self.device)
                params = params.to(self.device)
                optimizer.zero_grad()
                if mdouble is True:
                    loss = self.model.forward(params.double(), ww.double())
                else:
                    loss = self.model.forward(params, ww)
                loss.backward()
                optimizer.step()
                avgloss += loss.item()
            avgloss /= (batch_idx + 1)
            train_loss_avg.append(avgloss)
            # ---- validation pass (no optimizer steps) ----
            self.model.eval()
            avgloss2 = 0
            for batch_idx, (params, data) in enumerate(self.vali_loader):
                data = data.to(self.device)
                params = params.to(self.device)
                if mdouble is True:
                    loss = self.model.forward(params.double(), ww.double())
                else:
                    loss = self.model.forward(params, ww)
                avgloss2 += loss.item()
            avgloss2 /= (batch_idx + 1)
            vali_loss_avg.append(avgloss2)
            # Best validation epoch; smooth the minimum over a 3-epoch window
            # (falls back to the raw value when the minimum is epoch 0).
            argminiv = np.argmin(vali_loss_avg)
            miniv = np.mean(vali_loss_avg[argminiv-1:argminiv+1+1])
            if argminiv == 0:
                miniv = vali_loss_avg[argminiv]
            # Refresh the diagnostic loss plot every epoch.
            fig = plt.figure(); plt.plot(train_loss_avg); plt.plot(vali_loss_avg)
            plt.axhline(np.mean(train_loss_avg[-10:]), color='C0', ls='--')
            plt.axhline(np.mean(train_loss_avg[-10:]), color='k', ls='--', alpha=0.5)
            # plt.axhline(np.mean(vali_loss_avg[-10:]),color='C1',ls='--')
            # plt.axhline(np.mean(vali_loss_avg[-10:]),color='k',ls='--',alpha=0.5)
            # plt.axhline(np.min(vali_loss_avg[:]),color='C1',ls='--')
            # plt.axhline(np.min(vali_loss_avg[:]),color='k',ls='--',alpha=0.5)
            plt.axvline(argminiv, color='k', ls='--', alpha=0.5)
            plt.axhline(miniv, color='C1', ls='--')
            plt.axhline(miniv, color='k', ls='--', alpha=0.5)
            plt.title('loss_final: {0:.2e} / {1:.2e}'.format( np.mean(train_loss_avg[-10:]), miniv ))
            plt.xlabel('Epochs'); plt.ylabel('Loss')
            plt.yscale('log')
            plt.savefig(self.args.directory+self.args.name_posterior+'_train_loss_avg.pdf'); plt.close(fig)
            # Reconstruction snapshot for a fixed test profile.
            self.test_plots(8160)
            t.set_postfix({'loss': '{:.2e}'.format(avgloss)})
            t.refresh()
            # Early stopping: save on improvement, abort after maxiloop stalls.
            if avgloss2 < self.valimin:
                self.valimin = np.copy(avgloss2)
                self.count = 0
                torch.save(self.model, self.args.directory+self.args.name_posterior+'_best.pth')
            else:
                self.count += 1
                if self.count > self.maxiloop:
                    print('[INFO] Done')
                    print('[INFO] name_posterior: ', name_posterior)
                    sys.exit()
# =========================================================================
def test_plots(self, testindex=0,nsamples = 1000):
import mathtools as mt
mltau = self.mltau
waves = self.waves
testvalue = self.test_loader.dataset.modelparameters[testindex,:]
testobs = self.test_loader.dataset.observations[testindex,:]
if mdouble is True:
samples_histo = self.model.sample(testvalue.astype(np.float64)).data.cpu().numpy()
else:
samples_histo = self.model.sample(testvalue).data.cpu().numpy()
fig3 = plt.figure(figsize=(8,16))
plt.subplot(411)
plt.plot(waves, testvalue,'.--',color='C1',label='Full line')
plt.plot(waves, testvalue, "k", marker='s', markersize=2, label="Used points", ls='none')
plt.plot(waves, samples_histo[0,:],'.--',color='C0', label="Prediction")
plt.xlabel(r"$\lambda - \lambda_0 [\AA]$")
plt.ylabel(r"I/I$_{C(QS)}$");
plt.legend(fontsize=14)
plt.savefig(self.args.directory+self.args.name_posterior+'_'+str(testindex)+'_im_plot_nn.pdf')
plt.close(fig3)
# =========================================================================
def test_error(self, nsamples = 10000, name_posterior = 'posterior',tauvalues = 9,spirange=[0,1],testindex=11387,gotostic = False):
import matplotlib
matplotlib.rcParams['axes.formatter.useoffset'] = False
inc = 0.8
fig3 = plt.figure(figsize=(8*inc,14*inc))
name_posterior = name_posterior+'_sp'+str(self.spectral_range)
self.model = torch.load(self.args.directory+name_posterior+'_best.pth').float()
mltau = self.mltau
waves = self.waves
ntestindex = 10000
listdiff = []
for testindex in tqdm(range(ntestindex)):
testvalue = self.train_loader.dataset.modelparameters[testindex,:]
samples_histo = self.model.sample(testvalue).data.cpu().numpy()
absdiff = np.abs(samples_histo[0,:] - testvalue)
listdiff.append(absdiff)
meandiff = np.mean(np.array(listdiff),axis=0)
maxdiff = np.max(np.array(listdiff),axis=0)
fig3 = plt.figure(figsize=(8*0.9,14*0.9))
plt.subplot(411)
plt.plot(waves,meandiff, '.-',color='C1',label=name_posterior+'_STD')
plt.plot(waves,maxdiff, '.-',color='C0',label=name_posterior+'_MAX')
plt.ylim(1e-6,1e-1)
plt.yscale('log')
plt.xlabel(r"$\lambda - \lambda_0 [\AA]$")
plt.ylabel(r"STD[I$_{input}$-I$_{output}$]");
plt.legend(loc='best')
plt.savefig(self.args.directory+name_posterior+'_im_plot_error.pdf')
plt.close(fig3)
if __name__ == "__main__":
myflow = bayes_inversion()
myflow.create_database(spectral_range=5, tauvalues = 9, noise=1e-2)
# myflow.train_network(num_epochs=3000,continueTraining=False,learning_rate = 1e-4,name_posterior= 'context_encoder_1_10_64_20',modeltype='RAE',l_size=20,num_blocks=10,mhidden_features=64)
myflow.test_error(name_posterior= 'context_encoder_1_10_64_20') | nlte/AEcontext.py | __author__ = 'carlos.diaz'
# Autoencoder for the context data based on residual networks
import numpy as np
import matplotlib.pyplot as plt
from torch import nn
import torch
import time
import os
import nde_utils
import nde_ae
from tqdm import tqdm
import sys
mdouble = False
if mdouble is True:
print('[INFO] Using float64!')
torch.set_default_tensor_type(torch.DoubleTensor)
class bayes_inversion(object):
# =========================================================================
def __init__(self, directory = 'dataset_ae_final/', device = 'cpu'):
# Configuration
self.args = nde_utils.dotdict()
self.args.kwargs = {'num_workers': 1, 'pin_memory': True} if device=="cuda" else {}
self.args.directory = directory
self.device = device
if not os.path.exists(self.args.directory): os.makedirs(self.args.directory)
# =========================================================================
def create_database(self, batch_size = 100, tauvalues = 15, spectral_range=0, noise=5e-4):
import sparsetools as sp
print('[INFO] Using spectral range '+str(spectral_range))
print('[INFO] Reading database')
mdir = '../gaussian_model/'
lines = np.load(mdir+'trainfixe_lines.npy')[:,:]
values = np.load(mdir+'trainfixe_values.npy')[:,:]
self.waves_info = np.load(mdir+'train_waves_info.npy')
self.waves = np.load(mdir+'train_waves.npy')
self.lenwave = len(self.waves)
self.ltau = np.load(mdir+'train_ltau.npy')
self.mltau = np.load(mdir+'train_mltau.npy')
self.lentau = len(self.mltau)
self.spectral_range = spectral_range
self.spectral_idx = np.load(mdir+'train_spectral_idx.npy')
split = 0.9
train_split = int(lines.shape[0]*split)
wholedataset = np.arange(lines.shape[0])
np.random.shuffle(wholedataset)
self.args.batch_size = batch_size
self.train_loader = nde_utils.basicLoader(values[wholedataset[:train_split],:], lines[wholedataset[:train_split],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
self.vali_loader = nde_utils.basicLoader(values[wholedataset[train_split:],:], lines[wholedataset[train_split:],:], noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
print("[INFO] len(ltau):", self.lentau)
print("[INFO] len(waves):", self.lenwave)
print('[INFO] Datasize obsdata: ',lines.shape)
print('[INFO] Train/valid split: ',train_split,int(lines.shape[0]*(1.0-split)))
#vali cube:
print('[INFO] Reading test database')
mdir = '../gaussian_model/'
lines = np.load(mdir+'test_lines.npy')
values = np.load(mdir+'test_values.npy')
self.test_loader = nde_utils.basicLoader(values, lines, noise=noise, batch_size=self.args.batch_size, shuffle=True, **self.args.kwargs)
# =========================================================================
def train_network(self, num_epochs = 2000, learning_rate = 1e-6, log_interval = 1, continueTraining=True, name_posterior= 'posterior',num_blocks=5,mhidden_features=64,modeltype=None, l_size=15):
name_posterior = name_posterior+'_sp'+str(self.spectral_range)
self.args.y_size = self.lentau*3
self.args.x_size = self.lenwave
self.args.l_size = l_size
if modeltype is None:
self.model = nde_ae.AE(self.args.x_size, self.args.l_size, train_loader=self.train_loader, hidden_size = [128, 128, 128, 128, 128])
elif modeltype == 'RAE':
self.model = nde_ae.RAE(self.args.x_size, self.args.l_size, train_loader=self.train_loader,hidden_size = mhidden_features,num_blocks=num_blocks)
else:
print('no type')
nde_utils.get_params(self.model)
self.args.learning_rate = learning_rate
self.args.num_epochs = num_epochs
self.args.log_interval = log_interval
self.args.name_posterior = name_posterior
print('[INFO] name_posterior: ',name_posterior)
if continueTraining: self.model = torch.load(self.args.directory+name_posterior+'_best.pth'); print('Loading previous weigths...')
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
train_loss_avg = []
vali_loss_avg = []
time0 = time.time()
# When extra weights are needed
ww = torch.ones(self.args.x_size)
# print(ww[:])
from tqdm import trange
t = trange(num_epochs, desc='', leave=True)
self.valimin = 1e3
self.count = 0
self.maxiloop = 100
for epoch in t:
self.model.train()
avgloss = 0
for batch_idx, (params, data) in enumerate(tqdm(self.train_loader, desc='', leave=False)):
data = data.to(self.device)
params = params.to(self.device)
optimizer.zero_grad()
if mdouble is True:
loss = self.model.forward(params.double(),ww.double())
else:
loss = self.model.forward(params,ww)
loss.backward()
optimizer.step()
avgloss += loss.item()
avgloss /= (batch_idx +1)
train_loss_avg.append(avgloss)
self.model.eval()
avgloss2 = 0
for batch_idx, (params, data) in enumerate(self.vali_loader):
data = data.to(self.device)
params = params.to(self.device)
if mdouble is True:
loss = self.model.forward(params.double(),ww.double())
else:
loss = self.model.forward(params,ww)
avgloss2 += loss.item()
avgloss2 /= (batch_idx +1)
vali_loss_avg.append(avgloss2)
argminiv = np.argmin(vali_loss_avg)
miniv = np.mean(vali_loss_avg[argminiv-1:argminiv+1+1])
if argminiv == 0:
miniv = vali_loss_avg[argminiv]
fig = plt.figure(); plt.plot(train_loss_avg); plt.plot(vali_loss_avg)
plt.axhline(np.mean(train_loss_avg[-10:]),color='C0',ls='--')
plt.axhline(np.mean(train_loss_avg[-10:]),color='k',ls='--',alpha=0.5)
# plt.axhline(np.mean(vali_loss_avg[-10:]),color='C1',ls='--')
# plt.axhline(np.mean(vali_loss_avg[-10:]),color='k',ls='--',alpha=0.5)
# plt.axhline(np.min(vali_loss_avg[:]),color='C1',ls='--')
# plt.axhline(np.min(vali_loss_avg[:]),color='k',ls='--',alpha=0.5)
plt.axvline(argminiv,color='k',ls='--',alpha=0.5)
plt.axhline(miniv,color='C1',ls='--')
plt.axhline(miniv,color='k',ls='--',alpha=0.5)
plt.title('loss_final: {0:.2e} / {1:.2e}'.format( np.mean(train_loss_avg[-10:]), miniv ))
plt.xlabel('Epochs'); plt.ylabel('Loss')
plt.yscale('log')
plt.savefig(self.args.directory+self.args.name_posterior+'_train_loss_avg.pdf'); plt.close(fig)
self.test_plots(8160)
t.set_postfix({'loss': '{:.2e}'.format(avgloss)})
t.refresh()
if avgloss2 < self.valimin:
self.valimin = np.copy(avgloss2)
self.count = 0
torch.save(self.model, self.args.directory+self.args.name_posterior+'_best.pth')
else:
self.count += 1
if self.count > self.maxiloop:
print('[INFO] Done')
print('[INFO] name_posterior: ',name_posterior)
sys.exit()
# =========================================================================
def test_plots(self, testindex=0,nsamples = 1000):
import mathtools as mt
mltau = self.mltau
waves = self.waves
testvalue = self.test_loader.dataset.modelparameters[testindex,:]
testobs = self.test_loader.dataset.observations[testindex,:]
if mdouble is True:
samples_histo = self.model.sample(testvalue.astype(np.float64)).data.cpu().numpy()
else:
samples_histo = self.model.sample(testvalue).data.cpu().numpy()
fig3 = plt.figure(figsize=(8,16))
plt.subplot(411)
plt.plot(waves, testvalue,'.--',color='C1',label='Full line')
plt.plot(waves, testvalue, "k", marker='s', markersize=2, label="Used points", ls='none')
plt.plot(waves, samples_histo[0,:],'.--',color='C0', label="Prediction")
plt.xlabel(r"$\lambda - \lambda_0 [\AA]$")
plt.ylabel(r"I/I$_{C(QS)}$");
plt.legend(fontsize=14)
plt.savefig(self.args.directory+self.args.name_posterior+'_'+str(testindex)+'_im_plot_nn.pdf')
plt.close(fig3)
# =========================================================================
def test_error(self, nsamples = 10000, name_posterior = 'posterior',tauvalues = 9,spirange=[0,1],testindex=11387,gotostic = False):
import matplotlib
matplotlib.rcParams['axes.formatter.useoffset'] = False
inc = 0.8
fig3 = plt.figure(figsize=(8*inc,14*inc))
name_posterior = name_posterior+'_sp'+str(self.spectral_range)
self.model = torch.load(self.args.directory+name_posterior+'_best.pth').float()
mltau = self.mltau
waves = self.waves
ntestindex = 10000
listdiff = []
for testindex in tqdm(range(ntestindex)):
testvalue = self.train_loader.dataset.modelparameters[testindex,:]
samples_histo = self.model.sample(testvalue).data.cpu().numpy()
absdiff = np.abs(samples_histo[0,:] - testvalue)
listdiff.append(absdiff)
meandiff = np.mean(np.array(listdiff),axis=0)
maxdiff = np.max(np.array(listdiff),axis=0)
fig3 = plt.figure(figsize=(8*0.9,14*0.9))
plt.subplot(411)
plt.plot(waves,meandiff, '.-',color='C1',label=name_posterior+'_STD')
plt.plot(waves,maxdiff, '.-',color='C0',label=name_posterior+'_MAX')
plt.ylim(1e-6,1e-1)
plt.yscale('log')
plt.xlabel(r"$\lambda - \lambda_0 [\AA]$")
plt.ylabel(r"STD[I$_{input}$-I$_{output}$]");
plt.legend(loc='best')
plt.savefig(self.args.directory+name_posterior+'_im_plot_error.pdf')
plt.close(fig3)
if __name__ == "__main__":
myflow = bayes_inversion()
myflow.create_database(spectral_range=5, tauvalues = 9, noise=1e-2)
# myflow.train_network(num_epochs=3000,continueTraining=False,learning_rate = 1e-4,name_posterior= 'context_encoder_1_10_64_20',modeltype='RAE',l_size=20,num_blocks=10,mhidden_features=64)
myflow.test_error(name_posterior= 'context_encoder_1_10_64_20') | 0.383526 | 0.294836 |
from oci_cli import cli_util
from oci_cli.cli_util import option
from oci_cli.aliasing import CommandGroupWithAlias
from services.dns.src.oci_cli_dns.generated import dns_cli
from oci_cli import json_skeleton_utils
import click
@dns_cli.dns_root_group.command('record', cls=CommandGroupWithAlias, help="""A DNS record.""")
@cli_util.help_option_group
def record():
pass
@record.command('rrset', cls=CommandGroupWithAlias, help=dns_cli.rr_set_group.help)
@cli_util.help_option_group
def rrset():
pass
@record.command('domain', cls=CommandGroupWithAlias, help="""A collection of DNS records for the same domain.""")
@cli_util.help_option_group
def domain():
pass
@record.command('zone', cls=CommandGroupWithAlias, help="""A collection of DNS records for the same zone.""")
@cli_util.help_option_group
def zone():
pass
# specify that compartment_id is required for create zone
@cli_util.copy_params_from_generated_command(dns_cli.create_zone, params_to_exclude=['compartment_id'])
@dns_cli.zone_group.command(name=cli_util.override('create_zone.command_name', 'create'), help="""Creates a new zone in the specified compartment. The `compartmentId` query parameter is required if the `Content-Type` header for the request is `text/dns`.""")
@option('--compartment-id', required=True, help="""The OCID of the compartment the resource belongs to.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'freeform-tags': {'module': 'dns', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'dns', 'class': 'dict(str, dict(str, object))'}, 'external-masters': {'module': 'dns', 'class': 'list[ExternalMaster]'}}, output_type={'module': 'dns', 'class': 'Zone'})
@cli_util.wrap_exceptions
def create_zone(ctx, **kwargs):
ctx.invoke(dns_cli.create_zone, **kwargs)
dns_cli.dns_root_group.commands.pop(dns_cli.rr_set_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.record_collection_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.records_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.zones_group.name)
dns_cli.zone_group.add_command(dns_cli.get_zone)
dns_cli.zone_group.add_command(dns_cli.list_zones)
# zone records
cli_util.rename_command(zone, dns_cli.get_zone_records, "get")
cli_util.rename_command(zone, dns_cli.patch_zone_records, "patch")
cli_util.rename_command(zone, dns_cli.update_zone_records, "update")
# domain records
cli_util.rename_command(domain, dns_cli.patch_domain_records, "patch")
cli_util.rename_command(domain, dns_cli.update_domain_records, "update")
cli_util.rename_command(domain, dns_cli.get_domain_records, "get")
cli_util.rename_command(domain, dns_cli.delete_domain_records, "delete")
# rrset
cli_util.rename_command(rrset, dns_cli.update_rr_set, "update")
rrset.add_command(dns_cli.get_rr_set)
rrset.add_command(dns_cli.patch_rr_set)
rrset.add_command(dns_cli.delete_rr_set) | services/dns/src/oci_cli_dns/dns_cli_extended.py |
from oci_cli import cli_util
from oci_cli.cli_util import option
from oci_cli.aliasing import CommandGroupWithAlias
from services.dns.src.oci_cli_dns.generated import dns_cli
from oci_cli import json_skeleton_utils
import click
@dns_cli.dns_root_group.command('record', cls=CommandGroupWithAlias, help="""A DNS record.""")
@cli_util.help_option_group
def record():
pass
@record.command('rrset', cls=CommandGroupWithAlias, help=dns_cli.rr_set_group.help)
@cli_util.help_option_group
def rrset():
pass
@record.command('domain', cls=CommandGroupWithAlias, help="""A collection of DNS records for the same domain.""")
@cli_util.help_option_group
def domain():
pass
@record.command('zone', cls=CommandGroupWithAlias, help="""A collection of DNS records for the same zone.""")
@cli_util.help_option_group
def zone():
pass
# specify that compartment_id is required for create zone
@cli_util.copy_params_from_generated_command(dns_cli.create_zone, params_to_exclude=['compartment_id'])
@dns_cli.zone_group.command(name=cli_util.override('create_zone.command_name', 'create'), help="""Creates a new zone in the specified compartment. The `compartmentId` query parameter is required if the `Content-Type` header for the request is `text/dns`.""")
@option('--compartment-id', required=True, help="""The OCID of the compartment the resource belongs to.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'freeform-tags': {'module': 'dns', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'dns', 'class': 'dict(str, dict(str, object))'}, 'external-masters': {'module': 'dns', 'class': 'list[ExternalMaster]'}}, output_type={'module': 'dns', 'class': 'Zone'})
@cli_util.wrap_exceptions
def create_zone(ctx, **kwargs):
ctx.invoke(dns_cli.create_zone, **kwargs)
dns_cli.dns_root_group.commands.pop(dns_cli.rr_set_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.record_collection_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.records_group.name)
dns_cli.dns_root_group.commands.pop(dns_cli.zones_group.name)
dns_cli.zone_group.add_command(dns_cli.get_zone)
dns_cli.zone_group.add_command(dns_cli.list_zones)
# zone records
cli_util.rename_command(zone, dns_cli.get_zone_records, "get")
cli_util.rename_command(zone, dns_cli.patch_zone_records, "patch")
cli_util.rename_command(zone, dns_cli.update_zone_records, "update")
# domain records
cli_util.rename_command(domain, dns_cli.patch_domain_records, "patch")
cli_util.rename_command(domain, dns_cli.update_domain_records, "update")
cli_util.rename_command(domain, dns_cli.get_domain_records, "get")
cli_util.rename_command(domain, dns_cli.delete_domain_records, "delete")
# rrset
cli_util.rename_command(rrset, dns_cli.update_rr_set, "update")
rrset.add_command(dns_cli.get_rr_set)
rrset.add_command(dns_cli.patch_rr_set)
rrset.add_command(dns_cli.delete_rr_set) | 0.549641 | 0.096238 |
from blackjack.card import Card
from blackjack.deck import Deck
from blackjack.player import Player
class _Blackjack:
def __init__(self, player: Player, dealer: Player, deck: Deck) -> None:
"""init"""
self.player = player
self.dealer = dealer
self.deck = deck
def _get_cards(self, player: Player) -> list[Card]:
"""get cards"""
return player.hands
def get_player_cards(self) -> list[Card]:
"""get player cards"""
return self._get_cards(self.player)
def get_dealer_cards(self) -> list[Card]:
"""get dealer cards"""
return self._get_cards(self.dealer)
def get_table_cards(self) -> tuple[list[Card], list[Card]]:
"""
get table cards
Returns
-------
tuple[list[Card], list[Card]]
player cards, dealer cards
"""
return self.get_player_cards(), self.get_dealer_cards()
def append_player_card(self) -> None:
"""append player card"""
self.player.append_card(self.deck.draw())
if self.player.total > 21:
raise ValueError("player bust")
def append_dealer_card(self) -> None:
"""append dealer card"""
self.dealer.append_card(self.deck.draw())
def dealer_play(self) -> None:
"""dealer play"""
while self.dealer.total < 17:
self.append_dealer_card()
if self.dealer.total > 21:
raise ValueError("dealer bust")
def judge(self) -> str:
"""
judge
Returns
-------
str
winner
"""
if self.player.total > 21:
self.dealer.win()
self.player.lose()
return self.dealer.name
elif self.dealer.total > 21:
self.player.win()
self.dealer.lose()
return self.player.name
if self.player.total > self.dealer.total:
self.player.win()
self.dealer.lose()
return self.player.name
elif self.player.total < self.dealer.total:
self.dealer.win()
self.player.lose()
return self.dealer.name
else:
self.player.draw()
self.dealer.draw()
return "draw"
def play(self) -> None:
"""play"""
try:
while (
input(
f"player hands{self._convert_prety_cards(self.get_player_cards())[0]} hit?[Y/n]: " # noqa: E501
)
== "Y"
):
self.append_player_card()
self.dealer_play()
except ValueError as e:
print(e)
winner = self.judge()
player_cards, dealer_cards = self._convert_prety_cards(*self.get_table_cards()) # type: ignore # noqa: E501
print(f"player hands: {player_cards}, dealer hands: {dealer_cards}")
print(f"winner: {winner}")
def _convert_prety_cards(self, *cards: list[Card]) -> tuple[str]:
"""
convert prety cards
Parameters
----------
cards : list[Card]
cards
Returns
-------
str
prety cards
"""
return tuple([str([str(cc) for cc in c]).replace("'", "") for c in cards])
class BlackjackCard(Card):
"""blackjack card"""
def __init__(self, card: Card) -> None:
"""init"""
super().__init__(number=card.number, suit=card.suit)
self.blackjack_value = 10 if self.number >= 10 else self.number
class BlackjackPlayer(Player):
"""blackjack player"""
def __init__(self, player: Player) -> None:
"""init"""
super().__init__(player.name, player.hands)
@property
def total(self) -> int:
"""total"""
return sum(card.blackjack_value for card in self.hands) # type: ignore
class Blackjack:
"""blackjack"""
def __init__(self, player: Player, dealer: Player, deck: Deck) -> None:
"""init"""
self.player = player
self.dealer = dealer
self.deck = deck
def play(self) -> _Blackjack:
"""play"""
self.player.append_card(self.deck.draw())
self.player.append_card(self.deck.draw())
self.dealer.append_card(self.deck.draw())
self.dealer.append_card(self.deck.draw())
return _Blackjack(self.player, self.dealer, self.deck) | blackjack/blackjack.py | from blackjack.card import Card
from blackjack.deck import Deck
from blackjack.player import Player
class _Blackjack:
def __init__(self, player: Player, dealer: Player, deck: Deck) -> None:
"""init"""
self.player = player
self.dealer = dealer
self.deck = deck
def _get_cards(self, player: Player) -> list[Card]:
"""get cards"""
return player.hands
def get_player_cards(self) -> list[Card]:
"""get player cards"""
return self._get_cards(self.player)
def get_dealer_cards(self) -> list[Card]:
"""get dealer cards"""
return self._get_cards(self.dealer)
def get_table_cards(self) -> tuple[list[Card], list[Card]]:
"""
get table cards
Returns
-------
tuple[list[Card], list[Card]]
player cards, dealer cards
"""
return self.get_player_cards(), self.get_dealer_cards()
def append_player_card(self) -> None:
"""append player card"""
self.player.append_card(self.deck.draw())
if self.player.total > 21:
raise ValueError("player bust")
def append_dealer_card(self) -> None:
"""append dealer card"""
self.dealer.append_card(self.deck.draw())
def dealer_play(self) -> None:
"""dealer play"""
while self.dealer.total < 17:
self.append_dealer_card()
if self.dealer.total > 21:
raise ValueError("dealer bust")
def judge(self) -> str:
"""
judge
Returns
-------
str
winner
"""
if self.player.total > 21:
self.dealer.win()
self.player.lose()
return self.dealer.name
elif self.dealer.total > 21:
self.player.win()
self.dealer.lose()
return self.player.name
if self.player.total > self.dealer.total:
self.player.win()
self.dealer.lose()
return self.player.name
elif self.player.total < self.dealer.total:
self.dealer.win()
self.player.lose()
return self.dealer.name
else:
self.player.draw()
self.dealer.draw()
return "draw"
def play(self) -> None:
"""play"""
try:
while (
input(
f"player hands{self._convert_prety_cards(self.get_player_cards())[0]} hit?[Y/n]: " # noqa: E501
)
== "Y"
):
self.append_player_card()
self.dealer_play()
except ValueError as e:
print(e)
winner = self.judge()
player_cards, dealer_cards = self._convert_prety_cards(*self.get_table_cards()) # type: ignore # noqa: E501
print(f"player hands: {player_cards}, dealer hands: {dealer_cards}")
print(f"winner: {winner}")
def _convert_prety_cards(self, *cards: list[Card]) -> tuple[str]:
"""
convert prety cards
Parameters
----------
cards : list[Card]
cards
Returns
-------
str
prety cards
"""
return tuple([str([str(cc) for cc in c]).replace("'", "") for c in cards])
class BlackjackCard(Card):
"""blackjack card"""
def __init__(self, card: Card) -> None:
"""init"""
super().__init__(number=card.number, suit=card.suit)
self.blackjack_value = 10 if self.number >= 10 else self.number
class BlackjackPlayer(Player):
"""blackjack player"""
def __init__(self, player: Player) -> None:
"""init"""
super().__init__(player.name, player.hands)
@property
def total(self) -> int:
"""total"""
return sum(card.blackjack_value for card in self.hands) # type: ignore
class Blackjack:
"""blackjack"""
def __init__(self, player: Player, dealer: Player, deck: Deck) -> None:
"""init"""
self.player = player
self.dealer = dealer
self.deck = deck
def play(self) -> _Blackjack:
"""play"""
self.player.append_card(self.deck.draw())
self.player.append_card(self.deck.draw())
self.dealer.append_card(self.deck.draw())
self.dealer.append_card(self.deck.draw())
return _Blackjack(self.player, self.dealer, self.deck) | 0.724481 | 0.163612 |
from __future__ import print_function, division, absolute_import
import os
import pytest
from sdss_brain import cfg_params
from sdss_brain.auth import Netrc
from sdss_brain.exceptions import BrainError
@pytest.fixture()
def netrc(monkeypatch, tmpdir):
tmpnet = tmpdir.mkdir('netrc').join('.netrc')
monkeypatch.setitem(cfg_params, 'netrc_path', str(tmpnet))
yield tmpnet
@pytest.fixture()
def goodnet(netrc):
netrc.write('')
os.chmod(str(netrc), 0o600)
yield netrc
@pytest.fixture()
def bestnet(goodnet):
goodnet.write(write('data.sdss.org'), mode='a')
goodnet.write(write('api.sdss.org'), mode='a')
yield goodnet
def write(host):
netstr = 'machine {0}\n'.format(host)
netstr += ' login test\n'
netstr += ' password test\n'
netstr += '\n'
return netstr
class TestNetrc(object):
''' test the netrc access '''
@pytest.mark.parametrize('host, msg',
[('data.sdss.org', 'api.sdss.org not found in netrc.'),
('api.sdss.org', 'data.sdss.org not found in netrc.')],
ids=['noapi', 'nodata'])
def test_only_one_host(self, goodnet, host, msg):
goodnet.write(write(host))
with pytest.warns(UserWarning, match=msg):
Netrc()
def test_valid_netrc(self, bestnet):
n = Netrc()
assert n.is_valid is True
assert n.valid_hosts == ['data.sdss.org', 'api.sdss.org']
class TestNetrcFails(object):
def test_no_netrc(self, netrc):
with pytest.raises(BrainError, match='No .netrc file found at *'):
Netrc()
def test_badpermissions(self, netrc):
netrc.write('')
with pytest.raises(BrainError, match='Your .netrc file does not have 600 permissions.'):
Netrc()
def test_badparse(self, goodnet):
goodnet.write('hello\n', mode='a')
with pytest.raises(BrainError, match='Your netrc file was not parsed correctly.'):
Netrc() | tests/auth/test_netrc.py |
from __future__ import print_function, division, absolute_import
import os
import pytest
from sdss_brain import cfg_params
from sdss_brain.auth import Netrc
from sdss_brain.exceptions import BrainError
@pytest.fixture()
def netrc(monkeypatch, tmpdir):
tmpnet = tmpdir.mkdir('netrc').join('.netrc')
monkeypatch.setitem(cfg_params, 'netrc_path', str(tmpnet))
yield tmpnet
@pytest.fixture()
def goodnet(netrc):
netrc.write('')
os.chmod(str(netrc), 0o600)
yield netrc
@pytest.fixture()
def bestnet(goodnet):
goodnet.write(write('data.sdss.org'), mode='a')
goodnet.write(write('api.sdss.org'), mode='a')
yield goodnet
def write(host):
netstr = 'machine {0}\n'.format(host)
netstr += ' login test\n'
netstr += ' password test\n'
netstr += '\n'
return netstr
class TestNetrc(object):
''' test the netrc access '''
@pytest.mark.parametrize('host, msg',
[('data.sdss.org', 'api.sdss.org not found in netrc.'),
('api.sdss.org', 'data.sdss.org not found in netrc.')],
ids=['noapi', 'nodata'])
def test_only_one_host(self, goodnet, host, msg):
goodnet.write(write(host))
with pytest.warns(UserWarning, match=msg):
Netrc()
def test_valid_netrc(self, bestnet):
n = Netrc()
assert n.is_valid is True
assert n.valid_hosts == ['data.sdss.org', 'api.sdss.org']
class TestNetrcFails(object):
def test_no_netrc(self, netrc):
with pytest.raises(BrainError, match='No .netrc file found at *'):
Netrc()
def test_badpermissions(self, netrc):
netrc.write('')
with pytest.raises(BrainError, match='Your .netrc file does not have 600 permissions.'):
Netrc()
def test_badparse(self, goodnet):
goodnet.write('hello\n', mode='a')
with pytest.raises(BrainError, match='Your netrc file was not parsed correctly.'):
Netrc() | 0.553023 | 0.205555 |
import os
import re
from .single import FileSinglePermission, _BaseVariables
class FileUserPermission(FileSinglePermission):
"""
Overview:
Single permission of the user part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FileGroupPermission(FileSinglePermission):
"""
Overview:
Single permission of the group part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FileOtherPermission(FileSinglePermission):
"""
Overview:
Single permission of the other part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FilePermission(_BaseVariables):
"""
Overview:
Full file permission class.
"""
def __init__(self, user_permission=None, group_permission=None, other_permission=None):
"""
Overview:
Constructor function.
Arguments:
- user_permission: User permission.
- group_permission: User group permission.
- other_permission: Other permission.
"""
self.__user_permission = FileUserPermission.loads(user_permission or FileUserPermission())
self.__group_permission = FileGroupPermission.loads(group_permission or FileGroupPermission())
self.__other_permission = FileOtherPermission.loads(other_permission or FileOtherPermission())
@property
def user(self):
"""
Overview:
User permission.
"""
return self.__user_permission
@user.setter
def user(self, value):
self.__user_permission = FileUserPermission.loads(value)
@property
def group(self):
"""
Overview:
User group permission.
"""
return self.__group_permission
@group.setter
def group(self, value):
self.__group_permission = FileGroupPermission.loads(value)
@property
def other(self):
"""
Overview:
Other permission.
"""
return self.__other_permission
@other.setter
def other(self, value):
self.__other_permission = FileOtherPermission.loads(value)
@property
def sign(self):
"""
Overview:
Sign format of this permission.
Such as ``rwxrwxrwx``.
"""
return "%s%s%s" % (
self.__user_permission.sign,
self.__group_permission.sign,
self.__other_permission.sign,
)
@sign.setter
def sign(self, value):
if isinstance(value, str):
if re.fullmatch(self._FULL_SIGN, value):
self.__user_permission.sign = value[0:3]
self.__group_permission.sign = value[3:6]
self.__other_permission.sign = value[6:9]
else:
raise ValueError('Invalid single sign - {actual}.'.format(actual=repr(value)))
else:
raise TypeError('Str expected but {actual} found.'.format(actual=repr(type(value))))
def __str__(self):
"""
Overview:
String format of this permission.
The same as ``sign``.
"""
return self.sign
@property
def value(self):
"""
Overview:
Int value of current permission.
"""
return sum([
self.__user_permission.value * 64,
self.__group_permission.value * 8,
self.__other_permission.value * 1,
])
@value.setter
def value(self, val):
if isinstance(val, str):
if not re.fullmatch(self._FULL_DIGIT, val):
raise ValueError('3-length digit expected but {actual} found.'.format(actual=repr(val)))
val = int(val, 8)
if isinstance(val, int):
if val >= self._FULL_WEIGHT:
raise ValueError('Value from 000 to 777 expected but {actual} found.'.format(actual=repr(oct(val)[2:])))
else:
raise TypeError('Integer or integer-like string expected but {actual} found.'.format(actual=repr(val)))
self.__user_permission.value = int(val / 64) & 7
self.__group_permission.value = int(val / 8) & 7
self.__other_permission.value = int(val / 1) & 7
def __int__(self):
"""
Overview:
Int format of this permission.
The same as ``value``.
"""
return self.value
@property
def oct_value(self):
"""
Overview:
Octal tnt value of current permission.
Such as ``777``.
"""
_value = oct(self.value)[2:]
_value = "0" * (3 - len(_value)) + _value
return _value
@oct_value.setter
def oct_value(self, value):
# noinspection PyAttributeOutsideInit
self.value = int(str(value), 8)
def __tuple(self):
return self.__user_permission, self.__group_permission, self.__other_permission
def __eq__(self, other):
"""
Overview:
Get equality of full permission.
"""
if other is self:
return True
elif isinstance(other, self.__class__):
return self.__tuple() == other.__tuple()
else:
return False
def __hash__(self):
"""
Overview:
Get hash of full permission.
"""
return hash(self.__tuple())
def __repr__(self):
"""
Overview:
String representation format of this permission.
"""
return '<%s permission: %s>' % (
self.__class__.__name__,
self.sign
)
@classmethod
def load_by_value(cls, value):
"""
Overview:
Load permission by int value.
Arguments:
- value: Int value of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.value = value
return _instance
@classmethod
def load_by_sign(cls, sign):
"""
Overview:
Load permission by string sign.
Arguments:
- value: String sign of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.sign = sign
return _instance
@classmethod
def load_by_oct_value(cls, oct_value):
"""
Overview:
Load permission by octal value.
Arguments:
- value: Octal value of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.oct_value = oct_value
return _instance
@classmethod
def loads(cls, value):
"""
Overview:
Load permission by any types of value.
Arguments:
- value: Any types of value of permission.
Returns:
- permission: Loaded permission object.
"""
if isinstance(value, cls):
return value
elif isinstance(value, int):
return cls.load_by_value(value)
elif isinstance(value, str):
if re.fullmatch(r"\d+", value):
return cls.load_by_oct_value(value)
else:
return cls.load_by_sign(value)
else:
raise TypeError('Int or str expected but {actual} found.'.format(actual=repr(type(value))))
@classmethod
def load_from_file(cls, filename):
"""
Overview:
Get file's permission.
Arguments:
- filename: Name of the file.
Returns:
- permission: Permission object.
"""
return cls.load_by_value(os.stat(filename).st_mode & cls._FULL_MASK)
def __or__(self, other):
"""
Overview:
Merge permissions.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission | _other.__user_permission,
group_permission=self.__group_permission | _other.__group_permission,
other_permission=self.__other_permission | _other.__other_permission,
)
def __ror__(self, other):
"""
Overview:
Merge permissions, right version.
"""
return self | other
def __ior__(self, other):
"""
Overview:
Merge permissions, self version.
"""
_other = self.loads(other)
self.__user_permission |= _other.__user_permission
self.__group_permission |= _other.__group_permission
self.__other_permission |= _other.__other_permission
return self
def __add__(self, other):
"""
Overview:
Merge permissions, the same as ``|``.
"""
return self | other
def __radd__(self, other):
"""
Overview:
Merge permissions, right version.
"""
return self + other
def __iadd__(self, other):
"""
Overview:
Merge permissions, self version.
"""
self |= other
return self
def __and__(self, other):
"""
Overview:
Permission intersection.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission & _other.__user_permission,
group_permission=self.__group_permission & _other.__group_permission,
other_permission=self.__other_permission & _other.__other_permission,
)
def __rand__(self, other):
"""
Overview:
Permission intersection, right version.
"""
return self & other
def __iand__(self, other):
"""
Overview:
Permission intersection, self version.
"""
_other = self.loads(other)
self.__user_permission &= _other.__user_permission
self.__group_permission &= _other.__group_permission
self.__other_permission &= _other.__other_permission
return self
def __sub__(self, other):
"""
Overview:
Permission subtract.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission - _other.__user_permission,
group_permission=self.__group_permission - _other.__group_permission,
other_permission=self.__other_permission - _other.__other_permission,
)
def __rsub__(self, other):
"""
Overview:
Permission subtract, right version.
"""
return self.loads(other) - self
def __isub__(self, other):
"""
Overview:
Permission subtract, self version.
"""
_other = self.loads(other)
self.__user_permission -= _other.__user_permission
self.__group_permission -= _other.__group_permission
self.__other_permission -= _other.__other_permission
return self | pysyslimit/models/permission/full.py | import os
import re
from .single import FileSinglePermission, _BaseVariables
class FileUserPermission(FileSinglePermission):
"""
Overview:
Single permission of the user part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FileGroupPermission(FileSinglePermission):
"""
Overview:
Single permission of the group part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FileOtherPermission(FileSinglePermission):
"""
Overview:
Single permission of the other part of a file.
Inherited from :class:`pysyslimit.models.permission.single.FileSinglePermission`.
With read(r), write(w) and execute(x).
"""
pass
class FilePermission(_BaseVariables):
"""
Overview:
Full file permission class.
"""
def __init__(self, user_permission=None, group_permission=None, other_permission=None):
"""
Overview:
Constructor function.
Arguments:
- user_permission: User permission.
- group_permission: User group permission.
- other_permission: Other permission.
"""
self.__user_permission = FileUserPermission.loads(user_permission or FileUserPermission())
self.__group_permission = FileGroupPermission.loads(group_permission or FileGroupPermission())
self.__other_permission = FileOtherPermission.loads(other_permission or FileOtherPermission())
@property
def user(self):
"""
Overview:
User permission.
"""
return self.__user_permission
@user.setter
def user(self, value):
self.__user_permission = FileUserPermission.loads(value)
@property
def group(self):
"""
Overview:
User group permission.
"""
return self.__group_permission
@group.setter
def group(self, value):
self.__group_permission = FileGroupPermission.loads(value)
@property
def other(self):
"""
Overview:
Other permission.
"""
return self.__other_permission
@other.setter
def other(self, value):
self.__other_permission = FileOtherPermission.loads(value)
@property
def sign(self):
"""
Overview:
Sign format of this permission.
Such as ``rwxrwxrwx``.
"""
return "%s%s%s" % (
self.__user_permission.sign,
self.__group_permission.sign,
self.__other_permission.sign,
)
@sign.setter
def sign(self, value):
if isinstance(value, str):
if re.fullmatch(self._FULL_SIGN, value):
self.__user_permission.sign = value[0:3]
self.__group_permission.sign = value[3:6]
self.__other_permission.sign = value[6:9]
else:
raise ValueError('Invalid single sign - {actual}.'.format(actual=repr(value)))
else:
raise TypeError('Str expected but {actual} found.'.format(actual=repr(type(value))))
def __str__(self):
"""
Overview:
String format of this permission.
The same as ``sign``.
"""
return self.sign
@property
def value(self):
"""
Overview:
Int value of current permission.
"""
return sum([
self.__user_permission.value * 64,
self.__group_permission.value * 8,
self.__other_permission.value * 1,
])
@value.setter
def value(self, val):
if isinstance(val, str):
if not re.fullmatch(self._FULL_DIGIT, val):
raise ValueError('3-length digit expected but {actual} found.'.format(actual=repr(val)))
val = int(val, 8)
if isinstance(val, int):
if val >= self._FULL_WEIGHT:
raise ValueError('Value from 000 to 777 expected but {actual} found.'.format(actual=repr(oct(val)[2:])))
else:
raise TypeError('Integer or integer-like string expected but {actual} found.'.format(actual=repr(val)))
self.__user_permission.value = int(val / 64) & 7
self.__group_permission.value = int(val / 8) & 7
self.__other_permission.value = int(val / 1) & 7
def __int__(self):
"""
Overview:
Int format of this permission.
The same as ``value``.
"""
return self.value
@property
def oct_value(self):
"""
Overview:
Octal tnt value of current permission.
Such as ``777``.
"""
_value = oct(self.value)[2:]
_value = "0" * (3 - len(_value)) + _value
return _value
@oct_value.setter
def oct_value(self, value):
# noinspection PyAttributeOutsideInit
self.value = int(str(value), 8)
def __tuple(self):
return self.__user_permission, self.__group_permission, self.__other_permission
def __eq__(self, other):
"""
Overview:
Get equality of full permission.
"""
if other is self:
return True
elif isinstance(other, self.__class__):
return self.__tuple() == other.__tuple()
else:
return False
def __hash__(self):
"""
Overview:
Get hash of full permission.
"""
return hash(self.__tuple())
def __repr__(self):
"""
Overview:
String representation format of this permission.
"""
return '<%s permission: %s>' % (
self.__class__.__name__,
self.sign
)
@classmethod
def load_by_value(cls, value):
"""
Overview:
Load permission by int value.
Arguments:
- value: Int value of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.value = value
return _instance
@classmethod
def load_by_sign(cls, sign):
"""
Overview:
Load permission by string sign.
Arguments:
- value: String sign of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.sign = sign
return _instance
@classmethod
def load_by_oct_value(cls, oct_value):
"""
Overview:
Load permission by octal value.
Arguments:
- value: Octal value of permission.
Returns:
- permission: Loaded permission object.
"""
_instance = cls()
_instance.oct_value = oct_value
return _instance
@classmethod
def loads(cls, value):
"""
Overview:
Load permission by any types of value.
Arguments:
- value: Any types of value of permission.
Returns:
- permission: Loaded permission object.
"""
if isinstance(value, cls):
return value
elif isinstance(value, int):
return cls.load_by_value(value)
elif isinstance(value, str):
if re.fullmatch(r"\d+", value):
return cls.load_by_oct_value(value)
else:
return cls.load_by_sign(value)
else:
raise TypeError('Int or str expected but {actual} found.'.format(actual=repr(type(value))))
@classmethod
def load_from_file(cls, filename):
"""
Overview:
Get file's permission.
Arguments:
- filename: Name of the file.
Returns:
- permission: Permission object.
"""
return cls.load_by_value(os.stat(filename).st_mode & cls._FULL_MASK)
def __or__(self, other):
"""
Overview:
Merge permissions.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission | _other.__user_permission,
group_permission=self.__group_permission | _other.__group_permission,
other_permission=self.__other_permission | _other.__other_permission,
)
def __ror__(self, other):
"""
Overview:
Merge permissions, right version.
"""
return self | other
def __ior__(self, other):
"""
Overview:
Merge permissions, self version.
"""
_other = self.loads(other)
self.__user_permission |= _other.__user_permission
self.__group_permission |= _other.__group_permission
self.__other_permission |= _other.__other_permission
return self
def __add__(self, other):
"""
Overview:
Merge permissions, the same as ``|``.
"""
return self | other
def __radd__(self, other):
"""
Overview:
Merge permissions, right version.
"""
return self + other
def __iadd__(self, other):
"""
Overview:
Merge permissions, self version.
"""
self |= other
return self
def __and__(self, other):
"""
Overview:
Permission intersection.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission & _other.__user_permission,
group_permission=self.__group_permission & _other.__group_permission,
other_permission=self.__other_permission & _other.__other_permission,
)
def __rand__(self, other):
"""
Overview:
Permission intersection, right version.
"""
return self & other
def __iand__(self, other):
"""
Overview:
Permission intersection, self version.
"""
_other = self.loads(other)
self.__user_permission &= _other.__user_permission
self.__group_permission &= _other.__group_permission
self.__other_permission &= _other.__other_permission
return self
def __sub__(self, other):
"""
Overview:
Permission subtract.
"""
_other = self.loads(other)
return self.__class__(
user_permission=self.__user_permission - _other.__user_permission,
group_permission=self.__group_permission - _other.__group_permission,
other_permission=self.__other_permission - _other.__other_permission,
)
def __rsub__(self, other):
"""
Overview:
Permission subtract, right version.
"""
return self.loads(other) - self
def __isub__(self, other):
"""
Overview:
Permission subtract, self version.
"""
_other = self.loads(other)
self.__user_permission -= _other.__user_permission
self.__group_permission -= _other.__group_permission
self.__other_permission -= _other.__other_permission
return self | 0.734215 | 0.119229 |
try:
from os import makedirs
from shutil import copyfile
from os.path import join, exists
except ImportError as err:
exit(err)
if __name__ == "__main__":
# The path to the directory where the original
# dataset was uncompressed
original_dataset_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs-vs-cats/train"
# The directory where we will
# store our smaller dataset
base_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs_vs_cats"
makedirs(base_dir, exist_ok=True)
# Directories for our training, validation
# and test splits
# Train
train_dir = join(base_dir, "train")
makedirs(train_dir, exist_ok=True)
# Validation
validation_dir = join(base_dir, "validation")
makedirs(validation_dir, exist_ok=True)
# Test
test_dir = join(base_dir, "test")
makedirs(test_dir, exist_ok=True)
# TRAINING
# Directory with our training cat pictures
train_cats_dir = join(train_dir, 'cats')
makedirs(train_cats_dir, exist_ok=True)
# Directory with our training dog pictures
train_dogs_dir = join(train_dir, 'dogs')
makedirs(train_dogs_dir, exist_ok=True)
# VALIDATION
# Directory with our validation cat pictures
validation_cats_dir = join(validation_dir, 'cats')
makedirs(validation_cats_dir, exist_ok=True)
# Directory with our validation dog pictures
validation_dogs_dir = join(validation_dir, 'dogs')
makedirs(validation_dogs_dir, exist_ok=True)
# TEST
# Directory with our validation cat pictures
test_cats_dir = join(test_dir, 'cats')
makedirs(test_cats_dir, exist_ok=True)
# Directory with our validation dog pictures
test_dogs_dir = join(test_dir, 'dogs')
makedirs(test_dogs_dir, exist_ok=True)
def copyFiles(filename, dir, start, stop):
global original_dataset_dir
fnames = [filename.format(i) for i in range(start, stop)]
for fname in fnames:
src = join(original_dataset_dir, fname)
dst = join(dir, fname)
if not exists(dst):
copyfile(src, dst)
# CATS
# Copy first 1000 cat images to train_cats_dir
copyFiles('cat.{}.jpg', train_cats_dir, 0, 1000)
# Copy next 500 cat images to validation_cats_dir
copyFiles('cat.{}.jpg', validation_cats_dir, 1000, 1500)
# Copy next 500 cat images to test_cats_dir
copyFiles('cat.{}.jpg', test_cats_dir, 1500, 2000)
# DOGS
# Copy first 1000 cat images to train_dogs_dir
copyFiles('dog.{}.jpg', train_dogs_dir, 0, 1000)
# Copy next 500 cat images to validation_dogs_dir
copyFiles('dog.{}.jpg', validation_dogs_dir, 1000, 1500)
# Copy next 500 cat images to test_dogs_dir
copyFiles('dog.{}.jpg', test_dogs_dir, 1500, 2000) | src/prepare_datasets/animals_data_preparation.py | try:
from os import makedirs
from shutil import copyfile
from os.path import join, exists
except ImportError as err:
exit(err)
if __name__ == "__main__":
# The path to the directory where the original
# dataset was uncompressed
original_dataset_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs-vs-cats/train"
# The directory where we will
# store our smaller dataset
base_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs_vs_cats"
makedirs(base_dir, exist_ok=True)
# Directories for our training, validation
# and test splits
# Train
train_dir = join(base_dir, "train")
makedirs(train_dir, exist_ok=True)
# Validation
validation_dir = join(base_dir, "validation")
makedirs(validation_dir, exist_ok=True)
# Test
test_dir = join(base_dir, "test")
makedirs(test_dir, exist_ok=True)
# TRAINING
# Directory with our training cat pictures
train_cats_dir = join(train_dir, 'cats')
makedirs(train_cats_dir, exist_ok=True)
# Directory with our training dog pictures
train_dogs_dir = join(train_dir, 'dogs')
makedirs(train_dogs_dir, exist_ok=True)
# VALIDATION
# Directory with our validation cat pictures
validation_cats_dir = join(validation_dir, 'cats')
makedirs(validation_cats_dir, exist_ok=True)
# Directory with our validation dog pictures
validation_dogs_dir = join(validation_dir, 'dogs')
makedirs(validation_dogs_dir, exist_ok=True)
# TEST
# Directory with our validation cat pictures
test_cats_dir = join(test_dir, 'cats')
makedirs(test_cats_dir, exist_ok=True)
# Directory with our validation dog pictures
test_dogs_dir = join(test_dir, 'dogs')
makedirs(test_dogs_dir, exist_ok=True)
def copyFiles(filename, dir, start, stop):
global original_dataset_dir
fnames = [filename.format(i) for i in range(start, stop)]
for fname in fnames:
src = join(original_dataset_dir, fname)
dst = join(dir, fname)
if not exists(dst):
copyfile(src, dst)
# CATS
# Copy first 1000 cat images to train_cats_dir
copyFiles('cat.{}.jpg', train_cats_dir, 0, 1000)
# Copy next 500 cat images to validation_cats_dir
copyFiles('cat.{}.jpg', validation_cats_dir, 1000, 1500)
# Copy next 500 cat images to test_cats_dir
copyFiles('cat.{}.jpg', test_cats_dir, 1500, 2000)
# DOGS
# Copy first 1000 cat images to train_dogs_dir
copyFiles('dog.{}.jpg', train_dogs_dir, 0, 1000)
# Copy next 500 cat images to validation_dogs_dir
copyFiles('dog.{}.jpg', validation_dogs_dir, 1000, 1500)
# Copy next 500 cat images to test_dogs_dir
copyFiles('dog.{}.jpg', test_dogs_dir, 1500, 2000) | 0.31237 | 0.319519 |
import enum
import os
import sys
from typing import Optional
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
import deserialize
# pylint: enable=wrong-import-position
class SomeStringEnum(enum.Enum):
"""Enum example."""
one = "One"
two = "Two"
three = "Three"
class SomeIntEnum(enum.Enum):
"""Enum example."""
one = 1
two = 2
three = 3
class SomeClass:
"""Simple enum test class."""
my_value: int
my_enum: SomeStringEnum
my_optional_enum: Optional[SomeIntEnum]
class EnumTestSuite(unittest.TestCase):
"""Deserialization of enum test cases."""
def test_enums_simple(self):
"""Test that items with an enum property deserializes."""
valid_test_cases = [
{"my_value": 1, "my_enum": "One", "my_optional_enum": 1},
{"my_value": 2, "my_enum": "Two", "my_optional_enum": 2},
{"my_value": 3, "my_enum": "Three", "my_optional_enum": None},
]
invalid_test_cases = [
{"my_value": 1, "my_enum": None, "my_optional_enum": 1},
{"my_value": 2, "my_enum": "two", "my_optional_enum": None},
{"my_value": 3, "my_enum": 3, "my_optional_enum": "Three"},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(SomeClass, test_case)
self.assertEqual(test_case["my_value"], instance.my_value)
if test_case["my_enum"] is None:
self.assertIsNone(instance.my_enum)
else:
self.assertEqual(test_case["my_enum"], instance.my_enum.value)
if test_case["my_optional_enum"] is None:
self.assertIsNone(instance.my_optional_enum)
else:
self.assertEqual(
test_case["my_optional_enum"], instance.my_optional_enum.value
)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(SomeClass, test_case) | tests/test_enums.py |
import enum
import os
import sys
from typing import Optional
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
import deserialize
# pylint: enable=wrong-import-position
class SomeStringEnum(enum.Enum):
"""Enum example."""
one = "One"
two = "Two"
three = "Three"
class SomeIntEnum(enum.Enum):
"""Enum example."""
one = 1
two = 2
three = 3
class SomeClass:
"""Simple enum test class."""
my_value: int
my_enum: SomeStringEnum
my_optional_enum: Optional[SomeIntEnum]
class EnumTestSuite(unittest.TestCase):
"""Deserialization of enum test cases."""
def test_enums_simple(self):
"""Test that items with an enum property deserializes."""
valid_test_cases = [
{"my_value": 1, "my_enum": "One", "my_optional_enum": 1},
{"my_value": 2, "my_enum": "Two", "my_optional_enum": 2},
{"my_value": 3, "my_enum": "Three", "my_optional_enum": None},
]
invalid_test_cases = [
{"my_value": 1, "my_enum": None, "my_optional_enum": 1},
{"my_value": 2, "my_enum": "two", "my_optional_enum": None},
{"my_value": 3, "my_enum": 3, "my_optional_enum": "Three"},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(SomeClass, test_case)
self.assertEqual(test_case["my_value"], instance.my_value)
if test_case["my_enum"] is None:
self.assertIsNone(instance.my_enum)
else:
self.assertEqual(test_case["my_enum"], instance.my_enum.value)
if test_case["my_optional_enum"] is None:
self.assertIsNone(instance.my_optional_enum)
else:
self.assertEqual(
test_case["my_optional_enum"], instance.my_optional_enum.value
)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(SomeClass, test_case) | 0.542863 | 0.245741 |
import numpy as np
class CameraIntr():
def __init__(self, u0, v0, fx, fy, sk=0, dtype=np.float32):
camera_xyz = np.array([
[fx, sk, u0],
[0, fy, v0],
[0, 0, 1],
], dtype=dtype).transpose()
pull_back_xyz = np.array([
[1 / fx, 0, -u0 / fx],
[0, 1 / fy, -v0 / fy],
[0, 0, 1],
], dtype=dtype).transpose()
# convert xyz -> zyx
P = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
], dtype=dtype).transpose()
self.camera_xyz = camera_xyz
self.pull_back_xyz = pull_back_xyz
self.P = P
self.camera_zyx = P @ camera_xyz @ P
self.pull_back_zyx = P @ pull_back_xyz @ P
self.dtype = dtype
def xyz2uv(self, xyz, return_z=False):
z = xyz[:, 2:]
uv_ = xyz / z @ self.camera_xyz
uv = uv_[:, :2]
if return_z:
return uv, z
return uv
def zyx2vu(self, zyx, return_z=False):
z = zyx[:, :1]
zvu = zyx / z @ self.camera_zyx
vu = zvu[:, 1:]
if return_z:
return vu, z
return vu
def uv2xyz(self, uv, z):
nk, *_ = uv.shape
hom_uv = np.concatenate([uv, np.ones((nk, 1), dtype=self.dtype)], axis=1)
xy_ = hom_uv @ self.pull_back_xyz
xyz = z * xy_
return xyz
def vu2zyx(self, vu, z):
nk, *_ = vu.shape
hom_vu = np.concatenate([np.ones((nk, 1), dtype=self.dtype), vu], axis=1)
_yx = hom_vu @ self.pull_back_zyx
zyx = z * _yx
return zyx
def translate_camera(self, y_offset, x_offset):
translate_xyz = np.array([
[1, 0, x_offset],
[0, 1, y_offset],
[0, 0, 1],
], dtype=self.dtype).transpose()
translated_xyz = self.camera_xyz @ translate_xyz
translated_xyz = translated_xyz.transpose()
u0 = translated_xyz[0, 2]
v0 = translated_xyz[1, 2]
fx = translated_xyz[0, 0]
fy = translated_xyz[1, 1]
sk = translated_xyz[0, 1]
return CameraIntr(u0=u0, v0=v0, fx=fx, fy=fy, sk=sk, dtype=self.dtype)
def scale_camera(self, y_scale, x_scale):
scale_xyz = np.array([
[x_scale, 0, 0],
[0, y_scale, 0],
[0, 0, 1],
], dtype=self.dtype).transpose()
scaled_xyz = self.camera_xyz @ scale_xyz
scaled_xyz = scaled_xyz.transpose()
u0 = scaled_xyz[0, 2]
v0 = scaled_xyz[1, 2]
fx = scaled_xyz[0, 0]
fy = scaled_xyz[1, 1]
sk = scaled_xyz[0, 1]
return CameraIntr(u0=u0, v0=v0, fx=fx, fy=fy, sk=sk, dtype=self.dtype)
class CameraExtr(object):
def __init__(self, r, t, dtype=np.float32):
_tr_concat = np.concatenate([r, t.reshape(3, 1)], axis=1)
cam_extr_xyz = np.concatenate(
[_tr_concat, np.zeros((1, 4))], axis=0).transpose()
# xyzw->zyxw and vice versa
P = np.array([
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
], dtype=dtype).transpose()
cam_extr_zyx = P @ cam_extr_xyz @ P
self.cam_extr_xyz = cam_extr_xyz
self.cam_extr_zyx = cam_extr_zyx
def world_xyz2cam_xyz(self, world_xyz):
nk, *_ = world_xyz.shape
hom_world_xyz = np.concatenate([world_xyz, np.ones((nk, 1))], axis=1)
hom_cam_xyz = hom_world_xyz @ self.cam_extr_xyz
# xyzw -> xyz
cam_xyz = hom_cam_xyz[:, :3]
return cam_xyz
def world_zyx2cam_zyx(self, world_zyx):
nk, *_ = world_zyx.shape
hom_world_zyx = np.concatenate([np.ones((nk, 1)), world_zyx], axis=1)
hom_cam_zyx = hom_world_zyx @ self.cam_extr_zyx
# wzyx -> zyx
cam_zyx = hom_cam_zyx[:, 1:]
return cam_zyx | src/detector/graphics/camera.py | import numpy as np
class CameraIntr():
def __init__(self, u0, v0, fx, fy, sk=0, dtype=np.float32):
camera_xyz = np.array([
[fx, sk, u0],
[0, fy, v0],
[0, 0, 1],
], dtype=dtype).transpose()
pull_back_xyz = np.array([
[1 / fx, 0, -u0 / fx],
[0, 1 / fy, -v0 / fy],
[0, 0, 1],
], dtype=dtype).transpose()
# convert xyz -> zyx
P = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
], dtype=dtype).transpose()
self.camera_xyz = camera_xyz
self.pull_back_xyz = pull_back_xyz
self.P = P
self.camera_zyx = P @ camera_xyz @ P
self.pull_back_zyx = P @ pull_back_xyz @ P
self.dtype = dtype
def xyz2uv(self, xyz, return_z=False):
z = xyz[:, 2:]
uv_ = xyz / z @ self.camera_xyz
uv = uv_[:, :2]
if return_z:
return uv, z
return uv
def zyx2vu(self, zyx, return_z=False):
z = zyx[:, :1]
zvu = zyx / z @ self.camera_zyx
vu = zvu[:, 1:]
if return_z:
return vu, z
return vu
def uv2xyz(self, uv, z):
nk, *_ = uv.shape
hom_uv = np.concatenate([uv, np.ones((nk, 1), dtype=self.dtype)], axis=1)
xy_ = hom_uv @ self.pull_back_xyz
xyz = z * xy_
return xyz
def vu2zyx(self, vu, z):
nk, *_ = vu.shape
hom_vu = np.concatenate([np.ones((nk, 1), dtype=self.dtype), vu], axis=1)
_yx = hom_vu @ self.pull_back_zyx
zyx = z * _yx
return zyx
def translate_camera(self, y_offset, x_offset):
translate_xyz = np.array([
[1, 0, x_offset],
[0, 1, y_offset],
[0, 0, 1],
], dtype=self.dtype).transpose()
translated_xyz = self.camera_xyz @ translate_xyz
translated_xyz = translated_xyz.transpose()
u0 = translated_xyz[0, 2]
v0 = translated_xyz[1, 2]
fx = translated_xyz[0, 0]
fy = translated_xyz[1, 1]
sk = translated_xyz[0, 1]
return CameraIntr(u0=u0, v0=v0, fx=fx, fy=fy, sk=sk, dtype=self.dtype)
def scale_camera(self, y_scale, x_scale):
scale_xyz = np.array([
[x_scale, 0, 0],
[0, y_scale, 0],
[0, 0, 1],
], dtype=self.dtype).transpose()
scaled_xyz = self.camera_xyz @ scale_xyz
scaled_xyz = scaled_xyz.transpose()
u0 = scaled_xyz[0, 2]
v0 = scaled_xyz[1, 2]
fx = scaled_xyz[0, 0]
fy = scaled_xyz[1, 1]
sk = scaled_xyz[0, 1]
return CameraIntr(u0=u0, v0=v0, fx=fx, fy=fy, sk=sk, dtype=self.dtype)
class CameraExtr(object):
def __init__(self, r, t, dtype=np.float32):
_tr_concat = np.concatenate([r, t.reshape(3, 1)], axis=1)
cam_extr_xyz = np.concatenate(
[_tr_concat, np.zeros((1, 4))], axis=0).transpose()
# xyzw->zyxw and vice versa
P = np.array([
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
], dtype=dtype).transpose()
cam_extr_zyx = P @ cam_extr_xyz @ P
self.cam_extr_xyz = cam_extr_xyz
self.cam_extr_zyx = cam_extr_zyx
def world_xyz2cam_xyz(self, world_xyz):
nk, *_ = world_xyz.shape
hom_world_xyz = np.concatenate([world_xyz, np.ones((nk, 1))], axis=1)
hom_cam_xyz = hom_world_xyz @ self.cam_extr_xyz
# xyzw -> xyz
cam_xyz = hom_cam_xyz[:, :3]
return cam_xyz
def world_zyx2cam_zyx(self, world_zyx):
nk, *_ = world_zyx.shape
hom_world_zyx = np.concatenate([np.ones((nk, 1)), world_zyx], axis=1)
hom_cam_zyx = hom_world_zyx @ self.cam_extr_zyx
# wzyx -> zyx
cam_zyx = hom_cam_zyx[:, 1:]
return cam_zyx | 0.770206 | 0.344581 |