# text stringlengths 38 1.54M |
# |---|
# Species 13197: hydroperoxy carbon-centered biradical [CH2]C(C[C]=O)OO.
# Triplet (two radical sites: terminal CH2 and acyl [C]=O); statmech modes
# (harmonic oscillator + 5 hindered rotors) and group-additivity thermo.
species(
label = '[CH2]C(C[C]=O)OO(13197)',
structure = SMILES('[CH2]C(C[C]=O)OO'),
E0 = (45.8543,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1855,455,950,3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,361.368],'cm^-1')),
HinderedRotor(inertia=(0.133558,'amu*angstrom^2'), symmetry=1, barrier=(12.3654,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0823752,'amu*angstrom^2'), symmetry=1, barrier=(7.6301,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.082343,'amu*angstrom^2'), symmetry=1, barrier=(7.62934,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.082515,'amu*angstrom^2'), symmetry=1, barrier=(7.64618,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.49629,'amu*angstrom^2'), symmetry=1, barrier=(45.9566,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
# Two-range NASA fit from group additivity (see comment string for groups/radical corrections).
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.878002,0.0752979,-9.73227e-05,6.4342e-08,-1.4376e-11,5621.28,28.8099], Tmin=(100,'K'), Tmax=(638.168,'K')), NASAPolynomial(coeffs=[9.84676,0.0294481,-1.39184e-05,2.66582e-09,-1.8561e-13,4265.48,-12.0847], Tmin=(638.168,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(45.8543,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(286.849,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCCJ=O) + radical(CJCOOH)"""),
)
# Species 64: vinyl hydroperoxide CH2=CH-OOH (closed shell, singlet).
# Thermo taken from a library (Klippenstein_Glarborg2016), transport from NOx2018.
species(
label = 'CH2CHOOH(64)',
structure = SMILES('C=COO'),
E0 = (-53.0705,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,3615,1310,387.5,850,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.754187,'amu*angstrom^2'), symmetry=1, barrier=(17.3402,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.754176,'amu*angstrom^2'), symmetry=1, barrier=(17.34,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (60.052,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3284.22,'J/mol'), sigma=(4.037,'angstroms'), dipoleMoment=(1.3,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""NOx2018"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.79821,0.0274377,-2.03468e-05,7.62127e-09,-1.12671e-12,-6315.53,9.11829], Tmin=(298,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.82519,0.0274167,-2.04456e-05,7.77399e-09,-1.18661e-12,-6325.27,8.96641], Tmin=(1000,'K'), Tmax=(2000,'K'))], Tmin=(298,'K'), Tmax=(2000,'K'), E0=(-53.0705,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(174.604,'J/(mol*K)'), label="""CH2CHOOH""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# Species 28: ketene CH2=C=O (closed shell). Library thermo
# (Klippenstein_Glarborg2016); GRI-Mech transport parameters.
species(
label = 'CH2CO(28)',
structure = SMILES('C=C=O'),
E0 = (-60.8183,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2120,512.5,787.5],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (42.0367,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.12,'J/mol'), sigma=(3.97,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.13241,0.0181319,-1.74093e-05,9.35336e-09,-2.01725e-12,-7148.09,13.3808], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[5.75871,0.00635124,-2.25955e-06,3.62322e-10,-2.15856e-14,-8085.33,-4.9649], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-60.8183,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""CH2CO""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# Species 3: atomic hydrogen. Library thermo (BurkeH2O2), GRI-Mech transport.
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
# NOTE(review): spinMultiplicity = 1 for the H atom looks inconsistent — H is a
# doublet (2S+1 = 2). Verify whether this field is actually used downstream or
# is a generator placeholder for species without explicit modes.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 16146: C=C(C[C]=O)OO — acyl radical (doublet, one radical site)
# with a vinyl hydroperoxide group; group-additivity thermo.
species(
label = 'C=C(C[C]=O)OO(16146)',
structure = SMILES('C=C(C[C]=O)OO'),
E0 = (-23.2419,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,1855,455,950,3615,1310,387.5,850,1000,350,440,435,1725,441.292],'cm^-1')),
HinderedRotor(inertia=(0.112205,'amu*angstrom^2'), symmetry=1, barrier=(15.7176,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0454961,'amu*angstrom^2'), symmetry=1, barrier=(2.74523,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.131082,'amu*angstrom^2'), symmetry=1, barrier=(15.6885,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.113281,'amu*angstrom^2'), symmetry=1, barrier=(15.7072,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (101.081,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.6742,0.0527896,-4.22036e-05,1.61042e-08,-2.49787e-12,-2713.6,24.9271], Tmin=(100,'K'), Tmax=(1473.73,'K')), NASAPolynomial(coeffs=[12.653,0.0229915,-1.18748e-05,2.38461e-09,-1.70552e-13,-5949.59,-32.2967], Tmin=(1473.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-23.2419,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(266.063,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cds-CdsCsOs) + group(Cds-OdCsH) + group(Cds-CdsHH) + radical(CCCJ=O)"""),
)
# Species 16147: [CH2]C(C=C=O)OO — primary alkyl radical (doublet) bearing a
# ketene moiety and a hydroperoxide; group-additivity thermo.
species(
label = '[CH2]C(C=C=O)OO(16147)',
structure = SMILES('[CH2]C(C=C=O)OO'),
E0 = (2.93663,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2120,512.5,787.5,3010,987.5,1337.5,450,1655,3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,180],'cm^-1')),
HinderedRotor(inertia=(0.884271,'amu*angstrom^2'), symmetry=1, barrier=(20.3311,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.40379,'amu*angstrom^2'), symmetry=1, barrier=(55.2678,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.577108,'amu*angstrom^2'), symmetry=1, barrier=(13.2688,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.578233,'amu*angstrom^2'), symmetry=1, barrier=(13.2947,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (101.081,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.501278,0.0829225,-0.000128903,1.06712e-07,-3.47465e-11,473.476,26.4525], Tmin=(100,'K'), Tmax=(838.318,'K')), NASAPolynomial(coeffs=[10.78,0.0257589,-1.20938e-05,2.26761e-09,-1.54245e-13,-964.608,-19.6223], Tmin=(838.318,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.93663,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(266.063,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cdd-O2d)CsOsH) + group(Cs-CsHHH) + group(Cds-(Cdd-O2d)CsH) + radical(CJCOOH)"""),
)
# Species 173: [CH2][C]=O biradical (triplet; label written as the resonance
# form C=[C][O]). Library thermo with two radical corrections.
species(
label = 'C=[C][O](173)',
structure = SMILES('[CH2][C]=O'),
E0 = (160.185,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,539.612,539.669],'cm^-1')),
HinderedRotor(inertia=(0.000578908,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (42.0367,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.39563,0.0101365,2.30741e-06,-8.97566e-09,3.68242e-12,19290.3,10.0703], Tmin=(100,'K'), Tmax=(1068.9,'K')), NASAPolynomial(coeffs=[6.35055,0.00638951,-2.69368e-06,5.4221e-10,-4.02476e-14,18240.9,-6.33602], Tmin=(1068.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(160.185,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(CsCJ=O) + radical(CJC=O)"""),
)
# Species 104: [CH2][CH]OO biradical (triplet) with a hydroperoxide group;
# library thermo plus two radical corrections.
species(
label = '[CH2][CH]OO(104)',
structure = SMILES('[CH2][CH]OO'),
E0 = (224.812,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000],'cm^-1')),
HinderedRotor(inertia=(0.00920734,'amu*angstrom^2'), symmetry=1, barrier=(3.53679,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00921023,'amu*angstrom^2'), symmetry=1, barrier=(3.53685,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.43223,'amu*angstrom^2'), symmetry=1, barrier=(32.9297,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (60.052,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.52392,0.0279389,-1.79903e-05,3.58397e-09,4.58838e-13,27095.5,18.6054], Tmin=(100,'K'), Tmax=(1150.47,'K')), NASAPolynomial(coeffs=[9.41961,0.0107482,-4.42273e-06,8.47943e-10,-6.05179e-14,25059.9,-17.5802], Tmin=(1150.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(224.812,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(220.334,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(CCsJOOH) + radical(CJCOOH)"""),
)
# Species 10: hydroperoxyl radical HO2 (doublet). Library thermo (BurkeH2O2),
# GRI-Mech transport.
species(
label = 'HO2(10)',
structure = SMILES('[O]O'),
E0 = (2.67648,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1112.81,1388.53,3298.45],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (33.0067,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(892.977,'J/mol'), sigma=(3.458,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.02956,-0.00263985,1.5223e-05,-1.71671e-08,6.26738e-12,322.677,4.84428], Tmin=(100,'K'), Tmax=(923.913,'K')), NASAPolynomial(coeffs=[4.15133,0.00191146,-4.11274e-07,6.34957e-11,-4.86385e-15,83.4208,3.09341], Tmin=(923.913,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.67648,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""HO2""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 2390: but-3-enoyl radical C=CC[C]=O (doublet acyl radical).
# Transport estimated from Joback critical constants; group-additivity thermo.
species(
label = 'C=CC[C]=O(2390)',
structure = SMILES('C=CC[C]=O'),
E0 = (66.8219,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1855,455,950,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,458.926],'cm^-1')),
HinderedRotor(inertia=(0.0997865,'amu*angstrom^2'), symmetry=1, barrier=(14.9157,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.099798,'amu*angstrom^2'), symmetry=1, barrier=(14.9167,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (69.0819,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3285.42,'J/mol'), sigma=(5.46087,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=513.18 K, Pc=45.78 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.51804,0.0238835,1.19491e-05,-2.85418e-08,1.09388e-11,8097.53,17.8098], Tmin=(100,'K'), Tmax=(1083.61,'K')), NASAPolynomial(coeffs=[9.78041,0.0178579,-8.47799e-06,1.72441e-09,-1.27255e-13,5303.46,-23.4402], Tmin=(1083.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(66.8219,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsHH) + radical(CCCJ=O)"""),
)
# Species 16148: C[C](C[C]=O)OO — thermo-only entry (no statmech modes);
# group-additivity thermo with two radical corrections.
species(
label = 'C[C](C[C]=O)OO(16148)',
structure = SMILES('C[C](C[C]=O)OO'),
E0 = (18.7909,'kJ/mol'),
# NOTE(review): SMILES shows two radical sites ([C] and [C]=O) and the thermo
# comment applies two radical corrections, yet spinMultiplicity = 1 — cf. the
# analogous biradical at species 13197 which uses 3. Verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.822888,0.0787336,-0.000125404,1.1427e-07,-4.08497e-11,2365.88,27.2713], Tmin=(100,'K'), Tmax=(835.906,'K')), NASAPolynomial(coeffs=[6.11464,0.0360168,-1.75367e-05,3.34738e-09,-2.30115e-13,2088.91,6.32516], Tmin=(835.906,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(18.7909,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(286.849,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(C2CsJOOH) + radical(CCCJ=O)"""),
)
# Species 16149: resonance structure [CH2]C(C=C[O])OO of the labelled
# [CH2]C([CH]C=O)OO; thermo-only entry, group-additivity thermo.
species(
label = '[CH2]C([CH]C=O)OO(16149)',
structure = SMILES('[CH2]C(C=C[O])OO'),
E0 = (28.809,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify
# against the triplet convention used for species 13197.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.523988,0.0640261,-4.00115e-05,-4.65877e-09,9.07363e-12,3601.25,29.8479], Tmin=(100,'K'), Tmax=(971.708,'K')), NASAPolynomial(coeffs=[19.9705,0.0123661,-4.09161e-06,7.65685e-10,-5.79041e-14,-1518.37,-70.3092], Tmin=(971.708,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.809,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-(Cds-Cd)H) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CJCOOH) + radical(C=COJ)"""),
)
# Species 16150: CC([CH][C]=O)OO — thermo-only entry, group-additivity thermo
# with two radical corrections.
species(
label = 'CC([CH][C]=O)OO(16150)',
structure = SMILES('CC([CH][C]=O)OO'),
E0 = (31.7938,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.08123,0.0684083,-8.15253e-05,5.49437e-08,-1.53016e-11,3925.33,28.5613], Tmin=(100,'K'), Tmax=(865.285,'K')), NASAPolynomial(coeffs=[9.55606,0.0292316,-1.36117e-05,2.61946e-09,-1.84064e-13,2458.69,-11.0986], Tmin=(865.285,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(31.7938,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(286.849,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCJCO) + radical(CCCJ=O)"""),
)
# Species 16151: [CH2][C](CC=O)OO biradical (triplet); full statmech modes
# (harmonic oscillator + 5 hindered rotors) and group-additivity thermo.
species(
label = '[CH2][C](CC=O)OO(16151)',
structure = SMILES('[CH2][C](CC=O)OO'),
E0 = (72.7929,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,360,370,350,180],'cm^-1')),
HinderedRotor(inertia=(0.218541,'amu*angstrom^2'), symmetry=1, barrier=(5.02469,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0143454,'amu*angstrom^2'), symmetry=1, barrier=(47.0285,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.218946,'amu*angstrom^2'), symmetry=1, barrier=(5.034,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.219242,'amu*angstrom^2'), symmetry=1, barrier=(5.04082,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.0433,'amu*angstrom^2'), symmetry=1, barrier=(46.9795,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.825321,0.0785617,-0.000123878,1.1328e-07,-4.09511e-11,8860.82,28.4385], Tmin=(100,'K'), Tmax=(822.98,'K')), NASAPolynomial(coeffs=[5.97734,0.0370914,-1.83476e-05,3.53549e-09,-2.44776e-13,8569.2,7.96695], Tmin=(822.98,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(72.7929,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(286.849,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(C2CsJOOH) + radical(CJCOOH)"""),
)
# Species 16152: CC(C[C]=O)O[O] peroxy/acyl biradical (triplet); full statmech
# modes and group-additivity thermo.
species(
label = 'CC(C[C]=O)O[O](16152)',
structure = SMILES('CC(C[C]=O)O[O]'),
E0 = (-16.1035,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,1855,455,950,492.5,1135,1000,1380,1390,370,380,2900,435,180],'cm^-1')),
HinderedRotor(inertia=(0.315902,'amu*angstrom^2'), symmetry=1, barrier=(7.26321,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.315797,'amu*angstrom^2'), symmetry=1, barrier=(7.2608,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.31581,'amu*angstrom^2'), symmetry=1, barrier=(7.26109,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.315809,'amu*angstrom^2'), symmetry=1, barrier=(7.26107,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.968194,0.0723804,-0.000104071,8.72547e-08,-2.9437e-11,-1833.1,27.2165], Tmin=(100,'K'), Tmax=(827.922,'K')), NASAPolynomial(coeffs=[7.72901,0.0312808,-1.43245e-05,2.68172e-09,-1.83162e-13,-2663.48,-2.37781], Tmin=(827.922,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-16.1035,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCCJ=O) + radical(ROOJ)"""),
)
# Species 16153: [CH2]C(CC=O)O[O] — thermo-only entry, group-additivity thermo
# with two radical corrections.
species(
label = '[CH2]C(CC=O)O[O](16153)',
structure = SMILES('[CH2]C(CC=O)O[O]'),
E0 = (37.8984,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.980231,0.0720844,-0.000102058,8.55523e-08,-2.9197e-11,4661.43,28.3499], Tmin=(100,'K'), Tmax=(805.508,'K')), NASAPolynomial(coeffs=[7.56546,0.0324026,-1.51639e-05,2.87674e-09,-1.98408e-13,3827.01,-0.589952], Tmin=(805.508,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(37.8984,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CJCOOH) + radical(ROOJ)"""),
)
# Species 16154: C=C(CC=O)OO — closed-shell isomer (no radical sites);
# thermo-only entry, group-additivity thermo.
species(
label = 'C=C(CC=O)OO(16154)',
structure = SMILES('C=C(CC=O)OO'),
E0 = (-183.202,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.47295,0.0526993,-3.56243e-05,1.0736e-08,-1.262e-12,-21941.3,24.8551], Tmin=(100,'K'), Tmax=(1959.99,'K')), NASAPolynomial(coeffs=[18.2553,0.0184493,-9.41223e-06,1.82022e-09,-1.24778e-13,-28519.9,-67.4032], Tmin=(1959.99,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-183.202,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cds-CdsCsOs) + group(Cds-OdCsH) + group(Cds-CdsHH)"""),
)
# Species 16155: CC(C=C=O)OO — closed-shell ketene/hydroperoxide isomer;
# thermo-only entry, group-additivity thermo.
species(
label = 'CC(C=C=O)OO(16155)',
structure = SMILES('CC(C=C=O)OO'),
E0 = (-211.026,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.621548,0.0795313,-0.000113306,9.0911e-08,-2.94995e-11,-25263.9,24.0275], Tmin=(100,'K'), Tmax=(794.907,'K')), NASAPolynomial(coeffs=[9.84285,0.030004,-1.395e-05,2.63721e-09,-1.81587e-13,-26631.1,-17.7223], Tmin=(794.907,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-211.026,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cdd-O2d)CsOsH) + group(Cs-CsHHH) + group(Cds-(Cdd-O2d)CsH)"""),
)
# Species 5: hydroxyl radical OH (doublet). Library thermo (BurkeH2O2),
# GRI-Mech transport.
species(
label = 'OH(5)',
structure = SMILES('[OH]'),
E0 = (28.372,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3287.46],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (17.0073,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.4858,0.00133397,-4.70043e-06,5.64379e-09,-2.06318e-12,3411.96,1.99788], Tmin=(100,'K'), Tmax=(1005.25,'K')), NASAPolynomial(coeffs=[2.88225,0.00103869,-2.35652e-07,1.40229e-11,6.34581e-16,3669.56,5.59053], Tmin=(1005.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.372,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 16156: O=[C]CC1CO1 — epoxide-ring acyl radical; thermo-only entry,
# group additivity with ring correction (Ethylene_oxide).
species(
label = 'O=[C]CC1CO1(16156)',
structure = SMILES('O=[C]CC1CO1'),
E0 = (-50.0495,'kJ/mol'),
# NOTE(review): one radical site ([C]=O) but spinMultiplicity = 1 — a doublet
# would be 2; verify whether this field is used for thermo-only species.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (85.0813,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.48893,0.0486523,-4.76958e-05,2.66315e-08,-5.71824e-12,-5923.48,20.8561], Tmin=(100,'K'), Tmax=(1343.79,'K')), NASAPolynomial(coeffs=[9.56376,0.0167665,-3.34104e-06,2.79552e-10,-6.97024e-15,-7384.91,-17.8492], Tmin=(1343.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-50.0495,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(249.434,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + ring(Ethylene_oxide) + radical(CCCJ=O)"""),
)
# Species 16157: [CH2]C1CC(=O)O1 — beta-propiolactone ring with an exocyclic
# CH2 radical; thermo-only entry, group additivity with ring correction.
species(
label = '[CH2]C1CC(=O)O1(16157)',
structure = SMILES('[CH2]C1CC(=O)O1'),
E0 = (-122.378,'kJ/mol'),
# NOTE(review): one radical site but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (85.0813,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.35494,0.0247681,3.1403e-05,-6.22678e-08,2.76228e-11,-14648.7,18.3698], Tmin=(100,'K'), Tmax=(902.146,'K')), NASAPolynomial(coeffs=[11.2826,0.0153801,-3.1948e-06,4.01396e-10,-2.55304e-14,-17488.3,-30.592], Tmin=(902.146,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-122.378,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(253.591,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-O2d)) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + ring(Beta-Propiolactone) + radical(CJC(C)OC)"""),
)
# Species 13196: O=[C]CC[CH]OO — thermo-only entry, group-additivity thermo
# with two radical corrections.
species(
label = 'O=[C]CC[CH]OO(13196)',
structure = SMILES('O=[C]CC[CH]OO'),
E0 = (33.1172,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.687425,0.0811836,-0.000128296,1.14373e-07,-4.01187e-11,4094.34,27.9583], Tmin=(100,'K'), Tmax=(835.207,'K')), NASAPolynomial(coeffs=[7.22527,0.0344534,-1.6679e-05,3.17561e-09,-2.18022e-13,3540.04,0.813787], Tmin=(835.207,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(33.1172,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(286.849,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + radical(CCCJ=O) + radical(CCsJOOH)"""),
)
# Species 13193: [CH2]C(OO)C(=C)[O] — thermo-only entry, group-additivity
# thermo with two radical corrections.
species(
label = '[CH2]C(OO)C(=C)[O](13193)',
structure = SMILES('[CH2]C(OO)C(=C)[O]'),
E0 = (19.3853,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.399352,0.0713039,-7.33157e-05,3.73102e-08,-7.37441e-12,2467.83,30.5869], Tmin=(100,'K'), Tmax=(1242.68,'K')), NASAPolynomial(coeffs=[17.8248,0.0152135,-5.6101e-06,9.87529e-10,-6.70293e-14,-1862.99,-57.2669], Tmin=(1242.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(19.3853,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-(Cds-Cd)H) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsHHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + radical(CJCOOH) + radical(C=C(C)OJ)"""),
)
# Species 13199: O=C1CC(C1)OO — closed-shell cyclobutanone hydroperoxide;
# thermo-only entry, group additivity with ring correction (Cyclobutanone).
species(
label = 'O=C1CC(C1)OO(13199)',
structure = SMILES('O=C1CC(C1)OO'),
E0 = (-216.895,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.55564,0.0444495,-1.08816e-05,-1.35864e-08,6.97353e-12,-25990.6,22.6646], Tmin=(100,'K'), Tmax=(1097.7,'K')), NASAPolynomial(coeffs=[11.8059,0.0258594,-1.11158e-05,2.12612e-09,-1.51088e-13,-29371.3,-32.8909], Tmin=(1097.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-216.895,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(299.321,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cds-OdCsCs) + ring(Cyclobutanone)"""),
)
# Species 12: carbon monoxide (SMILES written in dative form [C-]#[O+]).
# Library thermo (BurkeH2O2), PrimaryTransportLibrary transport.
species(
label = 'CO(12)',
structure = SMILES('[C-]#[O+]'),
E0 = (-119.219,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2084.51],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0101,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(762.44,'J/mol'), sigma=(3.69,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.5971,-0.00102424,2.83336e-06,-1.75825e-09,3.42587e-13,-14343.2,3.45822], Tmin=(100,'K'), Tmax=(1669.93,'K')), NASAPolynomial(coeffs=[2.92796,0.00181931,-8.35308e-07,1.51269e-10,-9.88872e-15,-14292.7,6.51157], Tmin=(1669.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-119.219,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""CO""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 5700: [CH2]C([CH2])OO biradical (triplet) hydroperoxide; full
# statmech modes; DFT_QCI_thermo library value plus two radical corrections.
species(
label = '[CH2]C([CH2])OO(5700)',
structure = SMILES('[CH2]C([CH2])OO'),
E0 = (207.868,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,1380,1390,370,380,2900,435],'cm^-1')),
HinderedRotor(inertia=(0.00830001,'amu*angstrom^2'), symmetry=1, barrier=(4.01693,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.175343,'amu*angstrom^2'), symmetry=1, barrier=(4.03149,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.571253,'amu*angstrom^2'), symmetry=1, barrier=(13.1342,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.571425,'amu*angstrom^2'), symmetry=1, barrier=(13.1382,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (74.0785,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.59507,0.0541721,-6.21486e-05,3.83338e-08,-9.48459e-12,25086.3,23.0636], Tmin=(100,'K'), Tmax=(982.383,'K')), NASAPolynomial(coeffs=[10.333,0.0185929,-7.82187e-06,1.46588e-09,-1.02174e-13,23369.5,-18.9365], Tmin=(982.383,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(207.868,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CJCOOH) + radical(CJCOOH)"""),
)
# Species 16158: [O]C(CO)C[C]=O — thermo-only entry, group-additivity thermo
# with two radical corrections.
species(
label = '[O]C(CO)C[C]=O(16158)',
structure = SMILES('[O]C(CO)C[C]=O'),
E0 = (-169.08,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.840333,0.0745175,-0.000106698,8.65964e-08,-2.82432e-11,-20226.6,28.6666], Tmin=(100,'K'), Tmax=(823.568,'K')), NASAPolynomial(coeffs=[9.10824,0.0287158,-1.29959e-05,2.42264e-09,-1.65177e-13,-21397,-8.45411], Tmin=(823.568,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-169.08,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + radical(CC(C)OJ) + radical(CCCJ=O)"""),
)
# Species 16159: [CH2]C([O])CC(=O)O — thermo-only entry, group-additivity
# thermo with two radical corrections.
species(
label = '[CH2]C([O])CC(=O)O(16159)',
structure = SMILES('[CH2]C([O])CC(=O)O'),
E0 = (-223.31,'kJ/mol'),
# NOTE(review): two radical sites in SMILES but spinMultiplicity = 1 — verify.
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.29329,0.0635105,-6.52704e-05,3.81841e-08,-9.45459e-12,-26763.9,25.473], Tmin=(100,'K'), Tmax=(954.874,'K')), NASAPolynomial(coeffs=[8.98601,0.0312852,-1.46476e-05,2.84037e-09,-2.01001e-13,-28233,-11.2846], Tmin=(954.874,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-223.31,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-(Cds-O2d)H) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CC(C)OJ) + radical(CJCO)"""),
)
# Species 19: triplet methylene CH2. Library thermo
# (Klippenstein_Glarborg2016), GRI-Mech transport.
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# Species 7813: O=[C]C[CH]OO biradical (triplet); full statmech modes;
# DFT_QCI_thermo library value plus two radical corrections.
species(
label = 'O=[C]C[CH]OO(7813)',
structure = SMILES('O=[C]C[CH]OO'),
E0 = (67.1929,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1855,455,950,3615,1310,387.5,850,1000,3025,407.5,1350,352.5,180],'cm^-1')),
HinderedRotor(inertia=(0.303459,'amu*angstrom^2'), symmetry=1, barrier=(6.97711,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.304021,'amu*angstrom^2'), symmetry=1, barrier=(6.99004,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.306102,'amu*angstrom^2'), symmetry=1, barrier=(7.03788,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.614289,'amu*angstrom^2'), symmetry=1, barrier=(14.1237,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (88.0621,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.03762,0.0782953,-0.000153718,1.48437e-07,-5.28251e-11,8175.61,22.8901], Tmin=(100,'K'), Tmax=(882.158,'K')), NASAPolynomial(coeffs=[5.53701,0.0268459,-1.34418e-05,2.53032e-09,-1.69269e-13,8589.85,8.59457], Tmin=(882.158,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(67.1929,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(266.063,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CCsJOOH) + radical(CCCJ=O)"""),
)
# Triplet carbon monoxide biradical [C]=O(361): thermo from
# Klippenstein_Glarborg2016 with a carbene-triplet radical correction.
species(
    label = '[C]=O(361)',
    structure = SMILES('[C]=O'),
    E0 = (439.086,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3054.48],'cm^-1')),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (28.0101,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.08916,0.00200416,-1.61661e-05,2.55058e-08,-1.16424e-11,52802.7,4.52505], Tmin=(100,'K'), Tmax=(856.11,'K')), NASAPolynomial(coeffs=[0.961625,0.00569045,-3.48044e-06,7.19202e-10,-5.08041e-14,53738.7,21.4663], Tmin=(856.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(439.086,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(CdCdJ2_triplet)"""),
)
# Bath-gas species N2 (inert): thermo from BurkeH2O2, transport from
# PrimaryTransportLibrary.
species(
    label = 'N2',
    structure = SMILES('N#N'),
    E0 = (-8.69489,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (28.0135,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
# Bath-gas species Ne (inert monatomic, constant Cp = 5/2 R): transport
# parameters are the fixed Lennard-Jones fallback, as noted in the comment.
species(
    label = 'Ne',
    structure = SMILES('[Ne]'),
    E0 = (-6.19738,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (20.1797,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
# Transition states TS1-TS25, one per reaction below (TSn pairs with
# reactionN).  Each entry carries only a label and ground-state energy E0;
# no frequencies are given here.
transitionState(
    label = 'TS1',
    E0 = (45.8543,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS2',
    E0 = (199.84,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS3',
    E0 = (225.721,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS4',
    E0 = (115.832,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS5',
    E0 = (209.627,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS6',
    E0 = (107.573,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS7',
    E0 = (178.139,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS8',
    E0 = (202.728,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS9',
    E0 = (163.793,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS10',
    E0 = (188.929,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS11',
    E0 = (124.102,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS12',
    E0 = (87.6404,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS13',
    E0 = (384.997,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS14',
    E0 = (109.254,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS15',
    E0 = (109.254,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS16',
    E0 = (91.4976,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS17',
    E0 = (121.71,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS18',
    E0 = (204.481,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS19',
    E0 = (291.455,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS20',
    E0 = (54.1387,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS21',
    E0 = (114.267,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS22',
    E0 = (158.404,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS23',
    E0 = (158.404,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS24',
    E0 = (448.756,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS25',
    E0 = (646.953,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
# Path reactions of network 2526.  Every reaction involves the central isomer
# [CH2]C(C[C]=O)OO(13197); kinetics and their provenance (rate rule, Euclidian
# distance, family) are recorded in each Arrhenius comment string.
reaction(
    label = 'reaction1',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['CH2CHOOH(64)', 'CH2CO(28)'],
    transitionState = 'TS1',
    kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
    label = 'reaction2',
    reactants = ['H(3)', 'C=C(C[C]=O)OO(16146)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS2',
    kinetics = Arrhenius(A=(170.641,'m^3/(mol*s)'), n=1.56204, Ea=(11.2897,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds_Cds;HJ] for rate rule [Cds-OsCs_Cds;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction3',
    reactants = ['H(3)', '[CH2]C(C=C=O)OO(16147)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS3',
    kinetics = Arrhenius(A=(3.82e-16,'cm^3/(molecule*s)'), n=1.61, Ea=(10.992,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds_Ck;HJ] for rate rule [Cds-CsH_Ck;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction4',
    reactants = ['CH2CHOOH(64)', 'C=[C][O](173)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS4',
    kinetics = Arrhenius(A=(0.30847,'m^3/(mol*s)'), n=2.06429, Ea=(8.71744,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Cds_Cds;CJ] + [Cds-OsH_Cds;YJ] for rate rule [Cds-OsH_Cds;CJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction5',
    reactants = ['[CH2][CH]OO(104)', 'CH2CO(28)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS5',
    kinetics = Arrhenius(A=(0.284303,'m^3/(mol*s)'), n=1.93802, Ea=(45.6341,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [Cds-HH_Ck;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction6',
    reactants = ['HO2(10)', 'C=CC[C]=O(2390)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS6',
    kinetics = Arrhenius(A=(10.6,'cm^3/(mol*s)'), n=3.29, Ea=(38.0744,'kJ/mol'), T0=(1,'K'), Tmin=(400,'K'), Tmax=(1100,'K'), comment="""From training reaction 2772 used for Cds-CsH_Cds-HH;OJ-O2s
Exact match found for rate rule [Cds-CsH_Cds-HH;OJ-O2s]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction7',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['C[C](C[C]=O)OO(16148)'],
    transitionState = 'TS7',
    kinetics = Arrhenius(A=(309968,'s^-1'), n=2.08546, Ea=(132.285,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;C_rad_out_2H;Cs_H_out] for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_OOH/Cs]
Euclidian distance = 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction8',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['[CH2]C([CH]C=O)OO(16149)'],
    transitionState = 'TS8',
    kinetics = Arrhenius(A=(791180,'s^-1'), n=2.19286, Ea=(156.873,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;Y_rad_out;Cs_H_out_H/NonDeC] for rate rule [R2H_S;CO_rad_out;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction9',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['CC([CH][C]=O)OO(16150)'],
    transitionState = 'TS9',
    kinetics = Arrhenius(A=(166690,'s^-1'), n=2.17519, Ea=(117.939,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;C_rad_out_2H;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction10',
    reactants = ['[CH2][C](CC=O)OO(16151)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS10',
    kinetics = Arrhenius(A=(285601,'s^-1'), n=2.01653, Ea=(116.136,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_Cs;Y_rad_out;XH_out] for rate rule [R3H_SS_Cs;Y_rad_out;CO_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction11',
    reactants = ['CC(C[C]=O)O[O](16152)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS11',
    kinetics = Arrhenius(A=(3.18e+08,'s^-1'), n=1.06, Ea=(140.206,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 247 used for R4H_SSS_O(Cs)Cs;O_rad_out;Cs_H_out_2H
Exact match found for rate rule [R4H_SSS_O(Cs)Cs;O_rad_out;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction12',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['[CH2]C(CC=O)O[O](16153)'],
    transitionState = 'TS12',
    kinetics = Arrhenius(A=(67170.6,'s^-1'), n=1.77845, Ea=(41.7861,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSSS;Y_rad_out;XH_out] for rate rule [R5H_SSSS;CO_rad_out;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
    label = 'reaction13',
    reactants = ['[CH2][CH]OO(104)', 'C=[C][O](173)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS13',
    kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction14',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['C=C(CC=O)OO(16154)'],
    transitionState = 'TS14',
    kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
family: Intra_Disproportionation"""),
)
reaction(
    label = 'reaction15',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['CC(C=C=O)OO(16155)'],
    transitionState = 'TS15',
    kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
    label = 'reaction16',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['OH(5)', 'O=[C]CC1CO1(16156)'],
    transitionState = 'TS16',
    kinetics = Arrhenius(A=(3.98e+12,'s^-1','*|/',1.2), n=0, Ea=(45.6433,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 1 used for R2OO_S;C_pri_rad_intra;OOH
Exact match found for rate rule [R2OO_S;C_pri_rad_intra;OOH]
Euclidian distance = 0
family: Cyclic_Ether_Formation"""),
)
reaction(
    label = 'reaction17',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['OH(5)', '[CH2]C1CC(=O)O1(16157)'],
    transitionState = 'TS17',
    kinetics = Arrhenius(A=(3.11355e+11,'s^-1'), n=0, Ea=(75.8559,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3OO_SS;Y_rad_intra;OOH]
Euclidian distance = 0
family: Cyclic_Ether_Formation"""),
)
reaction(
    label = 'reaction18',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['O=[C]CC[CH]OO(13196)'],
    transitionState = 'TS18',
    kinetics = Arrhenius(A=(2.95289e+09,'s^-1'), n=1, Ea=(158.627,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CsJ-HH;C] + [cCs(-HR!H)CJ;CsJ;C] for rate rule [cCs(-HR!H)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
    label = 'reaction19',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['[CH2]C(OO)C(=C)[O](13193)'],
    transitionState = 'TS19',
    kinetics = Arrhenius(A=(3.53e+06,'s^-1'), n=1.73, Ea=(245.601,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [cCs(-HH)CJ;CJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
    label = 'reaction20',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['O=C1CC(C1)OO(13199)'],
    transitionState = 'TS20',
    kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [R4_SSS;C_rad_out_2H;Ypri_rad_out]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
    label = 'reaction21',
    reactants = ['CO(12)', '[CH2]C([CH2])OO(5700)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS21',
    kinetics = Arrhenius(A=(2461.18,'m^3/(mol*s)'), n=1.0523, Ea=(25.6182,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [COm;C_rad/H2/Cs]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_COm"""),
)
reaction(
    label = 'reaction22',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['[O]C(CO)C[C]=O(16158)'],
    transitionState = 'TS22',
    kinetics = Arrhenius(A=(3.39e+11,'s^-1'), n=0, Ea=(112.55,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 1 used for R2OOH_S;C_rad_out_2H
Exact match found for rate rule [R2OOH_S;C_rad_out_2H]
Euclidian distance = 0
family: intra_OH_migration"""),
)
reaction(
    label = 'reaction23',
    reactants = ['[CH2]C(C[C]=O)OO(13197)'],
    products = ['[CH2]C([O])CC(=O)O(16159)'],
    transitionState = 'TS23',
    kinetics = Arrhenius(A=(3.95074e+10,'s^-1'), n=0, Ea=(112.549,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3OOH_SS;Y_rad_out]
Euclidian distance = 0
family: intra_OH_migration"""),
)
reaction(
    label = 'reaction24',
    reactants = ['CH2(19)', 'O=[C]C[CH]OO(7813)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS24',
    kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/CsO;Birad]
Euclidian distance = 4.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction25',
    reactants = ['[C]=O(361)', '[CH2]C([CH2])OO(5700)'],
    products = ['[CH2]C(C[C]=O)OO(13197)'],
    transitionState = 'TS25',
    kinetics = Arrhenius(A=(2.13464e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cs;Birad]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 2.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
# Pressure-dependent network 2526: one unimolecular isomer, one bimolecular
# reactant channel, and a 50/50 N2/Ne bath gas.
network(
    label = '2526',
    isomers = [
        '[CH2]C(C[C]=O)OO(13197)',
    ],
    reactants = [
        ('CH2CHOOH(64)', 'CH2CO(28)'),
    ],
    bathGas = {
        'N2': 0.5,
        'Ne': 0.5,
    },
)
# Pressure-dependence job for network 2526: modified strong collision over
# 300-2000 K and 0.01-100 bar, fitted to a 6x4 Chebyshev polynomial.
pressureDependence(
    label = '2526',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-12 06:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``expiration_date`` to Opportunity and several nullable text
    fields to Organization.

    All added fields are ``null=True``, so the migration applies to
    existing rows without requiring defaults or a data migration.
    """

    dependencies = [
        ('webapp', '0001_initial'),
    ]

    operations = [
        # Opportunity: optional expiration date.
        migrations.AddField(
            model_name='opportunity',
            name='expiration_date',
            field=models.DateField(null=True),
        ),
        # Organization: optional descriptive/location fields.
        migrations.AddField(
            model_name='organization',
            name='link_to_organization',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='location_city',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='location_country',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='long_description',
            field=models.TextField(null=True),
        ),
        # title is the only bounded field (CharField, 256 chars).
        migrations.AddField(
            model_name='organization',
            name='title',
            field=models.CharField(max_length=256, null=True),
        ),
    ]
|
import tensorflow as tf
import logging
import sys
from collections import namedtuple
from tensorflow.contrib.layers import fully_connected
from tfTools.gradientTools import average_gradients, handle_gradients
from tfModels.tools import warmup_exponential_decay, choose_device, lr_decay_with_warmup, stepped_down_decay, exponential_decay, size_variables
from tfModels.layers import build_cell, cell_forward
from tfModels.tensor2tensor.common_layers import layer_norm
class LSTM_Model(object):
    """Multi-GPU stacked-LSTM classifier graph builder (TensorFlow 1.x).

    Depending on ``is_train``, ``__init__`` builds either a data-parallel
    training graph (one tower per GPU, gradients averaged on
    ``center_device``) or a single-tower inference graph.  Inputs come
    either from a pre-batched tensor tuple (``build_tf_input``) or from
    placeholders (``build_pl_input``).
    """

    # Class-wide counters shared by all instances.
    num_Instances = 0  # number of graph instances built so far
    num_Model = 0      # number of distinct model scopes (see commented reuse logic)

    def __init__(self, tensor_global_step, is_train, args, batch=None, name='model'):
        # Initialize some parameters
        self.is_train = is_train
        # Inference always runs on a single device.
        self.num_gpus = args.num_gpus if is_train else 1
        self.list_gpu_devices = args.list_gpus
        self.center_device = "/cpu:0"
        self.learning_rate = None
        self.args = args
        self.batch = batch
        self.name = name
        # Feed from the supplied batch tensors when available, otherwise
        # fall back to placeholders.
        self.build_input = self.build_tf_input if batch else self.build_pl_input
        self.list_pl = None
        self.global_step = tensor_global_step
        # Build graph
        self.list_run = list(self.build_graph() if is_train else self.build_infer_graph())

    def build_graph(self):
        """Build the data-parallel training graph.

        Returns (mean loss, batch-shape tensor, train op, per-tower debug
        tensors zipped across GPUs).
        """
        # Create input tensors on the CPU.
        tensors_input = self.build_input()
        # Create the optimizer (also sets self.learning_rate).
        self.optimizer = self.build_optimizer()
        if 'horovod' in sys.modules:
            import horovod.tensorflow as hvd
            logging.info('wrap the optimizer with horovod!')
            self.optimizer = hvd.DistributedOptimizer(self.optimizer)
        loss_step = []
        tower_grads = []
        list_debug = []
        # the outer scope is necessary for the where the reuse scope need to be limited whthin
        # or reuse=tf.get_variable_scope().reuse
        for id_gpu, name_gpu in enumerate(self.list_gpu_devices):
            # with tf.variable_scope(self.name, reuse=bool(self.__class__.num_Model)):
            with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
                # NOTE(review): build_single_graph as written returns a 2-tuple
                # (see the note on its return statement), so this 3-way unpack
                # would fail at graph-build time in train mode -- confirm the
                # intended debug return value.
                loss, gradients, debug = self.build_single_graph(id_gpu, name_gpu, tensors_input)
                loss_step.append(loss)
                tower_grads.append(gradients)
                list_debug.append(debug)
        # mean the loss
        loss = tf.reduce_mean(loss_step)
        # merge gradients, update current model
        with tf.device(self.center_device):
            # computation relevant to gradient
            averaged_grads = average_gradients(tower_grads)
            handled_grads = handle_gradients(averaged_grads, self.args)
            # with tf.variable_scope('adam', reuse=False):
            op_optimize = self.optimizer.apply_gradients(handled_grads, self.global_step)
        self.__class__.num_Instances += 1
        logging.info("built {} {} instance(s).".format(self.__class__.num_Instances, self.__class__.__name__))
        # return loss, tensors_input.shape_batch, op_optimize
        return loss, tensors_input.shape_batch, op_optimize, [x for x in zip(*list_debug)]
        # return loss, tensors_input.shape_batch, op_optimize, debug

    def build_infer_graph(self):
        """Build a single-tower evaluation graph.

        reuse=True if build train models above
        reuse=False if in the infer file

        NOTE(review): this calls ``self.build_input()`` (which still requires
        labels), not ``build_infer_input`` -- presumably intentional because
        the loss is computed; confirm.
        """
        # Create input tensors on the CPU.
        tensors_input = self.build_input()
        # the outer scope is necessary for the where the reuse scope need to be limited whthin
        # or reuse=tf.get_variable_scope().reuse
        # with tf.variable_scope(self.name, reuse=bool(self.__class__.num_Model)):
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            loss, logits = self.build_single_graph(
                id_gpu=0,
                name_gpu=self.list_gpu_devices[0],
                tensors_input=tensors_input)
        # TODO havn't checked
        infer = tf.nn.in_top_k(logits, tf.reshape(tensors_input.label_splits[0], [-1]), 1)
        return loss, tensors_input.shape_batch, infer

    def build_pl_input(self):
        """Create placeholder inputs for training.

        use for training. but recommend to use build_tf_input instead
        """
        # NOTE: the namedtuple *class* itself is used as an attribute bag here
        # (attributes are assigned on the class, no instance is created).
        tensors_input = namedtuple('tensors_input',
            'feature_splits, label_splits, len_fea_splits, len_label_splits, shape_batch')
        with tf.device(self.center_device):
            with tf.name_scope("inputs"):
                batch_features = tf.placeholder(tf.float32, [None, None, self.args.data.dim_input], name='input_feature')
                batch_labels = tf.placeholder(tf.int32, [None, None], name='input_labels')
                batch_fea_lens = tf.placeholder(tf.int32, [None], name='input_fea_lens')
                batch_label_lens = tf.placeholder(tf.int32, [None], name='input_label_lens')
                self.list_pl = [batch_features, batch_labels, batch_fea_lens, batch_label_lens]
                # split input data along batch axis to gpus
                tensors_input.feature_splits = tf.split(batch_features, self.num_gpus, name="feature_splits")
                tensors_input.label_splits = tf.split(batch_labels, self.num_gpus, name="label_splits")
                tensors_input.len_fea_splits = tf.split(batch_fea_lens, self.num_gpus, name="len_fea_splits")
                tensors_input.len_label_splits = tf.split(batch_label_lens, self.num_gpus, name="len_label_splits")
        tensors_input.shape_batch = tf.shape(batch_features)
        return tensors_input

    def build_infer_input(self):
        """Create placeholder inputs for inference (features only).

        used for inference. For inference must use placeholder.
        during the infer, we only get the decoded result and not use label
        """
        tensors_input = namedtuple('tensors_input',
            'feature_splits, len_fea_splits, label_splits, len_label_splits, shape_batch')
        with tf.device(self.center_device):
            with tf.name_scope("inputs"):
                batch_features = tf.placeholder(tf.float32, [None, None, self.args.data.dim_input], name='input_feature')
                batch_fea_lens = tf.placeholder(tf.int32, [None], name='input_fea_lens')
                self.list_pl = [batch_features, batch_fea_lens]
                # split input data along batch axis to gpus
                tensors_input.feature_splits = tf.split(batch_features, self.num_gpus, name="feature_splits")
                tensors_input.len_fea_splits = tf.split(batch_fea_lens, self.num_gpus, name="len_fea_splits")
        # No labels at inference time.
        tensors_input.label_splits = None
        tensors_input.len_label_splits = None
        tensors_input.shape_batch = tf.shape(batch_features)
        return tensors_input

    def build_tf_input(self):
        """Split the pre-batched input tuple (features, labels, fea_lens,
        label_lens) across GPUs.  Standard training input."""
        tensors_input = namedtuple('tensors_input',
            'feature_splits, label_splits, len_fea_splits, len_label_splits, shape_batch')
        with tf.device(self.center_device):
            with tf.name_scope("inputs"):
                # split input data along batch axis to gpus
                tensors_input.feature_splits = tf.split(self.batch[0], self.num_gpus, name="feature_splits")
                tensors_input.label_splits = tf.split(self.batch[1], self.num_gpus, name="label_splits")
                tensors_input.len_fea_splits = tf.split(self.batch[2], self.num_gpus, name="len_fea_splits")
                tensors_input.len_label_splits = tf.split(self.batch[3], self.num_gpus, name="len_label_splits")
        tensors_input.shape_batch = tf.shape(self.batch[0])
        return tensors_input

    def build_single_graph(self, id_gpu, name_gpu, tensors_input):
        """Build one tower (stacked LSTM + projection + softmax CE loss).

        Used for both the infer model and the train model, conditioned on
        self.is_train.
        """
        # Build model in one device.
        num_cell_units = self.args.model.num_cell_units
        cell_type = self.args.model.cell_type
        dropout = self.args.model.dropout
        forget_bias = self.args.model.forget_bias
        use_residual = self.args.model.use_residual
        hidden_output = tensors_input.feature_splits[id_gpu]
        with tf.device(lambda op: choose_device(op, name_gpu, self.center_device)):
            for i in range(self.args.model.num_lstm_layers):
                # build one layer: build block, connect block
                single_cell = build_cell(
                    num_units=num_cell_units,
                    num_layers=1,
                    is_train=self.is_train,
                    cell_type=cell_type,
                    dropout=dropout,
                    forget_bias=forget_bias,
                    use_residual=use_residual)
                hidden_output, _ = cell_forward(
                    cell=single_cell,
                    inputs=hidden_output,
                    index_layer=i)
                # Per-layer projection back to num_cell_units with tanh.
                hidden_output = fully_connected(
                    inputs=hidden_output,
                    num_outputs=num_cell_units,
                    activation_fn=tf.nn.tanh,
                    scope='wx_b'+str(i))
                if self.args.model.use_layernorm:
                    hidden_output = layer_norm(hidden_output)
            logits = fully_connected(inputs=hidden_output,
                                     num_outputs=self.args.dim_output,
                                     activation_fn=tf.identity,
                                     scope='fully_connected')
            # Accuracy
            with tf.name_scope("label_accuracy"):
                correct = tf.nn.in_top_k(logits, tf.reshape(tensors_input.label_splits[id_gpu], [-1]), 1)
                # NOTE(review): tensors_input.mask_splits is referenced here and
                # below but is never created by any build_*_input method in this
                # file -- verify where the mask is supposed to come from.
                correct = tf.multiply(tf.cast(correct, tf.float32), tf.reshape(tensors_input.mask_splits[id_gpu], [-1]))
                label_accuracy = tf.reduce_sum(correct)
            # Cross entropy loss (masked, normalized by mask sum).
            with tf.name_scope("CE_loss"):
                cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf.reshape(tensors_input.label_splits[id_gpu], [-1]),
                    logits=logits)
                cross_entropy = tf.multiply(cross_entropy, tf.reshape(tensors_input.mask_splits[id_gpu], [-1]))
                cross_entropy_loss = tf.reduce_sum(cross_entropy) / tf.reduce_sum(tensors_input.mask_splits[id_gpu])
                loss = cross_entropy_loss
            if self.is_train:
                with tf.name_scope("gradients"):
                    gradients = self.optimizer.compute_gradients(loss)
        logging.info('\tbuild {} on {} succesfully! total model number: {}'.format(
            self.__class__.__name__, name_gpu, self.__class__.num_Instances))
        # NOTE(review): due to precedence this is the 2-tuple
        # (loss, gradients if self.is_train else logits), yet build_graph
        # unpacks three values from it -- confirm whether a debug element
        # was meant to be returned in train mode.
        return loss, gradients if self.is_train else logits

    def build_optimizer(self):
        """Create the learning-rate schedule (per args.lr_type) and the
        optimizer (adam / adagrad / sgd per args.optimizer)."""
        if self.args.lr_type == 'stepped_down_decay':
            self.learning_rate = stepped_down_decay(
                self.global_step,
                learning_rate=self.args.learning_rate,
                decay_rate=self.args.decay_rate,
                decay_steps=self.args.decay_steps)
        elif self.args.lr_type == 'lr_decay_with_warmup':
            self.learning_rate = lr_decay_with_warmup(
                self.global_step,
                warmup_steps=self.args.warmup_steps,
                hidden_units=self.args.model.encoder.num_cell_units)
        elif self.args.lr_type == 'constant_learning_rate':
            self.learning_rate = tf.convert_to_tensor(self.args.constant_learning_rate)
        elif self.args.lr_type == 'exponential_decay':
            self.learning_rate = exponential_decay(
                self.global_step,
                lr_init=self.args.lr_init,
                lr_final=self.args.lr_final,
                decay_rate=self.args.decay_rate,
                decay_steps=self.args.decay_steps)
        else:
            # Default schedule: warmup followed by exponential decay.
            self.learning_rate = warmup_exponential_decay(
                self.global_step,
                warmup_steps=self.args.warmup_steps,
                peak=self.args.peak,
                decay_rate=0.5,
                decay_steps=self.args.decay_steps)
        if 'horovod' in sys.modules:
            import horovod.tensorflow as hvd
            logging.info('wrap the optimizer with horovod!')
            # Scale the learning rate by the number of workers.
            self.learning_rate = self.learning_rate * hvd.size()
        with tf.name_scope("optimizer"):
            if self.args.optimizer == "adam":
                logging.info("Using ADAM as optimizer")
                optimizer = tf.train.AdamOptimizer(self.learning_rate,
                                                   beta1=0.9,
                                                   beta2=0.98,
                                                   epsilon=1e-9,
                                                   name=self.args.optimizer)
            elif self.args.optimizer == "adagrad":
                logging.info("Using Adagrad as optimizer")
                optimizer = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                logging.info("Using SGD as optimizer")
                optimizer = tf.train.GradientDescentOptimizer(self.learning_rate,
                                                              name=self.args.optimizer)
        return optimizer

    def variables(self, scope=None):
        '''get a list of the model's global variables under `scope`
        (defaults to this model's name scope)'''
        scope = scope if scope else self.name
        scope += '/'
        print('all the variables in the scope:', scope)
        variables = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES,
            scope=scope)
        return variables
if __name__ == '__main__':
    # Smoke test: build train + dev graphs from TFRecord readers and dump the
    # graph to TensorBoard, then exit (everything after sys.exit() is a
    # leftover debugging path and is unreachable).
    from dataProcessing.kaldiModel import KaldiModel, build_kaldi_lstm_layers, build_kaldi_output_affine
    from configs.arguments import args
    from dataProcessing import tfRecoderData
    import os

    os.chdir('/mnt/lustre/xushuang2/easton/projects/mix_model_2.0')
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    logging.info('CUDA_VISIBLE_DEVICES : {}'.format(args.gpus))
    logging.info('args.dim_input : {}'.format(args.dim_input))

    dataReader_train = tfRecoderData.TFRecordReader(args.dir_train_data, args=args)
    dataReader_dev = tfRecoderData.TFRecordReader(args.dir_dev_data, args=args)

    seq_features, seq_labels = dataReader_train.create_seq_tensor(is_train=False)
    batch_train = dataReader_train.fentch_batch_with_TFbuckets([seq_features, seq_labels], args=args)
    seq_features, seq_labels = dataReader_dev.create_seq_tensor(is_train=False)
    batch_dev = dataReader_dev.fentch_batch_with_TFbuckets([seq_features, seq_labels], args=args)

    tensor_global_step = tf.train.get_or_create_global_step()

    # BUG FIX: LSTM_Model.__init__ takes (tensor_global_step, is_train, args,
    # batch=None, ...).  The original calls passed the batch first, shifting
    # every argument (batch as global step, global step as is_train, ...).
    graph_train = LSTM_Model(tensor_global_step, True, args, batch=batch_train)
    logging.info('build training graph successfully!')
    graph_dev = LSTM_Model(tensor_global_step, False, args, batch=batch_dev)
    logging.info('build dev graph successfully!')

    writer = tf.summary.FileWriter(os.path.join(args.model_dir, 'log'), graph=tf.get_default_graph())
    writer.close()

    sys.exit()

    # ---- unreachable below: kept from the original debugging session ----
    if args.is_debug:
        list_ops = [op.name+' '+op.device for op in tf.get_default_graph().get_operations()]
        list_variables_and_devices = [op.name+' '+op.device for op in tf.get_default_graph().get_operations() if op.type.startswith('Variable')]
        logging.info('\n'.join(list_variables_and_devices))

    list_kaldi_layers = []
    list_kaldi_layers = build_kaldi_lstm_layers(list_kaldi_layers, args.num_lstm_layers, args.dim_input, args.num_projs)
    list_kaldi_layers = build_kaldi_output_affine(list_kaldi_layers)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.train.MonitoredTrainingSession(config=config) as sess:
        kaldi_model = KaldiModel(list_kaldi_layers)
        kaldi_model.loadModel(sess=sess, model_path=args.model_init)
|
#
# Copyright 2005-2010 Dustin Bernard
#
# This file is part of UruAgeManager/Drizzle.
#
# UruAgeManager/Drizzle is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UruAgeManager/Drizzle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UruAgeManager/Drizzle. If not, see <http://www.gnu.org/licenses/>.
#
from PlasmaTypes import * #You need this "from x import *" line, because even though the log doesn't give an error, 3dsmax won't show this file otherwise! It's a mystery, because it works to have "import PlasmaTypes" and PlasmaTypes.ptAttribString, etc. so long as this is also here.
import Plasma
import _UamVars
import uam
import _UamUtils
import os
# Transitions:
#   None->x is ignored because we don't want anything to happen on initial link-in.
#   ""->"1" is the trigger we're looking for
# 3dsmax-exposed attributes.  Attribute ids must stay stable across versions.
# NOTE(review): id 2 is reused by _journalname from the commented-out _text_en
# line below -- fine only while _text_en stays disabled; confirm before
# re-enabling the per-language text attributes.
_vartolisten = ptAttribString(1, "Uamvar to trigger book:", "")
#_text_en = ptAttribString(2, "Text(English):", "")
#_text_de = ptAttribString(3, "Text(German)(optional):", "")
#_text_fr = ptAttribString(4, "Text(French)(optional):", "")
_journalname = ptAttribString(2, "Journal name:","Journal1")
_showopen = ptAttribBoolean(5, "Start Opened?", default=1)
_booktype = ptAttribDropDownList(6, "Book type:", ("Old Book","Notebook")) #Problem with no default selected. Plasma bug :P
class UamVar_Journal(ptResponder):
varname = None
#text_en = None
#text_de = None
#text_fr = None
journalname = None
showopen = None
booktype = None
def __init__(self):
self.id = 31290 #must have and must be unique (3dsmax will let you know if it's not unique) and must be a 16bit (signed?) int. So let's keep them under 32768. And it must never change, or it will anger any .max files that use it.
self.version = 1 #must have and must be at least 1. It can go up, but never down, or it will anger those .max files again.
print "UamVarJournal.__init__"
def OnInit(self):
print "UamVarJournal.OnInit"
self.varname = str(_vartolisten.value)
#self.text_en = str(_text_en.value)
#self.text_de = str(_text_de.value)
#self.text_fr = str(_text_fr.value)
self.journalname = str(_journalname.value)
self.showopen = int(_showopen.value)
#print "booktype: "+`_booktype`
#print "booktype: "+`dir(_booktype)`
self.booktype = str(getattr(_booktype,"value","Old Book")) #Stupid Plasma bug. We want _booktype.value, or "OldBook" if .value isn't defined.
#self.booktype = str(_booktype.value)
_UamVars.RegisterVar(self.varname)
_UamVars.ListenToVar(self.varname, self)
if self.FindFile()==None:
_UamVars.Error("Unable to find journal: "+"ageresources/"+_UamUtils.GetAgeName()+"--"+self.journalname+".txt")
def FindFile(self):
#Tries:
# ageresources/Agename--JournalName--lang1.txt
# ageresources/Agename--JournalName.txt
# ageresources/Agename--JournalName--lang2.txt
# ageresources/Agename--JournalName--lang3.txt
filebase = "ageresources/"+_UamUtils.GetAgeName()+"--"+self.journalname
lang = _UamUtils.GetLanguage()
filename = filebase+"--"+lang+".txt"
if os.path.isfile(filename):
return filename
filename = filebase+".txt"
if os.path.isfile(filename):
return filename
if lang=="en":
lang2 = "de"
lang3 = "fr"
elif lang=="de":
lang2 = "en"
lang3 = "fr"
elif lang=="fr":
lang2 = "en"
lang3 = "de"
filename = filebase+"--"+lang2+".txt"
if os.path.isfile(filename):
return filename
filename = filebase+"--"+lang3+".txt"
if os.path.isfile(filename):
return filename
return None
def UamListenEvent(self, uamvar, prev, next):
print "UamVarJournal.UamListenEvent uamvar="+uamvar+" prev="+`prev`+" next="+`next`
if prev=="" and next=="1":
print "Showing book..."
#Find text
#lang = _UamUtils.GetLanguage()
#text = ""
#if lang=="en":
# if self.text_en!="":
# text = self.text_en
# elif self.text_de!="":
# text = self.text_de
# else:
# text = self.text_fr
#elif lang=="de":
# if self.text_de!="":
# text = self.text_de
# elif self.text_en!="":
# text = self.text_en
# else:
# text = self.text_fr
#elif lang=="fr":
# if self.text_fr!="":
# text = self.text_fr
# elif self.text_en!="":
# text = self.text_en
# else:
# text = self.text_de
#else:
# raise "Unexpected language: "+lang
filename = self.FindFile()
text = _UamUtils.ReadJournal(filename)
#Do <br> conversions
#text = text.replace("<br>","\n")
#Open the book
if self.booktype=="Old Book":
uam.DisplayBook(text, self.showopen)
elif self.booktype=="Notebook":
uam.DisplayJournal(text, self.showopen)
#Decompiled with Drizzle28! Enjoy :)
# ---------------------------------------------------------------------------
# Plasma "glue" boilerplate: the engine calls the glue_* helpers below to
# discover the modifier class, its attributes and its version.  All state
# lives in these module-level caches.
# ---------------------------------------------------------------------------
global glue_cl
global glue_inst
global glue_params
global glue_paramKeys
glue_cl = None
glue_inst = None
glue_params = None
glue_paramKeys = None
# Default glue_verbose to 0 unless the host already injected it.
try:
    x = glue_verbose
except NameError:
    glue_verbose = 0
def glue_getClass():
    """Resolve and cache the modifier class named by the injected global
    glue_name; returns None (with optional diagnostics) on failure."""
    global glue_cl
    if (glue_cl == None):
        try:
            # glue_name is injected by the engine before this is called.
            cl = eval(glue_name)
            if issubclass(cl, ptModifier):
                glue_cl = cl
            elif glue_verbose:
                print ('Class %s is not derived from modifier' % cl.__name__)
        except NameError:
            if glue_verbose:
                try:
                    print ('Could not find class %s' % glue_name)
                except NameError:
                    print 'Filename/classname not set!'
    return glue_cl
def glue_getInst():
    """Return the cached modifier instance, creating it on first use."""
    global glue_inst
    if glue_inst is None:
        klass = glue_getClass()
        if klass is not None:
            glue_inst = klass()
    return glue_inst
def glue_delInst():
    """Drop the cached instance and reset every glue cache."""
    global glue_inst
    global glue_cl
    global glue_params
    global glue_paramKeys
    # Note: `del` unbinds the global name itself, matching original behaviour.
    if glue_inst is not None:
        del glue_inst
    glue_cl = None
    glue_params = None
    glue_paramKeys = None
def glue_getVersion():
    """Instantiate the modifier, read its version attribute, then clean up."""
    instance = glue_getInst()
    version = instance.version
    glue_delInst()
    return version
def glue_findAndAddAttribs(obj, glue_params):
    """Recursively collect ptAttribute objects from *obj* (or any nested
    list/dict/tuple) into glue_params, keyed by attribute id."""
    if isinstance(obj, ptAttribute):
        if glue_params.has_key(obj.id):
            # Duplicate ids are a plugin-author error; keep the first one.
            if glue_verbose:
                print 'WARNING: Duplicate attribute ids!'
                print ('%s has id %d which is already defined in %s' % (obj.name, obj.id, glue_params[obj.id].name))
        else:
            glue_params[obj.id] = obj
    elif (type(obj) == type([])):
        for o in obj:
            glue_findAndAddAttribs(o, glue_params)
    elif (type(obj) == type({})):
        for o in obj.values():
            glue_findAndAddAttribs(o, glue_params)
    elif (type(obj) == type(())):
        for o in obj:
            glue_findAndAddAttribs(o, glue_params)
def glue_getParamDict():
    """Build (once) and return the id->attribute dict by scanning module
    globals; also caches the ids sorted in descending order (Python 2:
    dict.keys() returns a list, so in-place sort/reverse is valid)."""
    global glue_params
    global glue_paramKeys
    if (type(glue_params) == type(None)):
        glue_params = {}
        gd = globals()
        for obj in gd.values():
            glue_findAndAddAttribs(obj, glue_params)
        glue_paramKeys = glue_params.keys()
        glue_paramKeys.sort()
        glue_paramKeys.reverse()
    return glue_params
def glue_getClassName():
    """Return the resolved modifier class name, or None when not found."""
    cl = glue_getClass()
    if (cl != None):
        return cl.__name__
    if glue_verbose:
        print ('Class not found in %s.py' % glue_name)
    return None
def glue_getBlockID():
    """Return the modifier instance's unique id, or None on failure."""
    inst = glue_getInst()
    if (inst != None):
        return inst.id
    if glue_verbose:
        print ('Instance could not be created in %s.py' % glue_name)
    return None
def glue_getNumParams():
    """Return the number of collected attributes (0 when none were found)."""
    pd = glue_getParamDict()
    if (pd != None):
        return len(pd)
    if glue_verbose:
        print ('No attributes found in %s.py' % glue_name)
    return 0
def glue_getParam(number):
    """Return the 3dsmax default tuple for the attribute at position *number*
    in the cached (descending-id) key order; None on any error."""
    global glue_paramKeys
    pd = glue_getParamDict()
    if (pd != None):
        if (type(glue_paramKeys) == type([])):
            if ((number >= 0) and (number < len(glue_paramKeys))):
                return pd[glue_paramKeys[number]].getdef()
            else:
                print ('glue_getParam: Error! %d out of range of attribute list' % number)
        else:
            # Fallback: key cache missing, use raw dict ordering.
            pl = pd.values()
            if ((number >= 0) and (number < len(pl))):
                return pl[number].getdef()
            elif glue_verbose:
                print ('glue_getParam: Error! %d out of range of attribute list' % number)
    if glue_verbose:
        print 'GLUE: Attribute list error'
    return None
def glue_setParam(id, value):
pd = glue_getParamDict()
if (pd != None):
if pd.has_key(id):
try:
pd[id].__setvalue__(value)
except AttributeError:
if isinstance(pd[id], ptAttributeList):
try:
if (type(pd[id].value) != type([])):
pd[id].value = []
except AttributeError:
pd[id].value = []
pd[id].value.append(value)
else:
pd[id].value = value
elif glue_verbose:
print 'setParam: can\'t find id=',
print id
else:
print 'setParma: Something terribly has gone wrong. Head for the cover.'
def glue_isNamedAttribute(id):
    """Return 1 for ptAttribNamedActivator, 2 for ptAttribNamedResponder,
    0 otherwise (including unknown ids)."""
    pd = glue_getParamDict()
    if (pd != None):
        try:
            if isinstance(pd[id], ptAttribNamedActivator):
                return 1
            if isinstance(pd[id], ptAttribNamedResponder):
                return 2
        except KeyError:
            if glue_verbose:
                print ('Could not find id=%d attribute' % id)
    return 0
def glue_isMultiModifier():
    """Tell the engine whether the modifier derives from ptMultiModifier (1/0)."""
    return int(isinstance(glue_getInst(), ptMultiModifier))
def glue_getVisInfo(number):
    """Return the visibility info for the attribute at position *number* in
    the cached (descending-id) key order; None on any error."""
    global glue_paramKeys
    pd = glue_getParamDict()
    if (pd != None):
        if (type(glue_paramKeys) == type([])):
            if ((number >= 0) and (number < len(glue_paramKeys))):
                return pd[glue_paramKeys[number]].getVisInfo()
            else:
                print ('glue_getVisInfo: Error! %d out of range of attribute list' % number)
        else:
            # Fallback: key cache missing, use raw dict ordering.
            pl = pd.values()
            if ((number >= 0) and (number < len(pl))):
                return pl[number].getVisInfo()
            elif glue_verbose:
                print ('glue_getVisInfo: Error! %d out of range of attribute list' % number)
    if glue_verbose:
        print 'GLUE: Attribute list error'
    return None
|
def add(n1, n2):
    """Return n1 + n2 (works for any types supporting +)."""
    return n1 + n2
# We know which argument combinations are valid for +
print(add(3, 5))
print(add(3, 2.2))
print(add("hello", "abd"))
# print(add("hello", 3))  # would raise TypeError: str + int
# Without (): the function object itself; with (): the function is executed
b = add
print(b(3, 5))
c = print
c("hello")
def test():
    """Return the built-in print function itself."""
    return print
test()("hello")
# Default parameter values (shadows the earlier add)
def add(n1, n2, digit=2, mul=1):
    """Return mul * round(n1 + n2, digit)."""
    return mul * round(n1+n2, digit)
print(add(3.2, 5.12341234, mul=2))
|
from hexcell import HexCell
if __name__ == "__main__":
    # Sanity checks: hex-grid distances along known step sequences.
    c0 = HexCell(0, 0)
    assert 3 == c0.ne().ne().ne().distance(c0)
    assert 0 == c0.ne().ne().sw().sw().distance(c0)
    assert 2 == c0.ne().ne().s().s().distance(c0)
    assert 3 == c0.se().sw().se().sw().sw().distance(c0)
    # follow path in input data, tracking both the final distance (part 1)
    # and the farthest distance ever reached (part 2)
    with open('day11.txt') as f:
        steps = f.read().strip().split(',')
    c = origin = HexCell(0, 0)
    max_distance = 0
    for step in steps:
        # Each step name ("ne", "sw", ...) is a HexCell method; dispatch by name.
        c = getattr(c, step)()
        max_distance = max(max_distance, c.distance(origin))
    print("day 11 part 1", c.distance(origin))
    print("day 11 part 2", max_distance)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from h import models
from .base import ModelFactory
class FeatureCohort(ModelFactory):
    """Factory producing h.models.FeatureCohort instances for tests."""
    class Meta:
        model = models.FeatureCohort
|
# RootTools imports
from RootTools.core.TreeVariable import TreeVariable, VectorTreeVariable, ScalarTreeVariable
from RootTools.core.helpers import cStringTypeDict, defaultCTypeDict
def getCTypeString(typeString):
    '''Translate a ROOT branch-description shortcut (e.g. 'F', 'I') to the
    corresponding C type string.

    Raises an Exception for unknown shortcuts.
    '''
    if typeString in cStringTypeDict:
        return cStringTypeDict[typeString]
    # BUGFIX: the error message previously read "Cann ot determine ...".
    raise Exception( "Can not determine C type for type '%s'"%typeString )
def getCDefaultString(typeString):
    '''Return the default-value string for a ROOT branch-description shortcut.

    Raises an Exception for unknown shortcuts.
    '''
    if typeString in defaultCTypeDict:
        return defaultCTypeDict[typeString]
    # BUGFIX: message used to claim "C type" although this function looks up
    # the C *default* value.
    raise Exception( "Can not determine C default for type '%s'"%typeString )
def createClassString(variables, useSTDVectors = False, addVectorCounters = False):
    '''Create class string from scalar and vector variables

    Emits C++ source for a branch-holder class with one member per scalar
    and per vector component, plus an init() that resets everything to the
    type's default value.
    '''
    vectors = [v for v in variables if isinstance(v, VectorTreeVariable) ]
    scalars = [s for s in variables if isinstance(s, ScalarTreeVariable) ]
    # Adding default counterVariable 'nVectorname/I' if specified
    if addVectorCounters: scalars += [v.counterVariable() for v in vectors]
    # for removing duplicates:
    declared_components = []
    # Create the class string
    scalarDeclaration = ""
    scalarInitString = ""
    for scalar in scalars:
        # checking for duplicates.
        # This is necessary since I can't define __eq__ for Variables ignoring the filler function
        # The filler function makes variables be different when their class name is identical
        # Safer to check for duplicate class names at the lowest level
        if scalar.name in declared_components:
            continue
        else:
            declared_components.append(scalar.name)
        scalarDeclaration += " %s %s;\n"% ( getCTypeString(scalar.type), scalar.name )
        scalarInitString += " %s = %s;\n"%( scalar.name, getCDefaultString(scalar.type) )
    vectorDeclaration = ""
    vectorInitString = ""
    if useSTDVectors:
        for vector in vectors:
            for c in vector.components:
                if c.name in declared_components:
                    continue
                else:
                    declared_components.append(c.name)
                # FIXME Rewritten, but never actually checked for std vectors
                vectorDeclaration += " std::vector< %s > %s;\n" % ( getCTypeString(c.type), c.name)
                vectorInitString += " %s.clear();\n" % c.name
    else:
        for vector in vectors:
            # C arrays need a fixed size; nMax is mandatory in this branch.
            if not hasattr( vector, 'nMax' ):
                raise ValueError ("Vector definition needs nMax if using C arrays: %r"%vector)
            vectorCompInitString = ""
            for c in vector.components:
                if c.name in declared_components:
                    continue
                else:
                    declared_components.append(c.name)
                vectorDeclaration += " %s %s[%3i];\n" % ( getCTypeString(c.type), c.name, vector.nMax)
                vectorCompInitString += " %s[i] = %15s;\n"%(c.name, getCDefaultString(c.type))
            if vectorCompInitString != "":
                vectorInitString += """\n for(UInt_t i=0;i<{nMax};i++){{\n{vectorCompInitString} }}; //End for loop"""\
                    .format(nMax = vector.nMax, vectorCompInitString = vectorCompInitString)
    # NOTE(review): the literal class name "className" is not substituted —
    # presumably the caller string-replaces it; confirm.
    return \
"""#ifndef __className__
#define __className__
#include<vector>
#include<TMath.h>
class className{{
public:
{scalarDeclaration}
{vectorDeclaration}
void init(){{
{scalarInitString}
{vectorInitString}
}}; // End init
}}; // End class declaration
#endif""".format(scalarDeclaration = scalarDeclaration,\
    scalarInitString = scalarInitString, vectorDeclaration=vectorDeclaration,
    vectorInitString=vectorInitString)
|
import json
import pickle
import time
import copy
import os
import numpy as np
import cv2
from sklearn.linear_model import LinearRegression
# from sklearn.utils.linear_assignment_ import linear_assignment
from res.fields import get_mask_movements_heatmaps
import matplotlib
if os.name == 'nt':
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
class Track(object):
    """One tracked object: its id, the frame numbers it was matched in and
    the per-frame detection items."""
    def __init__(self, tracking_id, frame_cnt, item):
        self.tracking_id = tracking_id
        self.frames = [frame_cnt]           # frames this track was matched in
        item['tracking_id'] = tracking_id   # tag the detection with our id
        self.items = [item]
        self.frame = None
    def assign(self, frame_cnt, item):
        """Attach a newly matched detection to this track."""
        self.frames.append(frame_cnt)
        item['tracking_id'] = self.tracking_id
        self.items.append(item)
    def last(self):
        """Return the most recent detection item."""
        return self.items[-1]
    def is_alive(self, frame_count):
        """True while the track was matched within the last 5 frames.

        BUGFIX: previously returned None (implicitly) instead of False for
        dead tracks; now always returns a bool.  Truthiness is unchanged
        for existing callers.
        """
        return frame_count - self.frames[-1] < 5
class Tracker(object):
    """Greedy IoU tracker that matches detections frame-to-frame and, when a
    track dies, converts it into a counting entry for one movement path."""
    def __init__(self, init_time, vid_id, max_frames, camera_id, width, height, new_thresh=0.4, track_thresh=0.2, debug=0, print_stdout=True):
        self.init_time = init_time          # wall-clock start, for gen_time
        self.vid_id = vid_id
        self.width = width
        self.height = height
        self.max_frames = max_frames
        self.new_thresh = new_thresh        # min score to start a new track
        self.track_thresh = track_thresh    # min score to keep a detection
        self.debug = debug
        self.outputs = []
        self.print_stdout = print_stdout
        # Per-camera movement masks and heatmaps; presumably the proportion
        # heatmaps encode progress along each movement path — TODO confirm.
        self.movements, self.corners, self.distance_heatmaps, self.proportion_heatmaps = get_mask_movements_heatmaps(camera_id, height, width)
        # for i in range(len(self.distance_heatmaps)):
        #     cv2.imshow("distance", self.distance_heatmaps[i]/ np.max(self.distance_heatmaps[i]))
        #     cv2.imshow("proportion", self.proportion_heatmaps[i])
        #     cv2.waitKey(0)
        self.id_count = 0
        self.frame_count = 0
        self.tracks = []
    def reset(self):
        """Clear all per-video state."""
        self.id_count = 0
        self.frame_count = 0
        self.tracks = []
        self.outputs = []
    def filter_results(self, results):
        """Keep car/truck/bus detections above threshold and suppress
        overlapping buses (into trucks) and trucks (into cars)."""
        results_cars = [item for item in results if item['class'] == 3 and item['score'] > self.track_thresh]
        results_trucks = [item for item in results if item['class'] == 8 and item['score'] > self.track_thresh]
        results_buses = [item for item in results if item['class'] == 6 and item['score'] > self.track_thresh]
        # NMS buses -> trucks
        bbox_cars = np.array([item['bbox'] for item in results_cars]).reshape(-1, 4)
        bbox_trucks = np.array([item['bbox'] for item in results_trucks]).reshape(-1, 4)
        bbox_buses = np.array([item['bbox'] for item in results_buses]).reshape(-1, 4)
        if len(results_buses) > 0:
            iou_buses_trucks = iou(bbox_trucks, bbox_buses)
            # A bus survives only if it overlaps no truck with IoU >= 0.7.
            good_buses = np.all(iou_buses_trucks < 0.7, axis=0)
            results_trucks.extend([results_buses[i] for i in range(len(results_buses)) if good_buses[i]])
            bbox_trucks = np.row_stack([bbox_trucks, bbox_buses[good_buses, :]])
        # NMS trucks -> cars
        if len(results_trucks) > 0:
            iou_cars_trucks = iou(bbox_cars, bbox_trucks)
            good_trucks = np.all(iou_cars_trucks < 0.7, axis=0)
            results_cars.extend([results_trucks[i] for i in range(len(results_trucks)) if good_trucks[i]])
        return results_cars
    def debug_track(self, pos_x, pos_y, corner_x, corner_y, color=(0, 255, 0), save=False):
        """Visualize track centers (green) and corner points (red)."""
        vis = np.copy(self.frame)
        for i in range(len(pos_x)):
            vis = cv2.circle(vis, (pos_x[i], pos_y[i]), 9, color=(0, 255, 0), thickness=-1)
            vis = cv2.circle(vis, (corner_x[i], corner_y[i]), 9, color=(0, 0, 255), thickness=-1)
        cv2.imshow("Track debug", vis)
        if save:
            cv2.imwrite("track.png", vis)
        cv2.waitKey(0)
    def generate_entry(self, track):
        """Assign a finished track to its best movement path and emit a
        counting line '<gen_time> <vid_id> <frame> <path> <class>'."""
        # Too short to classify reliably.
        if len(track.frames) < 15:
            return
        positions = np.array([item['ct'] for item in track.items]).astype(np.int32)
        positions[:, 0] = np.clip(positions[:, 0], 0, self.width - 1)
        positions[:, 1] = np.clip(positions[:, 1], 0, self.height - 1)
        distances = self.distance_heatmaps[:, positions[:, 1], positions[:, 0]]
        proportions = self.proportion_heatmaps[:, positions[:, 1], positions[:, 0]]
        mean_distances = np.mean(distances, axis=-1)
        # Penalize paths traversed in the wrong direction.
        mean_distances += 1e18 * (proportions[:, 0] > proportions[:, -1])
        std_distances = np.std(distances, axis=-1)
        path = np.argmin(mean_distances + 3 * std_distances)
        proportions = proportions[path]
        corner_positiots_x = np.array([item['bbox'][self.corners[path, 0]] for item in track.items], dtype=np.int32)
        corner_positiots_y = np.array([item['bbox'][self.corners[path, 1]] for item in track.items], dtype=np.int32)
        corner_positiots_x = np.clip(corner_positiots_x, 0, self.width - 1)
        corner_positiots_y = np.clip(corner_positiots_y, 0, self.height - 1)
        proportions_corners = self.proportion_heatmaps[path, corner_positiots_y, corner_positiots_x]
        # Truncate at the point of maximal progress along the path.
        proportions_end = np.argmax(proportions_corners) + 1
        proportions_corners = proportions_corners[:proportions_end]
        times = np.array(track.frames)[:proportions_end]
        if np.max(proportions) < 0.3 or np.max(proportions) - np.min(proportions) < 0.25 * min(self.frame_count / 50, 1) or len(proportions_corners) < 5:
            if self.debug > 0:
                self.debug_track(positions[:, 0], positions[:, 1], corner_positiots_x, corner_positiots_y, color=(0, 0, 255))
            return
        # Linear fit of path progress vs frame over the last 8 samples,
        # extrapolated to progress == 1 to predict the exit frame.
        regr = LinearRegression()
        regr.fit(times[-8:].reshape(-1, 1), proportions_corners[-8:].reshape(-1, 1))
        if self.debug > 1:
            plt.plot(times, proportions_corners)
            plt.plot(times[:, np.newaxis], regr.predict(times[:, np.newaxis]))
            if self.debug > 2:
                print(times, proportions_corners)
                print(times[:, np.newaxis], regr.predict(times[:, np.newaxis]))
            plt.show()
        if regr.coef_ <= 0.0:
            return
        projected_last_frame = ((1 - regr.intercept_) / regr.coef_)[0]
        if projected_last_frame > self.max_frames:
            return
        # Majority vote on vehicle class: 2 = truck/bus, 1 = car.
        truck_num = sum([item['class'] == 6 or item['class'] == 8 for item in track.items])
        cls = 2 if truck_num / len(track.frames) > 0.3 else 1
        gen_time = time.time() - self.init_time
        output = '{} {} {} {} {}'.format(gen_time, self.vid_id, np.int32(projected_last_frame[0]), path + 1, cls)
        if self.print_stdout:
            print(output)
        self.outputs.append(output)
        if self.debug > 0:
            if self.debug > 2:
                self.debug_track(positions[:, 0], positions[:, 1], corner_positiots_x, corner_positiots_y, save=True)
            else:
                self.debug_track(positions[:, 0], positions[:, 1], corner_positiots_x, corner_positiots_y)
    def step(self, results):
        """Advance one frame: match detections to tracks greedily by IoU,
        spawn new tracks, retire dead ones.  Returns live detections."""
        self.frame_count += 1
        results = self.filter_results(results)
        # results = self.add_sizes(results)
        track_bboxes = np.array([track.last()['bbox'] for track in self.tracks]).reshape(-1, 4)
        det_bboxes = np.array([item['bbox'] for item in results]).reshape(-1, 4)
        # Shift detections by their predicted motion offset before matching.
        det_bboxes += np.tile(np.array([item['tracking'] for item in results]), (1, 2)).reshape(-1, 4)
        ious = iou(track_bboxes, det_bboxes)
        # print(ious)
        matches = []
        unmatched_dets = []
        unmatched_tracks = np.ones(len(self.tracks), dtype=bool)
        if len(self.tracks) == 0:
            unmatched_dets = [i for i in range(len(results))]
        else:
            for j in range(len(results)):
                i = np.argmax(ious[:, j])
                if ious[i, j] > 0.1:
                    matches.append([i, j])
                    # Claim this track so it cannot match another detection.
                    ious[i, :] = 0.0
                    unmatched_tracks[i] = False
                else:
                    unmatched_dets.append(j)
        for m in matches:
            self.tracks[m[0]].assign(self.frame_count, results[m[1]])
        for i in reversed(unmatched_dets):
            item = results[i]
            if item['score'] > self.new_thresh:
                self.id_count += 1
                track = Track(self.id_count, self.frame_count, item)
                self.tracks.append(track)
        # Iterate in reverse so deletion by index stays valid.
        for i, val in reversed(list(enumerate(unmatched_tracks))):
            if val:
                track = self.tracks[i]
                if not track.is_alive(self.frame_count):
                    self.generate_entry(track)
                    del self.tracks[i]
        ret = [track.last() for track in self.tracks if track.is_alive(self.frame_count)]
        return ret
    def finalize(self):
        """Flush every remaining track into a counting entry."""
        for track in self.tracks:
            self.generate_entry(track)
class WriterTracker(object):
    """Tracker stand-in that only records per-frame vehicle detections and
    pickles the whole list once the final frame has been seen."""
    def __init__(self, json_path, max_frames):
        self.json_path = json_path      # output pickle path
        self.max_frames = max_frames    # frame count that triggers save()
        self.frame_count = 0
        self.results_list = []
    def step(self, results, public_det=None):
        """Keep only car/bus/truck detections (classes 3, 6, 8) this frame."""
        self.frame_count += 1
        wanted = (3, 6, 8)
        kept = [det for det in results if det['class'] in wanted]
        self.results_list.append(kept)
        if self.frame_count >= self.max_frames:
            self.save()
        return kept
    def save(self):
        """Serialize everything collected so far to json_path."""
        with open(self.json_path, 'wb') as f:
            pickle.dump(self.results_list, f)
    def reset(self):
        """Nothing to reset beyond what __init__ already set."""
        pass
def iou(A, B):
    """Pairwise IoU of two sets of [x1, y1, x2, y2] boxes.

    Returns an array of shape (len(A), len(B)); empty inputs yield an
    empty array of the matching shape.
    """
    if len(A) == 0 or len(B) == 0:
        return np.array([[]]).reshape(len(A), len(B))
    # Broadcast A against B by giving A a middle axis of size 1.
    lo = np.maximum(A[:, np.newaxis, :2], B[:, :2])
    hi = np.minimum(A[:, np.newaxis, 2:], B[:, 2:])
    inter = np.maximum(0, hi - lo).prod(-1)
    area_a = (A[:, np.newaxis, 2:] - A[:, np.newaxis, :2]).prod(-1)
    area_b = (B[:, 2:] - B[:, :2]).prod(-1)
    # Small epsilon guards against zero-area unions.
    return inter / (area_a + area_b - inter + 1e-12)
def greedy_assignment(dist):
    """Greedily match each row of *dist* to a column, each column at most
    once.  Returns an (n, 2) int32 array of [row, col] pairs.

    NOTE(review): dist[i].argmin() picks the *smallest* entry in the row,
    yet the match is accepted only when that entry exceeds 0.5 — verify
    whether dist holds distances (comparison looks inverted) or
    similarities (argmin looks wrong).  Mutates *dist* in place.
    """
    matched_indices = []
    if dist.shape[1] == 0:
        return np.array(matched_indices, np.int32).reshape(-1, 2)
    for i in range(dist.shape[0]):
        j = dist[i].argmin()
        if dist[i][j] > 0.5:
            # Zero the column so no later row can claim it again.
            dist[:, j] = 0
            matched_indices.append([i, j])
    return np.array(matched_indices, np.int32).reshape(-1, 2)
|
# Google Code Jam "Counting Sheep" (Python 2): for each case, keep adding
# multiples of N until every digit 0-9 has been seen; N == 0 never ends.
test = raw_input()
test = int(test)
for t in range(1,test+1):
    error = 1
    n = raw_input()
    nint = int(n)
    # numbers[d] counts how often digit d has appeared so far.
    numbers = {}
    for i in range(0,10):
        numbers[i] = 0
    # print numbers
    if(nint==0):
        print "Case #"+str(t)+": INSOMNIA"
    else:
        k = 1
        while(1):
            error = 0
            # Record the digits of the current multiple.
            for c in n:
                numbers[int(c)] = numbers[int(c)] + 1
            # error stays 0 only when all ten digits have been seen.
            for key in numbers.keys():
                if(numbers[key]<1):
                    error = 1
            if(error == 0):
                print "Case #"+str(t)+": "+str(n)
                break
            else:
                k = k + 1
                n = str(nint * k)
                # print n
                # print numbers
|
import random
# Deterministic board generation: fixed seed so the same maze is produced
# on every run.
random.seed(20)
gridMax = 18
gridDimX = random.randint(8, gridMax)
gridDimY = random.randint(8, gridMax)
energy = random.randint(4, 30)
# print("X: ", x)
# print("Y: ", y)
# Random start position strictly inside the grid.
startX = random.randint(1, gridDimX - 1)
startY = random.randint(1, gridDimY - 1)
curPosX = startX
curPosY = startY
print("startX: ", startX)
print("startY: ", startY)
print("gridDimX: ", gridDimX)
print("gridDimY: ", gridDimY)
# Cell values: 0 empty, 1 wall/trail, 2 current/start marker, letters = items.
board = [[0 for x in range(gridDimX)] for y in range(gridDimY)]
board[curPosY][curPosX] = 2
directionList = []
lastDirection = 0
def printBoard():
    """Dump the board to stdout, one row per line, followed by a blank line."""
    for row in range(gridDimY):
        print(''.join(str(board[row][col]) for col in range(gridDimX)))
    print()
def potentiallyPlaceLetter():
    """Sometimes drop a random lowercase letter on the current tile.

    Roll 0-11: 10-11 places a letter from n-z, exactly 9 places one from
    a-m, anything else leaves the tile untouched.  The RNG call order
    (one roll, then at most one letter draw) is part of the behaviour.
    """
    roll = random.randint(0, 11)
    if roll > 9:
        board[curPosY][curPosX] = str(chr(random.randint(110, 122)))
    elif roll == 9:
        board[curPosY][curPosX] = str(chr(random.randint(97, 109)))
# Carve 20 random straight runs across the board, never reversing or
# continuing along the previous axis, bouncing off edges, and marking the
# last tile of each run with a wall (1) unless it would hit the start.
for j in range(20):
    newDirection = random.randint(1,4)   # 1=up 2=right 3=down 4=left
    tilesToMove = random.randint(4,7)
    board[curPosY][curPosX] = 0
    # Force a perpendicular direction relative to the previous run.
    while newDirection == lastDirection or newDirection % 2 == lastDirection % 2:
        newDirection = random.randint(1,4)
    # Edge handling: when hugging a border, steer away from it.
    if curPosX == 0:
        if(lastDirection != 1 and lastDirection != 3):
            newDirection = random.randint(1,2)
            if(newDirection == 2):
                newDirection = 3
        else:
            newDirection = 2
    if curPosY == 0:
        if(lastDirection != 2 and lastDirection != 4):
            newDirection = random.randint(2,3)
            if(newDirection == 3):
                newDirection = 4
        else:
            newDirection = 3
    if curPosX == gridDimX -1:
        if(lastDirection != 1 and lastDirection != 3):
            newDirection = random.randint(1,2)
            if(newDirection == 2):
                newDirection = 3
        else:
            newDirection = 4
    if curPosY == gridDimY - 1:
        if(lastDirection != 2 and lastDirection != 4):
            newDirection = random.randint(2,3)
            if(newDirection == 3):
                newDirection = 4
        else:
            newDirection = 1
    # Walk tilesToMove tiles; the final tile becomes a wall unless that
    # would overwrite the start, in which case the run is extended by one.
    if newDirection == 1:
        directionList.append("up")
        for i in range(tilesToMove):
            if curPosY - 1 >= 0 and board[curPosY -1][curPosX] != 1:
                if i == tilesToMove - 1:
                    if curPosY - 1 == startY and curPosX == startX:
                        tilesToMove = tilesToMove + 1
                    else:
                        board[curPosY - 1][curPosX] = 1
                else:
                    potentiallyPlaceLetter()
                curPosY -= 1
    elif newDirection == 2:
        directionList.append("right")
        for i in range(tilesToMove):
            if curPosX + 1 < gridDimX and board[curPosY][curPosX +1] != 1:
                if i == tilesToMove - 1:
                    if curPosY == startY and curPosX + 1== startX:
                        tilesToMove = tilesToMove + 1
                    else:
                        board[curPosY][curPosX + 1] = 1
                else:
                    potentiallyPlaceLetter()
                curPosX += 1
    elif newDirection == 3:
        directionList.append("down")
        for i in range(tilesToMove):
            if curPosY + 1 < gridDimY and board[curPosY +1][curPosX] != 1:
                if i == tilesToMove - 1:
                    if curPosY + 1 == startY and curPosX == startX:
                        tilesToMove = tilesToMove + 1
                    else:
                        board[curPosY + 1][curPosX] = 1
                else:
                    potentiallyPlaceLetter()
                curPosY += 1
    elif newDirection == 4:
        directionList.append("left")
        for i in range(tilesToMove):
            if curPosX - 1 >= 0 and board[curPosY][curPosX -1] != 1:
                if i == tilesToMove - 1:
                    if curPosY == startY and curPosX - 1 == startX:
                        tilesToMove = tilesToMove + 1
                    else:
                        board[curPosY][curPosX - 1] = 1
                else:
                    potentiallyPlaceLetter()
                curPosX -= 1
    lastDirection = newDirection
    board[curPosY][curPosX] = 2
    print(directionList[-1])
    printBoard()
# Final output: start marked with 2, then the puzzle header and board.
board[curPosY][curPosX] = 0
board[startY][startX] = 2
#print(gridDimX, end = '')
print(gridDimX,gridDimY,energy)
for i in range(gridDimY):
    for j in range(gridDimX):
        print(board[i][j], end = '')
    print()
for i in range(len(directionList)):
    print(directionList[i])
|
#! /usr/bin/env python
# coding: utf-8
"""
Module with text preparation functionality.
"""
import re
import string
import numpy as np
import pandas as pd
from src import *
__all__ = ['pre_clean', 'remove_things', 'agressive_clean', 'clean_text',
'tokenize_text', 'join_string']
def pre_clean(text):
    """ Remove spaces, break lines, empty spaces and strings and encode to ascii
    Parameters
    ----------
    text : list
    Returns
    -------
    text : list
    """
    # Pre-cleaning: remove/replace break lines,
    # empty spaces and empty strings
    text = [elem.replace('\n', ' ') for elem in text]
    text = [elem.replace('\t', ' ') for elem in text]
    text = [elem.replace('\r', ' ') for elem in text]
    text = [elem.strip() for elem in text]
    text = filter(None, text)
    # Decode unicode
    # NOTE(review): bare except silently keeps the un-encoded text whenever
    # any element fails; consider narrowing to UnicodeError.
    try:
        text = [elem.encode('utf-8') for elem in text]
        text = [elem.decode('unicode_escape').encode('ascii','ignore')
                for elem in text]
    except:
        text = text
    return text
def remove_things(text):
    """ Remove duplicates, numbers, URLs, links and emails
    Parameters
    ----------
    text : list
    Returns
    -------
    text : list
    """
    # Drop duplicates while keeping the order of first appearance.
    seen = set()
    deduped = []
    for txt in text:
        if txt not in seen:
            seen.add(txt)
            deduped.append(txt)
    # Strip digits, then URLs/links, then e-mail addresses (in that order).
    cleaned = deduped
    for pattern in (r'\d+', r'http\S+', r'www\S+', r'\S+@\S+'):
        cleaned = [re.sub(pattern, '', txt) for txt in cleaned]
    return cleaned
def agressive_clean(df, len_thr):
    """ Strict rule to clean the text
    Parameters
    ----------
    df : dataframe
    len_thr: integer
        Threshold for number of words in a list item.
    Returns
    -------
    df : dataframe
    """
    df['agressive_clean'] = pd.Series('', index=df.index)
    for i in range(0, df.shape[0]):
        text = df['rawtext'][i]
        # Only rows whose scraping succeeded carry usable raw text.
        if (df['scraping'][i] == 1):
            # Pre-cleaning steps
            text = pre_clean(text)
            # Rule for defining sentences:
            # start with capital and end with full stop
            text = [elem for elem in text if
                    ((elem[0].isupper()) & (elem[-1] == '.'))]
            # Remove duplicates, numbers, URLs, links and emails
            text = remove_things(text)
            # Remove list items with less than len_thr words
            text = [txt for txt in text if len(txt.split()) > len_thr]
            df['agressive_clean'][i] = text
    return df
def clean_text(df, len_thr):
    """ Cleaning of the text
    Parameters
    ----------
    df : dataframe
    len_thr: integer
        Threshold for number of words in a list item.
    Returns
    -------
    df : dataframe
    """
    # Boilerplate markers used to drop navigation/footer lines.
    KEYS = ['copyright', 'click here', 'cookies',
            'cookie policy', 'sitemap', 'website by',
            'website design by', 'all rights reserved', '|']
    df['clean_text'] = pd.Series('', index=df.index)
    for i in range(0, df.shape[0]):
        text = df['rawtext'][i]
        if (df['scraping'][i] == 1):
            # Pre-cleaning steps
            text = pre_clean(text)
            # Rules for defining sentences:
            # filter out using keywords and
            # words with capital letters in the middle of non-capital ones
            _clean_text = []
            for j in range(0, len(text)):
                if (len(re.findall(r'[a-z][A-Z][a-z]', text[j])) == 0):
                    if not any(key in text[j].lower() for key in KEYS):
                        _clean_text.append(text[j])
            text = _clean_text
            # Remove duplicates, numbers, URLs, links and emails
            text = remove_things(text)
            # Remove list items with less than len_thr words
            text = [txt for txt in text if len(txt.split()) > len_thr]
            df['clean_text'][i] = text
    return df
def tokenize_text(df, len_thr, opt):
    """ Tokenization of the text
    Parameters
    ----------
    df : dataframe
    len_thr: integer
        Threshold for the number of tokens.
    opt: string
        Define whether agressive or normal cleaning was used,
        can be either 'norm' or 'agre'.
    Returns
    -------
    df : dataframe
    """
    from nltk.corpus import stopwords
    from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
    from spacy.en import English
    if (opt == 'norm'):
        df['tokens'] = pd.Series('', index=df.index)
    elif (opt == 'agre'):
        df['tokens_agressive'] = pd.Series('', index=df.index)
    else:
        print 'Wrong opt parameter!'
        return df
    parser = English()
    for i in range(0, df.shape[0]):
        if (df['scraping'][i] == 1):
            if (opt == 'norm'):
                text = df['clean_text'][i]
            else:
                text = df['agressive_clean'][i]
            # Get the tokens using spaCy
            text = unicode(text)
            tokens = parser(text)
            # Stemming/Lemmatizing (keep pronouns as-is)
            lemmas = []
            for tok in tokens:
                lemmas.append(tok.lemma_.lower().strip()
                              if tok.lemma_ != "-PRON-"
                              else tok.lower_)
            tokens = lemmas
            # Remove stopwords
            # NOTE(review): unicode(...) of a list yields one big string, so
            # "tok not in STOPLIST" is a substring test rather than list
            # membership — confirm this is intended.
            STOPLIST = unicode(stopwords.words('english') +
                               ["n't", "'s", "'m", "ca"] +
                               list(ENGLISH_STOP_WORDS))
            tokens = [tok for tok in tokens if tok not in STOPLIST]
            # Remove some punctuation
            SYMBOLS = unicode(["-----", "---", "...", "“", "”", "'ve"])
            tokens = [tok for tok in tokens if tok not in SYMBOLS]
            # Remove punctuation and some strange things
            SYMBOLS = unicode(" ".join(string.punctuation).split(" "))
            for sym in SYMBOLS:
                tokens = [tok.replace(sym, '') for tok in tokens]
            # Remove whitespace
            while "" in tokens:
                tokens.remove("")
            while " " in tokens:
                tokens.remove(" ")
            while "\n" in tokens:
                tokens.remove("\n")
            while "\n\n" in tokens:
                tokens.remove("\n\n")
            # Remove tokens with less than len_thr words
            if ((opt == 'norm') & (len(tokens) > len_thr)):
                df['tokens'][i] = tokens
            elif ((opt == 'agre') & (len(tokens) > len_thr)):
                df['tokens_agressive'][i] = tokens
            else:
                df['tokens'][i] = ''
    return df
def join_string(df, opt):
""" Join the list of words (tokens) into a single string
Parameters
----------
df : dataframe
opt: string
Define whether agressive or normal cleaning was used,
can be either 'norm' or 'agre'.
Returns
-------
df : dataframe
"""
if (opt == 'norm'):
df['joined_tokens'] = pd.Series('', index=df.index)
elif (opt == 'agre'):
df['joined_tokens_agre'] = pd.Series('', index=df.index)
else:
print 'Wrong opt parameter!'
return df
for i in range(0, df.shape[0]):
if (df['scraping'][i] == 1):
# Create single string
if (opt == 'norm'):
tokens = df['tokens'][i]
else:
tokens = df['tokens_agressive'][i]
text = ''
for j in range(0, len(tokens)):
if (j == 0):
text += ''.join(tokens[j])
else:
text += ''.join(' ' + tokens[j])
if (opt == 'norm'):
df['joined_tokens'][i] = str(text)
else:
df['joined_tokens_agre'][i] = str(text)
return df
|
fin = open("linear.in", "r")
fout = open("linear.out", "w")
#main----------------------------------------------
# Input layout: n, then n (coordinate, value) pairs, then query times.
main_mass = [int(i) for i in fin.read().split()]
n = main_mass[0]
mass_times = main_mass[(main_mass[0]*2) + 2:]
time = []
cor = main_mass[1 : (main_mass[0]*2) + 1 : 2]
massiv_s_n = main_mass[0 : (main_mass[0]*2) + 1 : 2]
quantum = massiv_s_n[1:len(massiv_s_n)]
#counting time-------------------------------------
# Record the meeting time of each (+,-) neighbour pair: half the gap.
# NOTE(review): both branches below are identical, so the parity split on
# len(quantum) has no effect — verify the intended distinction.
if len(quantum) % 2 == 0:
    for i in range(len(quantum)-1):
        if quantum[i+1]<0 and quantum[i]>0:
            time.append(abs((cor[i+1] - cor[i])/2))
elif len(quantum) % 2 != 0:
    for i in range(len(quantum)-1):
        if quantum[i]>0 and quantum[i+1]<0:
            time.append(abs((cor[i+1] - cor[i])/2))
time.sort()
#counting and output-------------------------------
# For each query time, every elapsed meeting removes two particles.
i = 0
for moment in mass_times:
    while i < len(time) and moment >= time[i]:
        i += 1
    fout.write(str(n - (i * 2)) + "\n")
from sqlalchemy import Table, Column, Integer, String
from tools.dbconnect import engine,Session
from sqlalchemy.ext.declarative import declarative_base
import settings
Base = declarative_base()  # declarative base shared by models in this module
class DBVersion(Base):
    """ORM model: single-row table holding the current database schema version."""
    __tablename__ = 'dbversion'
    id = Column(Integer, primary_key=True)  # surrogate primary key
    Version = Column(Integer)  # schema version (seeded from settings.versions below)
# Create the dbversion table if missing, then seed/refresh its single row
# with the newest known version from settings.
Base.metadata.create_all(engine)
session = Session()
try:
    record = session.query(DBVersion).first()
    if not record:
        record = DBVersion()
    try:
        record.Version = max(settings.versions.keys())
    except Exception:
        # BUG FIX: narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit).  Best-effort fallback kept: default
        # to version 1 when settings.versions is missing or empty.
        record.Version = 1
    session.add(record)
    session.commit()
finally:
    # BUG FIX: close the session even when commit raises.
    session.close()
from django.contrib import admin
from . models import *
# Register your models here.
# All storefront models get the stock ModelAdmin change/list pages; a loop
# keeps the registration list in one place.
for _model in (customer, order, product, review,
               ordered_item, category, shipping, contact):
    admin.site.register(_model)
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import os
from openpyxl import Workbook
from selenium.webdriver.common.by import By
from docx import Document
# Scrape one Le Parisien article (URL supplied interactively) into article.docx.
option = webdriver.ChromeOptions()
option.add_argument("--incognito")
# BUG FIX: Chrome uses the American spelling; "--start-maximised" was
# silently ignored (the maximize_window() call below papered over it).
option.add_argument("--start-maximized")
browser = webdriver.Chrome("./chromedriver", options=option)
Article = input("Bienvenue : Veuillez coller l'URL de l'article du parisien que vous souhaitez récupérer")
browser.get(Article)
time.sleep(2)  # let the page and the consent banner render
browser.maximize_window()
# Dismiss the Didomi cookie-consent dialog.
# NOTE(review): find_element_by_* was removed in Selenium >= 4.3; switch to
# browser.find_element(By.ID, ...) (By is already imported) if upgrading.
bouton = browser.find_element_by_id('didomi-notice-agree-button')
bouton.click()
# Grab the article body and headline by their layout classes.
Texte = browser.find_element_by_xpath("//div[@class='article-section margin_bottom_article']").text
Titre = browser.find_element_by_xpath("//h1[@class='title_xl col margin_bottom_headline']").text
# Write the scraped content to a Word document.
document = Document()
document.add_heading(Titre, 0)
document.add_paragraph(Texte)
document.save('article.docx')
|
"""
MicroPython Aosong DHT12 I2C driver
"""
class DHTBaseI2C:
    """Base I2C driver for Aosong DHT-family sensors.

    Reads the 5-byte measurement frame (humidity int/frac, temperature
    int/frac, checksum) into ``self.buf``; subclasses decode it.
    """
    def __init__(self, i2c=None, addr=0x5c):
        # Idiom fix: identity comparison with None instead of `==`.
        if i2c is None:
            # Default bus on the pins this project wires the sensor to.
            from machine import I2C
            self.i2c = I2C(sda=21, scl=22)
        else:
            self.i2c = i2c
        self.addr = addr          # 7-bit I2C address (DHT12 default is 0x5c)
        self.buf = bytearray(5)   # raw measurement frame
    def measure(self):
        """Read one frame from the sensor and verify its additive checksum."""
        buf = self.buf
        self.i2c.readfrom_mem_into(self.addr, 0, buf)
        if (buf[0] + buf[1] + buf[2] + buf[3]) & 0xff != buf[4]:
            raise Exception("checksum error")
class DHT12(DHTBaseI2C):
    """Decode the DHT12 frame captured by measure() into physical units."""
    def humidity(self):
        # Byte 0 holds the integer %RH, byte 1 the tenths digit.
        whole, tenths = self.buf[0], self.buf[1]
        return whole + tenths * 0.1
    def temperature(self):
        # Byte 2 is the integer part; byte 3 carries tenths in bits 0-6
        # and the sign in bit 7.
        raw = self.buf[3]
        value = self.buf[2] + (raw & 0x7f) * 0.1
        return -value if raw & 0x80 else value
|
import sys
import random
import signal
from time import time
import copy
from operator import itemgetter
class Team65():
    """Ultimate tic-tac-toe bot: iterative-deepening minimax with alpha-beta
    pruning, per-block Zobrist hashes and a memo of per-block heuristic scores.

    NOTE(review): the code appears written for Python 2 (integer `/` division
    on board indices) and mixes two board APIs -- update()/heuristic() use
    board.board_status / 4x4 block_status, while init_zobrist() reads
    board.big_boards_status (two boards of 3x3 blocks).  Confirm the target
    framework version before reuse.
    """

    def __init__(self):
        self.available_moves = []            # legal cells for the current turn
        self.backup_move = (0, 0)
        self.up = [-1, 0, 1, 0]              # diamond-pattern row offsets
        self.down = [0, 1, 0, -1]            # diamond-pattern column offsets
        self.inc_costs = [0, 1, 100, 10000, 100000]  # score for k marks in a line
        self.INF = 1000000000
        self.initial_level = 2               # first iterative-deepening depth
        self.endtime = 14                    # per-move time budget (seconds)
        self.starttime = 0
        self.max_player = 1                  # our index into map_symbol
        self.map_symbol = ['o', 'x']
        self.blk_zob = []                    # Zobrist basis values
        self.blk_hash = []                   # [board][3][3] block hashes
        self.num_blks_won = [0, 0]           # bonus-move toggles per player
        self.maxlen = 0                      # high-water mark of the memo table
        self.mindepth = 9
        self.last_blk_won = 0
        for j in range(2):
            col = []
            for i in range(3):
                col.append([0] * 3)
            self.blk_hash.append(col)
        self.numsteps = 0
        # 18 cells per block x 2 players -> 36 basis values (powers of two).
        for i in range(36):
            self.blk_zob.append(2 ** i)
        self.dict = {}                       # block hash -> cached heuristic score
        self.just_start = 1                  # first move of the game is random

    def init_zobrist(self, board):
        """Rebuild every block's Zobrist hash from scratch and clear the
        heuristic memo (incremental updates are disabled; see
        update_zobrist_block)."""
        self.dict = {}
        for i_1 in range(2):
            for i in range(3):
                for j in range(3):
                    cur_hash = 0
                    cnt = 0
                    for k_1 in range(2):
                        for k in range(3):
                            for l in range(3):
                                x = board.big_boards_status[k_1][3*i+k][3*j+l]
                                if (x == self.map_symbol[self.max_player]):
                                    cur_hash ^= self.blk_zob[2*cnt]
                                elif (x == self.map_symbol[(self.max_player) ^ 1]):
                                    cur_hash ^= self.blk_zob[2*cnt+1]
                                cnt += 1
                    self.blk_hash[i_1][i][j] = cur_hash

    def update(self, board, old_move, new_move, ply):
        """Place symbol `ply` at new_move and re-evaluate its 4x4 block.

        Returns ('SUCCESSFUL', True) when the block was just won by `ply`,
        ('SUCCESSFUL', False) otherwise (block still open, or drawn -- a
        drawn block is marked 'd').
        """
        board.board_status[new_move[0]][new_move[1]] = ply
        x = new_move[0]/4   # block row (Python 2 integer division)
        y = new_move[1]/4   # block column
        fl = 0
        bs = board.board_status
        # checking if a block has been won or drawn or not after the current move
        for i in range(4):
            # checking for horizontal pattern (i'th row)
            if (bs[4*x+i][4*y] == bs[4*x+i][4*y+1] == bs[4*x+i][4*y+2] == bs[4*x+i][4*y+3]) and (bs[4*x+i][4*y] == ply):
                board.block_status[x][y] = ply
                return 'SUCCESSFUL', True
            # checking for vertical pattern (i'th column)
            if (bs[4*x][4*y+i] == bs[4*x+1][4*y+i] == bs[4*x+2][4*y+i] == bs[4*x+3][4*y+i]) and (bs[4*x][4*y+i] == ply):
                board.block_status[x][y] = ply
                return 'SUCCESSFUL', True
        # checking for the four diamond patterns
        if (bs[4*x+1][4*y] == bs[4*x][4*y+1] == bs[4*x+2][4*y+1] == bs[4*x+1][4*y+2]) and (bs[4*x+1][4*y] == ply):
            board.block_status[x][y] = ply
            return 'SUCCESSFUL', True
        if (bs[4*x+1][4*y+1] == bs[4*x][4*y+2] == bs[4*x+2][4*y+2] == bs[4*x+1][4*y+3]) and (bs[4*x+1][4*y+1] == ply):
            board.block_status[x][y] = ply
            return 'SUCCESSFUL', True
        if (bs[4*x+2][4*y] == bs[4*x+1][4*y+1] == bs[4*x+3][4*y+1] == bs[4*x+2][4*y+2]) and (bs[4*x+2][4*y] == ply):
            board.block_status[x][y] = ply
            return 'SUCCESSFUL', True
        if (bs[4*x+2][4*y+1] == bs[4*x+1][4*y+2] == bs[4*x+3][4*y+2] == bs[4*x+2][4*y+3]) and (bs[4*x+2][4*y+1] == ply):
            board.block_status[x][y] = ply
            return 'SUCCESSFUL', True
        # any empty cell left?  otherwise the block is drawn
        for i in range(4):
            for j in range(4):
                if bs[4*x+i][4*y+j] == '-':
                    return 'SUCCESSFUL', False
        board.block_status[x][y] = 'd'
        return 'SUCCESSFUL', False

    def move(self, board, old_move, flag):
        """Entry point: choose a cell for symbol `flag` within the time budget.

        Runs iterative deepening until the clock expires.
        NOTE(review): the final two lines discard the search result
        (`prevans`) and return a random valid cell -- this matches debugging
        leftover (the commented-out original returned prevans); confirm
        before relying on playing strength.
        """
        self.starttime = time()
        if flag == "x":
            self.max_player = 1
        else:
            self.max_player = 0
        player = self.max_player
        level = self.initial_level
        self.timeup = 0
        self.init_zobrist(board)
        self.num_blks_won = [0, 0]
        if self.last_blk_won:
            self.num_blks_won[self.max_player] = 1
        self.available_moves = board.find_valid_move_cells(old_move)
        length = len(self.available_moves)
        prevans = self.available_moves[random.randrange(length)]
        if self.just_start == 1:
            # Opening move: play randomly, skip the search entirely.
            self.just_start = 0
            return prevans
        while (not self.timeup):
            self.init_zobrist(board)
            ans, val = self.move_minimax(board, old_move, player, level)
            self.maxlen = max(self.maxlen, len(self.dict))
            if (self.timeup):
                break
            prevans = ans
            level += 1
        cells = board.find_valid_move_cells(old_move)
        return cells[random.randrange(len(cells))]

    def update_zobrist_block(self, move, player):
        """Incremental Zobrist update for the block containing `move`.

        BUG FIX: the original body ended in a dangling ``x =`` -- a syntax
        error that made the whole module unimportable -- and its actual
        update logic was already commented out.  Hashes are rebuilt each
        turn by init_zobrist(), so this is now an explicit no-op, kept only
        because move_minimax()/minimax() call it symmetrically around each
        trial move."""
        pass

    def move_minimax(self, board, old_move, player, level):
        """One root ply: try every valid cell, search beneath it, and return
        (best_move, score).  Winning a block grants a bonus move (the same
        player searches again); the toggle in num_blks_won prevents chains
        from stacking.

        NOTE(review): returns the *last* move's score rather than the best
        one (maxval); move() ignores the score, so behavior is preserved
        as-is."""
        self.available_moves = board.find_valid_move_cells(old_move)
        length = len(self.available_moves)
        best_move = self.available_moves[random.randrange(length)]
        maxval = -self.INF
        temp = self.num_blks_won[player]
        for move in self.available_moves:
            self.num_blks_won[player] = temp
            self.update_zobrist_block(move, player)
            status, blk_won = self.update(board, old_move, move, self.map_symbol[player])
            if blk_won:
                self.num_blks_won[player] ^= 1
            else:
                self.num_blks_won[player] = 0
            if blk_won and self.num_blks_won[player] == 1:
                # Bonus move: the same player keeps the turn.
                score = self.minimax(
                    level-1, player, move, -self.INF, self.INF, board, player)
                self.num_blks_won[player] = 0
            else:
                score = self.minimax(
                    level-1, player ^ 1, move, -self.INF, self.INF, board, player)
            # undo move
            self.update_zobrist_block(move, player)
            board.board_status[move[0]][move[1]] = "-"
            board.block_status[move[0]/4][move[1]/4] = "-"
            if score > maxval:
                best_move = move
                maxval = score
        self.num_blks_won[player] = temp
        return best_move, score

    def minimax(self, level, player, old_move, alpha, beta, board, prev_player):
        """Depth-limited alpha-beta search; `player` is the side to move,
        `prev_player` made `old_move`.  Returns a heuristic score from
        max_player's point of view."""
        # Base conditions: time already up, clock just expired, depth
        # exhausted, or the game is decided.
        if self.timeup == 1:
            return self.heuristic(board, prev_player, old_move)
        if time() - self.starttime >= self.endtime:
            self.timeup = 1
            return self.heuristic(board, prev_player, old_move)
        if level == 0 or board.find_terminal_state() != ('CONTINUE', '-'):
            return self.heuristic(board, prev_player, old_move)
        possible_moves = board.find_valid_move_cells(old_move)
        score = self.INF
        if (player == self.max_player):
            score = -score
        temp = self.num_blks_won[player]
        for move in possible_moves:
            self.num_blks_won[player] = temp
            self.update_zobrist_block(move, player)
            status, blk_won = self.update(board, old_move, move, self.map_symbol[player])
            if blk_won:
                self.num_blks_won[player] ^= 1
            else:
                self.num_blks_won[player] = 0
            if player == self.max_player:
                if blk_won and self.num_blks_won[player] == 1:
                    # Bonus move: max_player moves again.
                    score = max(score, self.minimax(
                        level-1, player, move, alpha, beta, board, player))
                    self.num_blks_won[player] = 0
                else:
                    score = max(score, self.minimax(
                        level-1, player ^ 1, move, alpha, beta, board, player))
                alpha = max(alpha, score)
            else:
                if blk_won and self.num_blks_won[player] == 1:
                    score = min(score, self.minimax(
                        level-1, player, move, alpha, beta, board, player))
                    self.num_blks_won[player] = 0
                else:
                    score = min(score, self.minimax(
                        level-1, player ^ 1, move, alpha, beta, board, player))
                beta = min(score, beta)
            self.update_zobrist_block(move, player)
            # undo move
            board.board_status[move[0]][move[1]] = "-"
            board.block_status[move[0]/4][move[1]/4] = "-"
            if (alpha >= beta or self.timeup == 1):
                break
        self.num_blks_won[player] = temp
        return score

    def heuristic(self, board, player, old_move):
        """Static evaluation: +/-INF for a decided game, otherwise per-block
        scores (memoized by the block's Zobrist hash) aggregated over the
        grid of blocks.

        NOTE(review): self.blk_hash is indexed here as blk_hash[i][j] with
        i in range(4), but it is built as a [2][3][3] structure -- this
        looks like a mid-refactor inconsistency; confirm against the board
        API actually in use."""
        cur_state = board.find_terminal_state()
        if cur_state[1] == "WON":
            if player == self.max_player:
                return self.INF
            else:
                return -self.INF
        cost = []
        for i in range(4):
            cost.append([0]*4)
        row_no = old_move[0]/4
        col_no = old_move[1]/4
        cur_player = player ^ 1
        summ = 0
        for i in range(4):
            for j in range(4):
                if (board.block_status[i][j] == self.map_symbol[self.max_player]):
                    cost[i][j] = self.INF/100
                elif (board.block_status[i][j] == self.map_symbol[self.max_player ^ 1]):
                    cost[i][j] = -self.INF/100
                else:
                    if self.blk_hash[i][j] in self.dict:
                        cost[i][j] = self.dict[self.blk_hash[i][j]]
                        # Crude size cap on the memo table.
                        if len(self.dict) > 1000:
                            self.dict = {}
                    else:
                        x = self.computecost(board, self.max_player, i, j)
                        cost[i][j] = x
                        self.dict[self.blk_hash[i][j]] = x
        return self.compute_for_bigboard(board, self.max_player, cost)

    def compute_for_bigboard(self, board, player, cost):
        """Combine per-block scores: a row/column/diamond of blocks only
        contributes when it is still winnable by one of the players (no mix
        of both symbols); falls back to the plain sum when nothing is live."""
        row = []
        col = []
        col_tot = [0]*4
        row_tot = [0]*4
        for i in range(4):
            row.append([])
            col.append([])
        total = 0
        for i in range(4):
            for j in range(4):
                row[i].append(board.block_status[i][j])
                row_tot[i] += cost[i][j]
        for i in range(4):
            for j in range(4):
                col[j].append(board.block_status[i][j])
                col_tot[j] += cost[i][j]
        for i in range(4):
            cntmx = row[i].count(self.map_symbol[player])
            cntmn = row[i].count(self.map_symbol[player ^ 1])
            cntemp = row[i].count('-')
            if (cntmx+cntemp == 4 or cntmn+cntemp == 4):
                total += row_tot[i]
        for i in range(4):
            cntmx = col[i].count(self.map_symbol[player])
            cntmn = col[i].count(self.map_symbol[player ^ 1])
            cntemp = col[i].count('-')
            if (cntmx+cntemp == 4 or cntmn+cntemp == 4):
                total += col_tot[i]
        # Diamond patterns centered on the four interior blocks.
        for i in range(1, 3):
            for j in range(1, 3):
                cntmx = 0
                cntmn = 0
                cntemp = 0
                summ = 0
                for k in range(4):
                    temp = board.block_status[self.up[k]+i][self.down[k]+j]
                    if temp == self.map_symbol[player]:
                        cntmx += 1
                    elif temp == self.map_symbol[player ^ 1]:
                        cntmn += 1
                    elif temp == "-":
                        cntemp += 1
                    summ += cost[i+self.up[k]][j+self.down[k]]
                if (cntmx+cntemp == 4 or cntmn+cntemp == 4):
                    total += summ
        if (total == 0):
            for i in range(4):
                for j in range(4):
                    total += cost[i][j]
        return total

    def computecost(self, board, player, row_no, col_no):
        """Score one 4x4 block at (row_no, col_no): for every row, column and
        diamond that contains only one player's marks, add (or subtract)
        inc_costs[count]."""
        row = []
        col = []
        for i in range(4):
            row.append([])
            col.append([])
        total = 0
        for i in range(4*row_no, 4*row_no+4):
            for j in range(4*col_no, 4*col_no+4):
                row[i % 4].append(board.board_status[i][j])
        for i in range(4*row_no, 4*row_no+4):
            for j in range(4*col_no, 4*col_no+4):
                col[j % 4].append(board.board_status[i][j])
        for i in range(4):
            cntmx = row[i].count(self.map_symbol[player])
            cntmn = row[i].count(self.map_symbol[player ^ 1])
            if (cntmx > 0 and cntmn == 0):
                total += self.inc_costs[cntmx]
            elif (cntmx == 0 and cntmn > 0):
                total -= self.inc_costs[cntmn]
        for i in range(4):
            cntmx = col[i].count(self.map_symbol[player])
            cntmn = col[i].count(self.map_symbol[player ^ 1])
            cntemp = col[i].count('-')
            if (cntmx > 0 and cntmn == 0):
                total += self.inc_costs[cntmx]
            elif (cntmx == 0 and cntmn > 0):
                total -= self.inc_costs[cntmn]
        start_row = 4*row_no
        start_col = 4*col_no
        # Diamond patterns inside the block.
        for i in range(1, 3):
            for j in range(1, 3):
                cntmx = 0
                cntmn = 0
                for k in range(4):
                    temp = board.board_status[start_row+self.up[k]+i][start_col+self.down[k]+j]
                    if temp == self.map_symbol[player]:
                        cntmx += 1
                    elif temp == self.map_symbol[player ^ 1]:
                        cntmn += 1
                if (cntmx > 0 and cntmn == 0):
                    total += self.inc_costs[cntmx]
                elif (cntmx == 0 and cntmn > 0):
                    total -= self.inc_costs[cntmn]
        return total
# o1 = GAME()
# o1.move(1,1,1)
|
def preamble():
    """Return the usage banner shown when the script starts."""
    banner = ("""
This script takes a file with pregnacies of interest and looks for prior VBAC or vaginal pregnanicies from another data set
Usage: search_data.py -d [list of all pregnancies saved as CSV UTF-8] -p [Pregnancies and metadata of interest]
Last Updated: 30 July 2021
Maxim Seferovic, seferovi@bcm.edu
""")
    return banner
import argparse, os.path, collections
from datetime import datetime
def timestamp(action, object):
    """Print one aligned log line: wall-clock time, action label, subject."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Same column widths as before: 22 chars for the stamp, 18 for the action.
    print(f"{stamp:<22}{action:<18}{object}")
def save(outdata):
    """Write `outdata` (a list of CSV lines) next to the input sample file.

    Uses the module-level `samples` path and the global `firstline` header
    (set by opensamples()).  Picks the first free ..._outlist_<i>.<ext>
    name so reruns never overwrite earlier output.
    NOTE: mutates `outdata` in place (header insert).
    """
    i = 0
    while os.path.exists(f"{samples[0].rsplit('.', 1)[0]}_outlist_{i}.{samples[0].rsplit('.', 1)[-1]}"): i += 1
    savename = f"{samples[0].rsplit('.', 1)[0]}_outlist_{i}.{samples[0].rsplit('.', 1)[-1]}"
    outdata.insert(0, firstline)
    with open(savename, mode='wt', encoding='utf-8') as f: f.write('\n'.join(outdata))
    timestamp("Saved", savename)
def opendata():
    """Load the pregnancy database CSV (module-level `file` path).

    Returns a defaultdict keyed by column 0 (subject id); each value is a
    list of [delivery_mode, year] pairs taken from column 4 and the last
    four characters of column 3.  All whitespace is stripped before the
    comma split, so embedded spaces in fields are removed.
    """
    timestamp ('Open database', file[0])
    csv = collections.defaultdict(list)
    with open (file[0], 'r') as f:
        for line in f:
            newline = ''.join(line.split()).split(',')
            csv[newline[0]].append([newline[4], newline[3][-4:]])
    return csv
def opensamples():
    """Read the pregnancies-of-interest file (module-level `samples` path).

    Side effect: stores the header line, extended with the three output
    column names, in the global `firstline` for save().  Returns the
    remaining lines with spaces removed.
    """
    timestamp ('Open preg list', samples[0])
    global firstline ### Unhash for headers.
    samplelist = []
    with open (samples[0], 'r') as f:
        firstline = f.readline().strip() + ',preg_year,prior_vag,prior_vbac' ### Unhash for headers.
        for line in f: samplelist.append((''.join(line.split(' '))).strip())
    return samplelist
def match(data, samplelist):
    """Flag, per sample row, any prior vaginal delivery / prior VBAC that
    happened strictly before the row's pregnancy year.

    Parameters
    ----------
    data : dict
        id -> list of [delivery_mode, year_string] pairs (from opendata()).
    samplelist : list of str
        CSV rows; column 0 is the id, column 2 a date whose last four
        characters are the pregnancy year.

    Returns
    -------
    list of str
        Each input row extended with ',year,prior_vag,prior_vbac' where the
        two flags are '0'/'1'.  Rows with no database entry or without a
        4-digit year are skipped.
    """
    outlist = []
    for line in samplelist:
        l = line.split(',')
        sampledata = data.get(l[0])
        # BUG FIX: the original crashed with TypeError (len(None)) when the
        # id was absent from the database; such rows are now skipped.
        if sampledata is None:
            continue
        year = l[2][-4:]
        if len(year) < 4:
            continue
        newline = [line, year, 0, 0]
        for mode, prior_year in sampledata:
            if len(prior_year) < 4:
                continue            # malformed year in the database record
            if int(year) <= int(prior_year):
                continue            # not strictly earlier than this pregnancy
            if mode == 'Vaginal':
                newline[2] += 1
            elif mode == 'VBAC':
                newline[3] += 1
        # Collapse counts to 0/1 flags and stringify for the CSV join.
        for pos in range(2, 4):
            if newline[pos] != 0:
                newline[pos] = 1
            newline[pos] = str(newline[pos])
        outlist.append(','.join(newline))
    return outlist
def main():
    """Wire the pipeline together: load both inputs, match, write output."""
    save(match(opendata(), opensamples()))
if __name__ == '__main__':
    # print(preamble()) runs for its side effect; argparse then receives its
    # None return value, so `-h` shows no description.
    # NOTE(review): probably meant description=preamble() -- confirm intent.
    parser = argparse.ArgumentParser(description=print(preamble()))
    parser.add_argument('-d', '--DB', nargs = 1, required=True, type=str, dest='in_file')
    parser.add_argument('-p', '--pregnancies', nargs = 1, required=True, type=str, dest='sample_list')
    args = parser.parse_args()
    file = args.in_file          # 1-element list (nargs=1): database CSV path
    samples = args.sample_list   # 1-element list: sample CSV path
    main()
import numpy
# The first token of the first stdin line is the row count; read that many
# rows of whitespace-separated integers, then print the largest per-row
# minimum (max over axis-1 minima).
rows = int(input().split()[0])
matrix = numpy.array([input().split() for _ in range(rows)], int)
print(numpy.max(numpy.min(matrix, axis=1)))
|
#!/usr/bin/env python
#encoding=utf-8
# Example: parse Chinese text with the TexSmart NLU engine, with NER enabled
# but fine-grained NER switched off via a JSON options string.
import sys
import os.path
# Make the bundled library importable relative to this script's location.
module_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(module_dir+'/../../lib/')
from tencent_ai_texsmart import *
print('##################################################')
print('# Example-2: Parsing text using options')
print('##################################################')
print('Stdout encoding: ' + sys.stdout.encoding)
print('Creating and initializing the NLU engine...')
# Second argument: number of worker threads (per the engine's constructor).
engine = NluEngine(module_dir + '/../../data/nlu/kb/', 1)
#disable fine-grained NER:
print('Options: Enable NER but disable fine-grained NER')
# NOTE(review): the \" escape is redundant inside single quotes -- the value
# is plain JSON {"ner":{"enable":true,"fine_grained":false}}.
options = '{"ner\":{"enable":true,"fine_grained":false}}'
print(u'=== 解析一个中文句子 ===')
output = engine.parse_text_ext(u"上个月30号,南昌王先生在自己家里边看流浪地球边吃煲仔饭", options)
print(u'Norm text: {0}'.format(output.norm_text()))
# Fine-grained word segmentation: surface form, offset, length, tag.
print(u'细粒度分词:')
for item in output.words():
    print(u'\t{0}\t{1}\t{2}\t{3}'.format(item.str, item.offset, item.len, item.tag))
# Coarse-grained segmentation (phrases).
print(u'粗粒度分词:')
for item in output.phrases():
    print(u'\t{0}\t{1}\t{2}\t{3}'.format(item.str, item.offset, item.len, item.tag))
# Named entities: text, (offset,len) span, type name and normalized meaning.
print(u'命名实体识别(NER):')
for entity in output.entities():
    print(u'\t{0}\t({1},{2})\t{3}\t{4}'.format(entity.str, entity.offset, entity.len, entity.type.name, entity.meaning))
|
# Read an undirected graph from stdin: n vertices, m edges, then m "a b" pairs.
n, m = list(map(int, input().split()))
relations = {}
for _ in range(m):
    a, b = list(map(int, input().split()))
    # Idiom fix: setdefault replaces the original `relations.get(x) == None`
    # checks -- same behavior (create the adjacency list on first sight).
    relations.setdefault(a, [])
    relations.setdefault(b, [])
    relations[a].append(b)
    relations[b].append(a)
result = False  # set True by lookup() when a 5-vertex simple path exists
def lookup(relations, path, current, visited):
    """Depth-first search for a simple path of five vertices starting from
    `current`; sets the module-level `result` flag when one is found."""
    global result
    if len(path) == 5:
        result = True
        return
    neighbours = relations.get(current)
    if neighbours is None:
        return
    for nxt in neighbours:
        if nxt == current or nxt in visited:
            continue
        lookup(relations, path + [nxt], nxt, visited + [nxt])
# Try every start vertex; stop as soon as a 5-vertex path is found.
# NOTE(review): starts are taken from range(n), i.e. 0..n-1; if the input
# labels vertices 1..n, vertex n is never tried as a start -- confirm
# against the input format.
for i in range(n):
    lookup(relations, [i], i, [i])
    if(result):
        break
# Print 1 when such a path exists, 0 otherwise.
if(result):
    print(1)
else:
    print(0)
from pynput.mouse import Button, Controller as MouseController
from pynput.keyboard import Key, KeyCode, Controller as KeyboardController
from pyautogui import typewrite
from time import sleep, time
from threading import Thread
class ClickEvent:
    """A replayable mouse click at an absolute screen position."""
    KEYWORD = "click"
    MOUSE_LEFT = 1
    MOUSE_RIGHT = 2
    def __init__(self, x, y, button=1):
        self.button = button
        self.x = x
        self.y = y
    def consume(self, controller):
        """Move the mouse controller to (x, y) and click the stored button."""
        controller.position = (self.x, self.y)
        controller.move(0, 0)  # nudge so the position change registers
        which = Button.right if self.button == self.MOUSE_RIGHT else Button.left
        controller.press(which)
        controller.release(which)
    def to_string(self):
        """Serialize as 'click,x,y,button' for the auto-file format."""
        fields = (self.KEYWORD, str(self.x), str(self.y), str(self.button))
        return ",".join(fields)
class StringEvent:
    """A replayable burst of literal text, typed one character at a time."""
    def __init__(self, string):
        self.string = string
    def consume(self, controller):
        """Type every character of the string on the keyboard controller."""
        for character in self.string:
            code = KeyCode.from_char(character)
            controller.press(code)
            controller.release(code)
    def to_string(self):
        """The serialized form is simply the raw text."""
        return self.string
class TapEvent:
    """A replayable half-keystroke: one key going down or coming up."""
    KEYWORD = "tap"
    KEY_DOWN = 1
    KEY_UP = 0
    def __init__(self, keycode, motion=0):
        self.keycode = keycode
        self.motion = motion
    def consume(self, controller):
        """Press or release the stored key on the keyboard controller."""
        if self.motion == self.KEY_DOWN:
            controller.press(self.keycode)
        else:
            controller.release(self.keycode)
    def to_string(self):
        """Serialize as 'tap,<key>,<motion>' for the auto-file format."""
        key = self.keycode
        if isinstance(key, Key):
            key = str(key)        # e.g. 'Key.enter'
        elif isinstance(key, KeyCode):
            key = key.char        # printable character
        return ",".join([self.KEYWORD, str(key), str(self.motion)])
class EventController(Thread):
    """Background thread that replays a recorded task list in a loop.

    Each task is a ``(delay_seconds, event)`` pair; ClickEvents go to the
    mouse controller, TapEvents and StringEvents to the keyboard.
    """
    def __init__(self, tasks=None):
        # BUG FIX: the original declared `tasks=[]` -- a mutable default, so
        # every controller created without arguments shared one list.
        Thread.__init__(self)
        self.tasks = [] if tasks is None else tasks
        self.counter = 0       # index of the next task to replay
        self.enabled = False   # run() loops while this stays True
    def run(self):
        """Replay tasks cyclically until disable() is called."""
        if not len(self.tasks):
            return
        keyboard = KeyboardController()
        mouse = MouseController()
        while self.enabled:
            # Renamed from `time` to avoid shadowing the imported time().
            delay, event = self.tasks[self.counter]
            sleep(delay)
            if isinstance(event, ClickEvent):
                event.consume(mouse)
            elif isinstance(event, TapEvent) or isinstance(event, StringEvent):
                event.consume(keyboard)
            self.counter = (self.counter + 1) % len(self.tasks)
    def disable(self):
        self.enabled = False
    def enable(self):
        self.enabled = True
    def load_auto_file(self, handle):
        """Load tasks from a recorded file: 'delay,click,x,y,button' or
        'delay,tap,<key>,<motion>' per line; unrecognized keys are skipped."""
        self.tasks = []
        with open(handle, "r") as fhandle:  # `with` closes on error too
            for line in fhandle:
                attr = line.strip().split(",")
                if attr[1] == ClickEvent.KEYWORD:
                    self.tasks.append((float(attr[0]),
                                       ClickEvent(int(attr[2]), int(attr[3]), int(attr[4]))))
                elif attr[1] == TapEvent.KEYWORD:
                    key = None
                    keys = dict([(str(e), e) for e in Key])
                    if attr[2] in keys.keys():
                        key = keys[attr[2]]
                    else:
                        try:
                            key = KeyCode.from_char(attr[2])
                        except Exception:  # narrowed from a bare except
                            key = None
                    if key:
                        self.tasks.append((float(attr[0]), TapEvent(key, int(attr[3]))))
    def load_text_file(self, handle, size=70, interval=6):
        """Turn a text file into typing tasks: words are re-wrapped at `size`
        characters; each wrapped line is typed, then Enter is pressed.

        BUG FIX: the original never reset its `flush` flag (after the first
        flush every single word was emitted on its own line) and silently
        dropped the word that overflowed the width.
        """
        self.tasks = []
        def emit(text):
            # One typed line followed by an Enter press/release pair.
            self.tasks.append((interval, StringEvent(text)))
            self.tasks.append((0, TapEvent(Key.enter, TapEvent.KEY_DOWN)))
            self.tasks.append((0, TapEvent(Key.enter, TapEvent.KEY_UP)))
        pending = ""
        with open(handle) as infile:
            for line in infile:
                stripped = line.strip()
                if not stripped:
                    # Blank line: force out whatever has accumulated.
                    if pending:
                        emit(pending)
                        pending = ""
                    continue
                for word in stripped.split(" "):
                    if not pending:
                        pending = word
                    elif len(pending + " " + word) < size:
                        pending = pending + " " + word
                    else:
                        emit(pending)
                        pending = word  # keep the overflowing word
        if pending:
            emit(pending)
class EventRecorder:
    """Accumulates (delay, event) pairs, timing the gap between records."""
    def __init__(self):
        self.tasks = []
        self.last = time()  # wall-clock moment of the previous record
    def clear(self):
        """Forget everything recorded so far and restart the clock."""
        self.tasks = []
        self.last = time()
    def record(self, event):
        """Append `event`, tagged with the seconds elapsed since the last
        record; the very first event gets a delay of 0."""
        now = time()
        delay = now - self.last if self.tasks else 0
        self.tasks.append((delay, event))
        self.last = now
    def save(self, handle):
        """Write one 'delay,<serialized event>' line per recorded task."""
        with open(handle, "w") as out:
            for delay, event in self.tasks:
                out.write(",".join([str(delay), event.to_string()]) + "\n")
    def get_snapshot(self):
        """A shallow copy of the recorded task list."""
        return list(self.tasks)
|
__author__ = 'xiaoj'
#空间首页
from StoneUIFramework.public.common.basepage import Page
class _SPACEPAGE1(Page):
    """Page object for the Space home screen: locators for the space list,
    its search widgets and the main-menu entries.

    NOTE(review): every method overwrites its own name on the instance
    (e.g. ``self.Kjlb = ...``), so a *second* call on the same instance
    tries to call the cached element instead of the method -- confirm that
    page objects are used once per instance, or rename the attributes.
    """
    # Locator: space list
    def Kjlb(self):
        self.Kjlb = self.p.get_element("id->com.yunlu6.stone:id/navi_item_zone","空间列表")
        return self.Kjlb
    # Space list - main menu
    def Kjlb_mainmenu(self):
        self.Kjlb_mainmenu = self.p.get_element("id->com.yunlu6.stone:id/title_main_tv_more_menu","空间列表-主菜单")
        return self.Kjlb_mainmenu
    # Space list - browse org spaces (looked up by ID; returns all matches)
    def Kjlb_browseorgspaceByID(self):
        self.Kjlb_browseorgspaceByID = self.p.get_elements("id->com.yunlu6.stone:id/zone_company_title","空间列表-浏览企业空间(通过ID查找)")
        return self.Kjlb_browseorgspaceByID
    # Space list - browse a space (looked up by name)
    def Kjlb_browseorgspaceByName(self,name):
        self.Kjlb_browseorgspaceByName = self.p.get_element("name->%s"%name,"定位空间列表-浏览企业空间(通过Name查找)失败")
        return self.Kjlb_browseorgspaceByName
    # Space list - search button
    def Kjlb_searchbutton(self):
        self.Kjlb_searchbutton = self.p.get_element("id->com.yunlu6.stone:id/navi_item_zone","空间列表-搜索按钮")
        return self.Kjlb_searchbutton
    # Space list - search box
    def Kjlb_searchspace(self):
        self.Kjlb_searchspace = self.p.get_element("id->com.yunlu6.stone:id/edit_text","空间列表-搜索框")
        return self.Kjlb_searchspace
    # Space list - main menu - '+ organization space'
    def Kjlb_mainmenu_newspace(self):
        self.Kjlb_mainmenu_newspace = self.p.get_element("id->com.yunlu6.stone:id/btn_new_space","定位空间列表-主菜单-'+机构空间'失败")
        return self.Kjlb_mainmenu_newspace
    # Space list - main menu - '+ private space'
    def Kjlb_mainmenu_newpersonspace(self):
        self.Kjlb_mainmenu_newpersonspaceP = self.p.get_element("id->com.yunlu6.stone:id/btn_new_person_space","空间列表-主菜单-'+私人空间'")
        return self.Kjlb_mainmenu_newpersonspaceP
    # Space list - main menu - share business card
    def Kjlb_mainmenu_sharecard(self):
        self.Kjlb_mainmenu_sharecard = self.p.get_element("id->com.yunlu6.stone:id/btn_share_space","空间列表-主菜单-分享名片")
        return self.Kjlb_mainmenu_sharecard
import json
import re
# Read the raw JSON once; `with` guarantees the handle is closed even if the
# read fails (the original opened/read/closed manually).
with open('pokemon_full.json') as f:
    pokemon_full = f.read()
print('1. Общее количество символов:', len(pokemon_full))
# Removing word characters (letters/digits/_) leaves punctuation+whitespace,
# so the difference below counts the word characters.
# BUG FIX: raw string -- '[\w]' is an invalid escape sequence in a normal
# string literal (DeprecationWarning, SyntaxError in future Pythons).
pokemon_non_prep = re.sub(r'[\w]', '', pokemon_full)
print('2. Количество символов без знаков препинания:', len(pokemon_full) - len(pokemon_non_prep))
pokemon_full_list = json.loads(pokemon_full)
# Longest description wins; on ties the last such pokemon is kept (same
# behavior as the original loop).
max_ch = 0
name_t = ''
for char in pokemon_full_list:
    max_ch = max(len(char['description']), max_ch)
    if len(char['description']) == max_ch:
        name_t = char['name']
print('3. Покемон с самым длинным описанием:', name_t)
# First pass: maximum word count over all abilities; second pass: print
# every ability that reaches it.
col = 0
for skills in pokemon_full_list:
    for skill in skills['abilities']:
        col = max(len(skill.split()), col)
print('4. Умение(я) с самым большим количеством слов: ')
for skills in pokemon_full_list:
    for skill in skills['abilities']:
        if col == len(skill.split()):
            print(skill)
from flask import jsonify
from SSMSchema.ssmschema import customersearchesschema
from models.database import Database
import uuid
class CustomerSearches(object):
    """One saved search made by a customer, persisted in the
    `customersearches` Mongo collection."""
    def __init__(self, customer_id, search, date, search_id=None):
        # Fresh 32-char hex id for new searches; reuse the stored one when
        # rehydrating from the database.
        self.search_id = uuid.uuid4().hex if search_id is None else search_id
        self.customer_id = customer_id
        self.search = search
        self.date = date
    def save_to_mongo(self):
        """Validate against the schema and insert; returns a JSON response
        on success, an error string otherwise."""
        if customersearchesschema.validate([self.json()]):
            Database.insert(collection='customersearches', data=self.json())
            res = {'search_id': self.search_id + "Added"}
            result = jsonify({'result': res})
            return result
        else:
            return "Schema not matched!"
    def json(self):
        """Plain-dict representation used for storage and validation."""
        return {'search_id': self.search_id,
                'customer_id': self.customer_id,
                'search': self.search,
                'date': self.date
                }
    @staticmethod
    def from_mongo(search_id):
        """Fetch one search by its id.

        BUG FIX: the original referenced ``self.json()`` inside a
        staticmethod (a guaranteed NameError) and passed it as ``data=``;
        look the record up by search_id, matching the query= style of the
        other finders."""
        return Database.find_one(collection='customersearches',
                                 query={'search_id': search_id})
    @staticmethod
    def from_mongo_all_searches_of_customer(customer_id):
        """All searches stored for one customer."""
        return Database.find(collection='customersearches', query={'customer_id': customer_id})
    @staticmethod
    def from_mongo_all_searches():
        """Every stored search."""
        return Database.find(collection='customersearches', query={})
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class AddressesConfig(AppConfig):
    """Django app config for the address-book app (project.addresses)."""
    name = "project.addresses"        # dotted path Django uses to locate the app
    verbose_name = _("Address Book")  # translatable label shown in the admin
|
# -*- coding: utf-8 -*-
# @Time : 2019-09-16 21:19
# @Author : icarusyu
# @FileName: zhishu.py
# @Software: PyCharm
import math
def f():
    """Read n from stdin and return the total count of prime factors
    (with multiplicity) over all integers in 2..n.

    arr[i] holds i's prime-factor count: 1 for primes; for a composite i
    with smallest divisor k, arr[k] + arr[i//k] (both already filled since
    k and i//k are smaller than i).
    """
    n = int(input())
    arr = [0,0]  # 0 and 1 contribute no prime factors
    # if n <3:return arr[n-1]
    for i in range(2,n+1):
        # 是合数 (composite): split off the smallest divisor
        if simple_cnt(i):
            for k in range(2, int(math.sqrt(i))+1):
                if i % k ==0:
                    # print(i/k)
                    arr.append(arr[k] + arr[i//k])
                    break
        else:arr.append(1)  # prime: exactly one prime factor (itself)
    # print(arr)
    return sum(arr)
def simple_cnt(num):
    """Return True when `num` is composite (has a divisor in 2..sqrt(num)),
    False for primes and for 0/1.

    Improvements: the square root is computed once instead of on every
    iteration of the original while loop, and the manual flag is replaced
    with any() over a generator.
    """
    limit = int(math.sqrt(num))
    return any(num % i == 0 for i in range(2, limit + 1))
print(f())  # read n from stdin and print the aggregate prime-factor count
# print(simple_cnt(4))
|
from django.test import TestCase
from dojo.utils import set_duplicate
from dojo.management.commands.fix_loop_duplicates import fix_loop_duplicates
from dojo.models import Finding
import logging
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
class TestDuplication(TestCase):
fixtures = ['dojo_testdata.json']
def setUp(self):
self.finding_a = Finding.objects.get(id=2)
self.finding_a.pk = None
self.finding_a.duplicate = False
self.finding_a.duplicate_finding = None
self.finding_a.save()
self.finding_b = Finding.objects.get(id=3)
self.finding_b.pk = None
self.finding_b.duplicate = False
self.finding_b.duplicate_finding = None
self.finding_b.save()
self.finding_c = Finding.objects.get(id=4)
self.finding_c.duplicate = False
self.finding_c.duplicate_finding = None
self.finding_c.pk = None
self.finding_c.save()
def tearDown(self):
if self.finding_a.id:
self.finding_a.delete()
if self.finding_b.id:
self.finding_b.delete()
if self.finding_c.id:
self.finding_c.delete()
# Set A as duplicate of B and check both directions
def test_set_duplicate_basic(self):
set_duplicate(self.finding_a, self.finding_b)
self.assertTrue(self.finding_a.duplicate)
self.assertFalse(self.finding_b.duplicate)
self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_b.id)
self.assertEqual(self.finding_b.duplicate_finding, None)
self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
self.assertEqual(self.finding_a.duplicate_finding_set().count(), 1)
self.assertEqual(self.finding_b.duplicate_finding_set().count(), 1)
self.assertEqual(self.finding_b.duplicate_finding_set().first().id, self.finding_a.id)
# A duplicate should not be considered to be an original for another finding
def test_set_duplicate_exception_1(self):
self.finding_a.duplicate = True
self.finding_a.save()
with self.assertRaisesRegex(Exception, "Existing finding is a duplicate"):
set_duplicate(self.finding_b, self.finding_a)
# A finding should never be the duplicate of itself
def test_set_duplicate_exception_2(self):
with self.assertRaisesRegex(Exception, "Can not add duplicate to itself"):
set_duplicate(self.finding_b, self.finding_b)
# Two duplicate findings can not be duplicates of each other as well
def test_set_duplicate_exception_3(self):
set_duplicate(self.finding_a, self.finding_b)
set_duplicate(self.finding_c, self.finding_b)
with self.assertRaisesRegex(Exception, "Existing finding is a duplicate"):
set_duplicate(self.finding_a, self.finding_c)
# Merge duplicates: If the original of a dupicate is now considered to be a duplicate of a new original the old duplicate should be appended too
def test_set_duplicate_exception_merge(self):
set_duplicate(self.finding_a, self.finding_b)
set_duplicate(self.finding_b, self.finding_c)
self.finding_a = Finding.objects.get(id=self.finding_a.id)
self.assertTrue(self.finding_b.duplicate)
self.assertTrue(self.finding_a.duplicate)
self.assertFalse(self.finding_c.duplicate)
self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_c.id)
self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)
self.assertEqual(self.finding_c.duplicate_finding, None)
self.assertEqual(self.finding_a.duplicate_finding_set().count(), 2)
self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)
self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)
# if a duplicate is deleted the original should still be present
def test_set_duplicate_exception_delete_1(self):
set_duplicate(self.finding_a, self.finding_b)
self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
self.finding_a.delete()
self.assertEqual(self.finding_a.id, None)
self.assertEqual(self.finding_b.original_finding.first(), None)
# if the original is deleted all duplicates should be deleted
def test_set_duplicate_exception_delete_2(self):
set_duplicate(self.finding_a, self.finding_b)
self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
self.finding_b.delete()
with self.assertRaises(Finding.DoesNotExist):
self.finding_a = Finding.objects.get(id=self.finding_a.id)
self.assertEqual(self.finding_b.id, None)
def test_loop_relations_for_one(self):
    """A finding marked as duplicate of itself is a loop; fix_loop_duplicates must break it."""
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_b
    # bypass Finding.save() validation so the inconsistent state reaches the DB
    super(Finding, self.finding_b).save()
    candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
    self.assertEqual(candidates, 1)
    fix_loop_duplicates()
    # after the fix no finding is both a duplicate and an original
    candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
    self.assertEqual(candidates, 0)
# if two findings are connected with each other the fix_loop function should detect and remove the loop
def test_loop_relations_for_two(self):
    """Build a two-finding duplicate cycle (a <-> b) and verify fix_loop_duplicates breaks it."""
    set_duplicate(self.finding_a, self.finding_b)
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_a
    # bypass Finding.save() validation so the loop state reaches the DB
    for finding in (self.finding_a, self.finding_b):
        super(Finding, finding).save()
    fix_loop_duplicates()
    candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
    self.assertEqual(candidates, 0)
    # Get latest status
    self.finding_a = Finding.objects.get(id=self.finding_a.id)
    self.finding_b = Finding.objects.get(id=self.finding_b.id)
    # one side of the former loop stays the original, the other becomes its duplicate
    for finding in (self.finding_a, self.finding_b):
        if finding.duplicate_finding:
            self.assertTrue(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 0)
        else:
            self.assertFalse(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 1)
# Similar Loop detection and deletion for three findings
def test_loop_relations_for_three(self):
    """Build a three-finding duplicate cycle (a -> b -> c -> a) and verify it is flattened."""
    set_duplicate(self.finding_a, self.finding_b)
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_c
    self.finding_c.duplicate = True
    self.finding_c.duplicate_finding = self.finding_a
    # bypass Finding.save() validation so the loop state reaches the DB
    for finding in (self.finding_a, self.finding_b, self.finding_c):
        super(Finding, finding).save()
    fix_loop_duplicates()
    # Get latest status
    self.finding_a = Finding.objects.get(id=self.finding_a.id)
    self.finding_b = Finding.objects.get(id=self.finding_b.id)
    self.finding_c = Finding.objects.get(id=self.finding_c.id)
    # exactly one finding survives as the original with the other two as duplicates
    for finding in (self.finding_a, self.finding_b, self.finding_c):
        if finding.duplicate_finding:
            self.assertTrue(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 0)
        else:
            self.assertFalse(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 2)
# Another loop-test for 4 findings
def test_loop_relations_for_four(self):
    """Build a four-finding duplicate cycle (a -> b -> c -> d -> a) and verify it is flattened."""
    # clone finding 4 as a fresh, non-duplicate finding_d
    self.finding_d = Finding.objects.get(id=4)
    self.finding_d.pk = None
    self.finding_d.duplicate = False
    self.finding_d.duplicate_finding = None
    self.finding_d.save()
    set_duplicate(self.finding_a, self.finding_b)
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_c
    self.finding_c.duplicate = True
    self.finding_c.duplicate_finding = self.finding_d
    self.finding_d.duplicate = True
    self.finding_d.duplicate_finding = self.finding_a
    # bypass Finding.save() validation so the loop state reaches the DB
    for finding in (self.finding_a, self.finding_b, self.finding_c, self.finding_d):
        super(Finding, finding).save()
    fix_loop_duplicates()
    # Get latest status
    self.finding_a = Finding.objects.get(id=self.finding_a.id)
    self.finding_b = Finding.objects.get(id=self.finding_b.id)
    self.finding_c = Finding.objects.get(id=self.finding_c.id)
    self.finding_d = Finding.objects.get(id=self.finding_d.id)
    # exactly one finding survives as the original with the other three as duplicates
    for finding in (self.finding_a, self.finding_b, self.finding_c, self.finding_d):
        if finding.duplicate_finding:
            self.assertTrue(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 0)
        else:
            self.assertFalse(finding.duplicate)
            self.assertEqual(finding.original_finding.count(), 3)
# Similar Loop detection and deletion for three findings
# NOTE(review): despite the comment above this builds a chain a -> b -> c (no
# loop) and checks that fix_loop_duplicates collapses it onto c as the original.
def test_list_relations_for_three(self):
    """A duplicate chain a -> b -> c must be flattened so both a and b point at c."""
    set_duplicate(self.finding_a, self.finding_b)
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_c
    # bypass Finding.save() validation so the raw chain state reaches the DB
    super(Finding, self.finding_a).save()
    super(Finding, self.finding_b).save()
    super(Finding, self.finding_c).save()
    fix_loop_duplicates()
    # reload the three findings from the DB
    self.finding_a = Finding.objects.get(id=self.finding_a.id)
    self.finding_b = Finding.objects.get(id=self.finding_b.id)
    self.finding_c = Finding.objects.get(id=self.finding_c.id)
    self.assertTrue(self.finding_b.duplicate)
    self.assertTrue(self.finding_a.duplicate)
    self.assertFalse(self.finding_c.duplicate)
    # c is the single original for both a and b
    self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_c.id)
    self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)
    self.assertEqual(self.finding_c.duplicate_finding, None)
    self.assertEqual(self.finding_a.duplicate_finding_set().count(), 2)
    self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)
def test_list_relations_for_three_reverse(self):
    """Mirror of test_list_relations_for_three: chain c -> b -> a collapses onto a."""
    set_duplicate(self.finding_c, self.finding_b)
    self.finding_b.duplicate = True
    self.finding_b.duplicate_finding = self.finding_a
    # bypass Finding.save() validation so the raw chain state reaches the DB
    super(Finding, self.finding_a).save()
    super(Finding, self.finding_b).save()
    super(Finding, self.finding_c).save()
    fix_loop_duplicates()
    # reload the three findings from the DB
    self.finding_a = Finding.objects.get(id=self.finding_a.id)
    self.finding_b = Finding.objects.get(id=self.finding_b.id)
    self.finding_c = Finding.objects.get(id=self.finding_c.id)
    self.assertTrue(self.finding_b.duplicate)
    self.assertTrue(self.finding_c.duplicate)
    self.assertFalse(self.finding_a.duplicate)
    # a is the single original for both b and c
    self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_a.id)
    self.assertEqual(self.finding_c.duplicate_finding.id, self.finding_a.id)
    self.assertEqual(self.finding_a.duplicate_finding, None)
    self.assertEqual(self.finding_c.duplicate_finding_set().count(), 2)
    self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Developer scratch script for the DCat catalogue: opens a Store database and
# reads one volume from disk.  Alternative code paths (Writer/Reader timing
# experiments, DB creation) are kept around commented out for manual use.
import os.path
from Store import Store
# from Writer import Writer
# from Reader import Reader
# from tools import ETimer
# lin
# NOTE(review): SCAN_PATH is assigned three times in a row as a manual toggle;
# only the last assignment takes effect.
SCAN_PATH = "/home/nia/Music" # 130 - gz 2.6 k
SCAN_PATH = "/home/nia/Android" # 31105 - gz 504.8 k
SCAN_PATH = "/home/nia/Development/_Comcon" # 863285 - gz 15.5 m (14.9 m - strip keys)
# XFILE = "/home/nia/Development/_Python/_DCat/features_files/binvoldict.binvoldict"
XDB = "/home/nia/Development/_Python/_DCat/features_files"
# win
# flat - 9.59 kb
# (measurements from an earlier run, kept as a bare string literal)
"""
74075
size - 2.66 Mb
time - 5 min
mem - 40 mb
"""
# SCAN_PATH = "E:\\Screens"
# SCAN_PATH = "E:\\_Comcon"
# XFILE = "E:\\Tmp\\binvoldict.binvoldict"
# #--- new objects
# etimer = ETimer()
# # Writer(SCAN_PATH, XFILE).start()
# # etimer.elapsed("write")
# Reader(XFILE).print_root_files()
# etimer.elapsed("read")
# # Reader(XFILE).print_tree()
# storew = Store()
# storew.make_db(SCAN_PATH, XFILE)
DB = "6_xml"
store = Store()
# store.create(XDB, DB)
# open an existing DB directory and ingest the volume at SCAN_PATH
store.open_db(os.path.join(XDB, DB))
# store.add_volume("vol1", SCAN_PATH)
store.read_volume("vol1", SCAN_PATH)
|
import pickle
import inflection
import pandas as pd
import numpy as np
import math
import time
import datetime
class Rossmann(object):
    """Inference pipeline for the Rossmann sales model.

    Cleans raw input rows, derives features and rescales/encodes them the
    same way as at training time, using the scalers persisted during
    training.
    """

    def __init__(self):
        self.home_path = 'C:/Users/Caio/Desktop/Caio/repos/data_science_em_producao/'

        def _load(name):
            # close the file handle explicitly (the original leaked open files)
            with open(self.home_path + 'parameters/' + name + '.pkl', 'rb') as f:
                return pickle.load(f)

        # scalers/encoders fitted during training and persisted as pickles
        self.competition_distance_scaler = _load('competition_distance_scaler')
        self.competition_time_month_scaler = _load('competition_time_month_scaler')
        self.promo_time_week_scaler = _load('promo_time_week_scaler')
        self.year_scaler = _load('year_scaler')
        self.store_type_scaler = _load('store_type_scaler')

    def data_cleaning(self, df1):
        """Rename columns to snake_case, fix dtypes and fill missing values.

        Returns the cleaned DataFrame (mutated in place as well).
        """
        ## 1.1. Rename Columns
        # The idea here is to get agility on development through easy names on the columns
        cols_old = ['Store', 'DayOfWeek', 'Date', 'Open', 'Promo',
                    'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',
                    'CompetitionDistance', 'CompetitionOpenSinceMonth',
                    'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
                    'Promo2SinceYear', 'PromoInterval']
        df1.columns = [inflection.underscore(c) for c in cols_old]

        ## 1.3. Data Types
        df1['date'] = pd.to_datetime(df1['date'])

        ## 1.5. Fillout NA
        # competition_distance: fill NA with a value much higher than the max
        # competitor distance in the dataset (i.e. "no competitor nearby")
        df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000 if math.isnan(x) else x)
        # competition_open_since_month: assume the competition opened on the
        # row's own date when the real opening date is unknown
        df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month'])
                                                        else x['competition_open_since_month'], axis=1)
        # competition_open_since_year
        df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year'])
                                                       else x['competition_open_since_year'], axis=1)
        # promo2_since_week
        df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)
        # promo2_since_year
        df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)

        month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
                     7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
        # fill NA with 0 so the comparison below does not need isnan
        # (plain assignment instead of the deprecated chained inplace fillna)
        df1['promo_interval'] = df1['promo_interval'].fillna(0)
        # extract the month of 'date' as a three-letter name for comparison
        df1['month_map'] = df1['date'].dt.month.map(month_map)
        # is_promo: 1 when the row's month appears in the store's promo_interval
        df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0
                                                                     else 1 if x['month_map'] in x['promo_interval'].split(',')
                                                                     else 0, axis=1)

        ## 1.6. Change Types
        # the filled columns above became float64; restore integer types
        df1['competition_open_since_month'] = df1['competition_open_since_month'].astype('int64')
        df1['competition_open_since_year'] = df1['competition_open_since_year'].astype('int64')
        df1['promo2_since_week'] = df1['promo2_since_week'].astype('int64')
        df1['promo2_since_year'] = df1['promo2_since_year'].astype('int64')
        return df1

    def feature_engineering(self, df2):
        """Derive date-based features and drop auxiliary/closed-store rows."""
        ## 2.4. Feature Engineering
        df2['year'] = df2['date'].dt.year
        df2['month'] = df2['date'].dt.month
        df2['day'] = df2['date'].dt.day
        df2['week_of_year'] = df2['date'].dt.isocalendar().week
        df2['year_week'] = df2['date'].dt.strftime('%Y-%W')

        # competition since: months elapsed between 'date' and the competitor's
        # opening date (year+month combined, day fixed to 1)
        df2['competition_since'] = df2.apply(lambda x: datetime.datetime(year=x['competition_open_since_year'],
                                                                         month=x['competition_open_since_month'],
                                                                         day=1), axis=1)
        # divide by 30 days to express the elapsed time in months
        df2['competition_time_month'] = ((df2['date'] - df2['competition_since']) / 30).apply(lambda x: x.days).astype(int)

        # promo since: rebuild a date from ISO year-week, then weeks elapsed
        df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
        df2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime(x + '-1', '%Y-%W-%w') - datetime.timedelta(days=7))
        df2['promo_time_week'] = ((df2['date'] - df2['promo_since']) / 7).apply(lambda x: x.days).astype(int)

        # assortment: decode the single-letter categories
        df2['assortment'] = df2['assortment'].apply(lambda x: 'basic' if x == 'a' else 'extra' if x == 'b'
                                                    else 'extended')
        # state holiday: decode the single-letter categories
        df2['state_holiday'] = df2['state_holiday'].apply(lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b'
                                                          else 'christmas' if x == 'c' else 'regular_day')

        # 3.0. Filtragem de Variáveis
        ## 3.1. Filtragem das Linhas
        # keep only open stores
        df2 = df2[df2['open'] != 0]
        ## 3.2. Seleção das colunas
        # 'customers' is a business restriction (not available at prediction
        # time); 'open' is now constant 1; 'promo_interval'/'month_map' were
        # auxiliary columns only
        cols_drop = ['open', 'promo_interval', 'month_map']
        df2 = df2.drop(cols_drop, axis=1)
        return df2

    def data_preparation(self, df5):
        """Rescale, encode and select the Boruta-chosen feature columns."""
        ## 5.2. Rescaling
        # NOTE: use transform, NOT fit_transform — the scalers were fitted on
        # the training data and re-fitting them on prediction input would
        # silently produce a different (wrong) scaling.
        # competition distance - RobustScaler (outliers injected by fillout NA)
        df5['competition_distance'] = self.competition_distance_scaler.transform(df5[['competition_distance']].values)
        # competition time month - RobustScaler (a lot of outliers)
        df5['competition_time_month'] = self.competition_time_month_scaler.transform(df5[['competition_time_month']].values)
        # promo time week - MinMaxScaler
        df5['promo_time_week'] = self.promo_time_week_scaler.transform(df5[['promo_time_week']].values)
        # year - MinMaxScaler
        df5['year'] = self.year_scaler.transform(df5[['year']].values)

        ### 5.3.1. Encoding
        # state holiday - One Hot Encoding
        df5 = pd.get_dummies(df5, prefix=['state_holiday'], columns=['state_holiday'])
        # store type - Label Encoding (fitted at training time)
        df5['store_type'] = self.store_type_scaler.transform(df5['store_type'])
        # assortment - Ordinal Encoding (basic < extra < extended)
        assortment_dict = {'basic': 1,
                           'extra': 2,
                           'extended': 3}
        df5['assortment'] = df5['assortment'].map(assortment_dict)

        ### 5.3.3. Nature Transformation
        # cyclic features projected on sin/cos so period boundaries stay close
        df5['day_of_week_sin'] = df5['day_of_week'].apply(lambda x: np.sin(x * (2 * np.pi / 7)))
        df5['day_of_week_cos'] = df5['day_of_week'].apply(lambda x: np.cos(x * (2 * np.pi / 7)))
        df5['day_sin'] = df5['day'].apply(lambda x: np.sin(x * (2 * np.pi / 30)))
        df5['day_cos'] = df5['day'].apply(lambda x: np.cos(x * (2 * np.pi / 30)))
        df5['month_sin'] = df5['month'].apply(lambda x: np.sin(x * (2 * np.pi / 12)))
        df5['month_cos'] = df5['month'].apply(lambda x: np.cos(x * (2 * np.pi / 12)))
        df5['week_of_year_sin'] = df5['week_of_year'].apply(lambda x: np.sin(x * (2 * np.pi / 52)))
        df5['week_of_year_cos'] = df5['week_of_year'].apply(lambda x: np.cos(x * (2 * np.pi / 52)))

        # feature set selected by Boruta during training
        cols_selected_boruta = ['store', 'promo', 'store_type', 'assortment', 'competition_distance',
                                'competition_open_since_month', 'competition_open_since_year', 'promo2', 'promo2_since_week',
                                'promo2_since_year', 'competition_time_month', 'promo_time_week', 'day_of_week_sin', 'day_of_week_cos',
                                'month_sin', 'month_cos', 'day_sin', 'day_cos', 'week_of_year_sin', 'week_of_year_cos']
        return df5[cols_selected_boruta]

    def get_prediction(self, model, original_data, test_data):
        """Predict with 'model' on test_data and attach the (de-logged)
        predictions to original_data; returns JSON records."""
        pred = model.predict(test_data)
        # training used log1p(sales); expm1 maps predictions back to sales
        original_data['prediction'] = np.expm1(pred)
        return original_data.to_json(orient='records', date_format='iso')
|
from turtle import *
# Turtle drawing exercise.  NOTE(review): the original indentation was lost;
# the nesting below is the most plausible reading — confirm against the
# intended figure.
pencolor("red")
# a square: four sides of length 90
for i in range(4):
    forward(90)
    left(90)
# seven connected segments with a 60-degree left turn after all but the last
for i in range(7):
    forward(90)
    if i<6:
        left(60)
pencolor("blue")
# two star-like fans: each is four segments with 72-degree right turns
for i in range(2):
    left(120)
    forward(90)
    left(228)
    for j in range(4):
        forward(90)
        right(72)
# keep the window open until the user closes it
mainloop()
from q17_and_18 import *
import numpy as np
"""
This program solve the high-dimension decision stump problem.
The answer is shown below.
Author: SunnerLi
Finish: 19/10/2016
"""
# Compute the result of specific hypothesis
# Decision stump: returns s on the positive side of the threshold, -s otherwise.
# NOTE(review): reads the module-level global 'theta', which must be set
# (by find()/test()) before H is called.
H = lambda s, x: s if x - theta > 0 else -s
# Variable
trainRowNumber = 100        # The number of row in training data
testRowNumber = 1000        # The number of row in testing data
dimNumber = 9               # The number of feature in rows
# Array (uninitialised buffers; filled by read())
trainX = np.ndarray([trainRowNumber, dimNumber])    # The training data
trainY = np.ndarray([trainRowNumber])               # The training tag
testX = np.ndarray([testRowNumber, dimNumber])      # The testing data
testY = np.ndarray([testRowNumber])                 # The testing tag
# File name
trainFileName = 'hw2_train.dat'     # The file name of training data
testFileName = 'hw2_test.dat'       # The file name of testing data
def read():
    """
    Read the training and testing data

    Fills the module-level trainX/trainY/testX/testY buffers in place.
    Each line is split on single spaces; the last field is the label.
    """
    global trainX
    global trainY
    global testX
    global testY
    # Deal with training data
    count = 0
    with open(trainFileName, 'r') as f:
        while True:
            rawData = f.readline().split(' ')
            # drop the first field (presumably an empty artefact of leading
            # whitespace in the data file — TODO confirm against the file)
            rawData = rawData[1:]
            # strip the trailing newline character from the last field
            rawData[-1] = rawData[-1][:len(rawData[-1])-1]
            for i in range(dimNumber):
                trainX[count][i] = rawData[i]
            trainY[count] = rawData[-1]
            count += 1
            if count == trainRowNumber:
                break
    # Deal with testing data (same format)
    count = 0
    with open(testFileName, 'r') as f:
        while True:
            rawData = f.readline().split(' ')
            rawData = rawData[1:]
            rawData[-1] = rawData[-1][:len(rawData[-1])-1]
            for i in range(dimNumber):
                testX[count][i] = rawData[i]
            testY[count] = rawData[-1]
            count += 1
            if count == testRowNumber:
                break
def sort(dimIndex, x, y, _size):
    """
    Sort the first _size rows of x by column dimIndex, permuting y identically.

    Arg: the dimension (column) to sort by, the data array, the tag array,
         and the number of rows to sort
    Ret: the (in-place sorted) data and tags

    Replaces the original O(n^2) selection-style sort with a single
    np.argsort; a stable sort keeps the result deterministic for ties.
    """
    order = np.argsort(x[:_size, dimIndex], kind="stable")
    x[:_size] = x[:_size][order]
    y[:_size] = y[:_size][order]
    return x, y
def Ein(dimIndex, s):
    """
    In-sample error rate of the stump H(s, .) on feature dimIndex.

    Relies on module-level globals: trainX, trainY, size, and the
    threshold theta (read indirectly through H).
    Arg: the dimension index and the sign s
    Ret: the fraction of misclassified training rows
    """
    mistakes = sum(
        1 for i in range(size)
        if H(s, trainX[i][dimIndex]) != trainY[i]
    )
    return float(mistakes) / size
def train(dimIndex):
    """
    Try to find the minimun Ein of hypothesis with the corresponding dimension index
    Arg: The dimension index that want to consult
    Ret: The minimun Ein, with the corresponding s and theta
    NOTE: returns (Ein, s, theta) while find() returns (Ein, theta, s).
    """
    global trainX
    global trainY
    minEin = 1.0
    minTheta = 1.0
    minS = 0
    # sort the training rows by this feature so candidate thresholds can be
    # taken between consecutive values
    trainX, trainY = sort(dimIndex, trainX, trainY, trainRowNumber)
    #print trainX[:][dimIndex]
    # try both signs of the stump, keeping the overall best parameters
    minEin, minTheta, minS = find(dimIndex, 1, minEin, minTheta, minS)
    minEin, minTheta, minS = find(dimIndex, -1, minEin, minTheta, minS)
    return minEin, minS, minTheta
def find(dimIndex, s, minEin, minTheta, minS):
    """
    Sweep every candidate threshold for sign s and keep the best parameters.

    theta is a module-level global because the hypothesis H reads it; each
    candidate is the midpoint between consecutive sorted sample values, with
    the two boundary candidates computed against -1 and +1.

    Arg: the dimension index, the sign s to test, and the incumbent
         (minEin, minTheta, minS)
    Ret: the updated (minEin, minTheta, minS)
    """
    global theta
    for i in range(size):
        if i == 0:
            theta = ( -1 + trainX[ 0][dimIndex] ) / 2
        elif i == size - 1:
            theta = ( 1 + trainX[-1][dimIndex] ) / 2
        else:
            theta = ( trainX[i][dimIndex] + trainX[i-1][dimIndex] ) / 2
        # evaluate Ein once per candidate (the original called it twice,
        # doubling the O(size) scan for no benefit)
        err = Ein(dimIndex, s)
        if minEin > err:
            minEin, minTheta, minS = err, theta, s
    return minEin, minTheta, minS
def test(dimIndex, minTheta, minS):
    """
    Testing the hypothesis with the result
    Arg: The paramter that we gain at train function
    Ret: The Eout
    """
    # Initialize the variable
    global testX
    global testY
    global theta
    global s
    # H reads the module-level theta, so install the trained parameters globally
    theta = minTheta
    s = minS
    # sorting does not change the error count; kept to mirror train()
    testX, testY = sort(dimIndex, testX, testY, testRowNumber)
    # Testing: count misclassified rows
    errorTime = 0
    for i in range(testRowNumber):
        if not H(s, testX[i][dimIndex]) == testY[i]:
            errorTime += 1
    return float(errorTime) / testRowNumber
if __name__ == "__main__":
minDim = 10
minEin = 1
minS = 2
minTheta = 1
# Find the best of best
read()
for i in range(dimNumber):
_Ein, _s, _theta = train(i)
print "Dimension: ", i, "\tEin: ", _Ein
if _Ein < minEin:
minEin, minS, minTheta, minDim = _Ein, _s, _theta, i
# Show the result
print ""
print "(Ans 19)\tmin Dimension: ", minDim, '\t\tmin Ein: ', minEin
minEout = test(minDim, minTheta, minS)
print "(Ans 20)\tEout: ", minEout |
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
#I0 = 3.56
#F0 = 61.04*2*np.pi
# Measured trace at 1.2 A: columns are time, cart position, J, coil current —
# NOTE(review): the meaning of column 'J' is not evident from this file.
data = np.genfromtxt('current1.2A.csv', delimiter=',', names=['t', 'X', 'J', 'I'])
def friction(x, a):
    """Model for curve_fit: position from double integration of constant a.

    First integrates a over the time grid x (velocity), then integrates the
    velocity (position).  Past t = 0.4 the measured position data['X'] is
    substituted for the integrated value.
    NOTE(review): reads the module-level 'data' in that branch.
    """
    velocity = [0]
    for k in range(len(x) - 1):
        velocity.append(velocity[-1] + a * (x[k + 1] - x[k]))
    position = [0]
    for k in range(len(x) - 1):
        if x[k] > 0.4:
            position.append(data['X'][k])
        else:
            position.append(position[-1] + velocity[k] * (x[k + 1] - x[k]))
    return position
# Numerically differentiate the cart position to get its speed.
V = [0]
for i in range(len(data['t']) - 1):
    V.append((data['X'][i + 1] - data['X'][i]) / (data['t'][i + 1] - data['t'][i]))
print(V)
# Fit the acceleration/friction coefficient 'a' to the measured position trace.
[a] = curve_fit(friction, data['t'], data['X'])[0]
print(a)
# NOTE: the original used inconsistent names (ffig/fig, aax1/ax1,
# mmodel/model) from a botched rename and crashed with NameError;
# unified to fig/ax1/model.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Time constant fitting")
ax1.set_xlabel('Time, sec')
ax1.set_ylabel('Current (A), cart position (m)')
ax1.plot(data['t'], data['I'], color='r', label='real current')
ax1.plot(data['t'], data['X'], color='g', label='cart position')
ax1.plot(data['t'], V, color='b', label='cart speed')
model = friction(data['t'], a)
ax1.plot(data['t'], model, color='r', label='fitted curve')
ax1.legend()
plt.show()
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import numpy as np
import tensorflow as tf
from runtime import db
from runtime.tensorflow.get_tf_model_type import is_tf_estimator
from runtime.tensorflow.get_tf_version import tf_is_version2
from runtime.tensorflow.import_model import import_model
from runtime.tensorflow.input_fn import (get_dtype,
parse_sparse_feature_predict,
tf_generator)
from runtime.tensorflow.keras_with_feature_column_input import \
init_model_with_feature_column
# Disable TensorFlow's C++ backend INFO and WARNING logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Silence the Python-side logger as well (the API differs between TF1 and TF2)
if tf_is_version2():
    import logging
    tf.get_logger().setLevel(logging.ERROR)
else:
    tf.logging.set_verbosity(tf.logging.ERROR)
def keras_predict(estimator, model_params, save, result_table,
                  feature_column_names, feature_metas, train_label_name,
                  result_col_name, conn, predict_generator, selected_cols):
    """Predict with a Keras model and write one row per sample to result_table.

    The result column is appended after the selected feature columns (minus
    the training label, if it appears in the SELECT).
    """
    classifier = init_model_with_feature_column(estimator, model_params)

    def eval_input_fn(batch_size, cache=False):
        # build a tf.data pipeline over the DB rows
        feature_types = []
        for name in feature_column_names:
            # NOTE: vector columns like 23,21,3,2,0,0 should use shape None
            if feature_metas[name]["is_sparse"]:
                feature_types.append((tf.int64, tf.int32, tf.int64))
            else:
                feature_types.append(get_dtype(feature_metas[name]["dtype"]))
        tf_gen = tf_generator(predict_generator, selected_cols,
                              feature_column_names, feature_metas)
        dataset = tf.data.Dataset.from_generator(tf_gen,
                                                 (tuple(feature_types), ))
        ds_mapper = functools.partial(
            parse_sparse_feature_predict,
            feature_column_names=feature_column_names,
            feature_metas=feature_metas)
        dataset = dataset.map(ds_mapper).batch(batch_size)
        if cache:
            dataset = dataset.cache()
        return dataset

    if not hasattr(classifier, 'sqlflow_predict_one'):
        # NOTE: load_weights should be called by keras models only.
        # NOTE: always use batch_size=1 when predicting to get the pairs of
        # features and predict results to insert into result table.
        pred_dataset = eval_input_fn(1)
        one_batch = next(iter(pred_dataset))
        # NOTE: must run predict one batch to initialize parameters. See:
        # https://www.tensorflow.org/alpha/guide/keras/saving_and_serializing#saving_subclassed_models # noqa: E501
        classifier.predict_on_batch(one_batch)
        classifier.load_weights(save)
    pred_dataset = eval_input_fn(1, cache=True).make_one_shot_iterator()
    column_names = selected_cols[:]
    try:
        train_label_index = selected_cols.index(train_label_name)
    except ValueError:
        # the label column may legitimately be absent from the prediction SELECT
        train_label_index = -1
    if train_label_index != -1:
        del column_names[train_label_index]
    column_names.append(result_col_name)
    with db.buffered_db_writer(conn, result_table, column_names, 100) as w:
        for features in pred_dataset:
            if hasattr(classifier, 'sqlflow_predict_one'):
                result = classifier.sqlflow_predict_one(features)
            else:
                result = classifier.predict_on_batch(features)
            # FIXME(typhoonzero): determine the predict result is
            # classification by adding the prediction result together
            # to see if it is close to 1.0.
            if len(result[0]) == 1:  # regression result
                result = result[0][0]
            else:
                # use 'total' so the builtin sum() is not shadowed
                total = sum(result[0])
                if np.isclose(total, 1.0):  # classification result
                    result = result[0].argmax(axis=-1)
                else:
                    result = result[0]  # multiple regression result
            row = []
            for idx, name in enumerate(feature_column_names):
                val = features[name].numpy()[0][0]
                row.append(str(val))
            if isinstance(result, np.ndarray):
                if len(result) > 1:
                    # NOTE(typhoonzero): if the output dimension > 1, format
                    # output tensor using a comma separated string. Only
                    # available for keras models.
                    row.append(",".join([str(i) for i in result]))
                else:
                    row.append(str(result[0]))
            else:
                row.append(str(result))
            w.write(row)
    del pred_dataset
def write_cols_from_selected(result_col_name, selected_cols):
    """Build the output column list for writing prediction results.

    Returns (write_cols, target_col_index): write_cols is selected_cols with
    result_col_name moved to the last position; target_col_index is its
    original index in selected_cols, or -1 when it was not present.
    """
    try:
        target_col_index = selected_cols.index(result_col_name)
    except ValueError:
        target_col_index = -1
    write_cols = [col for i, col in enumerate(selected_cols)
                  if i != target_col_index]
    # always keep the target column as the last column when writing results
    write_cols.append(result_col_name)
    return write_cols, target_col_index
def estimator_predict(estimator, model_params, save, result_table,
                      feature_column_names, feature_column_names_map,
                      feature_columns, feature_metas, train_label_name,
                      result_col_name, conn, predict_generator, selected_cols):
    """Predict with an exported TF Estimator SavedModel and write rows to
    result_table.

    Each DB row is converted to a tf.train.Example and fed to the model's
    'predict' signature; the class id (classification) or prediction value
    (regression) is appended as the result column.
    """
    write_cols = selected_cols[:]
    try:
        train_label_index = selected_cols.index(train_label_name)
    except ValueError:
        train_label_index = -1
    if train_label_index != -1:
        del write_cols[train_label_index]
    write_cols.append(result_col_name)

    # load from the exported model (the export path was written by training)
    with open("exported_path", "r") as fn:
        export_path = fn.read()
    if tf_is_version2():
        imported = tf.saved_model.load(export_path)
    else:
        imported = tf.saved_model.load_v2(export_path)

    def add_to_example(example, x, i):
        """Append feature i of row x to the tf.train.Example in place."""
        feature_name = feature_column_names[i]
        dtype_str = feature_metas[feature_name]["dtype"]
        if feature_metas[feature_name]["delimiter"] != "":
            # NOTE(typhoonzero): sparse feature will get
            # (indices,values,shape) here, use indices only
            values = x[0][i][0].flatten()
            if dtype_str == "float32" or dtype_str == "float64":
                example.features.feature[feature_name].float_list.value.extend(
                    list(values))
            elif dtype_str == "int32" or dtype_str == "int64":
                example.features.feature[feature_name].int64_list.value.extend(
                    list(values))
        else:
            if "feature_columns" in feature_columns:
                idx = feature_column_names.index(feature_name)
                fc = feature_columns["feature_columns"][idx]
            else:
                # DNNLinearCombinedXXX have dnn_feature_columns and
                # linear_feature_columns param.
                idx = -1
                try:
                    idx = feature_column_names_map[
                        "dnn_feature_columns"].index(feature_name)
                    fc = feature_columns["dnn_feature_columns"][idx]
                except (KeyError, ValueError):
                    try:
                        idx = feature_column_names_map[
                            "linear_feature_columns"].index(feature_name)
                        fc = feature_columns["linear_feature_columns"][idx]
                    except (KeyError, ValueError):
                        pass
                if idx == -1:
                    # NOTE: the original message was never %-formatted
                    raise ValueError(
                        "can not find feature %s in any feature columns" %
                        feature_name)
            if dtype_str == "float32" or dtype_str == "float64":
                # need to pass a tuple(float, )
                example.features.feature[feature_name].float_list.value.extend(
                    (float(x[0][i][0]), ))
            elif dtype_str == "int32" or dtype_str == "int64":
                # numeric_column features must be written as floats even when
                # the DB column is integer typed
                numeric_type = type(tf.feature_column.numeric_column("tmp"))
                if type(fc) == numeric_type:
                    example.features.feature[
                        feature_name].float_list.value.extend(
                            (float(x[0][i][0]), ))
                else:
                    example.features.feature[
                        feature_name].int64_list.value.extend(
                            (int(x[0][i][0]), ))
            elif dtype_str == "string":
                example.features.feature[feature_name].bytes_list.value.extend(
                    x[0][i])

    def predict(x):
        """Serialize one row as an Example and run the 'predict' signature."""
        example = tf.train.Example()
        for i in range(len(feature_column_names)):
            add_to_example(example, x, i)
        return imported.signatures["predict"](
            examples=tf.constant([example.SerializeToString()]))

    with db.buffered_db_writer(conn, result_table, write_cols, 100) as w:
        for row, _ in predict_generator():
            features = db.read_features_from_row(row, selected_cols,
                                                 feature_column_names,
                                                 feature_metas)
            result = predict((features, ))
            if train_label_index != -1 and len(row) > train_label_index:
                del row[train_label_index]
            if "class_ids" in result:
                row.append(str(result["class_ids"].numpy()[0][0]))
            else:
                # regression predictions
                row.append(str(result["predictions"].numpy()[0][0]))
            w.write(row)
def pred(datasource,
         estimator_string,
         select,
         result_table,
         feature_columns,
         feature_column_names,
         feature_column_names_map,
         train_label_name,
         result_col_name,
         feature_metas=None,
         model_params=None,
         save="",
         batch_size=1):
    """Entry point: run prediction with either a Keras model or a TF
    Estimator and write the results to result_table.

    feature_metas/model_params default to fresh empty dicts.  The original
    used mutable default arguments, and model_params IS mutated below
    (update() and ['model_dir']), so state leaked between calls.
    """
    feature_metas = {} if feature_metas is None else feature_metas
    model_params = {} if model_params is None else model_params
    estimator = import_model(estimator_string)
    model_params.update(feature_columns)
    is_estimator = is_tf_estimator(estimator)

    conn = db.connect_with_data_source(datasource)
    predict_generator = db.db_generator(conn, select)
    selected_cols = db.selected_cols(conn, select)

    if not is_estimator:
        if not issubclass(estimator, tf.keras.Model):
            # functional model need field_metas parameter
            model_params["field_metas"] = feature_metas
        print("Start predicting using keras model...")
        keras_predict(estimator, model_params, save, result_table,
                      feature_column_names, feature_metas, train_label_name,
                      result_col_name, conn, predict_generator, selected_cols)
    else:
        model_params['model_dir'] = save
        print("Start predicting using estimator model...")
        estimator_predict(estimator, model_params, save, result_table,
                          feature_column_names, feature_column_names_map,
                          feature_columns, feature_metas, train_label_name,
                          result_col_name, conn, predict_generator,
                          selected_cols)
    print("Done predicting. Predict table : %s" % result_table)
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class RBFNet(tf.keras.Model):
    """RBF-style network: Dense -> Gaussian activation exp(-x^2/2) -> Dense.

    n: number of hidden units; input_dim/output_dim: feature sizes of the
    last tensor axis.
    """

    def __init__(self, n, input_dim=1, output_dim=1):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.n = n
        self.layer_1 = layers.Dense(n, name="dense_1")
        self.layer_2 = layers.Dense(output_dim, name="dense_2")

    def _forward(self, x):
        # shared forward pass (train/test/call all used identical bodies)
        x = self.layer_1(x)
        # BUGFIX: the original train() asserted against an undefined global
        # 'n' instead of self.n
        assert x.shape[-1] == self.n
        x = tf.exp(-tf.pow(x, 2) / 2)
        x = self.layer_2(x)
        assert x.shape[-1] == self.output_dim
        return x

    def train(self, x):
        # x: [batch_size, number, input_dim]
        assert len(x.shape) == 3, "error: " + str(x.shape)
        assert x.shape[-1] == self.input_dim, "error: " + str(x.shape) + " " + str(self.input_dim)
        return self._forward(x)

    def test(self, x):
        # x: [number, input_dim]
        return self._forward(x)

    def call(self, x):
        # accepts either batched or unbatched input, like the original
        assert x.shape[-1] == self.input_dim, "error: " + str(x.shape) + " " + str(self.input_dim)
        return self._forward(x)
def get_data_min_max(x):
    """Return the minimum and maximum of list *x* as shape-[1] float32 tensors."""
    tensor = tf.expand_dims(tf.constant(x, dtype=tf.float32), axis=-1)
    minimum = tf.expand_dims(tf.reduce_min(tensor), axis=-1)
    maximum = tf.expand_dims(tf.reduce_max(tensor), axis=-1)
    return minimum, maximum
def norm_data(x, x_min, x_max):
    """Min-max normalize list *x* into [0, 1] as a [len(x), 1] float32 tensor."""
    tensor = tf.expand_dims(tf.constant(x, dtype=tf.float32), axis=-1)
    return (tensor - x_min) / (x_max - x_min)
def inv_norm_data(x, x_min, x_max):
    """Undo min-max normalization: map *x* from [0, 1] back to [x_min, x_max]."""
    return x_min + x * (x_max - x_min)
def train_model(x_train, y_train, x_min, x_max, y_min, y_max):
    """Fit the global model on normalized data; return (epochs, final loss)."""
    global model
    epochs = 100
    history = model.fit(
        norm_data(x_train, x_min, x_max),
        norm_data(y_train, y_min, y_max),
        epochs=epochs,
        verbose=0)
    return epochs, history.history["loss"][-1]
def test_model(x_test, x_min, x_max, y_min, y_max):
    """Predict with the global model on x_test; return denormalized values as a list."""
    global model
    predictions = model(norm_data(x_test, x_min, x_max))
    predictions = inv_norm_data(predictions, y_min, y_max)
    # Drop the trailing feature axis, then convert to a plain Python list.
    predictions = predictions[..., 0]
    return np.array(predictions).tolist()
def get_new_model(n):
    """Replace the global model with a freshly compiled RBFNet of width n."""
    global model
    model = RBFNet(n)
    model.compile(loss="mean_squared_error")


# Build the initial global model (width 30), matching the original module state.
get_new_model(30)
def draw(x_train, y_train, x_test, y_test):
    """Plot training points (red dots) and the prediction curve (blue line)."""
    plt.plot(np.array(x_train), np.array(y_train), ".y", color="r")
    plt.plot(np.array(x_test), np.array(y_test), color="b")
    plt.show()
if __name__ == "__main__":
    # Earlier toy datasets, kept for reference:
    #x_train = [-20, -10.0, 10.0, 30.0, 30.5, 50.0]
    #y_train = [10, 10.2, 10.5, -40.5, 20.3, -10.6]
    #x_test = [i for i in range(-50, 50)]
    #x_train = [-100, -50, 0.0, 50, 100]
    #y_train = [100, -100, -100, 100, 100]
    #x_test = [ i for i in range(-100, 101) ]
    # Four training points; the model is evaluated over the range [0, 1000).
    x_train = [115, 393, 546, 810]
    y_train = [182, 340, 126, 204]
    x_test = [i for i in range(0, 1000)]
    # Normalization bounds come from the training data only.
    x_min, x_max = get_data_min_max(x_train)
    y_min, y_max = get_data_min_max(y_train)
    #for i in range(30):
    #    train_model(x_train, y_train, x_min, x_max, y_min, y_max)
    #    y_test = test_model(x_test, x_min, x_max, y_min, y_max)
    #    draw(x_train, y_train, x_test, y_test)
    # Train once, report (epochs, final loss), then plot predictions.
    epo, loss = train_model(x_train, y_train, x_min, x_max, y_min, y_max)
    print(epo, loss)
    #print(train_info.params)
    #print(train_info.history.keys())
    #print(train_info.history["loss"])
    y_test = test_model(x_test, x_min, x_max, y_min, y_max)
    draw(x_train, y_train, x_test, y_test)
    print(y_test, type(y_test))
    print(y_test[0], type(y_test[0]))
|
import networkx as nx
from subgraph_generator import gen_subgraph_list
def get_api_info():
    """Read the 'Sensitive' file (lines of "<api>#<coefficient>").

    Returns:
        tuple: (list of API names, dict mapping API name -> float coefficient)
    """
    with open('Sensitive') as handle:
        entries = [raw.strip().split('#') for raw in handle.readlines()]
    names = [entry[0] for entry in entries]
    coefficients = {entry[0]: float(entry[1]) for entry in entries}
    return names, coefficients
def merge_subgraphs(subgraph_list, common_api_list):
    """Merge subgraphs that share at least one sensitive API node.

    Repeatedly composes any pair of subgraphs whose node sets overlap on
    ``common_api_list`` until no such pair remains.

    Bug fix: the original rebound the *local* name ``subgraph_list`` to a new
    list, so the caller's list was never updated and the merge had no effect.
    The list is now mutated in place (and also returned for convenience).
    """
    combining = True
    while combining:
        combining = False
        count = len(subgraph_list)
        for i in range(count):
            for j in range(i + 1, count):
                first = subgraph_list[i]
                second = subgraph_list[j]
                first_apis = set(common_api_list).intersection(set(first.nodes()))
                second_apis = set(common_api_list).intersection(set(second.nodes()))
                if first_apis.intersection(second_apis):
                    merged = nx.compose(first, second)
                    # Replace the two merged graphs with their composition,
                    # updating the caller's list in place.
                    remaining = [g for g in subgraph_list
                                 if g is not first and g is not second]
                    remaining.append(merged)
                    subgraph_list[:] = remaining
                    combining = True
                    break
            if combining:
                # Indices are stale after a merge; restart the scan.
                break
    return subgraph_list
def subgraph_sensitivity(graph, api_list, sen_coeff):
    """Sum the sensitivity coefficients of the graph's sensitive API nodes."""
    sensitive_nodes = set(api_list).intersection(set(graph.nodes()))
    return sum(sen_coeff[api] for api in sensitive_nodes)
def gen_sensitive_subgraph(call_graph):
    """Return the merged subgraph of call_graph with the highest sensitivity.

    Returns an empty list when no candidate subgraph exists (preserving the
    original fallback value).
    """
    api_list, api_coeff = get_api_info()
    common_api = list(set(api_list).intersection(set(call_graph.nodes())))
    subgraph_list = gen_subgraph_list(call_graph, common_api)
    merge_subgraphs(subgraph_list, common_api)
    try:
        sensitive_subgraph = max(
            subgraph_list,
            key=lambda graph: subgraph_sensitivity(graph, common_api, api_coeff))
    # Bug fix: the original bare `except:` swallowed every error (including
    # KeyboardInterrupt); only an empty candidate list (ValueError from max)
    # is the expected failure here.
    except ValueError:
        sensitive_subgraph = []
    return sensitive_subgraph
# Interactive console front-end: load the heavy dependencies and the three
# trained models up front, printing '*' as a crude progress indicator.
print("Please wait while we import some important libraries and models to run the software.")
import os
# Silence TensorFlow's C++ log output (3 = errors only); must be set before TF loads.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print("*")
from tkinter import Tk
print("*")
from tkinter.filedialog import askopenfilename
print("*")
from time import sleep
print("*")
import pandas as pd
print("*")
import numpy as np
print("*")
import cv2
print("*")
from keras.models import load_model
print("*")
# Pre-trained models. NOTE(review): absolute Windows paths — these only
# resolve on the author's machine.
annmodel = load_model(r'C:\git\Breast-Cancer-Detection-Using-CNN\results\ANN.h5')
print("*")
base_cnnmodel = load_model(r'C:\git\Breast-Cancer-Detection-Using-CNN\results\base_CNN.h5')
print("*")
final_cnnmodel = load_model(r'C:\git\Breast-Cancer-Detection-Using-CNN\results\final_CNN.h5')
print("*")
print("The libraries and models are imported successfully.")
sleep(4)
os.system('cls')  # Windows-only console clear
# Draw a welcome banner n characters wide.
n=100
print("*"*n)
print("|"," "*(n-4),"|")
print("|"," "*23,"Welcome to the Breast Cancer Detection Software", " "*24,"|")
print("|"," "*30,"made by Jai, Arishti and Tushar"," "*33,"|")
print("|"," "*(n-4),"|")
print("*"*n)
sleep(2)
input("\n\nPress any key to continue.\n")
# Main interaction loop: pick an image, optionally preview it, run the three
# models on it, and report predictions until the user chooses to exit.
status = 1
while(status!=2):
    os.system('cls')
    sleep(0.75)
    print("Please select an image to detect:\n")
    sleep(1)
    # Hide the empty Tk root window, then open a native file-picker dialog.
    Tk().withdraw()
    img_path = askopenfilename()
    sleep(1)
    img = cv2.imread(img_path)
    print("The image is loaded successfully.")
    sleep(2)
    print("\nPress")
    sleep(0.25)
    opt = input("1: View image.\n2: Detect Breast Cancer.\n3: Select a new image\n")
    sleep(0.5)
    if opt == "1":
        print("\nDisplaying image.\n")
        sleep(1)
        cv2.imshow('Image Specimen', cv2.resize(img,(250,250)))
        sleep(1.25)
        print("Close image to proceed.\n\n")
        cv2.waitKey(0)
    # Keep re-prompting while the user keeps viewing the image; any invalid
    # choice is treated as "view again".
    while(opt=="1"):
        print("\nPress")
        sleep(0.25)
        opt = input("1: View image again.\n2: Detect Breast Cancer.\n3: Select a new image\n")
        sleep(0.5)
        if opt == "1":
            print("\nDisplaying image.\n")
            sleep(1)
            cv2.imshow('Image Specimen', cv2.resize(img,(250,250)))
            sleep(1)
            print("Close image to proceed.\n\n")
            cv2.waitKey(0)
        if opt not in ["1","2","3"]:
            opt="1"
    if opt != "3":
        sleep(1)
        print("\nProcessing...\n")
        sleep(4)
        # The models were trained on 50x50 inputs scaled to [0, 1].
        img = cv2.resize(img, (50,50), interpolation=cv2.INTER_CUBIC)
        test_input = img/255.0
        test_input = np.array([test_input,])
        annpred = annmodel.predict(test_input).argmax()
        base_cnnpred = base_cnnmodel.predict(test_input).argmax()
        final_cnnpred = final_cnnmodel.predict(test_input).argmax()
        # The dataset encodes the true class as the 5th-from-last character
        # of the filename ("0" = benign, "1" = malignant).
        label = img_path[-5]
        if label not in ["0","1"]:
            label = "unknown"
        input("Press Enter to view Results")
        sleep(1)
        print("\nHere are our predictions:")
        sleep(2)
        print('\nPredicted Value using ann model =',annpred)
        sleep(2)
        print('\nPredicted Value using base cnn model =',base_cnnpred)
        sleep(2)
        print('\nPredicted Value using final cnn model =',final_cnnpred)
        sleep(2)
        print("\nTrue Value =",label)
        sleep(1)
        if label == "unknown":
            print("True value is unknown.")
            # Bug fix: argmax() returns an integer, so compare against 0 —
            # the original compared against the string "0", which was always
            # False and reported every unknown sample as malignant.
            result = "benign" if final_cnnpred == 0 else "malignant"
            print("\nPredicted value",final_cnnpred,"means the sample tested is",result)
        else:
            result = "benign" if label == "0" else "malignant"
            print("\n",label,"means the sample tested is",result)
        sleep(5)
    status = input("\nPress \n 1 : Select a new image. \n 2 : Exit.\n")
    while status not in ["1", "2"]:
        status = input("\nWrong input.\nPress \n 1 : Select a new image. \n 2 : Exit.\n")
    status = 2 if status!= "1" else 1
    print(status)
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tarfile
import mock
from nova import test
from nova.virt.xenapi.image import utils
@mock.patch.object(utils, 'IMAGE_API')
class GlanceImageTestCase(test.NoDBTestCase):
    """Tests for utils.GlanceImage with the glance IMAGE_API patched out."""

    def _get_image(self):
        # Every test operates on a GlanceImage built from sentinel values.
        return utils.GlanceImage(mock.sentinel.context,
                                 mock.sentinel.image_ref)

    def test_meta(self, mocked):
        # .meta should come straight from IMAGE_API.get().
        mocked.get.return_value = mock.sentinel.meta
        self.assertEqual(mock.sentinel.meta, self._get_image().meta)
        mocked.get.assert_called_once_with(mock.sentinel.context,
                                           mock.sentinel.image_ref)

    def test_download_to(self, mocked):
        # download_to() delegates to IMAGE_API.download() and returns its value.
        mocked.download.return_value = None
        outcome = self._get_image().download_to(mock.sentinel.fobj)
        self.assertIsNone(outcome)
        mocked.download.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.image_ref,
                                                mock.sentinel.fobj)

    def test_is_raw_tgz_empty_meta(self, mocked):
        # Missing metadata means the image is not a raw tgz.
        mocked.get.return_value = {}
        self.assertEqual(False, self._get_image().is_raw_tgz())

    def test_is_raw_tgz_for_raw_tgz(self, mocked):
        # disk_format 'raw' + container_format 'tgz' identifies a raw tgz.
        mocked.get.return_value = {'disk_format': 'raw',
                                   'container_format': 'tgz'}
        self.assertEqual(True, self._get_image().is_raw_tgz())

    def test_data(self, mocked):
        # data() exposes the raw download result.
        mocked.download.return_value = mock.sentinel.image
        self.assertEqual(mock.sentinel.image, self._get_image().data())
class RawImageTestCase(test.NoDBTestCase):
    """Tests for utils.RawImage (mox record/replay style)."""
    def test_get_size(self):
        # get_size() should coerce the glance 'size' metadata string to int.
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        glance_image.meta = {'size': '123'}
        raw_image = utils.RawImage(glance_image)
        self.mox.ReplayAll()
        self.assertEqual(123, raw_image.get_size())
    def test_stream_to(self):
        # stream_to() should delegate to the glance image's download_to().
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        glance_image.download_to('file').AndReturn('result')
        raw_image = utils.RawImage(glance_image)
        self.mox.ReplayAll()
        self.assertEqual('result', raw_image.stream_to('file'))
class TestIterableBasedFile(test.NoDBTestCase):
    """Tests for utils.IterableToFileAdapter's file-like read() semantics."""

    def _adapter(self, chunks):
        # Helper: wrap an iterable of chunks in the adapter under test.
        return utils.IterableToFileAdapter(chunks)

    def test_constructor(self):
        class FakeIterable(object):
            def __iter__(_self):
                return 'iterator'
        # The adapter should capture iter(...) of its argument verbatim.
        self.assertEqual('iterator', self._adapter(FakeIterable()).iterator)

    def test_read_one_character(self):
        self.assertEqual('c', self._adapter(['chunk1', 'chunk2']).read(1))

    def test_read_stores_remaining_characters(self):
        adapter = self._adapter(['chunk1', 'chunk2'])
        adapter.read(1)
        # The unread tail of the current chunk is kept for the next read().
        self.assertEqual('hunk1', adapter.remaining_data)

    def test_read_remaining_characters(self):
        adapter = self._adapter(['chunk1', 'chunk2'])
        self.assertEqual('c', adapter.read(1))
        self.assertEqual('h', adapter.read(1))

    def test_read_reached_end_of_file(self):
        adapter = self._adapter(['chunk1', 'chunk2'])
        self.assertEqual('chunk1', adapter.read(100))
        self.assertEqual('chunk2', adapter.read(100))
        # Exhausted adapter reads as empty string, like a real file.
        self.assertEqual('', adapter.read(100))

    def test_empty_chunks(self):
        # Empty chunks are skipped until real data appears.
        self.assertEqual('chunk2', self._adapter(['', '', 'chunk2']).read(100))
class RawTGZTestCase(test.NoDBTestCase):
    """Tests for utils.RawTGZImage (mox record/replay style).

    NOTE: mox expectation ordering is significant — calls are recorded
    before ReplayAll() and verified implicitly on teardown.
    """
    def test_as_tarfile(self):
        # _as_tarfile() opens the streamed file as a gzip tar stream.
        image = utils.RawTGZImage(None)
        self.mox.StubOutWithMock(image, '_as_file')
        self.mox.StubOutWithMock(utils.tarfile, 'open')
        image._as_file().AndReturn('the_file')
        utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf')
        self.mox.ReplayAll()
        result = image._as_tarfile()
        self.assertEqual('tf', result)
    def test_as_file(self):
        # _as_file() adapts the glance image's iterable data to a file object.
        self.mox.StubOutWithMock(utils, 'IterableToFileAdapter')
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        image = utils.RawTGZImage(glance_image)
        glance_image.data().AndReturn('iterable-data')
        utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file')
        self.mox.ReplayAll()
        result = image._as_file()
        self.assertEqual('data-as-file', result)
    def test_get_size(self):
        # get_size() reads the first tar member's size and caches the
        # tarfile/tarinfo for later streaming.
        tar_file = self.mox.CreateMock(tarfile.TarFile)
        tar_info = self.mox.CreateMock(tarfile.TarInfo)
        image = utils.RawTGZImage(None)
        self.mox.StubOutWithMock(image, '_as_tarfile')
        image._as_tarfile().AndReturn(tar_file)
        tar_file.next().AndReturn(tar_info)
        tar_info.size = 124
        self.mox.ReplayAll()
        result = image.get_size()
        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, tar_info)
        self.assertEqual(image._tar_file, tar_file)
    def test_get_size_called_twice(self):
        # A second get_size() must reuse the cached tar info rather than
        # re-reading the stream (mox would fail on an unexpected next()).
        tar_file = self.mox.CreateMock(tarfile.TarFile)
        tar_info = self.mox.CreateMock(tarfile.TarInfo)
        image = utils.RawTGZImage(None)
        self.mox.StubOutWithMock(image, '_as_tarfile')
        image._as_tarfile().AndReturn(tar_file)
        tar_file.next().AndReturn(tar_info)
        tar_info.size = 124
        self.mox.ReplayAll()
        image.get_size()
        result = image.get_size()
        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, tar_info)
        self.assertEqual(image._tar_file, tar_file)
    def test_stream_to_without_size_retrieved(self):
        # stream_to() extracts the first member, copies it to the target and
        # closes the source tar.
        # NOTE(review): CreateMock(file) relies on the Python 2 built-in
        # `file` type — this module predates Python 3.
        source_tar = self.mox.CreateMock(tarfile.TarFile)
        first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
        target_file = self.mox.CreateMock(file)
        source_file = self.mox.CreateMock(file)
        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')
        self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
        self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
        image._as_tarfile().AndReturn(source_tar)
        source_tar.next().AndReturn(first_tarinfo)
        source_tar.extractfile(first_tarinfo).AndReturn(source_file)
        utils.shutil.copyfileobj(source_file, target_file)
        source_tar.close()
        self.mox.ReplayAll()
        image.stream_to(target_file)
    def test_stream_to_with_size_retrieved(self):
        # Same as above, but a prior get_size() call has already consumed the
        # first tar member, so stream_to() must use the cached tarinfo.
        source_tar = self.mox.CreateMock(tarfile.TarFile)
        first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
        target_file = self.mox.CreateMock(file)
        source_file = self.mox.CreateMock(file)
        first_tarinfo.size = 124
        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')
        self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
        self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
        image._as_tarfile().AndReturn(source_tar)
        source_tar.next().AndReturn(first_tarinfo)
        source_tar.extractfile(first_tarinfo).AndReturn(source_file)
        utils.shutil.copyfileobj(source_file, target_file)
        source_tar.close()
        self.mox.ReplayAll()
        image.get_size()
        image.stream_to(target_file)
|
# For each of t test cases, print the input string's even-indexed characters,
# a space, then its odd-indexed characters.
# Bug fix: int(input()) replaces the original eval(input()) — eval on raw
# user input can execute arbitrary code.
t = int(input())
while t:
    t -= 1
    s = input()
    # s[::2] = characters at even indices, s[1::2] = characters at odd indices.
    print(s[::2] + " " + s[1::2])
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# removing duplicates from class list
def remove_duplicates(values):
    """Return *values* with duplicates dropped, keeping first-seen order."""
    seen = set()
    unique = []
    for item in values:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
# Scrape CS course listings from the UNLV catalog with Selenium.
# NOTE(review): the trailing `print description.text` is Python 2 syntax —
# this script only runs under Python 2.
# Creates a Firefox instance
driver = webdriver.Firefox()
# Navigate to a page given by the URL
driver.get("http://catalog.unlv.edu")
# Locates search field and saves it
elem = driver.find_element_by_name("filter[keyword]")
# Types "CS" into saved search field
elem.send_keys("CS")
# Presses enter to search for "CS"
elem.send_keys(Keys.RETURN)
# In case nothing is found
assert "No results found." not in driver.page_source
# Save all links that begin with "CS"
classLinks = driver.find_elements_by_partial_link_text('CS')
# Course names will be written to this file
# NOTE(review): the file handle is never closed or flushed.
f = open('computer-science-list','w')
# Write course names to file
for i in classLinks:
    f.write(i.text)
    f.write('\n')
# Empty list
plainTextClassLinks = []
# Save link text to list
for i in classLinks:
    plainTextClassLinks.append(i.text)
# function call to remove duplicates items from list
plainTextClassLinks = remove_duplicates(plainTextClassLinks)
# Click all class links to expose number of credits and pre reqs
for i in classLinks:
    linkToClick = i
    linkToClick.click()
# Empty list
classDescriptions = []
# starting expath for class descriptions
# tbody/tr[3] is the first course on the site
classDescriptionsXpath ='//*[@id="gateway-page"]/body/table/tbody/tr[3]/td[2]/table/tbody/tr[2]/td[2]/table/tbody/tr/td/table[2]/tbody/tr[3]/td/table/tbody/tr/td/div[2]'
# number of courses
numberOfClassLinks = len(classLinks)
# Save class descriptions
classDescriptions = driver.find_elements_by_xpath('//*[@id="gateway-page"]/body/table/tbody/tr[3]/td[2]/table/tbody/tr[2]/td[2]/table/tbody/tr/td/table[2]/tbody/tr[4]/td/table/tbody/tr/td/div[2]')
#classDescriptions = driver.find_elements_by_xpath('//*[@id="gateway-page"]/body/table/tbody/tr[3]/td[2]/table/tbody/tr[2]/td[2]/table/tbody/tr/td/table[2]/')
for description in classDescriptions:
    print description.text
# Close Firefox browser
# NOTE(review): the script ends without calling driver.close()/driver.quit().
|
# Copyright (c) 2021 Ichiro ITS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio
import rclpy
import websockets
from typing import List
from kumo.handlers.session_handler import Connection, SessionHandler
class Bridge:
    """WebSocket server bridging clients to ROS via per-connection sessions."""

    def __init__(self, port: int, hosts: List[str]):
        self.port = port
        self.hosts = hosts
        self.logger = rclpy.logging.get_logger('bridge')

    async def listen(self, connection: Connection, path: str) -> None:
        """Drive one client session until its socket closes or the process is interrupted."""
        self.logger.info('Session started!')
        handler = SessionHandler(connection)
        while True:
            try:
                await handler.process()
            except websockets.ConnectionClosed as e:
                # Normal termination: the client went away.
                self.logger.warn('Session closed! %s' % str(e))
                return handler.destroy()
            except KeyboardInterrupt as e:
                # Clean up the session, then let the interrupt propagate.
                handler.destroy()
                raise e
            except Exception as e:
                # Any other failure is logged and the session keeps running.
                self.logger.error('Something happened! %s' % str(e))

    def run(self) -> None:
        """Initialize ROS, start the WebSocket server and block on the event loop."""
        rclpy.init()
        try:
            self.logger.info('Starting bridge server on port %d...' % (self.port))
            server = websockets.serve(self.listen, self.hosts, self.port)
            asyncio.get_event_loop().run_until_complete(server)
            asyncio.get_event_loop().run_forever()
        except KeyboardInterrupt as e:
            self.logger.error('Keyboard interrupt! %s' % str(e))
        except Exception as e:
            self.logger.error('Failed to start bridge! %s' % str(e))
        rclpy.shutdown()
|
# Muhammad Ibrahim (mi2ye)
# Binary-search number guesser: the program repeatedly guesses the midpoint of
# the remaining [lower_bound, upper_bound] range based on the user's feedback,
# and calls out contradictory answers.
print('Think of a number between 1 and 100 and I\'ll guess it.')
guesses = int(input('How many guesses do I get? '))
upper_bound = 100
lower_bound = 0
number_of_guesses = 0
previous_mean_bounds = 0
answer = 'none'
while number_of_guesses < guesses:
    # Next guess is the midpoint of the current range.
    mean_bounds = ((upper_bound + lower_bound) // 2)
    # Impossible answers at the extremes of the allowed range.
    if answer == 'higher' and lower_bound == 100:
        print('It can\'t be higher than 100!')
        break
    elif answer == 'lower' and upper_bound == 0:
        print('It can\'t be lower than 0!')
        break
    # Contradiction checks: the range has collapsed, so the user's answers
    # cannot all be true.
    if answer == 'higher' and abs(mean_bounds - previous_mean_bounds) == 0:
        print('Wait; how can it be both higher than', previous_mean_bounds, 'and lower than', str(mean_bounds + 1) + '?')
        break
    if answer == 'lower' and abs(mean_bounds - previous_mean_bounds) <= 1:
        print('Wait; how can it be both higher than', mean_bounds, 'and lower than', str(previous_mean_bounds) + '?')
        break
    answer = input('Is the number higher, lower, or the same as ' + str(mean_bounds) + '? ')
    # Narrow the search range according to the reply.
    if answer == 'higher':
        lower_bound = mean_bounds
    elif answer == 'lower':
        upper_bound = mean_bounds
    elif answer == 'same':
        print('I won!')
        break
    previous_mean_bounds = mean_bounds
    number_of_guesses += 1
# Out of guesses: either the last answer was 'same' (win), or ask for the
# number and sanity-check it against the user's own earlier feedback.
if number_of_guesses == guesses and answer == 'same':
    print('I won!')
elif number_of_guesses == guesses:
    correct = int(input('I lost; what was the answer? '))
    if lower_bound < correct < upper_bound:
        print('Well played!')
    elif lower_bound > correct:
        print('That can\'t be; you said it was higher than', str(lower_bound) + '!')
    elif upper_bound < correct:
        print('That can\'t be; you said it was lower than', str(upper_bound) + '!')
|
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
class Canny():
    """From-scratch Canny edge detector.

    Pipeline: grayscale -> Gaussian smoothing -> Sobel gradients ->
    non-maximum suppression -> double-threshold hysteresis linking.
    """
    def __init__(self, image_path):
        self.image_path = image_path
    ################################################
    # Custom padding helper
    ################################################
    def Padding(self, image, kernels_size, stride=(1, 1), padding="same"):
        '''
        Pad an image for convolution.
        :param image: 2-D image matrix to pad
        :param kernels_size: kernel size [h, w]
        :param stride: convolution stride [vertical, horizontal]
        :param padding: "same" pads with zeros; any other value is a no-op
        :return: the padded (uint8) image
        '''
        if padding == "same":
            h, w = image.shape
            p_h = max((stride[0] * (h - 1) - h + kernels_size[0]), 0)  # zeros to add vertically
            p_w = max((stride[1] * (w - 1) - w + kernels_size[1]), 0)  # zeros to add horizontally
            p_h_top = p_h // 2       # zeros on top
            p_h_bottom = p_h - p_h_top   # zeros on the bottom
            p_w_left = p_w // 2      # zeros on the left
            p_w_right = p_w - p_w_left   # zeros on the right
            padding_image = np.zeros((h + p_h, w + p_w), dtype=np.uint8)
            # Copy the original image into the centre of the zero canvas.
            padding_image[p_h_top:p_h_top + h, p_w_left:p_w_left + w] = image
            return padding_image
        else:
            return image
    #######################################################################################
    # Grayscale conversion
    #######################################################################################
    def gray(self):
        '''
        Convert the image at self.image_path to grayscale.
        Classic luma weights are used; cv2.imread loads channels in BGR order:
        Gray = B*0.114 + G*0.587 + R*0.299
        :return: uint8 grayscale image
        '''
        img = cv2.imread(self.image_path)
        imgInfo = img.shape
        gray = np.zeros((imgInfo[0], imgInfo[1]), dtype=np.uint8)
        for i in range(imgInfo[0]):
            for j in range(imgInfo[1]):
                gray[i][j] = img[i][j][0] * 0.114 + img[i][j][1] * 0.587 + img[i][j][2] * 0.299
        return gray
    #######################################################################################
    # Gaussian smoothing
    #######################################################################################
    def gaussian_smooth_filter(self, img_gray):
        """Smooth a grayscale image with a normalized 5x5 Gaussian kernel.

        Kernel: H[i, j] = 1/(2*pi*sigma^2) * exp(-((i-k)^2 + (j-k)^2) / (2*sigma^2))
        where k is the kernel centre offset for 0-based indices.
        """
        sigma1 = sigma2 = 1.52  # standard deviation
        gau_sum = 0
        dim = 5  # kernel size
        k = (dim - 1) / 2  # centre offset
        Gaussian_filter = np.zeros([dim, dim])
        for i in range(dim):
            for j in range(dim):
                # Bug fix: use (i - k), not (i - k - 1); the original off-by-one
                # produced an off-centre, asymmetric kernel for 0-based indices.
                Gaussian_filter[i, j] = math.exp((-1 / (2 * sigma1 * sigma2)) * (np.square(i - k) + np.square(j - k))) / (2 * math.pi * sigma1 * sigma2)
                gau_sum = gau_sum + Gaussian_filter[i, j]
        # Normalize so the kernel sums to 1.
        Gaussian_filter = Gaussian_filter / gau_sum
        # Convolve with stride 1 and "same" padding.
        H, W = img_gray.shape
        new_gray = np.zeros(img_gray.shape)
        img_gray = self.Padding(img_gray, kernels_size=Gaussian_filter.shape, stride=[1, 1], padding="same")
        for i in range(H):
            for j in range(W):
                new_gray[i, j] = (np.sum(img_gray[i:i + dim, j:j + dim] * Gaussian_filter))
        return new_gray
    #######################################################################################
    # Sobel gradient computation
    #######################################################################################
    def sobel_filter(self, image):
        """Compute per-pixel Sobel gradient magnitude and direction (dy/dx)."""
        h = image.shape[0]
        w = image.shape[1]
        sobel_filter_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
        sobel_filter_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
        image_padding = self.Padding(image, kernels_size=sobel_filter_x.shape, stride=[1, 1], padding="same")
        image_gradient_value = np.zeros(image.shape)
        image_gradient_direction = np.zeros(image.shape)
        for i in range(h):
            for j in range(w):
                dx = np.sum(image_padding[i:i + 3, j:j + 3] * sobel_filter_x)
                dy = np.sum(image_padding[i:i + 3, j:j + 3] * sobel_filter_y)
                image_gradient_value[i][j] = np.sqrt(np.square(dx) + np.square(dy))
                # Small epsilon avoids division by zero for vertical edges.
                image_gradient_direction[i][j] = dy / (dx + 0.000000001)
        return image_gradient_value, image_gradient_direction
    #######################################################################################
    # Non-maximum suppression along the gradient direction
    #######################################################################################
    def Non_maximum_suppression(self, image_gradient_value, image_gradient_direction):
        """Thin edges: keep a pixel only if its gradient magnitude beats both
        linearly-interpolated neighbours along the gradient direction."""
        H, W = image_gradient_value.shape
        img_NMS = np.zeros([H, W])
        for i in range(1, H - 1):
            for j in range(1, W - 1):
                flag = True  # whether this pixel survives suppression
                temp = image_gradient_value[i - 1:i + 2, j - 1:j + 2]  # 3x3 magnitude neighbourhood
                angle = np.abs(image_gradient_direction[i, j])
                # Case 1: steep negative slope (dy/dx < -1)
                if image_gradient_direction[i, j] < -1:
                    dTmp1 = (temp[0, 0] - temp[0, 1]) / angle + temp[0, 1]
                    dTmp2 = (temp[2, 2] - temp[2, 1]) / angle + temp[2, 1]
                    if not (image_gradient_value[i, j] > dTmp1 and image_gradient_value[i, j] > dTmp2):
                        flag = False
                # Case 2: steep positive slope (dy/dx > 1)
                elif image_gradient_direction[i, j] > 1:
                    dTmp1 = (temp[0, 2] - temp[0, 1]) / angle + temp[0, 1]
                    dTmp2 = (temp[2, 0] - temp[2, 1]) / angle + temp[2, 1]
                    if not (image_gradient_value[i, j] > dTmp1 and image_gradient_value[i, j] > dTmp2):
                        flag = False
                # Case 3: shallow positive slope (0 <= dy/dx <= 1)
                elif image_gradient_direction[i, j] >= 0:
                    dTmp1 = (temp[0, 2] - temp[1, 2]) * angle + temp[1, 2]
                    dTmp2 = (temp[2, 0] - temp[1, 0]) * angle + temp[1, 0]
                    if not (image_gradient_value[i, j] > dTmp1 and image_gradient_value[i, j] > dTmp2):
                        flag = False
                # Case 4: shallow negative slope (-1 <= dy/dx < 0)
                elif image_gradient_direction[i, j] < 0:
                    dTmp1 = (temp[0, 0] - temp[1, 0]) * angle + temp[1, 0]
                    # Bug fix: interpolate between temp[2, 2] and temp[1, 2];
                    # the original mixed in temp[2, 1], breaking the symmetry
                    # with case 3's interpolation.
                    dTmp2 = (temp[2, 2] - temp[1, 2]) * angle + temp[1, 2]
                    if not (image_gradient_value[i, j] > dTmp1 and image_gradient_value[i, j] > dTmp2):
                        flag = False
                if flag:
                    img_NMS[i, j] = image_gradient_value[i, j]
        return img_NMS
    #######################################################################################
    # Double-threshold hysteresis edge linking
    #######################################################################################
    def double_threshold(self, NMS, gradient):
        """Mark strong edges (>= high threshold), drop weak pixels (<= low
        threshold), then grow strong edges into adjacent medium pixels via a
        stack-based flood fill. Modifies and returns NMS."""
        lower_boundary = gradient.mean() * 0.5
        high_boundary = lower_boundary * 3  # high threshold = 3x the low threshold
        zhan = []  # stack of confirmed strong-edge coordinates
        for i in range(1, NMS.shape[0] - 1):  # skip the 1-pixel border
            for j in range(1, NMS.shape[1] - 1):
                if NMS[i, j] >= high_boundary:  # definite edge
                    NMS[i, j] = 255
                    zhan.append([i, j])
                elif NMS[i, j] <= lower_boundary:  # definitely not an edge
                    NMS[i, j] = 0
        # Promote any weak neighbour (between the two thresholds) of a strong
        # edge to a strong edge, and push it so its own neighbours are examined.
        while not len(zhan) == 0:
            temp_1, temp_2 = zhan.pop()
            a = NMS[temp_1 - 1:temp_1 + 2, temp_2 - 1:temp_2 + 2]  # 8-neighbourhood
            if (a[0, 0] < high_boundary) and (a[0, 0] > lower_boundary):
                NMS[temp_1 - 1, temp_2 - 1] = 255
                zhan.append([temp_1 - 1, temp_2 - 1])
            if (a[0, 1] < high_boundary) and (a[0, 1] > lower_boundary):
                NMS[temp_1 - 1, temp_2] = 255
                zhan.append([temp_1 - 1, temp_2])
            if (a[0, 2] < high_boundary) and (a[0, 2] > lower_boundary):
                NMS[temp_1 - 1, temp_2 + 1] = 255
                zhan.append([temp_1 - 1, temp_2 + 1])
            if (a[1, 0] < high_boundary) and (a[1, 0] > lower_boundary):
                NMS[temp_1, temp_2 - 1] = 255
                zhan.append([temp_1, temp_2 - 1])
            if (a[1, 2] < high_boundary) and (a[1, 2] > lower_boundary):
                NMS[temp_1, temp_2 + 1] = 255
                zhan.append([temp_1, temp_2 + 1])
            if (a[2, 0] < high_boundary) and (a[2, 0] > lower_boundary):
                NMS[temp_1 + 1, temp_2 - 1] = 255
                zhan.append([temp_1 + 1, temp_2 - 1])
            if (a[2, 1] < high_boundary) and (a[2, 1] > lower_boundary):
                NMS[temp_1 + 1, temp_2] = 255
                zhan.append([temp_1 + 1, temp_2])
            if (a[2, 2] < high_boundary) and (a[2, 2] > lower_boundary):
                NMS[temp_1 + 1, temp_2 + 1] = 255
                zhan.append([temp_1 + 1, temp_2 + 1])
        # Zero out weak pixels that were never reached from a strong edge.
        for i in range(NMS.shape[0]):
            for j in range(NMS.shape[1]):
                if NMS[i, j] != 0 and NMS[i, j] != 255:
                    NMS[i, j] = 0
        return NMS
    def canny(self):
        """Run the full pipeline on this instance's image and display the result.

        Bug fix: the original ignored `self` and built a new
        Canny("lenna.png"), so the constructor's image_path argument was
        silently discarded; the instance's own methods are used now.
        """
        img_gray = self.gray()
        Gaussian = self.gaussian_smooth_filter(img_gray=img_gray)
        gradient, direction = self.sobel_filter(Gaussian)
        img_NMS = self.Non_maximum_suppression(gradient, direction)
        threshold = self.double_threshold(img_NMS, gradient)
        plt.figure()
        plt.axis('off')
        plt.imshow(threshold, cmap='gray')
        plt.show()
# canny = Canny("lenna.png").canny()
# Run the Canny pipeline step by step on lenna.png, displaying every
# intermediate stage in its own matplotlib figure.
canny = Canny("lenna.png")
img_gray = canny.gray()
# Stage 1: Gaussian-smoothed grayscale image.
Gaussian = canny.gaussian_smooth_filter(img_gray=img_gray)
plt.figure(1)
plt.axis('off')
plt.imshow(Gaussian,cmap="gray")
# Stage 2: Sobel gradient magnitude.
gradient,direction = canny.sobel_filter(Gaussian)
plt.figure(2)
plt.axis('off')
plt.imshow(gradient,cmap="gray")
# Stage 3: non-maximum suppression (edge thinning).
img_NMS = canny.Non_maximum_suppression(gradient, direction)
plt.figure(3)
plt.axis('off')
plt.imshow(img_NMS, cmap='gray')
# Stage 4: double-threshold hysteresis linking (final edges).
threshold = canny.double_threshold(img_NMS,gradient)
plt.figure(4)
plt.axis('off')
plt.imshow(threshold, cmap='gray')
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
__author__ = "Fedor Marchenko"
__email__ = "mfs90@mail.ru"
__date__ = "Jul 13, 2016"
from django.conf import settings
# Issue-tracker integration settings; each value falls back to None when the
# corresponding name is not configured in Django settings.
ISSUE_REPOSITORY_USER = getattr(settings, 'ISSUE_REPOSITORY_USER', None)
ISSUE_REPOSITORY_NAME = getattr(settings, 'ISSUE_REPOSITORY_NAME', None)
ISSUE_USER = getattr(settings, 'ISSUE_USER', None)
# NOTE(review): keeping a plaintext password in settings is a security risk.
ISSUE_USER_PASSWORD = getattr(settings, 'ISSUE_USER_PASSWORD', None)
|
'''
For every good kata idea there seem to be quite a few bad ones!
In this kata you need to check the provided array (x) for good ideas 'good' and bad ideas 'bad'.
If there are one or two good ideas, return 'Publish!', if there are more than 2 return 'I smell a series!'.
If there are no good ideas, as is often the case, return 'Fail!'.
'''
##########################################################################################################################################
def well(x):
    """Rate a list of kata ideas.

    1-2 occurrences of 'good' -> 'Publish!', three or more -> 'I smell a
    series!', none -> 'Fail!'.
    """
    good_count = x.count('good')
    if good_count in (1, 2):
        return 'Publish!'
    # Reached only when good_count is 0 or >= 3 (1 and 2 were handled above).
    if good_count > 1:
        return 'I smell a series!'
    return 'Fail!'
|
#!/usr/bin/env python
#
# Regenerate JNI headers for
#
# NOTE: doesn't work with JDK 10 because javah was removed [1] from JDK, and javac doesn't seem to
# be able to generate native headers from .class files.
#
# NOTE: this script must be python2/3 compatible
from __future__ import absolute_import, print_function
import distutils.spawn
import os
import subprocess
import sys
def _get_arcadia_root():
    """Walk upward from this script's directory to the directory containing
    the '.arcadia.root' marker file; assert if it is never found."""
    current = os.path.dirname(os.path.abspath(sys.argv[0]))
    root = None
    while True:
        if os.path.isfile(os.path.join(current, '.arcadia.root')):
            root = current
            break
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding the marker.
            break
        current = parent
    assert root is not None, 'you are probably trying to use this script with repository being checkout not from the root'
    return root
def _get_native_lib_dir(relative=None):
if relative is None:
relative = _get_arcadia_root()
return os.path.join(
relative,
os.path.join(*'catboost/jvm-packages/catboost4j-prediction/src/native_impl'.split('/')))
def _get_classes_dir():
    """Path to the compiled Java classes under the arcadia root."""
    parts = 'catboost/jvm-packages/catboost4j-prediction/target/classes'.split('/')
    return os.path.join(_get_arcadia_root(), os.path.join(*parts))
def _run_javah(args, env=None):
    """Run javah with *args*, preferring $JAVA_HOME/bin/javah when JAVA_HOME
    is set in the environment; fall back to whatever is on PATH."""
    env = os.environ.copy() if env is None else env
    java_home = env.get('JAVA_HOME')
    if java_home is None:
        distutils.spawn.spawn(['javah'] + args)
        return
    javah_path = os.path.join(java_home, os.path.join(*'bin/javah'.split('/')))
    subprocess.check_call(
        [javah_path] + args,
        env=env,
        stdout=sys.stdout,
        stderr=sys.stderr)
def _fix_header(filename):
with open(filename, 'rb') as f:
data = f.read()
if not data.startswith(b'#pragma once\n'):
with open(filename, 'wb') as f:
f.write(b'#pragma once\n\n')
f.write(data)
def _main():
    """Regenerate the JNI header for ai.catboost.CatBoostJNIImpl and
    normalize it with '#pragma once'."""
    header_dir = _get_native_lib_dir()
    _run_javah([
        '-verbose',
        '-d', header_dir,
        '-jni',
        '-classpath', _get_classes_dir(),
        'ai.catboost.CatBoostJNIImpl'])
    _fix_header(os.path.join(header_dir, 'ai_catboost_CatBoostJNIImpl.h'))


if __name__ == '__main__':
    _main()
|
###################################### Stacked Autoencoder ############################################
## Author: Sara Regina Ferreira de Faria
## Email: sarareginaff@gmail.com
#Needed libraries
import numpy
import matplotlib.pyplot as plt
import pandas
import math
import scipy.io as spio
import scipy.ndimage
from sklearn.metrics import mean_squared_error, roc_curve, auc
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
def loadData(file, dictName):
    """Load variable `dictName` from .mat file `file`.

    Returns a 1-D numpy array with one entry per column of the stored
    (1, N)-shaped MATLAB variable, preserving each element's type.
    """
    matdata = spio.loadmat(file)
    entries = matdata[dictName]
    dataset = numpy.ndarray(shape=(entries.shape[1]), dtype=type(entries[0, 0]))
    for col in range(entries.shape[1]):
        dataset[col] = entries[0, col]
    return dataset
# normalize dataset
def normalizeData(data):
    """Min-max scale `data` linearly into [0, 1]."""
    lo = numpy.amin(data)
    hi = numpy.amax(data)
    return (data - lo) / (hi - lo)
# based on http://machinelearningmastery.com/time-series-prediction-with-deep-learning-in-python-with-keras/
# convert an array of values into a dataset matrix
def createMatrix(dataset, look_back=1):
    """Slice `dataset` into overlapping windows of length `look_back`.

    Returns a numpy array of shape (len(dataset) - look_back - 1, look_back, ...).
    The extra `-1` is kept from the original tutorial code (which also built
    a target vector) so existing callers see identical output; the unused
    `dataY` accumulator from that tutorial has been removed.
    """
    windows = [dataset[i:(i + look_back)]
               for i in range(len(dataset) - look_back - 1)]
    return numpy.array(windows)
# based on https://blog.keras.io/building-autoencoders-in-keras.html
# based on http://machinelearningmastery.com/time-series-prediction-with-deep-learning-in-python-with-keras/
# create lstm-based autoencoder
def trainStackedAutoencoder(dataset, timesteps, input_dim, firstLayer, secondLayer, thirdLayer, lossEvaluation, optimizer, epochs, batchSize, verbose=False):
    """Build and fit an LSTM-based stacked autoencoder on `dataset`.

    The first 67% of rows are the training set and the remainder the
    validation set (no shuffling, preserving time order).  Encoder:
    LSTM(firstLayer) -> Dense(secondLayer) -> Dense(thirdLayer); the decoder
    mirrors it back to `input_dim` features per timestep.

    Returns the compiled, fitted keras Model.  (Removed: the unused
    `Sequential` import and the unused `test_size` local.)
    """
    # Keras is imported lazily so importing this module stays cheap.
    from keras.models import Model
    from keras.layers import Input, Dense, LSTM, RepeatVector

    # split data into train and validation sets
    train_size = int(len(dataset) * 0.67)
    train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]

    # encoder: compress each window down to `thirdLayer` features
    inputs = Input(shape=(timesteps, input_dim))
    encoded = LSTM(firstLayer)(inputs)
    encoded = Dense(secondLayer, activation='relu')(encoded)
    encoded = Dense(thirdLayer, activation='relu')(encoded)

    # decoder: expand back to (timesteps, input_dim)
    decoded = RepeatVector(timesteps)(encoded)
    decoded = Dense(secondLayer, activation='relu')(decoded)
    decoded = Dense(firstLayer, activation='relu')(decoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)

    # autoencoder: learn to reconstruct the input from itself
    model = Model(inputs, decoded)
    model.compile(loss=lossEvaluation, optimizer=optimizer)
    model.fit(train, train, epochs=epochs, batch_size=batchSize, verbose=verbose,
              validation_data=(test, test))
    return model
# based on https://edouardfouche.com/Neural-based-Outlier-Discovery/
def calculateFprTpr(predicted, labels):
    """Score each prediction by its L2 norm and return the ROC curve's
    (fpr, tpr) arrays for those scores against `labels`."""
    scores = numpy.zeros(len(predicted))
    for idx, pred in enumerate(predicted):
        scores[idx] = numpy.linalg.norm(pred)
    fpr, tpr, _thresholds = roc_curve(labels, scores)
    return fpr, tpr
#************* MAIN *****************#
# Hyper-parameter sweep (each range currently spans a single value) that
# trains the autoencoder on the fault-free dataset and scores every fault
# dataset by the ROC AUC of the per-batch fault decisions.

# variables — best hyper-parameter combination found so far
best_roc_auc = 0
best_epochs = 0
best_limit = 0
best_batchSizeData = 0
best_look_back = 0
best_firstLayer = 0
for epochs in range(4,5):
    print("epochs", epochs)
    for limitAux in range(11,12):
        # fault threshold multiplier (1.1); relies on Python 3 true division
        limit = limitAux/10
        print("limit", limit)
        for batchSizeData in range (20,21,2):
            print("batchSizeData", batchSizeData)
            for look_back in range(3,4):
                print("look_back", look_back)
                for firstLayer in range (9,10,3):
                    print("firstLayer",firstLayer)
                    # encoder narrows by a factor of 3 per layer
                    secondLayer = int(firstLayer/3)
                    thirdLayer = int(secondLayer/3)
                    batchSizeModel = 5
                    lossEvaluation = 'mean_squared_error'
                    optimizer = 'adam'
                    roc_auc = []
                    FPRs = []
                    TPRs = []

                    # load dataset with all fault simulation
                    originalDataset = loadData('DadosTodasFalhas.mat', 'Xsep')

                    # prepare dataset to input model training
                    # (index 0 is assumed to be the fault-free simulation — TODO confirm)
                    filteredDataset = scipy.ndimage.filters.gaussian_filter(originalDataset[0][:,:], 4.0)
                    #filteredDataset = originalDataset[0][:,:]
                    normalizedDataset = normalizeData(filteredDataset)
                    dataset = createMatrix(normalizedDataset, look_back)

                    #***** Train model with normal data *****#
                    # Variables
                    timesteps = dataset.shape[1]
                    input_dim = dataset.shape[2]
                    normalPredict = []
                    normalError = []
                    j = 0

                    # train model (`Model` is the fitted model object, despite
                    # the class-like capitalization)
                    Model = trainStackedAutoencoder(dataset, timesteps, input_dim, firstLayer, secondLayer, thirdLayer, lossEvaluation, optimizer, epochs, batchSizeModel, verbose=False)

                    # get error for each batch of normal data
                    for k in range(0,len(dataset),batchSizeData):
                        dataBatch = dataset[k:k+batchSizeData]
                        normalPredict.append(Model.predict(dataBatch))
                        normalError.append(mean_squared_error(dataBatch[:,0,:], normalPredict[j][:,0,:]))
                        j += 1

                    #***** Testing if it is a fault or not *****#
                    for i in range(1,len(originalDataset)):
                        #local variables
                        j = 0
                        faults = []
                        trainPredict = []
                        faultError = []
                        predicted = []

                        # prepare dataset
                        filteredDataset = scipy.ndimage.filters.gaussian_filter(originalDataset[i][:,:], 4.0)
                        #filteredDataset = originalDataset[i][:,0]
                        normalizedDataset = normalizeData(filteredDataset)
                        dataset = createMatrix(normalizedDataset, look_back)
                        #dataset = numpy.reshape(dataset, (dataset.shape[0], dataset.shape[1], 22)) # reshape input to be [samples, time steps, features]

                        # get error for each batch of data
                        for k in range(0,len(dataset),batchSizeData):
                            dataBatch = dataset[k:k+batchSizeData]
                            # generate predictions using model
                            trainPredict.append(Model.predict(dataBatch))
                            predicted.append(trainPredict[j][:,0,:])
                            faultError.append(mean_squared_error(dataBatch[:,0,:], predicted[j]))
                            # check if it is a fault or not: reconstruction
                            # error beyond `limit` times the normal-data error
                            if (faultError[j] > normalError[j]*limit):
                                faults.append(1)
                            else:
                                faults.append(0)
                            j = j + 1
                        #print("Dataset", i, ". IsFaultVector: ", faults)

                        # define labels to ROC curve
                        # (presumably the fault starts at sample 100 — TODO confirm)
                        labels = []
                        for k in range(0,len(dataset),batchSizeData):
                            if (k >= 100):
                                labels.append(1)
                            if (k < 100):
                                labels.append(0)

                        # calculate AUC, fpr and tpr
                        fpr, tpr = calculateFprTpr(faults, labels)
                        FPRs.append(fpr)
                        TPRs.append(tpr)
                        roc_auc.append(auc(fpr, tpr))

                    # keep the hyper-parameters with the best summed AUC
                    sum_roc_auc = 0
                    for i in range(len(roc_auc)):
                        sum_roc_auc += roc_auc[i]
                    if (sum_roc_auc > best_roc_auc):
                        best_roc_auc = sum_roc_auc
                        best_epochs = epochs
                        best_limit = limit
                        best_batchSizeData = batchSizeData
                        best_look_back = look_back
                        best_firstLayer = firstLayer

                    #plot baseline and predictions
                    #plt.plot(normalizedDataset)
                    #plt.plot(numpy.concatenate( predicted, axis=0 ))
                    #plt.show()

# plot ROC curves for a hand-picked subset of the fault datasets
sum_selected_roc_auc = 0
for j in range(len(FPRs)):
    i = j+1
    if(i == 1 or i == 2 or i == 5 or i == 7 or i == 8 or i == 9 or i == 10 or i == 11 or i == 12 or i == 14 or i == 15 or i == 19):
        # NOTE(review): the label uses i+1 although i is already j+1, so each
        # curve is labeled one higher than its dataset index — confirm intent.
        plt.plot(FPRs[j], TPRs[j], label="AUC{0}= {1:0.2f}".format(i+1, roc_auc[j]))
        sum_selected_roc_auc += roc_auc[j]
plt.xlim((0,1))
plt.ylim((0,1))
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('False Positive rate')
plt.ylabel('True Positive rate')
plt.title('ROC curve - Stacked Autoencoder')
plt.legend(loc="lower right")
plt.show()

print("bests parameters")
print("best_limit", best_limit)
print("best_epochs", best_epochs)
print("best_roc_auc", best_roc_auc)
print("best_look_back", best_look_back)
print("best_batchSizeData", best_batchSizeData)
print("best_firstLayer", best_firstLayer)
print("sum_selected_roc_auc", sum_selected_roc_auc)
|
import collections
import envi.symstore.symcache as es_symcache
# Symbol Type Constants ( for serialization )
SYMSTOR_SYM_SYMBOL = 0
SYMSTOR_SYM_FUNCTION = 1
SYMSTOR_SYM_SECTION = 2
SYMSTOR_SYM_MODULE = 3


class Symbol:
    """
    A named integer value (typically an address) within a file's namespace.

    Symbols compare, hash and do arithmetic as their integer value, so a
    Symbol can be used nearly anywhere an address is expected.  str() yields
    "<fname>.<name>" when an owning file name is set, else just the name.
    """
    symtype = SYMSTOR_SYM_SYMBOL

    def __init__(self, name, value, size=0, fname=None):
        self.name = name    # symbol name
        self.value = value  # integer value / address
        self.size = size    # size in bytes (exposed via len())
        self.fname = fname  # owning file/module name, if any

    # Comparisons delegate to the integer value.  NOTE: __eq__ with a
    # non-Symbol is always False while the ordering operators coerce via
    # int(other) — longstanding asymmetry, kept for compatibility.
    def __ge__(self, other):
        return int(self) >= int(other)

    def __le__(self, other):
        return int(self) <= int(other)

    def __gt__(self, other):
        return int(self) > int(other)

    def __lt__(self, other):
        return int(self) < int(other)

    def __eq__(self, other):
        if not isinstance(other, Symbol):
            return False
        return int(self) == int(other)

    # Arithmetic / bitwise operators (value-based, return plain ints)
    def __add__(self, other):
        return int(self) + int(other)

    def __sub__(self, other):
        return int(self) - int(other)

    def __mul__(self, other):
        return int(self) * int(other)

    def __div__(self, other):
        # Python 2 only (Python 3 dispatches to __floordiv__/__truediv__)
        return int(self) / int(other)

    def __floordiv__(self, other):
        return int(self) // int(other)

    def __mod__(self, other):
        return int(self) % int(other)

    def __divmod__(self, other):
        return divmod(int(self), int(other))

    def __pow__(self, other, modulo=None):
        return pow(int(self), int(other), modulo)

    def __lshift__(self, other):
        return int(self) << int(other)

    def __rshift__(self, other):
        return int(self) >> int(other)

    def __and__(self, other):
        return int(self) & int(other)

    def __xor__(self, other):
        return int(self) ^ int(other)

    def __or__(self, other):
        return int(self) | int(other)

    # Operator swapped variants
    def __radd__(self, other):
        return int(other) + int(self)

    def __rsub__(self, other):
        return int(other) - int(self)

    def __rmul__(self, other):
        return int(other) * int(self)

    def __rdiv__(self, other):
        # Python 2 only
        return int(other) / int(self)

    def __rfloordiv__(self, other):
        return int(other) // int(self)

    def __rmod__(self, other):
        return int(other) % int(self)

    def __rdivmod__(self, other):
        return divmod(int(other), int(self))

    def __rpow__(self, other, modulo=None):
        return pow(int(other), int(self), modulo)

    def __rlshift__(self, other):
        return int(other) << int(self)

    def __rrshift__(self, other):
        return int(other) >> int(self)

    def __rand__(self, other):
        return int(other) & int(self)

    def __rxor__(self, other):
        return int(other) ^ int(self)

    def __ror__(self, other):
        return int(other) | int(self)

    # Inplace variants mutate self.value and return self
    def __iadd__(self, other):
        self.value += int(other)
        return self

    def __isub__(self, other):
        self.value -= int(other)
        return self

    def __imul__(self, other):
        self.value *= int(other)
        return self

    def __idiv__(self, other):
        # Python 2 only
        self.value = int(self.value / int(other))
        return self

    def __ifloordiv__(self, other):
        self.value //= int(other)
        return self

    def __imod__(self, other):
        # BUGFIX: the original also called self.vsSetValue(self % other) — a
        # leftover from vstruct-style code.  Symbol has no vsSetValue, so
        # `sym %= x` raised AttributeError before ever updating the value.
        self.value %= int(other)
        return self

    def __ipow__(self, other, modulo=None):
        self.value = pow(self.value, other, modulo)
        return self

    def __ilshift__(self, other):
        self.value <<= other
        return self

    def __irshift__(self, other):
        self.value >>= other
        return self

    def __iand__(self, other):
        self.value &= other
        return self

    def __ixor__(self, other):
        self.value ^= other
        return self

    def __ior__(self, other):
        self.value |= other
        return self

    def __hash__(self):
        return hash(int(self))

    def __int__(self):
        return int(self.value)

    def __len__(self):
        # a symbol's length is its size in bytes
        return self.size

    def __str__(self):
        if self.fname is not None:
            return "%s.%s" % (self.fname, self.name)
        return self.name

    def __repr__(self):
        return str(self)
class FunctionSymbol(Symbol):
    """
    Used to represent functions.
    """
    symtype = SYMSTOR_SYM_FUNCTION

    def __repr__(self):
        # render as a call: "<file>.<name>()"
        return "%s.%s()" % (self.fname, self.name)
class SectionSymbol(Symbol):
    """
    Used for file sections/segments.
    """
    symtype = SYMSTOR_SYM_SECTION

    def __repr__(self):
        # render as a subscript: "<file>[<section name>]"
        return "%s[%s]" % (self.fname, self.name)
class SymbolResolver:
    """
    An address/name index of symbols.

    Symbols are tracked both as lightweight tuples
    (addr, size, name, symtype, fname) and, on demand, as instantiated
    Symbol objects (cached in symobjsbyaddr / symobjsbyname).  Addresses
    are additionally indexed into fixed-size buckets so inexact lookups
    only need to scan nearby symbols.

    NOTE: Nothing should reach directly into a SymbolResolver!
    """

    def __init__(self, width=4, casesens=True, baseaddr=0):
        self.width = width
        self.widthmask = (2 ** (width * 8)) - 1
        self.casesens = casesens
        self.baseaddr = baseaddr  # Set if this is an RVA sym resolver

        # Let's use 4096 byte buckets for now
        self.bucketsize = 4096
        self.bucketmask = self.widthmask ^ (self.bucketsize - 1)
        self.buckets = collections.defaultdict(list)

        # holds tuples by name/addr, instantiated on demand and subsequently
        # stored in symobjsbyaddr and symobjsbyname
        self.symnames = {}
        self.symaddrs = {}

        # caches that hold instantiated Symbol objects
        self.symobjsbyaddr = {}
        self.symobjsbyname = {}

    def delSymbol(self, sym):
        """
        Delete a symbol from the resolver's namespace
        """
        symval = int(sym)
        self.symaddrs.pop(symval, None)

        subres = None
        if sym.fname is not None:
            subres = self.symnames.get(sym.fname)

        # Potentially del it from the sub resolver's namespace
        if subres and isinstance(subres, es_symcache.SymbolCache):
            subres.delSymbol(sym)
        # Otherwise del it from our namespace
        else:
            symname = sym.name
            if not self.casesens:
                symname = symname.lower()
            if symname in self.symnames:
                self.symnames.pop(symname, None)

        # NOTE(review): this removes the *file name* key from the object
        # cache; removing the symbol's own name looks more likely intended —
        # confirm against callers before changing.
        if sym.fname in self.symobjsbyname:
            self.symobjsbyname.pop(sym.fname, None)
        if symval in self.symobjsbyaddr:
            self.symobjsbyaddr.pop(symval, None)

    def addSymbol(self, sym):
        """
        Add a symbol to the resolver.
        """
        # Fake these out for the API ( optimized implementations should *not* call this )
        symtup = (sym.value, sym.size, sym.name, sym.symtype, sym.fname)
        symtups = [symtup]

        # BUGFIX: index the address exactly once.  The original called
        # _nomSymTupAddrs again in the no-sub-resolver branch below,
        # appending every symbol tuple to its bucket twice.
        self._nomSymTupAddrs(symtups)

        subres = self.symobjsbyname.get(sym.fname)
        if subres:
            # a sub-resolver (e.g. FileSymbol) owns this symbol's name space
            subres._nomSymTupAddrs(symtups)
            subres._nomSymTupNames(symtups)
        else:
            self._nomSymTupNames(symtups)

        return self._addSymObject(sym)

    def getSymByName(self, name):
        '''
        Retrieve a Symbol object by name.
        '''
        if not self.casesens:
            name = name.lower()

        # Do we have a cached object?
        sym = self.symobjsbyname.get(name)
        if sym is not None:
            return sym

        # Do we have a symbol tuple?  Instantiate (and cache) it on demand.
        symtup = self.symnames.get(name)
        if symtup is not None:
            return self._symFromTup(symtup)

    def delSymByName(self, name):
        # Resolve the name to a tuple, instantiate it, and delete the object
        if not self.casesens:
            name = name.lower()
        sym = self.symnames.get(name, None)
        if sym is not None:
            self.delSymbol(self._symFromTup(sym))

    def _symFromTup(self, symtup):
        # Create a symbol object and cache it...
        symaddr, symsize, symname, symtype, symfname = symtup
        symclass = symclasses[symtype]
        if symtype == SYMSTOR_SYM_MODULE:
            # FileSymbol has a different constructor signature
            sym = FileSymbol(symname, symaddr, symsize, width=self.width)
        else:
            sym = symclass(symname, symaddr, size=symsize, fname=symfname)
        self._addSymObject(sym)
        return sym

    def _addSymObject(self, sym):
        # Add a symbol object to our datastructures.
        # (Removed two unused locals, symmax and bbase, from the original.)
        self.symobjsbyaddr[sym.value] = sym

        if sym.fname:
            # if the file has its own resolver, delegate the name index to it
            subres = self.symobjsbyname.get(sym.fname)
            if subres is not None and isinstance(subres, SymbolResolver):
                subres._addSymObject(sym)
                return

        symname = sym.name
        if not self.casesens:
            symname = symname.lower()
        self.symobjsbyname[symname] = sym

    def getSymByAddr(self, va, exact=True):
        """
        Return a symbol object for the given virtual address.
        """
        va = va & self.widthmask
        sym = self.symobjsbyaddr.get(va)
        if sym is not None:
            return sym

        symtup = self.symaddrs.get(va)
        if symtup:
            return self._symFromTup(symtup)

        # In the "not exact" case, go by the tuples...
        # ...and try 2 buckets... ( more than 8k away is bunk )
        if not exact:
            bucketva = va & self.bucketmask
            b1 = [b for b in self.buckets[bucketva] if b[0] <= va]
            if not b1:
                b1 = self.buckets[bucketva - self.bucketsize]
            if b1:
                # highest starting address at-or-below va wins
                b1.sort()
                symtup = b1[-1]
                sym = self.symobjsbyaddr.get(symtup[0])
                if sym is not None:
                    return sym
                return self._symFromTup(symtup)

    def getSymList(self):
        """
        Return a list of the symbols which are contained in this resolver.
        """
        # NOTE(review): a name present both as a cached object and as a tuple
        # yields two entries here — confirm whether callers rely on that.
        out = [self.getSymByName(name) for name in self.symobjsbyname]
        out.extend([self.getSymByName(name) for name in self.symnames])
        return out

    def getSymHint(self, va, hidx):
        """
        May be used by symbol resolvers who know what type they are
        resolving to store and retrieve "hints" with indexes.

        Used specifically by opcode render methods to resolve
        any memory dereference info for a given operand.

        NOTE: These are mostly symbolic references to FRAME LOCAL
        names....
        """
        return None

    def _nomSymTupAddrs(self, symtups):
        # Ugly list comprehensions for speed...
        [self.symaddrs.__setitem__(n[0], n) for n in symtups]
        for symtup in symtups:
            # do the size range...
            self.buckets[symtup[0] & self.bucketmask].append(symtup)
            if symtup[1]:
                [self.buckets[b].append(symtup) for b in range(symtup[0], symtup[0] + symtup[1], self.bucketsize)]

    def _nomSymTupNames(self, symtups):
        # index tuples by (optionally lower-cased) name
        if not self.casesens:
            [self.symnames.__setitem__(n[2].lower(), n) for n in symtups]
        else:
            [self.symnames.__setitem__(n[2], n) for n in symtups]

    def impSymCache(self, symcache, symfname=None, baseaddr=0):
        '''
        Import a list of symbol tuples (see getCacheSyms()) at the
        given base address ( and for the given sub-file )
        '''
        # Receive a "cache" list and make it into our kind of tuples.
        symtups = [(symaddr + baseaddr, symsize, symname, symtype, symfname) for (symaddr, symsize, symname, symtype) in symcache]

        # Either way, index the addresses
        self._nomSymTupAddrs(symtups)

        if symfname:
            # If we have a sub-resolver, no need to add the names to
            # our name space...
            subres = self.symobjsbyname.get(symfname)
            if isinstance(subres, SymbolResolver):
                subres._nomSymTupAddrs(symtups)
                subres._nomSymTupNames(symtups)
                return

        self._nomSymTupNames(symtups)
class FileSymbol(Symbol, SymbolResolver):
    """
    A file symbol is both a symbol resolver of its own, and
    a symbol.

    File symbols are used to do hierarchical symbol lookups and don't
    actually add anything but the name to their lookup (it is assumed
    that the parent Resolver of the FileSymbol takes care of addr lookups.
    """
    symtype = SYMSTOR_SYM_MODULE

    def __init__(self, fname, base, size, width=4):
        if fname is None:
            raise Exception('fname must not be None for a FileSymbol')
        # Initialize both bases explicitly; the Symbol's own fname stays
        # None so str()/repr() show just the file name.
        SymbolResolver.__init__(self, width=width, baseaddr=base)
        Symbol.__init__(self, fname, base, size=size, fname=None)

    def __getattr__(self, name):
        """
        File symbols may be dereferenced like python objects to resolve
        symbols within them.
        """
        ret = self.getSymByName(name)
        if ret is None:
            raise AttributeError("%s has no symbol %s" % (self.name, name))
        return ret

    def __getitem__(self, name):
        """
        Allow dictionary style access for mangled incompatible names...
        """
        ret = self.getSymByName(name)
        if ret is None:
            raise KeyError("%s has no symbol %s" % (self.name, name))
        return ret

    # we need __getstate__ and __setstate__ because of serialization. if
    # these are not overridden, __getattr__ is called, which subsequently calls
    # getSymByName, which tries to access self.casesens, which causes a
    # __getattr__ call, which leads to recursion.
    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, sdict):
        self.__dict__.update(sdict)

    # we don't *have* to override the other object methods, but otherwise
    # we will incur the cost of extra symbol lookups for things like
    # __eq__, __ne__, etc. we chose not to do it for lt, le, gt, ge, del and
    # others that we don't expect to see called often.
    def __repr__(self):
        return Symbol.__repr__(self)

    def __str__(self):
        return Symbol.__str__(self)

    def __eq__(self, other):
        return Symbol.__eq__(self, other)

    def __ne__(self, other):
        return not Symbol.__eq__(self, other)

    def __hash__(self):
        return Symbol.__hash__(self)

    def __nonzero__(self):
        # Python 2 truthiness; avoids falling into __getattr__/__len__ lookups
        return True


# Index order matches the SYMSTOR_SYM_* constants used in symbol tuples.
symclasses = (Symbol, FunctionSymbol, SectionSymbol, FileSymbol)
|
from conans import ConanFile, CMake
class ArgumentParser(ConanFile):
    """Conan recipe for the 'cracker' package.

    NOTE(review): the class name (ArgumentParser), package name (cracker)
    and exported lib (arg_parser) disagree — confirm which artifact this
    recipe is actually meant to ship.
    """
    name = "cracker"
    version = "0.0.0"
    license = "MIT"
    url = "<Package recipe repository url here, for issues about the package>"
    description = "Simple argument parser for CLI in C++"
    settings = "os", "compiler", "build_type", "arch"
    requires = [
        "gtest/1.10.0",
        "fmt/7.1.3", # std::format implementation
        "arg_parser/0.0.0@codeist/testing",
        "openssl/1.1.1k"
    ]
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    generators = "cmake", "cmake_paths"
    exports_sources = "*"

    def imports(self):
        """Copy runtime libraries next to the built binaries."""
        self.copy("*.dll", dst="bin", src="bin") # From bin to bin
        self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin

    def build(self):
        """Configure and build with CMake, emitting compile_commands.json."""
        cmake = CMake(self)
        cmake.definitions["CMAKE_EXPORT_COMPILE_COMMANDS"] = "ON"
        cmake.configure()
        cmake.build()

    def package(self):
        """Install build artifacts into the package folder via CMake."""
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        """Declare the libraries consumers must link against."""
        self.cpp_info.libs = ["arg_parser"]
|
import card
import random
class Deck:
    """An ordered collection of cards; index 0 is the top of the deck."""

    suits = ['Club', 'Diamond', 'Heart', 'Spade']

    def __init__(self, fill=True):
        """Create a deck; when `fill` is true, populate all four suits."""
        self.cards = []
        if fill:
            for suit in Deck.suits:
                self.init_suit(suit)

    def clear(self):
        """Discard every card."""
        self.cards = []

    def add_deck(self, deck):
        """Move all cards from `deck` into this deck, emptying `deck`."""
        self.addrange(deck.cards)
        deck.clear()

    def add(self, card):
        """Append a single card to the bottom."""
        self.cards.append(card)

    def addrange(self, cards):
        """Append each card in `cards`, preserving order."""
        self.cards.extend(cards)

    def takeOne(self):
        """Remove and return the top card, or None when empty."""
        if not self.cards:
            return None
        return self.cards.pop(0)

    def takeCard(self, card):
        """Remove and return the first card whose `short` code matches
        `card.short`, or None when no such card is held."""
        for idx, held in enumerate(self.cards):
            if held.short == card.short:
                return self.cards.pop(idx)
        return None

    def take(self, amount=1):
        """Remove and return the top card (amount == 1) or a list of the
        top `amount` cards."""
        if amount == 1:
            return self.takeOne()
        taken = self.cards[:amount]
        del self.cards[:amount]
        return taken

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.cards)

    def init_suit(self, suit):
        """Append ranks 1..13 of `suit`."""
        for rank in range(13):
            self.cards.append(card.Card(suit, (rank + 1)))
import math
__author__ = 'Danyang'
class Solution(object):
    def solve(self, cipher):
        """Compute, for each query q, the time (L - sqrt(q)) / v where the
        closing speed v is |S1 - S2| / sqrt(2); return one '%f'-formatted
        value per line."""
        length, s1, s2, queries = cipher
        speed = abs(s1 - s2) / math.sqrt(2)
        times = [(length - math.sqrt(q)) / speed for q in queries]
        return "\n".join("%f" % t for t in times)
if __name__ == "__main__":
import sys
f = open("1.in", "r")
solution = Solution()
L, S1, S2 = map(int, f.readline().strip().split(' '))
q = int(f.readline().strip())
qs = []
for t in xrange(q):
qs.append(int(f.readline().strip()))
cipher = L, S1, S2, qs
s = "%s\n" % (solution.solve(cipher))
print s,
|
# pandas basics scratchpad: Series/DataFrame construction, selection,
# missing data and concatenation (most steps left commented for reference).
import pandas as pd
import numpy as np

s = pd.Series([1,3,5, np.nan, 44, 1])
#print(s)

dates = pd.date_range('20190910', periods = 6)
#print(dates)

df = pd.DataFrame(np.arange(24).reshape(6,4),index=dates,columns=['a','b','c','d'])
#print(df)

df2 = pd.DataFrame({'A' : 1.,
                    'B' : pd.Timestamp('20130102'),
                    'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
                    'D' : np.array([3] * 4,dtype='int32'),
                    'E' : pd.Categorical(["test","train","test","train"]),
                    'F' : 'foo'})
#print(df2)
#print(df2.columns)
#print(df2.index)
#print(df2.values)

#select by location label
#print(df.loc['20190910'])
#print(df.loc['20190910', ['a']])
#select by index location
#print(df.iloc[3][2])

'''
df.iloc[1, 1] = np.nan
df.iloc[2, 2] = np.nan
print(df)
print(df.fillna((999)))
print(df.dropna(axis = 0, how='any')) #0 means row, 1 means column/// any all
'''

df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*2, columns=['a','b','c','d'])
# concat: axis=0 stacks rows (vertically), axis=1 stacks columns (horizontally)
res = pd.concat([df1, df2, df3], axis=0)
print(res)
import itertools

flatten_iter = itertools.chain.from_iterable


def factors(n):
    """Return every positive divisor of n, in no particular order."""
    return list(set(flatten_iter((i, n//i)
                                 for i in range(1, int(n**0.5)+1) if n % i == 0)))


def is_prime(n):
    """Return True when n is prime.

    BUGFIX: the original compared the *unordered* divisor list against
    [1, n], silently depending on set iteration order; sorting first makes
    the comparison well-defined.
    """
    return sorted(factors(n)) == [1, n]
def summation(numbers):
    """Return the sum of `numbers` (0 for an empty iterable).

    The original printed every element inside the accumulation loop —
    leftover debug output — and summed manually; use the builtin instead.
    """
    return sum(numbers)
def getPrimes():
    """Return the first 10001 primes in ascending order.

    Replaces the original per-candidate full factorization (which rebuilt a
    divisor set for every integer up to the 10001st prime and was
    impractically slow) with a sieve of Eratosthenes over a growing bound.
    The returned list is identical.
    """
    target = 10001
    limit = 120000  # comfortably above the 10001st prime (104743)
    while True:
        sieve = bytearray([1]) * (limit + 1)
        sieve[0] = sieve[1] = 0
        for p in range(2, int(limit ** 0.5) + 1):
            if sieve[p]:
                # knock out multiples of p starting at p*p
                sieve[p * p::p] = bytearray((limit - p * p) // p + 1)
        primes = [i for i, flag in enumerate(sieve) if flag]
        if len(primes) >= target:
            return primes[:target]
        limit *= 2  # defensive: grow and retry if the bound was too low
def main():
    """Solve Project Euler #7: print the 10001st prime."""
    primes = getPrimes()
    print(primes[10000])


main()
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from .models import Category, Goal, Step, Reward, Profile
from .forms import CatForm, GoalForm, StepForm, RewardForm
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request, 'goals/index.html')
@login_required
def getrewards(request):
    """List only the logged-in user's rewards."""
    #rewardlist=Reward.objects.all()
    userrewards=Reward.objects.filter(user=request.user)
    return render(request, 'goals/rewards.html' , {'userrewards' : userrewards})
@login_required
def getcategories(request):
    """List only the logged-in user's goal categories."""
    #categorylist=Category.objects.all()
    usercats=Category.objects.filter(user=request.user)
    return render(request, 'goals/categories.html' ,{'usercats' : usercats})

#To display all the goals within 1 category:
# allgoals=Goal.objects.all
# cat1 = Goal.objects.filter(category=1)
# return render(request, 'goals/cat1.html' , {'catgoals' : catgoals})
@login_required
def catgoals(request, id):
    """Show every goal in category `id`; 404 when the category is missing."""
    category = get_object_or_404(Category, pk=id)
    goals_in_category = Goal.objects.filter(category=id)
    return render(request, 'goals/catgoals.html', context={
        'thiscat' : category,
        'catgoals' : goals_in_category,
    })
@login_required
def gsteps(request, id):
    """Show every step belonging to goal `id`; 404 when the goal is missing."""
    goal = get_object_or_404(Goal, pk=id)
    steps_for_goal = Step.objects.filter(goal=id)
    return render(request, 'goals/gsteps.html', context={
        'thisgoal' : goal,
        'gsteps' : steps_for_goal,
    })
@login_required
def formsuccess(request):
    """Send the user to the form-success page.

    NOTE(review): redirect() is given a template path, producing an HTTP
    redirect to the literal URL 'goals/formsuccess.html'; the sibling views
    render that template directly.  Probably should be render() or a named
    URL — confirm before changing.
    """
    response=redirect('goals/formsuccess.html')
    return response
@login_required
def newcat(request):
    """Create a new Category via CatForm.

    GET renders an empty form; a valid POST saves and shows the success
    page; an invalid POST re-renders the bound form with its errors.
    Removed: a dead `form = CatForm` (class, not instance) assignment and
    the bare `post.save` (missing parens, a no-op — form.save(commit=True)
    already persists the object).
    """
    if request.method == 'POST':
        form = CatForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return render(request, 'goals/formsuccess.html')
    else:
        form = CatForm()
    return render(request, 'goals/newcat.html', {'form' : form})
@login_required
def newgoal(request):
    """Create a new Goal via GoalForm.

    GET renders an empty form; a valid POST saves and shows the success
    page; an invalid POST re-renders the bound form with its errors.
    Removed: the bare `post.save` (missing parens, a no-op —
    form.save(commit=True) already persists the object).
    """
    if request.method == 'POST':
        form = GoalForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return render(request, 'goals/formsuccess.html')
    else:
        form = GoalForm()
    return render(request, 'goals/newgoal.html', {'form': form})
@login_required
def newstep(request):
    """Create a new Step via StepForm.

    GET renders an empty form; a valid POST saves and shows the success
    page; an invalid POST re-renders the bound form with its errors.
    Removed: the bare `post.save` (missing parens, a no-op —
    form.save(commit=True) already persists the object).
    """
    if request.method == 'POST':
        form = StepForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return render(request, 'goals/formsuccess.html')
    else:
        form = StepForm()
    return render(request, 'goals/newstep.html', {'form': form})
@login_required
def newreward(request):
    """Create a new Reward via RewardForm.

    GET renders an empty form; a valid POST saves and shows the success
    page; an invalid POST re-renders the bound form with its errors.
    Removed: the bare `post.save` (missing parens, a no-op —
    form.save(commit=True) already persists the object).
    """
    if request.method == 'POST':
        form = RewardForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return render(request, 'goals/formsuccess.html')
    else:
        form = RewardForm()
    return render(request, 'goals/newreward.html', {'form': form})
def loginmessage(request):
    """Render the post-login notification page (no auth required)."""
    return render(request, 'goals/loginmessage.html')

def logoutmessage(request):
    """Render the post-logout notification page."""
    return render(request, 'goals/logoutmessage.html')
from django.db import models
from datetime import datetime
class Realtor(models.Model):
    """A real-estate agent displayed on the listings site."""
    name = models.CharField(max_length=60, verbose_name='Name')
    # uploaded photos are partitioned by date: photos/<year>/<month>/<day>
    photo = models.ImageField(upload_to='photos/%Y/%m/%d')
    description = models.TextField(blank=True, verbose_name='Description')
    phone = models.CharField(max_length=11, verbose_name='Phone')
    # NOTE(review): plain CharField — EmailField would add validation, but
    # changing the field type requires a migration; confirm before changing.
    email = models.CharField(max_length=40, verbose_name='Email')
    is_mvp = models.BooleanField(default=False, verbose_name='MVP')
    # NOTE(review): datetime.now is timezone-naive; with USE_TZ enabled
    # Django expects timezone.now — confirm project settings before changing.
    hire_date = models.DateTimeField(default=datetime.now, blank=True, verbose_name='Hire Date')

    def __str__(self):
        return self.name
|
from architectures.core import Graph, Cluster, Node, Edge, Flow
from architectures.themes import Default, LightMode
from architectures.providers.azure.general import Computer
from architectures.providers.azure.compute import VirtualMachineWindows
from architectures.providers.azure.storage import ManagedDiskStandardHdd, StorageAccountBlob
from architectures.providers.azure.security import KeyVault
from architectures.providers.azure.networking import NetworkSecurityGroupClassic, VirtualNetwork, VirtualSubnet
from architectures.providers.azure.management import AzureMonitor
from architectures.providers.azure.deployment import AzureRepo
from architectures.providers.azure.identity import AzureActiveDirectory
with Graph("Jenkins Server on Azure", theme=LightMode()):
with Cluster("Virtual Network") as virtual_network_cluster:
with Cluster("Subnet") as subnet_cluster:
NetworkSecurityGroupClassic("NSG", width=".7")
with Cluster("Scaled Agents") as scaled_agents_cluster:
vm1 = VirtualMachineWindows("Build VM")
vm2 = VirtualMachineWindows("Build VM")
vm3 = VirtualMachineWindows("Build VM")
agent_pool = [vm1, vm2, vm3]
with Cluster(hide_border=True):
jenkins_server = VirtualMachineWindows("Jenkins Server")
computer = Computer()
active_directory = AzureActiveDirectory()
source_control = AzureRepo()
managed_discs = ManagedDiskStandardHdd()
monitor = AzureMonitor()
key_vault = KeyVault()
blob_storage = StorageAccountBlob()
Flow([computer, jenkins_server, [active_directory, managed_discs, scaled_agents_cluster, monitor]])
Edge(vm3, [key_vault, blob_storage], ltail=scaled_agents_cluster.id)
Edge(source_control, jenkins_server)
Flow([vm1, vm2, vm3], style="invis")
|
from Cb_constants import CbServer
from bucket_collections.collections_base import CollectionBase
from bucket_utils.bucket_ready_functions import BucketUtils
from cb_tools.cbstats import Cbstats
from couchbase_helper.documentgenerator import doc_generator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
from BucketLib.BucketOperations import BucketHelper
from cb_tools.cbstats import Cbstats
class OpsChangeCasTests(CollectionBase):
def setUp(self):
    """Read test-input parameters and select the bucket under test."""
    super(OpsChangeCasTests, self).setUp()
    self.bucket = self.bucket_util.buckets[0]
    # Test-input overrides (default num_items here is 10, despite the
    # original comment claiming 0)
    self.num_items = self.input.param("num_items", 10)
    self.key = "test_collections"
    self.doc_size = self.input.param("doc_size", 256)
    self.doc_ops = self.input.param("doc_ops", None)
    self.mutate_times = self.input.param("mutate_times", 10)
    self.expire_time = self.input.param("expire_time", 5)
    if self.doc_ops is not None:
        # ';'-separated list of operations, e.g. "update;delete"
        self.doc_ops = self.doc_ops.split(";")
def verify_cas(self, ops, generator, scope, collection):
    """
    Verify CAS value manipulation.

    For update we use the latest CAS value return by set()
    to do the mutation again to see if there is any exceptions.
    We should be able to mutate that item with the latest CAS value.
    For delete(), after it is called, we try to mutate that item with the
    cas value returned by delete(). We should see SDK Error.
    Otherwise the test should fail.
    For expire, We want to verify using the latest CAS value of that item
    can not mutate it because it is expired already.
    """
    for bucket in self.bucket_util.buckets:
        client = SDKClient([self.cluster.master], bucket)
        client.select_collection(scope, collection)
        self.log.info("CAS test on collection %s: %s"
                      % (scope, collection))
        gen = generator
        while gen.has_next():
            key, value = gen.next()
            # locate the node holding the active copy of this key's vbucket
            vb_of_key = self.bucket_util.get_vbucket_num_for_key(key)
            active_node_ip = None
            for node_ip in self.shell_conn.keys():
                if vb_of_key in self.vb_details[node_ip]["active"]:
                    active_node_ip = node_ip
                    break
            self.log.info("Performing %s on key %s" % (ops, key))
            if ops in ["update", "touch"]:
                for x in range(self.mutate_times):
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    if ops == 'update':
                        # replace with the just-read CAS must succeed
                        result = client.crud(
                            "replace", key, value,
                            durability=self.durability_level,
                            cas=old_cas)
                    else:
                        # touch with repeated/changed expiries; CAS must
                        # change only when the expiry actually changes
                        prev_exp = 0
                        for exp in [0, 60, 0, 0]:
                            result = client.touch(
                                key, exp,
                                durability=self.durability_level,
                                timeout=self.sdk_timeout)
                            if exp == prev_exp:
                                if result["cas"] != old_cas:
                                    self.log_failure(
                                        "CAS updated for "
                                        "touch with same exp: %s"
                                        % result)
                            else:
                                if result["cas"] == old_cas:
                                    self.log_failure(
                                        "CAS not updated %s == %s"
                                        % (old_cas, result["cas"]))
                                old_cas = result["cas"]
                            prev_exp = exp
                    if result["status"] is False:
                        client.close()
                        self.log_failure("Touch / replace with cas failed")
                        return
                    new_cas = result["cas"]
                    if ops == 'update':
                        if old_cas == new_cas:
                            self.log_failure("CAS old (%s) == new (%s)"
                                             % (old_cas, new_cas))
                        if result["value"] != value:
                            self.log_failure("Value mismatch. "
                                             "%s != %s"
                                             % (result["value"], value))
                        else:
                            self.log.debug(
                                "Mutate %s with CAS %s successfully! "
                                "Current CAS: %s"
                                % (key, old_cas, new_cas))
                    # cross-check the CAS against cbstats on the active node
                    active_read = client.crud("read", key,
                                              timeout=self.sdk_timeout)
                    active_cas = active_read["cas"]
                    replica_cas = -1
                    cas_in_active_node = \
                        self.cb_stat[active_node_ip].vbucket_details(
                            bucket.name)[str(vb_of_key)]["max_cas"]
                    if str(cas_in_active_node) != str(new_cas):
                        self.log_failure("CbStats CAS mismatch. %s != %s"
                                         % (cas_in_active_node, new_cas))
                    # poll until the replica catches up (or durability
                    # guarantees it already has)
                    poll_count = 0
                    max_retry = 5
                    while poll_count < max_retry:
                        replica_read = client.getFromAllReplica(key)[0]
                        replica_cas = replica_read["cas"]
                        if active_cas == replica_cas \
                                or self.durability_level:
                            break
                        poll_count = poll_count + 1
                        self.sleep(1, "Retry read CAS from replica..")
                    if active_cas != replica_cas:
                        self.log_failure("Replica cas mismatch. %s != %s"
                                         % (new_cas, replica_cas))
            elif ops == "delete":
                # mutating with a pre-delete CAS must fail with not-found
                old_cas = client.crud("read", key, timeout=10)["cas"]
                result = client.crud("delete", key,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
                self.log.info("CAS after delete of key %s: %s"
                              % (key, result["cas"]))
                result = client.crud("replace", key, "test",
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout,
                                     cas=old_cas)
                if result["status"] is True:
                    self.log_failure("The item should already be deleted")
                if SDKException.DocumentNotFoundException \
                        not in result["error"]:
                    self.log_failure("Invalid Exception: %s" % result)
                if result["cas"] != 0:
                    self.log_failure("Delete returned invalid cas: %s, "
                                     "Expected 0" % result["cas"])
                if result["cas"] == old_cas:
                    self.log_failure("Deleted doc returned old cas: %s "
                                     % old_cas)
            elif ops == "expire":
                # after TTL expiry, the pre-expiry CAS must no longer work
                old_cas = client.crud("read", key, timeout=10)["cas"]
                result = client.crud("touch", key, exp=self.expire_time)
                if result["status"] is True:
                    if result["cas"] == old_cas:
                        self.log_failure("Touch failed to update CAS")
                else:
                    self.log_failure("Touch operation failed")
                self.sleep(self.expire_time+1, "Wait for item to expire")
                result = client.crud("replace", key, "test",
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout,
                                     cas=old_cas)
                if result["status"] is True:
                    self.log_failure("Able to mutate %s with old cas: %s"
                                     % (key, old_cas))
                if SDKException.DocumentNotFoundException \
                        not in result["error"]:
                    self.log_failure("Invalid error after expiry: %s"
                                     % result)
    def ops_change_cas(self):
        """
        CAS value manipulation by update, delete, expire test.
        We load a certain number of items. Then for half of them, we use
        MemcachedClient cas() method to mutate those item values in order
        to change CAS value of those items.
        We use MemcachedClient set() to set a quarter of the items expired.
        We also use MemcachedClient delete() to delete a quarter of the items
        """
        # Three disjoint key ranges: first half drives update/touch,
        # third quarter drives delete, last quarter drives expiry.
        gen_update = doc_generator(self.key, 0, self.num_items/2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator(self.key,
                                   self.num_items/2,
                                   (self.num_items * 3 / 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator(self.key,
                                   (self.num_items * 3 / 4),
                                   self.num_items,
                                   doc_size=self.doc_size)
        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes():
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()
            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            # Record active/replica vbucket ownership per node so later
            # verification can query the node that owns a given vbucket.
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        # Run the requested doc_ops against a small random sample of
        # scopes/collections.
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    if self.doc_ops is not None:
                        if "update" in self.doc_ops:
                            self.verify_cas("update", gen_update, scope_name, c_name)
                        if "touch" in self.doc_ops:
                            self.verify_cas("touch", gen_update, scope_name, c_name)
                        if "delete" in self.doc_ops:
                            self.verify_cas("delete", gen_delete, scope_name, c_name)
                        if "expire" in self.doc_ops:
                            self.verify_cas("expire", gen_expire, scope_name, c_name)

        # Validate test failure
        self.validate_test_failure()
    def touch_test(self):
        """
        Drive the bucket into DGM, then touch() every originally loaded doc
        (now resident on disk) and verify each touch succeeds.
        """
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key, 0, self.num_items,
                                 doc_size=self.doc_size)
        # Open-ended generator: async_load_gen_docs keeps creating docs from
        # it until active_resident_threshold is reached (DGM state).
        dgm_gen = doc_generator(
            self.key, self.num_items, self.num_items+1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster, self.bucket_util.buckets[0], dgm_gen, "create", 0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master],
                           self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s"
                                  % (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    # NOTE(review): load_gen is a single-pass generator, so
                    # only the first collection iterated actually gets the
                    # touch workload — confirm this is intended.
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch", key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" % (key, result))
        client.close()

        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
def key_not_exists_test(self):
client = SDKClient([self.cluster.master], self.bucket)
collections = BucketUtils.get_random_collections(
[self.bucket], 1, 1, 1)
scope_dict = collections[self.bucket.name]["scopes"]
scope_name = scope_dict.keys()[0]
collection_name = scope_dict[scope_name]["collections"].keys()[0]
client.select_collection(scope_name, collection_name)
self.log.info("CAS test on collection %s: %s"
% (scope_name, collection_name))
load_gen = doc_generator(self.key, 0, self.num_items,
doc_size=256)
key, val = load_gen.next()
for _ in range(1500):
result = client.crud("create", key, val,
durability=self.durability_level,
timeout=self.sdk_timeout)
if result["status"] is False:
self.log_failure("Create failed: %s" % result)
create_cas = result["cas"]
# Delete and verify get fails
result = client.crud("delete", key,
durability=self.durability_level,
timeout=self.sdk_timeout)
if result["status"] is False:
self.log_failure("Delete failed: %s" % result)
elif result["cas"] <= create_cas:
self.log_failure("Delete returned invalid cas: %s" % result)
result = client.crud("read", key,
timeout=self.sdk_timeout)
if result["status"] is True:
self.log_failure("Read succeeded after delete: %s" % result)
elif SDKException.DocumentNotFoundException \
not in str(result["error"]):
self.log_failure("Invalid exception during read "
"for non-exists key: %s" % result)
# cas errors do not sleep the test for 10 seconds,
# plus we need to check that the correct error is being thrown
result = client.crud("replace", key, val, exp=60,
timeout=self.sdk_timeout,
cas=create_cas)
if result["status"] is True:
self.log_failure("Replace succeeded after delete: %s" % result)
if SDKException.DocumentNotFoundException \
not in str(result["error"]):
self.log_failure("Invalid exception during read "
"for non-exists key: %s" % result)
# Validate doc count as per bucket collections
self.bucket_util.validate_docs_per_collections_all_buckets()
self.validate_test_failure()
|
# Generated by Django 2.2 on 2021-03-10 02:57
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add date-tracking fields: Person.submission_date and
    Review.review_date, both defaulting to django.utils.timezone.now."""

    dependencies = [
        ('CFP_Portal', '0015_auto_20210310_0105'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='submission_date',
            field=models.DateField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='review',
            name='review_date',
            field=models.DateField(default=django.utils.timezone.now),
        ),
    ]
|
class Solution:
    # @param S, a list of integer
    # @return a list of lists of integer
    def subsets(self, S):
        """Return the power set of S.

        S is sorted in place first; subset ``mask`` contains S[bit] for every
        set bit, so subsets appear in bitmask order (empty set first).
        """
        S.sort()
        count = len(S)
        ans = []
        for mask in range(1 << count):
            ans.append([S[bit] for bit in range(count) if mask >> bit & 1])
        return ans
# Demo: print the power set of [1].
s = Solution()
# FIX: use print as a function — the Python 2 print statement is a
# SyntaxError under Python 3; the call form works on both.
print(s.subsets([1]))
import torchvision
# Pretrained torchvision models keyed by name.
# NOTE: instantiated eagerly at import time, which downloads weights on
# first use.
_CLASSIFIER_NAMES = ('inception_v3', 'resnet18', 'resnet50',
                     'resnet101', 'googlenet')

models_dict = {
    name: getattr(torchvision.models, name)(pretrained=True)
    for name in _CLASSIFIER_NAMES
}
# Segmentation model lives under the torchvision.models.segmentation
# namespace, so it is added separately.
models_dict['deeplabv3_resnet50'] = \
    torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
if __name__ == '__main__':
    # Print 0..31 as zero-padded 5-bit binary strings.
    for i in range(32):
        # format() zero-pads to width 5 directly, replacing the original
        # manual bin()/len()/'0'* padding.
        print(format(i, '05b'))
# Plain-text email bodies for the Ruddock website, filled via str.format().
# Placeholder meanings are documented above each template.

# {0}: recipient name
PasswordChangedEmail = \
"""Hi {0},
Your password has been successfully changed. If you did not request a password
change, please let an IMSS rep know immediately.
Thanks!
The Ruddock Website
"""

# {0}: recipient name, {1}: reset link, {2}: link lifetime (human-readable)
ResetPasswordEmail = \
"""Hi {0},
We have received a request to reset this account's password. If you didn't
request this change, let an IMSS rep know immediately. Otherwise, you can use
this link to change your password:
{1}
Your link will expire in {2}.
Thanks!
The Ruddock Website
"""

# {0}: recipient name
ResetPasswordSuccessfulEmail = \
"""Hi {0},
Your password has been successfully reset. If you did not request a password
reset, please let an IMSS rep know immediately.
Thanks!
The Ruddock Website
"""

# {0}: recipient name, {1}: registration link
AddedToWebsiteEmail = \
"""Hi {0},
You have been added to the Ruddock House Website. In order to access private
areas of our site, please complete registration by creating an account here:
{1}
If you have any questions or concerns, please find an IMSS rep or email us at
imss@ruddock.caltech.edu.
Thanks!
The Ruddock Website
"""

# {0}: recipient name, {1}: account-creation link
CreateAccountRequestEmail = \
"""Hi {0},
To create an account on the Ruddock Website, please use this link:
{1}
If you did not initiate this request, please let an IMSS rep know immediately.
Thanks!
The Ruddock Website
"""

# {0}: recipient name, {1}: username
CreateAccountSuccessfulEmail = \
"""Hi {0},
Your Ruddock Website account with the username "{1}" has been created. If this
was not you, please let an IMSS rep know immediately.
Thanks!
The Ruddock Website
"""

# {0}: newly added members list, {1}: skipped (already present) members list
MembersAddedEmail = \
"""The following members have been added to the Ruddock Website:
{0}
and the following members were skipped (they were already in the database):
{1}
You should run the email update script to add the new members.
Thanks!
The Ruddock Website
"""

# {0}: formatted exception/traceback text
ErrorCaughtEmail = \
"""An exception was caught by the website. This is probably a result of a bad
server configuration or bugs in the code, so you should look into this. This
was the exception:
{0}
"""
|
from django.db import models
from account.models import StudentUser
from result.models import Class
from django.utils.translation import ugettext as _
class Attendance(models.Model):
    """One attendance record: a student's presence in a class at a datetime."""

    # Weekday codes stored as strings '1' (Sunday) .. '7' (Saturday).
    # NOTE(review): ugettext is removed in Django 4.0 — gettext/gettext_lazy
    # would be the modern spelling; verify the project's Django version.
    DAY_OF_THE_WEEK = [
        ('1',_(u'Sunday')),
        ('2',_(u'Monday')),
        ('3',_(u'Tuesday')),
        ('4',_(u'Wednesday')),
        ('5',_(u'Thursday')),
        ('6',_(u'Friday')),
        ('7',_(u'Saturday')),
    ]

    student = models.ForeignKey(StudentUser, on_delete=models.DO_NOTHING)
    date_time = models.DateTimeField()
    day = models.CharField(max_length=2, choices=DAY_OF_THE_WEEK)
    std_class = models.ForeignKey(Class, on_delete=models.DO_NOTHING)
    is_present = models.BooleanField()

    def __str__(self):
        return f'{self.student} {self.day} {self.date_time}'

    class Meta:
        # At most one record per (student, datetime, weekday, class).
        unique_together = ('student', 'date_time', 'day', 'std_class')
|
#########################################################
#########################################################
##################### TWITTER #####################
#########################################################
#### in case of twitter API interrupting, this code ####
# still works (there are few try-exception in the code) #
#########################################################
import tweepy as tw
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import os
import numpy as np
import psycopg2
from nltk.corpus import stopwords
import re
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import pickle
from .. import UTILS_FOLDER_PATH
## PARAMS
# Pickle locations for the trained topic classifier and TF-IDF vectorizer.
TOPIC_MODELLING_CLASSFIER_PATH = UTILS_FOLDER_PATH+"topic_model.pickle"
TFIDF_PATH = UTILS_FOLDER_PATH+"tfidf.pickle"

# SECURITY NOTE(review): live Twitter API credentials are hard-coded in
# source control — these should be rotated and loaded from environment
# variables or a secrets store.
consumer_key = "n1s4JvfETvz0hv8xsZxextI4K"
consumer_secret = "C1yHFjCW6ZIu3BjV9L5vj2huCEZW2jK14SQHkkxyXDx7RSmUf1"
access_key = "1367830484173066243-iiTH7gTAP7xiRVIAkk8zObE0q0d3xu"
access_secret = "b8MWjtlO52sEA5cgsoy4CfcS4nPKs5ar9x3yHDd1agBPE"
# TWITTER authentication :
# On failure `api` stays undefined; get_users_feedbacks() tolerates that via
# its own error handling and returns (0, 0).
try:
    print('Connecting to TWITTER API ...')
    auth = tw.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tw.API(auth, wait_on_rate_limit=True)
except Exception:
    # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed during import.
    print("Cannot connect to TWITTER API !")
# read the data (composed from our recipes steps and a foreign text)
def get_data():
    """Return (x_data, y_data) where label 0 marks foreign-corpus lines and
    label 1 marks recipe steps/ingredients fetched from the database."""
    # load the dataset
    # FIX: context manager — the original open() leaked the file handle.
    with open('recipes/scripts/topic_modelling/corpus.txt') as corpus_file:
        other_data = corpus_file.readlines()
    print(f"other data count: {len(other_data)}")
    # SECURITY NOTE(review): hard-coded DB credentials — move to env/config.
    conn = psycopg2.connect(
        host="157.230.24.228",
        database="cookix_db",
        user="cookix_user_db",
        password="f9d6UVP6gxEqueopMCiKdpjC0A5Pi5Ww",
    )
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT steps FROM recipes_recipe;")
        recipes_steps = cursor.fetchall()
        recipes_steps = [steps[0] for steps in recipes_steps]
        recipes_steps = [steps for steps in recipes_steps if steps.strip() != ""]
        print(f"steps recipes count: {len(recipes_steps)}")
        cursor.execute("SELECT ingredients FROM recipes_recipe;")
        # get all recipes ingredients
        recipes_ingredients = cursor.fetchall()
        recipes_ingredients = [ings[0] for ings in recipes_ingredients]
        recipes_ingredients = [ings for ings in recipes_ingredients if ings.strip() != ""]
        print(f"ingredients recipes count: {len(recipes_ingredients)}")
    finally:
        # FIX: always release the DB connection (it was never closed before).
        conn.close()
    x_data = other_data + recipes_steps + recipes_ingredients
    y_data = list(np.zeros((len(other_data)), dtype="int")) + list(
        np.ones((len(recipes_steps) + len(recipes_ingredients)), dtype="int"))
    return x_data, y_data
def clean_text(sentences):
    """Normalize each sentence: lowercase, keep only [a-z], commas and
    spaces, drop single-character words, and collapse runs of whitespace.

    Returns a new list; input order is preserved.
    """
    # FIX: removed `stop_words = set(stopwords.words('english'))` — it was
    # recomputed on every call, never used, and forced an nltk-data
    # dependency (stopword removal below was already commented out).
    processed_sen = []
    for sen in sentences:
        # keep only text
        sentence = re.sub(r"[^a-z, ]+", ' ', sen.lower())
        # drop isolated single characters ("i", stray letters)
        sentence = re.sub(r'\b\w\b', ' ', sentence.strip())
        # Removing multiple spaces
        sentence = re.sub(r'\s+', ' ', sentence.strip())
        processed_sen.append(sentence)
    return processed_sen
# transform text sentences to TF IDF
def transform_to_tfidf(data):
    """Fit a TF-IDF vectorizer on *data*, persist it to TFIDF_PATH, and
    return the transformed sparse matrix (sentences x features)."""
    tfidf = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
    xdata_tfidf = tfidf.fit_transform(data)
    print("output shape (sentences_count, features_count): ", xdata_tfidf.shape)
    # Persist the fitted vectorizer so inference reuses the same vocabulary.
    with open(TFIDF_PATH, 'wb') as file:
        pickle.dump(tfidf, file, protocol=pickle.HIGHEST_PROTOCOL)
    return xdata_tfidf
# train the topic
def train_topic_model_classifier(classifier_name=TOPIC_MODELLING_CLASSFIER_PATH):
    """Train the cooking-vs-other SVC on corpus + DB text, pickle it to
    *classifier_name*, and return the fitted classifier.

    Side effect: transform_to_tfidf() also writes the fitted vectorizer
    to TFIDF_PATH.
    """
    x_data, y_data = get_data()
    processed_data = clean_text(x_data)
    tfidf_data = transform_to_tfidf(processed_data)
    classifier = svm.SVC()
    classifier.fit(tfidf_data, y_data)
    with open(classifier_name, 'wb') as file:
        pickle.dump(classifier, file, protocol=pickle.HIGHEST_PROTOCOL)
    return classifier
## TOPIC MODELLING CLASSIFIER
def filter_recipes_topic(sentences):
    """Return only the sentences the classifier labels as cooking-related.

    Relies on the module-level `tfidf` vectorizer and `classifier`
    loaded/trained at import time.
    """
    tfidf_sentences = tfidf.transform(sentences).toarray()
    predictions = classifier.predict(tfidf_sentences)
    # label 1 == cooking topic (see get_data)
    cooking_topic_sentences = [sen for sen, pred in zip(sentences, predictions) if pred == 1]
    return cooking_topic_sentences
# return positive tweets percentage and count
def get_users_feedbacks(keywords, num_items=100, sentiment_pct = 0.01):
    """Search recent English tweets for *keywords*, keep the cooking-related
    ones, and score them with VADER.

    Returns (positive_percentage, positive_count); (0, 0) when nothing
    scored or when the Twitter API / pipeline fails (deliberate best-effort).
    """
    try:
        tweets = tw.Cursor(api.search,
                           q=keywords,
                           lang="en",
                           since='2020-11-01').items(num_items)
        all_tweets = [tweet.text for tweet in tweets]
        # remove duplicated tweets
        all_tweets = clean_text(all_tweets)
        all_tweets = list(set(all_tweets))
        cooking_sentences = filter_recipes_topic(all_tweets)
        sentiment_analyser = SentimentIntensityAnalyzer()
        pos_sen = 0
        neg_sen = 0
        for sen in cooking_sentences:
            # polarity_scores -> e.g. {'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}
            result = sentiment_analyser.polarity_scores(sen)
            if result['pos'] > sentiment_pct:
                pos_sen += 1
            elif result['neg'] > sentiment_pct:
                neg_sen += 1
        count = pos_sen + neg_sen
        print(count)
        if count > 0:
            return int((pos_sen / count) * 100), pos_sen
        else:
            return 0, 0
    except Exception:
        # FIX: narrowed from a bare `except:` — keeps the intended
        # best-effort fallback without swallowing SystemExit/KeyboardInterrupt.
        return 0, 0
# Load (or train on first run) the topic classifier and TF-IDF vectorizer
# used by filter_recipes_topic().
if not os.path.isfile(TOPIC_MODELLING_CLASSFIER_PATH):
    # FIX: bind the freshly trained artifacts to the module globals.
    # Previously the trained classifier was discarded and `tfidf` was never
    # defined on a fresh deployment, so filter_recipes_topic() raised
    # NameError until the process was restarted.
    classifier = train_topic_model_classifier()
    # transform_to_tfidf() (called during training) wrote the vectorizer.
    with open(TFIDF_PATH, 'rb') as file:
        tfidf = pickle.load(file)
else:
    print("loading topic modeling classifier ...")
    with open(TFIDF_PATH, 'rb') as file:
        tfidf = pickle.load(file)
    with open(TOPIC_MODELLING_CLASSFIER_PATH, 'rb') as file:
        classifier = pickle.load(file)
|
import os
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
def filter_file(filename, filter_path):
    """Export out-of-range rows to errors_a.xlsx / errors_b.xlsx, repair the
    data (missing B7 -> 0, a-scores > 100 -> /10) and save filter_data1.xlsx."""
    all_data = pd.read_excel(filename)
    a_list = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8']
    # Rows where any a-score falls outside [0, 100]
    indexes = False
    for item in a_list:
        indexes = ((all_data.loc[:, item] > 100) | (all_data.loc[:, item] < 0)) | indexes
    error_a_data = all_data[indexes]
    error_a_file = os.path.join(filter_path, 'errors_a.xlsx')
    error_a_data.to_excel(error_a_file, index=False)
    # Rows missing B7
    error_b_data = all_data[pd.isnull(all_data.loc[:, 'B7'])]
    error_b_file = os.path.join(filter_path, 'errors_b.xlsx')
    error_b_data.to_excel(error_b_file, index=False)
    # Repair B7: missing values become 0.
    # FIX: vectorized fillna replaces the original O(rows) per-cell loop.
    all_data['B7'] = all_data['B7'].fillna(0)
    # Repair a-scores: values recorded on a 0-1000 scale are scaled to 0-100.
    # FIX: vectorized mask assignment replaces the nested per-cell loop.
    for a_tag in a_list:
        too_big = all_data[a_tag] > 100
        all_data.loc[too_big, a_tag] = all_data.loc[too_big, a_tag] / 10
    filter_file1 = os.path.join(filter_path, 'filter_data1.xlsx')
    all_data.to_excel(filter_file1, index=False)
def filter_split_file(filename, filter_path):
    """Split the spreadsheet by brand type (品牌类型 1-3) into one
    type_<k>.xlsx file per type under *filter_path*."""
    frame = pd.read_excel(filename)
    for brand_type in (1, 2, 3):
        subset = frame[frame['品牌类型'] == brand_type]
        out_path = os.path.join(filter_path, 'type_%d.xlsx' % brand_type)
        subset.to_excel(out_path, index=False)
def create_dataset(filename,processed_path,percentage):
    """Build one shuffled train/test split per brand type (品牌类型 1-3) and
    save each to <processed_path>/train/dataset_type<k>.npz.

    percentage: fraction of rows (after shuffling) used for training.
    """
    # Prepare the dataset
    all_data = pd.read_excel(filename)
    # Label-a columns (percent scores, rescaled to 0-1)
    a_list = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8']
    # Continuous label-b columns, grouped by normalization scheme
    b_z_score = ['B2','B4','B8','B10','B13','B14','B15']
    b_max_min = ['B5','B7']
    b_other = ['B16','B17']
    b_c_list = ['B2', 'B4', 'B5', 'B7', 'B8', 'B10', 'B13', 'B14', 'B15', 'B16', 'B17']
    # Discrete label-b columns
    b_d_list = ['B1', 'B3', 'B6', 'B9', 'B11', 'B12']
    split_path = os.path.join(processed_path, 'train')
    for num in range(1,4):
        data = all_data[all_data['品牌类型'] == num]
        data_length = len(data)
        pred_data = data.loc[:,'购买意愿']
        a_data = data.loc[:, a_list]
        a_data = a_data / 100.0
        b_c_data = data.loc[:,b_c_list]
        # Min-max normalize B5/B7, z-score the rest, percent-scale B16/B17
        b_c_data.loc[:, b_max_min] = (b_c_data.loc[:, b_max_min]-b_c_data.loc[:, b_max_min].min())\
                                     /(b_c_data.loc[:, b_max_min].max()-b_c_data.loc[:, b_max_min].min())
        b_c_data.loc[:,b_z_score] = (b_c_data.loc[:,b_z_score]-b_c_data.loc[:,b_z_score].mean())\
                                    /b_c_data.loc[:,b_z_score].std()
        b_c_data.loc[:,b_other] /= 100
        b_d_data = data.loc[:, b_d_list]
        # Merge the rare B9 category 8 into 7, then shift all codes to 0-based
        for index,item in b_d_data.iterrows():
            value = b_d_data.loc[index, 'B9']
            if value == 8:
                b_d_data.loc[index, 'B9'] = 7
        b_d_data = b_d_data - 1
        # Split the data into train and test partitions
        train_len = int(percentage*data_length)
        # Shuffle the data (stack columns so rows stay aligned)
        pred_data = pred_data.values.reshape(data_length,1)
        values = [a_data.values,b_c_data.values,b_d_data.values,pred_data]
        output = np.hstack(values)
        output = shuffle(output)
        # Separate train and test data
        a_len = len(a_list)
        b_c_len = len(b_c_list)
        b_d_len = len(b_d_list)
        a_data = output[:,:a_len]
        b_c_data = output[:,a_len:a_len+b_c_len]
        b_d_data = output[:,a_len+b_c_len:a_len+b_c_len+b_d_len]
        pred_data = output[:,-1].reshape(data_length,1)
        train_a_data = a_data[:train_len]
        test_a_data = a_data[train_len:]
        train_b_c_data = b_c_data[:train_len]
        test_b_c_data = b_c_data[train_len:]
        train_b_d_data = b_d_data[:train_len]
        test_b_d_data = b_d_data[train_len:]
        train_pred_data = pred_data[:train_len]
        test_pred_data = pred_data[train_len:]
        train_data = {"X":[train_a_data,train_b_c_data,train_b_d_data],
                      "Y":train_pred_data,
                      "length":len(train_pred_data)}
        test_data = {"X":[test_a_data,test_b_c_data,test_b_d_data],
                     "Y":test_pred_data,
                     "length":len(test_pred_data)}
        print("train dataset length:",len(train_pred_data))
        print("test dataset length:",len(test_pred_data))
        filename = os.path.join(split_path,"dataset_type%d.npz"%num)
        np.savez(filename,train_data=train_data,test_data=test_data)
def create_all_dataset(filename,processed_path,percentage):
    """Build one shuffled train/test split over ALL brand types (品牌类型
    becomes a discrete feature) and save it to
    <processed_path>/train/dataset.npz, keeping customer ids alongside.

    percentage: fraction of rows (after shuffling) used for training.
    """
    # Prepare the dataset
    all_data = pd.read_excel(filename)
    # Label-a columns (percent scores, rescaled to 0-1)
    a_list = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8']
    # Continuous label-b columns, grouped by normalization scheme
    b_z_score = ['B2', 'B4', 'B8', 'B10', 'B13', 'B14', 'B15']
    b_max_min = ['B5', 'B7']
    b_other = ['B16', 'B17']
    b_c_list = ['B2', 'B4', 'B5', 'B7', 'B8', 'B10', 'B13', 'B14', 'B15', 'B16', 'B17']
    # Discrete label-b columns (brand type is included here)
    b_d_list = ['B1', 'B3', 'B6', 'B9', 'B11', 'B12','品牌类型']
    split_path = os.path.join(processed_path, 'train')
    data_length = len(all_data)
    pred_data = all_data.loc[:, '购买意愿']
    user_indexes = all_data.loc[:,'目标客户编号']
    a_data = all_data.loc[:, a_list]
    a_data = a_data / 100.0
    b_c_data = all_data.loc[:, b_c_list]
    # Min-max normalize B5/B7, z-score the rest, percent-scale B16/B17
    b_c_data.loc[:, b_max_min] = (b_c_data.loc[:, b_max_min] - b_c_data.loc[:, b_max_min].min()) \
                                 / (b_c_data.loc[:, b_max_min].max() - b_c_data.loc[:, b_max_min].min())
    b_c_data.loc[:, b_z_score] = (b_c_data.loc[:, b_z_score] - b_c_data.loc[:, b_z_score].mean()) \
                                 / b_c_data.loc[:, b_z_score].std()
    b_c_data.loc[:, b_other] /= 100
    b_d_data = all_data.loc[:, b_d_list]
    # Merge the rare B9 category 8 into 7, then shift all codes to 0-based
    for index, item in b_d_data.iterrows():
        value = b_d_data.loc[index, 'B9']
        if value == 8:
            b_d_data.loc[index, 'B9'] = 7
    b_d_data = b_d_data - 1
    # Split the data into train and test partitions
    train_len = int(percentage * data_length)
    # Shuffle the data (stack columns so rows stay aligned; last two columns
    # are the label and the customer id)
    pred_data = pred_data.values.reshape(data_length, 1)
    user_indexes = user_indexes.values.reshape(data_length, 1)
    values = [a_data.values, b_c_data.values, b_d_data.values, pred_data,user_indexes]
    output = np.hstack(values)
    output = shuffle(output)
    # Separate train and test data
    a_len = len(a_list)
    b_c_len = len(b_c_list)
    b_d_len = len(b_d_list)
    a_data = output[:, :a_len]
    b_c_data = output[:, a_len:a_len + b_c_len]
    b_d_data = output[:, a_len + b_c_len:a_len + b_c_len + b_d_len]
    pred_data = output[:, -2].reshape(data_length, 1)
    user_indexes = output[:, -1].reshape(data_length, 1)
    train_a_data = a_data[:train_len]
    test_a_data = a_data[train_len:]
    train_b_c_data = b_c_data[:train_len]
    test_b_c_data = b_c_data[train_len:]
    train_b_d_data = b_d_data[:train_len]
    test_b_d_data = b_d_data[train_len:]
    train_pred_data = pred_data[:train_len]
    train_indexes = user_indexes[:train_len]
    test_pred_data = pred_data[train_len:]
    test_indexes = user_indexes[train_len:]
    train_data = {"X": [train_a_data, train_b_c_data, train_b_d_data],
                  "Y": train_pred_data,
                  "length": len(train_pred_data),
                  'index':train_indexes}
    test_data = {"X": [test_a_data, test_b_c_data, test_b_d_data],
                 "Y": test_pred_data,
                 "length": len(test_pred_data),
                 'index':test_indexes}
    print("train dataset length:", len(train_pred_data))
    print("test dataset length:", len(test_pred_data))
    filename = os.path.join(split_path, "dataset.npz")
    np.savez(filename, train_data=train_data, test_data=test_data)
def create_test_dataset(filename,processed_path):
    """Build the unlabeled inference dataset (same preprocessing as
    create_all_dataset, no target column) and save it to
    <processed_path>/train/test_dataset.npz."""
    # Prepare the dataset
    all_data = pd.read_excel(filename)
    # Label-a columns (percent scores, rescaled to 0-1)
    a_list = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8']
    # Continuous label-b columns, grouped by normalization scheme
    b_z_score = ['B2', 'B4', 'B8', 'B10', 'B13', 'B14', 'B15']
    b_max_min = ['B5', 'B7']
    b_other = ['B16', 'B17']
    b_c_list = ['B2', 'B4', 'B5', 'B7', 'B8', 'B10', 'B13', 'B14', 'B15', 'B16', 'B17']
    # Discrete label-b columns — NOTE(review): the trailing space in
    # '品牌编号 ' matches the source spreadsheet's header; do not "fix" it.
    b_d_list = ['B1', 'B3', 'B6', 'B9', 'B11', 'B12', '品牌编号 ']
    user_indexes = all_data.loc[:, '客户编号']
    a_data = all_data.loc[:, a_list]
    a_data = a_data / 100.0
    b_c_data = all_data.loc[:, b_c_list]
    # Min-max normalize B5/B7, z-score the rest, percent-scale B16/B17
    b_c_data.loc[:, b_max_min] = (b_c_data.loc[:, b_max_min] - b_c_data.loc[:, b_max_min].min()) \
                                 / (b_c_data.loc[:, b_max_min].max() - b_c_data.loc[:, b_max_min].min())
    b_c_data.loc[:, b_z_score] = (b_c_data.loc[:, b_z_score] - b_c_data.loc[:, b_z_score].mean()) \
                                 / b_c_data.loc[:, b_z_score].std()
    b_c_data.loc[:, b_other] /= 100
    b_d_data = all_data.loc[:, b_d_list]
    # Merge the rare B9 category 8 into 7, then shift all codes to 0-based
    for index, item in b_d_data.iterrows():
        value = b_d_data.loc[index, 'B9']
        if value == 8:
            b_d_data.loc[index, 'B9'] = 7
    b_d_data = b_d_data - 1
    a_data = a_data.values
    b_c_data = b_c_data.values
    b_d_data = b_d_data.values
    user_indexes = user_indexes.values
    test_data = {"X": [a_data, b_c_data,b_d_data],
                 "length": len(a_data),
                 'index': user_indexes}
    split_path = os.path.join(processed_path, 'train')
    filename = os.path.join(split_path, "test_dataset.npz")
    np.savez(filename, test_data = test_data)
|
from setuptools import setup, find_packages, Extension
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="dnn_from_scratch",
version="0.1.dev1",
author="Shivam Shrirao",
author_email="shivamshrirao@gmail.com",
description="A high level deep learning library for Convolutional Neural Networks,GANs and more, made from scratch(numpy/cupy implementation).",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ShivamShrirao/dnn_from_scratch",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Development Status :: 1 - Planning",
"Environment :: GPU :: NVIDIA CUDA",
],
python_requires='>=3.6',
package_data={"": ["libctake.so"]}
) |
from rest_framework import serializers
from .models import *
from django.db import models
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Event: id, name, date, description, image."""
    class Meta:
        model = Event
        fields = ('id', 'name', 'date', 'description' , 'image')
class TrackSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Track: id, name, status."""
    class Meta:
        model = Track
        fields = ('id', 'name', 'status' )
class NewsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for News: id, title, date, description, image."""
    class Meta:
        model = News
        fields = ('id', 'title', 'date', 'description','image')
class AdvertisingSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Advertising: id, name, type, time, file."""
    class Meta:
        model = Advertising
        fields = ('id', 'name', 'type', 'time', 'file')
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for Category: id, name."""
    class Meta:
        model = Category
        fields = ('id', 'name')
class ShopSerializer(serializers.ModelSerializer):
    """Serializer for Shop; `category` is flattened to the related
    Category's name (read-only)."""
    category = serializers.StringRelatedField(source='category.name', read_only=True, many =False)
    class Meta:
        model = Shop
        fields = ('id', 'name', 'category', 'description', 'image','location')
class PlayListAdvertisingSerializer(serializers.ModelSerializer):
    """Serializer for PlayListAdvertising with nested, read-only
    advertisings."""
    advertisings = AdvertisingSerializer(many=True,read_only=True)
    class Meta:
        model = PlayListAdvertising
        fields = ('id' , 'name','advertisings')
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 12:45:02 2019
@author: ZhuangChi
"""
"""
请你来实现一个 atoi 函数,使其能将字符串转换成整数。
首先,该函数会根据需要丢弃无用的开头空格字符,直到寻找到第一个非空格的字符为止。
当我们寻找到的第一个非空字符为正或者负号时,则将该符号与之后面尽可能多的连续数字组合起来,作为该整数的正负号;假如第一个
非空字符是数字,则直接将其与之后连续的数字字符组合起来,形成整数。
该字符串除了有效的整数部分之后也可能会存在多余的字符,这些字符可以被忽略,它们对于函数不应该造成影响。
注意:假如该字符串中的第一个非空格字符不是一个有效整数字符、字符串为空或字符串仅包含空白字符时,则你的函数不需要进行转换。
在任何情况下,若函数不能进行有效的转换时,请返回 0。
说明:
假设我们的环境只能存储 32 位大小的有符号整数,那么其数值范围为 [−2**31, 2**31 − 1]。如果数值超过这个范围,请返回
INT_MAX (2**31 − 1) 或 INT_MIN (−2**31) 。
示例 1:
输入: "42"
输出: 42
示例 2:
输入: " -42"
输出: -42
解释: 第一个非空白字符为 '-', 它是一个负号。
我们尽可能将负号与后面所有连续出现的数字组合起来,最后得到 -42 。
示例 3:
输入: "4193 with words"
输出: 4193
解释: 转换截止于数字 '3' ,因为它的下一个字符不为数字。
示例 4:
输入: "words and 987"
输出: 0
解释: 第一个非空字符是 'w', 但它不是数字或正、负号。
因此无法执行有效的转换。
示例 5:
输入: "-91283472332"
输出: -2147483648
解释: 数字 "-91283472332" 超过 32 位有符号整数范围。
因此返回 INT_MIN (−231) 。
"""
"""
执行结果:通过
执行用时 :36 ms, 在所有 Python 提交中击败了38.68%的用户
内存消耗 :11.7 MB, 在所有 Python 提交中击败了30.82%的用户
class Solution(object):
def myAtoi(self,s):
INT_MIN = -2**31
INT_MAX = 2**31-1
result = ''
s = s.strip(' ')
if s == '':
return 0
i=0
if ord(s[i])== 45 or ord(s[i])== 43 or 48<=ord(s[i])<=57:
result += s[i]
i+=1
else:
return 0
for j in range(i,len(s)):
if 48<=ord(s[j])<=57:
result += s[j]
else:
break
if len(result)==1:
if result[0]=='-' or result[0]=='+':
return 0
else:
return result
if result[0]=='-':
result = -(int(result[1:]))
elif result[0]=='+':
result = int(result[1:])
else:
result = int(result)
if result<-2**31:
return INT_MIN
elif result>2**31-1:
return INT_MAX
else:
return result
"""
"""
ord('a'):97
chr(97):'a'
"""
import re
class Solution:
    def myAtoi(self, s: str) -> int:
        """Parse a leading, optionally signed integer from *s* after
        stripping leading whitespace, clamped to the signed 32-bit range.
        Returns 0 when no integer prefix exists."""
        stripped = s.lstrip()
        match = re.match(r'[+-]?\d+', stripped)
        value = int(match.group()) if match else 0
        # Clamp into [INT_MIN, INT_MAX] = [-2**31, 2**31 - 1]
        return min(max(value, -2**31), 2**31 - 1)
|
from django.contrib import admin
from djcelery.admin import PeriodicTaskAdmin
from djcelery.models import PeriodicTask
class PeriodicTaskDbaas(PeriodicTaskAdmin):
    """PeriodicTask admin extended with bulk enable/disable actions."""
    actions = ['action_enable_tasks', 'action_disable_tasks']

    def _set_tasks_status(self, queryset, status):
        # save() each task individually (rather than queryset.update()) so
        # per-instance save logic and signals still run.
        for periodic_task in queryset:
            periodic_task.enabled = status
            periodic_task.save()

    def action_enable_tasks(self, request, queryset):
        self._set_tasks_status(queryset, True)
    action_enable_tasks.short_description = "Enable selected tasks"

    def action_disable_tasks(self, request, queryset):
        self._set_tasks_status(queryset, False)
    action_disable_tasks.short_description = "Disable selected tasks"
# Swap the stock PeriodicTask admin for the DBaaS version with bulk actions.
admin.site.unregister(PeriodicTask)
admin.site.register(PeriodicTask, PeriodicTaskDbaas)
|
import os, pickle
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_samples, silhouette_score
from collections import Counter
from pathlib import PurePath
# Resolve this script's location so sibling pickles can be found via
# relative parent directories.
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
#==============================================================================
# Load first-level clustering artifacts produced earlier in the pipeline.
df_results = pd.read_pickle(str(p.parents[6])+'/test/df_results.plk')
with open(str(p.parents[1])+'/dataset_per_cluster.pickle', 'rb') as handle:
    dataset_per_cluster = pickle.load(handle)
with open(str(p.parents[1])+'/datasetIndex_per_cluster.pickle', 'rb') as handle:
    cluster_indexes = pickle.load(handle)
with open(str(p.parents[1])+'/models_per_cluster_ROC.pickle', 'rb') as handle:
    roc_indexes = pickle.load(handle)
#==============================================================================
# CLUSTER 1
#==============================================================================
#For the datasets in cluster 1, create a dataframe with their statistical
#and information-theoretic meta-features to perform clustering on:
cluster_1_data = df_results.iloc[cluster_indexes[1]]
cluster_1_data.index = list(np.arange(0,len(cluster_1_data.index)))
# Columns 7, 9, 11, 18, 19 hold the meta-features; standardize before k-means.
cluster_1_data_numpy = df_results.iloc[cluster_indexes[1], [7,9,11,18,19]].to_numpy()
cluster_1_data_numpy = StandardScaler().fit(cluster_1_data_numpy).transform(cluster_1_data_numpy)
#==============================================================================
#==========================SILHOUETTE METHOD===================================
#==============================================================================
# Try k = 2 and 3 sub-clusters and score each with the average silhouette.
range_n_clusters = [2, 3]
silhouette_averages = []
for n_clusters in range_n_clusters:
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(cluster_1_data_numpy)
    silhouette_avg = silhouette_score(cluster_1_data_numpy, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    silhouette_averages.append(silhouette_avg)
    sample_silhouette_values = silhouette_samples(cluster_1_data_numpy, cluster_labels)
#Silhouete graph
x = list(np.arange(2,4))
plt.xticks([2,3])
plt.xlabel('Number of clusters, $\it{k}$')
plt.ylabel("Average silhouette score")
plt.title("Silhouette analysis", weight = 'bold')
plt.axvline(x=2, linestyle='--', color='blue')
plt.plot(x, silhouette_averages) #=============> 2 sub-clusters
#==============================================================================
#==========================2-ND LEVEL CLUSTERING===============================
#==============================================================================
# Final second-level clustering with the chosen k = 2.
kmeans = KMeans(n_clusters = 2,
                init = "k-means++",
                n_init = 50,
                max_iter = 500).fit(cluster_1_data_numpy)
print(kmeans.labels_)
clusters_1 = list(kmeans.labels_)
# Bucket dataset positions by their sub-cluster label (0 or 1).
cluster_indexes_1 = {0: [], 1: []}
for index, value in enumerate(clusters_1):
    if value == 0:
        cluster_indexes_1[0].append(index)
    elif value == 1:
        cluster_indexes_1[1].append(index)
#==============================================================================
# BAR CHART FOR EACH CLUSTER - AUC-ROC
#==============================================================================
# Collect the best-performing model name (by AUC-ROC) per sub-cluster.
roc_indexes_1 = {0: [], 1: []}
for key in list(cluster_indexes_1.keys()):
    for j in cluster_indexes_1[key]:
        roc_indexes_1[key].append(cluster_1_data['Best model ROC'][j])
for key in list(roc_indexes_1.keys()):
    plt.figure()
    plt.bar(range(len(dict(Counter(roc_indexes_1[key])))), list(dict(Counter(roc_indexes_1[key])).values()), align='center', width = 0.25)
    plt.xticks(range(len(dict(Counter(roc_indexes_1[key])))), list(dict(Counter(roc_indexes_1[key])).keys()), rotation=45)
    plt.title("Sub-cluster {} of Cluster 1".format(key), weight = 'bold')
    plt.ylabel("Counts of best-performing model (AUC-ROC)")
#==============================================================================
# Save the datasets that lie within each sub-cluster
#==============================================================================
with open(str(p.parents[1])+'/datasetIndex_per_cluster.pickle', 'rb') as handle:
    indeces = pickle.load(handle)
cluster_1_data_ = df_results.iloc[indeces[1], [7,9,11,18,19]]
# Map sub-cluster label -> original df_results indices of its datasets.
datasets_in_each_sub_cluster_1 = {0: [], 1: []}
for key in list(datasets_in_each_sub_cluster_1.keys()):
    for j in cluster_indexes_1[key]:
        datasets_in_each_sub_cluster_1[key].append(cluster_1_data_.index[j])
# with open('subclusters_of_cluster_1.pickle', 'wb') as handle:
#     pickle.dump(datasets_in_each_sub_cluster_1, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
from datetime import date, datetime
from decimal import Decimal
from itertools import chain
from typing import (Any, Callable, Dict, Iterator, List, Optional, Set, Tuple,
Type, Union)
from uuid import UUID
from django.db.backends.sqlite3.base import DatabaseWrapper
from django.db.backends.utils import CursorWrapper
from django.db.models.base import Model
from django.db.models.expressions import (BaseExpression, Col, Expression,
OrderBy, RawSQL, SQLiteNumericMixin)
from django.db.models.fields import DateTimeCheckMixin, Field
from django.db.models.functions.text import Lower
from django.db.models.options import Options
from django.db.models.sql.query import Query, RawQuery
from django.utils.datastructures import ImmutableList
FORCE: Any  # module-level constant declared by the stubbed module; value not visible in this stub
class SQLCompiler:
    """Type stub for Django's SQL SELECT compiler (no runtime logic here)."""

    query: Any = ...
    connection: Any = ...
    using: Any = ...
    quote_cache: Any = ...
    select: Any = ...
    annotation_col_map: Any = ...
    klass_info: Any = ...
    ordering_parts: Any = ...
    def __init__(
        self,
        query: Union[Query, RawQuery],
        connection: DatabaseWrapper,
        using: Optional[str],
    ) -> None: ...
    col_count: Any = ...
    def setup_query(self) -> None: ...
    has_extra_select: Any = ...
    def pre_sql_setup(
        self
    ) -> Tuple[
        List[
            Tuple[OrderBy, Tuple[str, Union[List[Any], Tuple[str, str]]], None]
        ],
        List[Tuple[OrderBy, Tuple[str, List[Union[int, str]], bool]]],
        List[Tuple[str, List[float]]],
    ]: ...
    def get_group_by(
        self,
        select: List[
            Tuple[
                Union[BaseExpression, SQLiteNumericMixin],
                Tuple[str, List[float]],
                Optional[str],
            ]
        ],
        order_by: List[Tuple[OrderBy, Tuple[str, List[Union[int, str]], bool]]],
    ) -> List[Tuple[str, List[float]]]: ...
    def collapse_group_by(
        self,
        expressions: List[Expression],
        having: Union[List[Expression], Tuple],
    ) -> List[Expression]: ...
    def get_select(
        self
    ) -> Tuple[
        List[
            Tuple[
                Union[Expression, SQLiteNumericMixin],
                Tuple[str, List[Union[int, str]]],
                Optional[str],
            ]
        ],
        Optional[
            Dict[str, Any]
        ],
        Dict[str, int],
    ]: ...
    def get_order_by(
        self
    ) -> List[Tuple[OrderBy, Tuple[str, List[Any], bool]]]: ...
    def get_extra_select(
        self,
        order_by: List[Tuple[OrderBy, Tuple[str, List[Any], bool]]],
        select: List[
            Tuple[
                Union[Expression, SQLiteNumericMixin],
                Tuple[str, List[float]],
                Optional[str],
            ]
        ],
    ) -> List[Tuple[OrderBy, Tuple[str, List[Any]], None]]: ...
    def quote_name_unless_alias(self, name: str) -> str: ...
    def compile(
        self, node: Any, select_format: Any = ...
    ) -> Tuple[str, Union[List[Optional[int]], Tuple[int, int]]]: ...
    def get_combinator_sql(
        self, combinator: str, all: bool
    ) -> Tuple[List[str], Union[List[int], List[str]]]: ...
    def as_sql(
        self, with_limits: bool = ..., with_col_aliases: bool = ...
    ) -> Any: ...
    def get_default_columns(
        self,
        start_alias: Optional[str] = ...,
        opts: Optional[Options] = ...,
        from_parent: Optional[Type[Model]] = ...,
    ) -> List[Col]: ...
    def get_distinct(self) -> Tuple[List[Any], List[Any]]: ...
    def find_ordering_name(
        self,
        name: str,
        opts: Options,
        alias: Optional[str] = ...,
        default_order: str = ...,
        already_seen: Optional[
            Set[Tuple[Optional[Tuple[Tuple[str, str]]], Tuple[Tuple[str, str]]]]
        ] = ...,
    ) -> List[Tuple[OrderBy, bool]]: ...
    def get_from_clause(self) -> Tuple[List[str], List[Union[int, str]]]: ...
    def get_related_selections(
        self,
        select: List[Tuple[Expression, Optional[str]]],
        opts: Optional[Options] = ...,
        root_alias: Optional[str] = ...,
        cur_depth: int = ...,
        requested: Optional[
            Union[Dict[str, Dict[str, Dict[str, Dict[Any, Any]]]], bool]
        ] = ...,
        restricted: Optional[bool] = ...,
    ) -> List[Dict[str, Any]]: ...
    def get_select_for_update_of_arguments(self): ...
    def deferred_to_columns(self) -> Dict[Type[Model], Set[str]]: ...
    def get_converters(
        self, expressions: Union[List[RawSQL], List[SQLiteNumericMixin]]
    ) -> Dict[
        int, Tuple[List[Callable], Union[Expression, SQLiteNumericMixin]]
    ]: ...
    def apply_converters(
        self,
        rows: chain,
        converters: Dict[
            int, Tuple[List[Callable], Union[Expression, SQLiteNumericMixin]]
        ],
    ) -> Iterator[
        Union[
            List[Optional[Union[bytes, datetime, int, str]]],
            List[Optional[Union[date, Decimal, float, str]]],
            List[Optional[Union[datetime, float, str, UUID]]],
        ]
    ]: ...
    def results_iter(
        self,
        results: Optional[
            Union[Iterator[Any], List[List[Tuple[Union[int, str]]]]]
        ] = ...,
        tuple_expected: bool = ...,
        chunked_fetch: bool = ...,
        chunk_size: int = ...,
    ) -> Union[Iterator[Any], chain, map]: ...
    def has_results(self) -> bool: ...
    def execute_sql(
        self,
        result_type: str = ...,
        chunked_fetch: bool = ...,
        chunk_size: int = ...,
    ) -> Optional[Union[Iterator[Any], CursorWrapper]]: ...
    def as_subquery_condition(
        self, alias: str, columns: List[str], compiler: SQLCompiler
    ) -> Tuple[str, Tuple]: ...
    def explain_query(self) -> Iterator[str]: ...
class SQLInsertCompiler(SQLCompiler):
    """Type stub for Django's INSERT statement compiler."""

    return_id: bool = ...
    def field_as_sql(
        self, field: Optional[Field], val: Optional[Union[Lower, float, str]]
    ) -> Tuple[str, Union[List[int], List[str]]]: ...
    def prepare_value(
        self, field: Field, value: Any
    ) -> Optional[Union[Lower, float, str]]: ...
    def pre_save_val(self, field: Field, obj: Model) -> Any: ...
    def assemble_as_sql(
        self,
        fields: Union[
            List[None], List[DateTimeCheckMixin], List[Field], ImmutableList
        ],
        value_rows: Union[
            List[List[Optional[Union[Lower, int]]]], List[List[Union[int, str]]]
        ],
    ) -> Tuple[Tuple[Tuple[str]], List[List[Optional[Union[int, str]]]]]: ...
    def as_sql(self) -> List[Tuple[str, Tuple[Union[float, str]]]]: ...
    def execute_sql(self, return_id: Optional[bool] = ...) -> Any: ...
class SQLDeleteCompiler(SQLCompiler):
    """Type stub for Django's DELETE statement compiler."""

    def as_sql(self) -> Tuple[str, Tuple]: ...
class SQLUpdateCompiler(SQLCompiler):
    """Type stub for Django's UPDATE statement compiler."""

    def as_sql(self) -> Tuple[str, Tuple]: ...
    def execute_sql(self, result_type: str) -> int: ...
    def pre_sql_setup(self) -> None: ...
class SQLAggregateCompiler(SQLCompiler):
    """Type stub for Django's aggregate-query compiler."""

    col_count: Any = ...
    def as_sql(self) -> Tuple[str, Tuple]: ...
# Stub signature for the module-level helper that iterates a DB cursor in
# chunks of `itersize` rows until `sentinel` is returned.
def cursor_iter(
    cursor: CursorWrapper,
    sentinel: List[Any],
    col_count: Optional[int],
    itersize: int,
) -> Iterator[List[Tuple[Union[date, int]]]]: ...
|
# run this script to take a list of fighter names, create INSERT statements, and write them to a file
import unicodedata

from get_data import *


def _record_uninserted(name):
    """Append *name* to uninserted_fighters.txt (fighters we failed to process)."""
    with open('uninserted_fighters.txt', 'a') as w:
        w.write(name + '\n')


# Write the CSV header once; find_data() presumably appends the data rows —
# TODO(review): confirm against get_data.find_data.
with open('fighter_stats.csv', 'a') as f:
    f.write('name, age, height, weight, reach, gym\n')

list_of_fighter_names = all_fighters()
# Use a distinct loop variable instead of reusing `f` (previously shadowed the
# closed file handle above, which was confusing to read).
for fighter in list_of_fighter_names:
    try:
        f_dict = find_data(fighter)
        # NOTE(review): the sentinel 6 appears to mean "inserted OK"; any other
        # return value is recorded as uninserted — confirm in get_data.
        if f_dict != 6:
            _record_uninserted(fighter)
    except IndexError:
        # Likely a Wikipedia disambiguation page: retry with "_(fighter)".
        print('Try adding \"_(fighter)\" to the end of the wiki link')
        try:
            df = find_data(fighter + "_(fighter)")
        except Exception:
            # `except Exception` instead of a bare except so Ctrl-C still works.
            print("")
            print("Adding (fighter) didn't work for " + fighter + '. The name of the fighter has been saved to \"uninserted_list\" text')
            print("")
            _record_uninserted(fighter)
    except UnicodeEncodeError:
        # Strip accents/diacritics and retry with the ASCII-normalized name.
        print('Normalize weird characters')
        try:
            normal = unicodedata.normalize('NFKD', fighter).encode('ASCII', 'ignore')
            normal = normal.decode()
            df = find_data(normal)
        except Exception:
            print("")
            print("Encoding did not work for " + fighter + '. The name of the fighter has been saved to \"uninserted_list\" text')
            print("")
            _record_uninserted(fighter)
    except Exception:
        # Catch-all so one bad page doesn't abort the whole run.
        print('Unexpected error for ' + fighter + '. The name of the fighter has been saved to \"uninserted_list\" text')
        _record_uninserted(fighter)
import math as m
from seqfold import dg, dg_cache, fold, Struct  # download seqfold by JJ Timons from github
from typing import List

# Compute the mean folding energy over sliding windows for each FASTA record
# and append "<header>:<mean dG>" lines to deltaG.txt.
header = num = ''
count = add = 0
with open("input1.txt", "r") as file1:  # single line fasta file required
    read = file1.read()
data = read.split("\n")
for each in data:
    if ">" in each:
        header = each
    else:
        # Step of 30 nt because ribosomes shadow that many nucleotides; each
        # window spans 42 nt (change 42 to suit your requirement).
        for i in range(0, len(each) - 30, 30):
            # Renamed from `str`, which shadowed the builtin type.
            window = each[i:i + 42:]
            dg(window, temp=37.0)  # NOTE(review): return value unused — presumably primes seqfold's cache; confirm
            structs: List[Struct] = fold(window)
            num = sum(s.e for s in structs)
            if m.isinf(num):
                # Unfoldable window: skip rather than poisoning the average.
                pass
            else:
                add += num
                count = count + 1
        if count != 0:
            avg = format(float(add / count), '.2f')
            print(avg)
            add = count = 0
            with open("deltaG.txt", "a") as file2:  # output filename
                file2.write(header + ":" + avg + "\n")  # output style
        else:
            pass
from django.contrib import admin

from .models import BillingProfil

# Expose BillingProfil in the Django admin with the default ModelAdmin options.
admin.site.register(BillingProfil)
from django.contrib.auth.models import AnonymousUser, User
from django.test import TestCase, RequestFactory
from.views import user_login, auth_view
class SimpleTest(TestCase):
    """Smoke tests for the login views."""

    def setUp(self):
        # RequestFactory builds bare WSGI requests without running middleware.
        self.factory = RequestFactory()
        self.user = User.objects.create_user(username='sc13dad', email='ddal10@hotmail.co.uk', password='password')

    def test_details(self):
        request = self.factory.get('/accounts/login')
        request.user = self.user
        # NOTE(review): the assignment above is immediately overwritten, so only
        # the anonymous-user path is exercised — confirm that is intended.
        request.user = AnonymousUser()
        response = auth_view(request)
        # NOTE(review): `.as_view(request)` is the class-based-view API; if
        # user_login is a plain function view this call fails. It also discards
        # the auth_view response computed above — confirm which view is meant.
        response = user_login.as_view(request)
        self.assertEqual(response.status_code, 200)
|
import pandas as pd
import numpy as np
from keras.layers import Input, Conv2D
from keras.models import Model
from PIL import Image
import matplotlib.pyplot as plt
# One SMAP soil-moisture extract; rows carrying the -9999 fill value are
# dropped before plotting.
input_file = ("/home/flipper/ananthsmap/req/27220156.csv")
SMAP_LABLE = 'SoilMoisture'
latD = 6
lonD = 6
varD = 1
inputs = Input((latD, lonD, varD))

frame = pd.read_csv(input_file)
# Keep only physically plausible readings (filters the -9999 sentinel).
frame = frame[frame[SMAP_LABLE] > -2000]
print(frame)
# Pull the raw 1-D soil-moisture vector.
orig_x = frame[SMAP_LABLE].values
print('1d shape')
print(orig_x.shape)
# Reshape into rows of 6 samples.
soilm = orig_x.reshape(-1, 6)
print('2d shape')
print(soilm.shape)
print(soilm)
# Invert and scale to 8-bit grayscale (dry = bright).
soilm = 255 * (1.0 - soilm)
# In-place resize to a fixed 20x20 canvas (pads with zeros / truncates).
soilm.resize((20,20))
im = Image.fromarray(soilm.astype(np.uint8), mode='L')
plt.imshow(im)
|
#!/usr/bin/env python
# Compile-and-run shim: wraps a Pike source fragment in a Scala object
# (see main() below) and executes it with the scala interpreter.
import argparse
import subprocess

# Scala prologue written before the user's Pike code.
header = """
import Pike.Pike
object prog extends Pike {
def main(args: Array[String]): Unit = {
"""
# Scala epilogue: invokes `run` and closes the main method and object braces.
closer = """
run
}
}
"""
def main():
    """Wrap the given Pike file in Scala boilerplate and run it with scala."""
    parser = argparse.ArgumentParser()
    parser.add_argument("file")
    parser.add_argument("-j", "--jar", default="pike_2.9.1-1.0.jar")
    args = parser.parse_args()

    # Read the user's Pike program.
    with open(args.file, 'r') as src:
        body = src.read()

    # Emit <file>.scala = prologue + user code + epilogue, then execute it.
    generated = args.file + ".scala"
    with open(generated, 'w') as dst:
        dst.write(header)
        dst.write(body)
        dst.write(closer)
    subprocess.call(["scala", "-cp", args.jar, generated])
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
import argparse
import json
from collections import defaultdict
import serifxml3
def parse_args():
    """Parse the -i/-o command-line options for the serifxml→jsonlines converter."""
    argp = argparse.ArgumentParser(description="convert serifxml to jsonlines format")
    argp.add_argument("-i", "--input_serifxml_list", type=str, required=True, help="input serifxml filepaths list")
    argp.add_argument("-o", "--output_jsonlines", type=str, required=True, help="output jsonlines filepath")
    return argp.parse_args()
# def sent_level_token_offsets_to_doc_level_token_offsets(serif_doc):
# offset_map = dict()
# c = 0
# for i, s in enumerate(serif_doc.sentences):
# for j, t in enumerate(s.token_sequence):
# offset_map[(i,j)] = c
# c += 1
# return offset_map
def sent_level_token_offsets_to_corpus_level_token_offsets(serif_docs):
    """Map (docid, sentence index, token index) -> (corpus-wide token offset, token text).

    Tokens are numbered consecutively across all documents in the order given.
    """
    mapping = dict()
    running = 0
    for doc in serif_docs:
        for sent_no, sent in enumerate(doc.sentences):
            for tok_no, tok in enumerate(sent.token_sequence):
                mapping[(doc.docid, sent_no, tok_no)] = (running, tok.text)
                running += 1
    # Keys are unique by construction, so the map size must equal the count.
    assert len(mapping) == running
    return mapping
def main():
    """Convert a list of serifxml documents into one corpus-level jsonlines
    record with sentences and cross-document event-coreference clusters."""
    args = parse_args()
    with open(args.input_serifxml_list, "r") as f:
        serifxml_filepaths = [l.strip() for l in f.readlines()]
    serif_docs = sorted([serifxml3.Document(fp) for fp in serifxml_filepaths], key=lambda d: d.docid)  # make sure docs are sorted!
    # Docids must be unique, otherwise offsets below would collide.
    assert len(set([d.docid for d in serif_docs])) == len(serif_docs)
    offset_map = sent_level_token_offsets_to_corpus_level_token_offsets(serif_docs)
    cross_document_id_to_event_mentions = defaultdict(list)
    sentences = []
    seen_event_mentions = set()
    seen_event_mentions_full_refs = set()
    INTRA_DOC_EVENT_CORPUS_LEVEL_ID_COUNTER = 0
    # First pass: collect every event mention that carries (or is assigned)
    # a cross-document instance id, keyed by that id.
    for d in serif_docs:
        sentences.extend([[t.text for t in s.token_sequence] for s in d.sentences])
        for e in d.event_set:
            if e.cross_document_instance_id is None:  # intra-doc events in ECB+ serifxmls don't have cross_document_instance_id, so create one for them
                e.cross_document_instance_id = "INTRA_DOC_EVENT_CORPUS_LEVEL_ID_{}_{}".format(d.docid, INTRA_DOC_EVENT_CORPUS_LEVEL_ID_COUNTER)
                INTRA_DOC_EVENT_CORPUS_LEVEL_ID_COUNTER += 1
            assert e.cross_document_instance_id is not None
            for em in e.event_mentions:
                # Corpus-level start/end token offsets for this mention's span.
                start_token_index = em.owner_with_type("Sentence").token_sequence[int(em.semantic_phrase_start)].index()
                end_token_index = em.owner_with_type("Sentence").token_sequence[int(em.semantic_phrase_end)].index()
                start = offset_map[(d.docid, em.owner_with_type("Sentence").sent_no, start_token_index)][0]
                end = offset_map[(d.docid, em.owner_with_type("Sentence").sent_no, end_token_index)][0]
                # Each mention span must be seen at most once, both by full
                # (docid, sentence, token) reference and by corpus offsets.
                assert(((d.docid, em.owner_with_type("Sentence").sent_no, start_token_index),
                        (d.docid, em.owner_with_type("Sentence").sent_no, end_token_index))) not in seen_event_mentions_full_refs
                assert (start, end) not in seen_event_mentions
                seen_event_mentions_full_refs.add(((d.docid, em.owner_with_type("Sentence").sent_no, start_token_index),
                                                   (d.docid, em.owner_with_type("Sentence").sent_no, end_token_index)))
                seen_event_mentions.add((start, end))
                cross_document_id_to_event_mentions[e.cross_document_instance_id].append([start, end])
    # Second pass: create singleton clusters for each event mention that didn't participate in clustering.
    # Note that nlplingo eventcoref_cross_document/decoder.py only returns coreferent spanpair predictions, which means
    # that a number of event mentions in the original serifxmls might not have been present in the decoder's output;
    # since they weren't deemed coreferent with any other event mentions by the decoder, we create singleton clusters
    # for them.
    cross_document_id_for_singleton = 0
    for d in serif_docs:
        for s in d.sentences:
            for em in s.event_mention_set:
                start_token_index = em.owner_with_type("Sentence").token_sequence[int(em.semantic_phrase_start)].index()
                end_token_index = em.owner_with_type("Sentence").token_sequence[int(em.semantic_phrase_end)].index()
                start = offset_map[(d.docid, em.owner_with_type("Sentence").sent_no, start_token_index)][0]
                end = offset_map[(d.docid, em.owner_with_type("Sentence").sent_no, end_token_index)][0]
                if (start, end) not in seen_event_mentions:
                    seen_event_mentions.add((start, end))
                    seen_event_mentions_full_refs.add(((d.docid, em.owner_with_type("Sentence").sent_no, start_token_index),
                                                       (d.docid, em.owner_with_type("Sentence").sent_no, end_token_index)))
                    cross_document_id_to_event_mentions["SINGLETON_{}".format(cross_document_id_for_singleton)].append([start, end])
                    cross_document_id_for_singleton += 1
    print("# event mentions collected: {}".format(str(len(seen_event_mentions))))
    # print(sorted(list(seen_event_mentions)))
    # print(sorted(list(seen_event_mentions_full_refs)))
    # print(sorted(list(set(offset_map.values()))))
    clusters = [v for v in cross_document_id_to_event_mentions.values()]
    # Sanity check: clusters partition the full set of collected mentions.
    assert len([em for c in clusters for em in c]) == len(seen_event_mentions_full_refs) == len(seen_event_mentions)
    ret = {"doc_key": "CORPUS",
           "predicted_clusters": clusters,
           "sentences": sentences}
    ljsons = [json.dumps(ret)]
    with open(args.output_jsonlines, "w") as f:
        f.write("\n".join(ljsons))
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.signals import user_logged_out
from django.dispatch import receiver
from django.contrib import messages
# Create your models here.
def img_upload_directory(instance, filename):
    """Per-user upload path for profile images: profile/<username>/<filename>."""
    # Use the uploaded file's own name; previously the `filename` argument was
    # ignored and a fixed trailing segment was used, so every upload by the
    # same user resolved to the same path.
    return f"profile/{instance.username}/{filename}"
class User(AbstractUser):
    """Custom user model with a profile image and contact details."""

    # Falls back to the bundled default avatar when nothing is uploaded.
    image = models.ImageField(default='media/images/ninja.png', blank=True, upload_to=img_upload_directory)
    mobile_no = models.CharField(max_length=11, default='')
    # NOTE(review): '' is not a valid date and DateField will reject it during
    # validation/migrations — consider null=True/blank=True instead; confirm.
    dob = models.DateField(default='')
    location = models.CharField(max_length=100, default='')
@receiver(user_logged_out)
def on_user_logged_out(sender, request, **kwargs):
    """Flash a confirmation message whenever a user logs out."""
    # Fixed the "hvae" typo in the user-facing text. NOTE(review): ERROR level
    # is used for a success message — presumably for alert styling; confirm.
    messages.add_message(request, messages.ERROR, "You have successfully logged out")
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' ESP Websocket Connector '''
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class WebSocketPublisher(Connector):
    '''
    Publish websocket events

    Parameters
    ----------
    url : string
        Specifies the URL for the WebSocket connection.
    configUrl : string
        Specifies the URL for the connector configuration file.
        This configuration file contains information about the
        transformation steps required to publish events.
    contentType : string
        Specifies XML or JSON as the type of content received
        over the WebSocket connection.
    sslCertificate : string, optional
        Specifies the location of the SSL certificate to use
        when connecting to a secure server.
    sslPassphrase : string, optional
        Specifies the password for the SSL certificate.
    requestHeaders : string, optional
        Specifies a comma-separated list of request headers to
        send to the server. The list must consist of
        name-value pairs in name:value format.
    maxevents : int, optional
        Specifies the maximum number of events to publish.

    Returns
    -------
    :class:`WebSocketPublisher`

    '''
    connector_key = dict(cls='websocket', type='publish')
    property_defs = dict(
        url=prop('url', dtype='string', required=True),
        configUrl=prop('configUrl', dtype='string', required=True),
        contentType=prop('contentType', dtype='string', required=True),
        sslCertificate=prop('sslCertificate', dtype='string'),
        sslPassphrase=prop('sslPassphrase', dtype='string'),
        requestHeaders=prop('requestHeaders', dtype='string'),
        maxevents=prop('maxevents', dtype='int')
    )

    def __init__(self, url, configUrl, contentType, name=None, is_active=None,
                 sslCertificate=None, sslPassphrase=None, requestHeaders=None,
                 maxevents=None):
        # Snapshot all constructor arguments, then strip the ones that are not
        # connector properties (self, name, is_active); the rest become the
        # connector's property dict. Must run before any other locals exist.
        params = dict(**locals())
        params.pop('is_active')
        params.pop('self')
        name = params.pop('name')
        Connector.__init__(self, 'websocket', name=name, type='publish',
                           is_active=is_active, properties=params)

    @classmethod
    def from_parameters(cls, conncls, type=None, name=None, is_active=None,
                        properties=None):
        # `req` returns the required properties in declaration order:
        # url, configUrl, contentType — consumed positionally below.
        req, properties = map_properties(cls, properties,
                                         required=['url',
                                                   'configUrl',
                                                   'contentType'],
                                         delete='type')
        return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
|
import os
from flask import Flask
from flask import render_template
from flask_script import Manager
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
# NOTE: placeholder secret key and hard-coded DB credentials — replace both
# before deploying anywhere real.
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:123456@localhost/test'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
# Removed `mail = Mail(app)`: `Mail` is never imported in this module (no
# flask_mail import exists), so that line raised NameError at import time,
# and `mail` is not referenced anywhere else in this file.
class NameForm(FlaskForm):
    """Single-field form asking the visitor for their name."""

    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
class Role(db.Model):
    """User role; one role has many users (see `users` relationship)."""

    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Reverse accessor user.role; lazy='dynamic' makes role.users a query object.
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __repr__(self):
        return '<Role %s>' % self.name
class User(db.Model):
    """Application user; belongs to a single Role via role_id."""

    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __repr__(self):
        # Match Role.__repr__'s '<Role name>' format; was '<User> %s' with the
        # closing bracket misplaced.
        return '<User %s>' % self.username
@app.route('/test/')
def test():
    """Render the demo page with the current UTC timestamp."""
    now = datetime.utcnow()
    return render_template('test.html', current_time=now)
@app.route('/mysqlquery')
def mysqlquery():
    """List every user from the database."""
    user_all = User.query.all()
    # Was `print user_all` (Python 2 print statement), a SyntaxError under
    # Python 3; the function form behaves identically for a single argument.
    print(user_all)
    return render_template('mysqlquery.html', user_all=user_all)
@app.route('/bad/')
def badreq():
    """Render the demo 'bad request' page."""
    page = render_template('bad.html')
    return page
@app.route('/user/')
@app.route('/user/<name>')
def user(name=None):
    """Render the user page; `name` comes from the /user/<name> route."""
    # The '/user/<name>' rule passes `name` as a keyword argument; the old
    # zero-argument signature rejected it with a TypeError.
    return render_template('user.html')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Name form: record first-time visitors and greet returning ones.

    Follows POST/Redirect/GET: a valid submission updates the session and
    redirects back here.
    """
    form = NameForm()
    if form.validate_on_submit():
        submitted = form.name.data
        existing = User.query.filter_by(username=submitted).first()
        if existing is None:
            # First time we see this name: persist it.
            db.session.add(User(username=submitted))
            db.session.commit()
            session['known'] = False
        else:
            session['known'] = True
        session['name'] = submitted
        form.name.data = ''
        return redirect(url_for('index'))
    return render_template("index.html", form=form, name=session.get('name'), known=session.get('known', False))
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom 500 page."""
    body = render_template('500.html')
    return body, 500
def db_test():
    """Rebuild the schema and populate demo roles/users, printing role IDs
    before and after the commit (None until committed)."""
    db.drop_all()
    db.create_all()
    admin_role = Role(name='Admin')
    mod_role = Role(name='Moderator')
    user_role = Role(name='User')
    user_john = User(username='john', role=admin_role)
    user_susan = User(username='susan', role=user_role)
    user_david = User(username='david', role=user_role)
    # IDs are None here: nothing has been flushed to the database yet.
    print(admin_role.id)
    print(mod_role.id)
    print(user_role.id)
    db.session.add(admin_role)
    db.session.add(mod_role)
    db.session.add(user_role)
    db.session.add(user_john)
    db.session.add(user_susan)
    db.session.add(user_david)
    db.session.commit()
    print(admin_role.id)
    print(mod_role.id)
    print(user_role.id)
    role_all = Role.query.all()
    # Was `print("...%s") % role_all`, which applied % to print()'s None
    # return value and raised TypeError; format inside the call instead.
    print("all roles list: %s" % role_all)
    user_all = User.query.filter_by(role=user_role).all()
    print("all users:%s" % user_all)
# Run the Flask-Script manager when executed directly; uncomment db_test()
# to (re)populate the demo database first.
if __name__ == '__main__':
    #db_test()
    manager.run()
    #db_test()
|
from lxml import etree
from fontFeatures import FontFeatures, Routine, Substitution
from babelfont import Babelfont
from fontFeatures.feaLib import FeaUnparser
from fontTools.feaLib.builder import Builder
from fontTools.ttLib import TTFont
from fontFeatures.ttLib import unparse
from Flux.computedroutine import ComputedRoutine
from Flux.dividerroutine import DividerRoutine
from io import StringIO as UnicodeIO
from Flux.UI.GlyphActions import GlyphAction
from Flux.UI.glyphpredicateeditor import GlyphClassPredicateTester, GlyphClassPredicate
from babelfont.variablefont import VariableFont
import os
class FluxProject:
    """A Flux project: a font plus its FontFeatures rules, glyph classes and
    glyph actions, serialized to and from a Flux XML project file."""

    @classmethod
    def new(klass, fontfile, editor=None):
        """Create a fresh project around *fontfile*, importing its existing
        features, groups and anchors. Returns None if the font fails to open
        (the editor, if any, has already been notified)."""
        self = FluxProject()
        self.fontfeatures = FontFeatures()
        self.fontfile = fontfile
        self.editor = editor
        if not self._load_fontfile():
            return
        self.glyphclasses = {}
        self.glyphactions = {}
        self.debuggingText = ""
        self.filename = None
        if self.fontfile.endswith(".ttf") or self.fontfile.endswith(".otf"):
            self._load_features_binary()
        else:
            self._load_features_source()
        # Mirror the font's groups as manual glyph classes.
        for groupname, contents in self.font.groups.items():
            self.glyphclasses[groupname] = {
                "type": "manual",
                "contents": contents
            }
            self.fontfeatures.namedClasses.forceput(groupname, tuple(contents))
        # Load up the anchors too
        self._load_anchors()
        return self

    def __init__(self, file=None):
        """Load a project from a Flux XML *file*; with no file, build an empty
        shell (used by new())."""
        if not file:
            return
        self.filename = file
        self.xml = etree.parse(file).getroot()
        dirname = os.path.dirname(file)
        # The source font path is stored relative to the project file.
        self.fontfile = os.path.join(dirname, self.xml.find("source").get("file"))
        self.fontfeatures = FontFeatures()
        if not self._load_fontfile():
            return
        self.glyphactions = {}
        self.xmlToFontFeatures()
        text = self.xml.find("debuggingText")
        if text is not None:
            self.debuggingText = text.text
        else:
            self.debuggingText = ""
        self.glyphclasses = {}  # Will sync to fontFeatures when building
        # XXX will it?
        glyphclasses = self.xml.find("glyphclasses")
        if glyphclasses is not None:
            for c in glyphclasses:
                thisclass = self.glyphclasses[c.get("name")] = {}
                if c.get("automatic") == "true":
                    # Automatic classes are defined by predicates, evaluated
                    # against the font now to get their current membership.
                    thisclass["type"] = "automatic"
                    thisclass["predicates"] = [dict(p.items()) for p in c.findall("predicate")]
                    self.fontfeatures.namedClasses[c.get("name")] = tuple(GlyphClassPredicateTester(self).test_all([
                        GlyphClassPredicate(x) for x in thisclass["predicates"]
                    ]))
                else:
                    thisclass["type"] = "manual"
                    thisclass["contents"] = [g.text for g in c]
                    self.fontfeatures.namedClasses[c.get("name")] = tuple([g.text for g in c])
        # The font file is the authoritative source of the anchors, so load them
        # from the font file on load, in case they have changed.
        self._load_anchors()
        self._load_glyphactions()

    def _load_fontfile(self):
        """Open self.fontfile (UFO/TTF/OTF, or a variable/designspace source).

        Returns True on success. On failure, reports through the editor when
        one is attached and returns False; otherwise re-raises."""
        try:
            if self.fontfile.endswith(".ufo") or self.fontfile.endswith("tf"):
                # Single master workflow
                self.font = Babelfont.open(self.fontfile)
                self.variations = None
            else:
                self.variations = VariableFont(self.fontfile)
                # We need a "scratch copy" because we will be trashing the
                # glyph data with our interpolations
                if len(self.variations.masters.keys()) == 1:
                    self.font = list(self.variations.masters.values())[0]
                    self.variations = None
                else:
                    firstmaster = self.variations.designspace.sources[0].path
                    if firstmaster:
                        self.font = Babelfont.open(firstmaster)
                    else:  # Glyphs, fontlab?
                        self.font = Babelfont.open(self.fontfile)
        except Exception as e:
            if self.editor:
                self.editor.showError("Couldn't open %s: %s" % (self.fontfile, e))
            else:
                raise e
            return False
        return True

    def _load_anchors(self):
        """Pull anchor positions out of the font into fontfeatures.anchors."""
        for g in self.font:
            for a in g.anchors:
                if a.name not in self.fontfeatures.anchors:
                    self.fontfeatures.anchors[a.name] = {}
                self.fontfeatures.anchors[a.name][g.name] = (a.x, a.y)

    def _load_glyphactions(self):
        """Deserialize <glyphactions> from the project XML and apply each one
        to the font."""
        glyphactions = self.xml.find("glyphactions")
        # Compare to None explicitly: lxml deprecates truth-testing Elements,
        # and an existing-but-childless element is falsy (the net behavior is
        # the same either way, since the loop body would not execute).
        if glyphactions is None:
            return
        for xmlaction in glyphactions:
            g = GlyphAction.fromXML(xmlaction)
            self.glyphactions[g.glyph] = g
            g.perform(self.font)

    def _slotArray(self, el):
        """Turn a sequence of <slot><glyph>…</glyph></slot> elements into a
        list of glyph-name lists."""
        return [[g.text for g in slot.findall("glyph")] for slot in list(el)]

    def xmlToFontFeatures(self):
        """Rebuild self.fontfeatures (routines + features) from the project
        XML. Returns a list of warnings for dangling routine references."""
        routines = {}
        warnings = []
        for xmlroutine in self.xml.find("routines"):
            if "computed" in xmlroutine.attrib:
                r = ComputedRoutine.fromXML(xmlroutine)
                r.project = self
            elif "divider" in xmlroutine.attrib:
                r = DividerRoutine.fromXML(xmlroutine)
            else:
                r = Routine.fromXML(xmlroutine)
            routines[r.name] = r
            self.fontfeatures.routines.append(r)
        for xmlfeature in self.xml.find("features"):
            # Temporary until we refactor fontfeatures
            featurename = xmlfeature.get("name")
            self.fontfeatures.features[featurename] = []
            for r in xmlfeature:
                routinename = r.get("name")
                if routinename in routines:
                    self.fontfeatures.addFeature(featurename, [routines[routinename]])
                else:
                    warnings.append("Lost routine %s referenced in feature %s" % (routinename, featurename))
        return warnings  # We don't do anything with them yet

    def save(self, filename=None):
        """Serialize the project to XML (to *filename*, or the path it was
        loaded from)."""
        if not filename:
            filename = self.filename
        flux = etree.Element("flux")
        etree.SubElement(flux, "source").set("file", self.fontfile)
        etree.SubElement(flux, "debuggingText").text = self.debuggingText
        glyphclasses = etree.SubElement(flux, "glyphclasses")
        for k, v in self.glyphclasses.items():
            self.serializeGlyphClass(glyphclasses, k, v)
        # Plugins
        # Features
        features = etree.SubElement(flux, "features")
        for k, v in self.fontfeatures.features.items():
            f = etree.SubElement(features, "feature")
            f.set("name", k)
            for routine in v:
                etree.SubElement(f, "routine").set("name", routine.name)
        # Routines
        routines = etree.SubElement(flux, "routines")
        for r in self.fontfeatures.routines:
            routines.append(r.toXML())
        # Glyph actions
        if self.glyphactions:
            f = etree.SubElement(flux, "glyphactions")
            for ga in self.glyphactions.values():
                f.append(ga.toXML())
        et = etree.ElementTree(flux)
        with open(filename, "wb") as out:
            et.write(out, pretty_print=True)

    def serializeGlyphClass(self, element, name, value):
        """Append one <class> element (manual or automatic) to *element*."""
        c = etree.SubElement(element, "class")
        c.set("name", name)
        if value["type"] == "automatic":
            c.set("automatic", "true")
            for pred in value["predicates"]:
                pred_xml = etree.SubElement(c, "predicate")
                for k, v in pred.items():
                    pred_xml.set(k, v)
        else:
            c.set("automatic", "false")
            for glyph in value["contents"]:
                etree.SubElement(c, "glyph").text = glyph
        return c

    def saveFEA(self, filename):
        """Write the features as AFDKO .fea. Returns None on success or the
        error message on failure."""
        try:
            asfea = self.fontfeatures.asFea()
            with open(filename, "w") as out:
                out.write(asfea)
            return None
        except Exception as e:
            return str(e)

    def loadFEA(self, filename):
        """Replace the project's features with the contents of a .fea file."""
        # Read the file ourselves so the handle is closed promptly; the old
        # code passed an open file object to FeaUnparser and leaked it.
        # FeaUnparser accepts feature text directly (see _load_features_source).
        with open(filename, "r") as feafile:
            featext = feafile.read()
        unparsed = FeaUnparser(featext)
        self.fontfeatures = unparsed.ff

    def _load_features_binary(self):
        """Extract features from a compiled TTF/OTF's layout tables."""
        tt = TTFont(self.fontfile)
        self.fontfeatures = unparse(tt)
        print(self.fontfeatures.features)

    def _load_features_source(self):
        """Parse the source font's embedded feature text, if any."""
        if self.font.features and self.font.features.text:
            try:
                unparsed = FeaUnparser(self.font.features.text)
                self.fontfeatures = unparsed.ff
            except Exception as e:
                # Best effort: a broken feature file shouldn't block opening.
                print("Could not load feature file: %s" % e)

    def saveOTF(self, filename):
        """Compile the font plus the project's features to a binary font at
        *filename*. Returns the error message string on failure."""
        try:
            self.font.save(filename)
            ttfont = TTFont(filename)
            featurefile = UnicodeIO(self.fontfeatures.asFea())
            builder = Builder(ttfont, featurefile)
            # Babelfont glyph categories -> OpenType GDEF glyph classes.
            catmap = {"base": 1, "ligature": 2, "mark": 3, "component": 4}
            for g in self.font:
                if g.category in catmap:
                    builder.setGlyphClass_(None, g.name, catmap[g.category])
            builder.build()
            ttfont.save(filename)
        except Exception as e:
            print(e)
            return str(e)
|
import logging
import boto3
from ....utils.get_sns_subscriptions import get_sns_subscriptions
LOG = logging.getLogger(__name__)
def destroy_sns_event(app_name, env, region):
    """ Destroys all Lambda SNS subscription

    Args:
        app_name (str): name of the Lambda application whose subscriptions
            are looked up and removed
        env (str): AWS profile name used to create the boto3 session
        region (str): AWS region for the SNS client

    Returns:
        boolean: True if subscription destroyed successfully
    """
    session = boto3.Session(profile_name=env, region_name=region)
    sns_client = session.client('sns')
    lambda_subscriptions = get_sns_subscriptions(app_name=app_name, env=env, region=region)
    # Unsubscribe each ARN individually; SNS has no bulk-unsubscribe call.
    for subscription_arn in lambda_subscriptions:
        sns_client.unsubscribe(
            SubscriptionArn=subscription_arn
        )
    LOG.debug("Lambda SNS event deleted")
    return True
|
"""
Copyright ©2020. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation
for educational, research, and not-for-profit purposes, without fee and without a
signed licensing agreement, is hereby granted, provided that the above copyright
notice, this paragraph and the following two paragraphs appear in all copies,
modifications, and distributions.
Contact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue,
Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED
"AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
ENHANCEMENTS, OR MODIFICATIONS.
"""
import json
import sys
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job
from awsglue.utils import getResolvedOptions
import boto3
from pyspark.context import SparkContext
from pyspark.sql.types import StructType
# Pyspark Glue still uses python 2.7 on the AWS cluster while Nessie is running python on 3.6.
# Resolve the named job parameters supplied by the Glue job definition.
args = getResolvedOptions(
    sys.argv,
    [
        'JOB_NAME',
        'LRS_INCREMENTAL_TRANSIENT_BUCKET',
        'LRS_CANVAS_CALIPER_SCHEMA_PATH',
        'LRS_CANVAS_CALIPER_INPUT_DATA_PATH',
        'LRS_GLUE_TEMP_DIR',
        'LRS_CANVAS_CALIPER_EXPLODE_OUTPUT_PATH',
    ],
)
# Standard Glue/Spark bootstrap: one SparkContext wrapped by a GlueContext,
# plus a Job handle (job.init here, job.commit at the end of the script).
sc = SparkContext()
glue_context = GlueContext(sc)
spark = glue_context.spark_session
job = Job(glue_context)
job.init(args['JOB_NAME'], args)
# Convenience aliases for the resolved S3 bucket and key prefixes.
lrs_transient_bucket = args['LRS_INCREMENTAL_TRANSIENT_BUCKET']
lrs_glue_temp_dir = args['LRS_GLUE_TEMP_DIR']
lrs_caliper_schema_path = args['LRS_CANVAS_CALIPER_SCHEMA_PATH']
lrs_canvas_caliper_input_path = args['LRS_CANVAS_CALIPER_INPUT_DATA_PATH']
lrs_canvas_caliper_explode_path = args['LRS_CANVAS_CALIPER_EXPLODE_OUTPUT_PATH']
# Import prepared canvas caliper json schema and convert to struct type that can be applied to spark dataframe as template
def import_caliper_schema(bucket, key):
    """Fetch the prepared Caliper JSON schema from S3 as a Spark StructType.

    The struct is later applied as a template when reading the raw
    statements, instead of letting Spark infer the schema.
    """
    s3_client = boto3.client('s3', region_name='us-west-2')
    response = s3_client.get_object(Bucket=bucket, Key=key)
    schema_json = json.load(response['Body'])
    return StructType.fromJson(schema_json)
# Relationalizes spark dataframe and exports to s3
def relationalize_and_export(statements_df):
    """Flatten the statements dataframe with Glue relationalize and export it
    to S3 as gzip-compressed JSON files."""
    # Convert the Spark dataframe into a Glue dynamic frame.
    dyf = DynamicFrame.fromDF(statements_df, glue_context, 'statement_dynamic_frame')
    staging_dir = 's3://{}/{}'.format(lrs_transient_bucket, lrs_glue_temp_dir)
    # Relationalize splits nested structures into a collection of flat tables.
    exploded = dyf.relationalize('root', staging_dir)
    exploded.keys()
    destination = 's3://{}/{}'.format(lrs_transient_bucket, lrs_canvas_caliper_explode_path)
    # Write the flattened frame collection to its S3 destination.
    glue_context.write_dynamic_frame.from_options(
        frame=exploded,
        connection_type='s3',
        connection_options={'path': destination, 'compression': 'gzip'},
        format='json',
        transformation_ctx='datasink',
    )
    return
# Create a caliper schema struct that can be used as a template to create spark dataframes
caliper_schema_struct = import_caliper_schema(
    args['LRS_INCREMENTAL_TRANSIENT_BUCKET'],
    args['LRS_CANVAS_CALIPER_SCHEMA_PATH'],
)
# Apply prepared schema template on the incoming statements to create a spark dataframe
sys.stdout.write('Importing Caliper statements from S3 with the corrected schema')
lrs_caliper_input_data_path = 's3://{}/{}'.format(lrs_transient_bucket, lrs_canvas_caliper_input_path)
statements_df = spark.read.schema(caliper_schema_struct).json(lrs_caliper_input_data_path)
statements_df.printSchema()
# Verify inferred schema from spark process.
sys.stdout.write('Display inferred schema from the dataframe')
schema_json = statements_df.schema.json()
sys.stdout.write(schema_json)
# Convert the data to flat tables and export to S3 as compressed json gzip files.
sys.stdout.write('Exporting dynamic frame as json partitions in S3')
relationalize_and_export(statements_df)
# Commit so Glue job bookmarks advance only after a successful export.
job.commit()
|
#!/usr/bin/env python
import os
import sys
import re
if len(sys.argv)!= 3:
print("usage: filter.py intput output")
quit()
fin = open(sys.argv[1],'r')
fout = open(sys.argv[2],'w')
for line in fin:
if not re.match(r'^\s*$',line):
fout.write(line)
fin.close()
fout.close()
|
#!/usr/bin/env python3
# Renames all files given as arguments to "track#-title.ext".
# The track and title data is extracted from the files' id3 tags.
import sys, os
from mutagen.id3 import ID3
for file in sys.argv[1:]:
if not os.path.isfile(file):
print(f"Invalid path: {file}")
exit(1)
id3 = ID3(file)
title = str(id3.get("TIT2", "NO_TITLE"))
if "/" in title:
title = title.replace("/", "_")
try:
track = int(str(id3.get("TRCK", "NO_TRACK")))
except ValueError:
print(f"Track tag is not a number: {file}")
continue
dirname = os.path.dirname(file)
extension = os.path.splitext(file)[1]
os.rename(file, os.path.join(dirname, f"{track:02}-{title}{extension}"))
|
import pymysql as mysql
# NOTE(review): host, user and password are hard-coded below; move them to
# environment variables or a config file before sharing/deploying this script.
MYSQL_HOST = '117.16.123.11'
# Single module-level connection, opened once at import time.
MYSQL_CONN = mysql.connect(
    host=MYSQL_HOST,
    port=3306,
    user='root',
    passwd='dkimsmu',
    db='testDB',
    charset='utf8'
)
def conn_mysqldb():
    """Return the shared MySQL connection, reconnecting if it has dropped."""
    if not MYSQL_CONN.open:
        MYSQL_CONN.ping(reconnect=True)
    return MYSQL_CONN
# Fetch and print every row of predictTBL.
conn = conn_mysqldb()
cursor = conn.cursor()
cursor.execute("select * from predictTBL")
row = cursor.fetchall()
for i in row:
    print(i)
import talib
import numpy
from numpy import genfromtxt
# Load candle data exported as CSV; by position, column 4 holds the close
# price (assumes an OHLCV-style layout — confirm against the export format).
data = genfromtxt('15MinutesCandles.csv', delimiter=',')
close = data[:,4]
# Relative Strength Index over the close series (talib's default period is
# 14); the leading values are NaN until enough candles are available.
rsi = talib.RSI(close)
print(rsi)
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from .robots import robots_txt
# Require an authenticated session before rendering the admin login page.
# http://stackoverflow.com/a/13186337
admin.site.login = login_required(admin.site.login)
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10,
# so this module targets an older Django release — confirm before upgrading.
urlpatterns = patterns(
    '',
    # Examples:
    # url(r'^$', 'hourglass.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', TemplateView.as_view(template_name='index.html'), name='index'),
    url(r'^about/$', TemplateView.as_view(template_name='about.html'),
        name='about'),
    url(r'^api/', include('api.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^tests/$', TemplateView.as_view(template_name='tests.html')),
    # NOTE(review): the '.' is an unescaped regex wildcard here; r'^robots\.txt$'
    # is probably intended.
    url(r'^robots.txt$', robots_txt),
    url(r'^auth/', include('uaa_client.urls', namespace='uaa_client')),
)
# In development, also mount a fake UAA identity provider for local logins.
if settings.DEBUG:
    import fake_uaa_provider.urls
    urlpatterns += patterns('',
        url(r'^', include(fake_uaa_provider.urls,
                          namespace='fake_uaa_provider')),
    )
# Author:Cecilia
# Entry point: start the project's TCP server main loop.
import os
import sys
from tcp_server import server
if __name__ == '__main__':
    server.run()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 20:23:04 2019
@author: jasonmeverett
"""
from scipy.spatial.transform import Rotation as R
from numpy import *
from ananke.planets import *
def Rot_I_Perifocal(Om, i, om, degrees=True):
    """
    Build the rotation from the inertial frame to the perifocal frame.

    Parameters
    ----------
    Om : right ascension of the ascending node
    i : inclination
    om : argument of periapsis
    degrees : interpret the angles as degrees when True (default)

    Returns
    -------
    scipy Rotation composing the three classical axis rotations
    (3-1-3 sequence), each built from its DCM and inverted.
    """
    # SciPy renamed Rotation.from_dcm to from_matrix in 1.4 and removed the
    # old name in 1.6; resolve whichever spelling this SciPy release provides
    # so the function works on both old and new installations.
    from_matrix = getattr(R, 'from_matrix', None) or R.from_dcm
    if degrees:
        Om = Om * pi/180
        i = i * pi/180
        om = om * pi/180
    R3 = from_matrix([
        [cos(-om), sin(-om), 0],
        [-sin(-om), cos(-om), 0],
        [0, 0, 1]
    ]).inv()
    R2 = from_matrix([
        [1, 0, 0],
        [0, cos(-i), sin(-i)],
        [0, -sin(-i), cos(-i)]
    ]).inv()
    R1 = from_matrix([
        [cos(-Om), sin(-Om), 0],
        [-sin(-Om), cos(-Om), 0],
        [0, 0, 1]
    ]).inv()
    return R3 * R2 * R1
# Get the inertial position of a landing site.
def Pos_LS(lon,lat,alt,R_eq=1738e3,degrees=False):
    """
    Get the inertial position of a landing site based on planetary location.
    Does not yet incorporate planetary rotation.

    Parameters
    ----------
    lon, lat : site longitude/latitude (radians unless degrees=True)
    alt : altitude above the reference radius
    R_eq : equatorial radius; the default 1738e3 is the lunar radius in meters
    degrees : interpret lon/lat as degrees when True
    """
    if degrees == True:
        lon = lon*pi/180
        lat = lat*pi/180
    # Grab the planetary rotation.
    # NOTE(review): DCM_I_UEN is not defined in this module — presumably it is
    # provided by the `ananke.planets` star import (Rot_PF_UEN below looks like
    # a renamed sibling of it); confirm the name resolves at runtime.
    R_I_UEN = DCM_I_UEN(lon,lat)
    # Based on altitude
    X_UEN = array([R_eq + alt, 0, 0])
    # Convert to inertial
    X_I = R_I_UEN.inv().apply(X_UEN)
    return X_I
# Construct a DCM that represents the transformation from a planetary inertial
# frame to an Up-East-North frame. Expects lat and lon in radians
def Rot_PF_UEN(lon, lat, degrees=False):
    """
    Convert a latitude and a longitude to a UEN DCM.
    X - Up
    Y - East
    Z - North

    Parameters
    ----------
    lon, lat : longitude/latitude (radians unless degrees=True)
    degrees : interpret lon/lat as degrees when True
    """
    # SciPy renamed Rotation.from_dcm to from_matrix in 1.4 and removed the
    # old name in 1.6; support both spellings.
    from_matrix = getattr(R, 'from_matrix', None) or R.from_dcm
    if degrees:  # idiomatic truthiness (was `degrees == True`)
        lon = lon*pi/180
        lat = lat*pi/180
    # First rotation is longitude along the Z-axis.
    R1 = from_matrix([
        [cos(lon), sin(lon), 0],
        [-sin(lon), cos(lon), 0],
        [0, 0, 1]])
    # Second rotation is negative latitude along the new Y-axis.
    R2 = from_matrix([
        [cos(lat), 0, sin(lat)],
        [0, 1, 0],
        [-sin(lat), 0, cos(lat)]])
    # Combine rotations
    return R2*R1
# Construct a DCM that represents the transformation from a planetary inertial
# frame to a planetary-fixed frame. Rotation around the Z-axis.
def Rot_I_PF(Om, ep, t, degrees=False):
    """
    Rotation from a planetary inertial frame to the planet-fixed frame:
    a single Z-axis rotation by the angle the planet has spun since epoch.
    (The previous docstring was copy-pasted from the UEN helper.)
    X - Meridian
    Z - North Pole
    Y - Z x X

    Parameters
    ----------
    Om : planetary rotation rate (per unit of t; degrees when degrees=True)
    ep : epoch time
    t : current time
    """
    # SciPy renamed Rotation.from_dcm to from_matrix in 1.4 and removed the
    # old name in 1.6; support both spellings.
    from_matrix = getattr(R, 'from_matrix', None) or R.from_dcm
    if degrees:  # idiomatic truthiness (was `degrees == True`)
        Om = Om*pi/180
    # Calculate total rotation angle.
    th = Om*(t-ep)
    # Single rotation about the Z-axis by the accumulated spin angle.
    R1 = from_matrix([
        [cos(th), sin(th), 0],
        [-sin(th), cos(th), 0],
        [0, 0, 1]])
    return R1
|
import shelve
import requests
class Exchange:
    """Currency conversion helper backed by exchangerate-api.com.

    Each member's preferred currency list is persisted in a `shelve`
    database keyed by the member id (as a string).

    Fixes relative to the original: the HTTP response is parsed with the
    JSON parser instead of eval() (evaluating network data is a code
    injection risk), and `exchange_run` now stringifies `mmbr_id` like
    `exchange_add`/`exchange_del` do (shelve keys must be strings).
    """

    # shelve database file holding each member's currency list
    file = 'exchange_db'
    # default currency list assigned to new members
    standart = ['EUR', 'USD', 'RUB', 'UAH']
    # every currency code accepted by the rate API
    currency = ["AED", "ARS", "AUD", "BGN", "BRL", "BSD", "CAD", "CHF", "CLP", "CNY", "COP", "CZK", "DKK", "DOP", "EGP", "EUR", "FJD", "GBP", "GTQ", "HKD", "HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JPY", "KRW", "KZT", "MXN", "MYR", "NOK", "NZD", "PAB", "PEN", "PHP", "PKR", "PLN", "PYG", "RON", "RUB", "SAR", "SEK", "SGD", "THB", "TRY", "TWD", "UAH", "USD", "UYU", "VND", "ZAR"]

    def exchange_run(self, base, dest, num, mmbr_id):
        """Convert `num` units of `base` to `dest`, or — when `dest` is not a
        known currency code — to every currency in the member's list."""
        r = requests.get('https://api.exchangerate-api.com/v4/latest/' + base.upper())
        # SECURITY: parse the body as JSON; never eval() network data.
        exch_dict = r.json()
        dest = dest.upper()
        num = num.replace(',', '.')
        if dest in self.currency:
            rate = exch_dict['rates'][dest]
            total = float(num) * float(rate)
            result = str(num) + ' ' + base.upper() + ' = ' + str(round(total)) + ' ' + dest
            return result
        else:
            # shelve keys must be strings — match exchange_add/exchange_del.
            mmbr_id = str(mmbr_id)
            rates_list = []
            with shelve.open(self.file) as db:
                if mmbr_id not in db:
                    db[mmbr_id] = self.standart
                db_id = db[mmbr_id]
                # NOTE: int() truncation rejects decimal amounts in this
                # branch (pre-existing behavior, kept as-is).
                num = int(num)
                for code in db_id:
                    rate = exch_dict['rates'][code]
                    total = num * float(rate)
                    rates_list.append(code + ' ' + str(round(total, 2)))
                res = '\n'.join(rates_list)
                db[mmbr_id] = db_id
            return res

    def exchange_add(self, base, mmbr_id):
        """Add currency `base` to the member's list; return a status message."""
        mmbr_id = str(mmbr_id)
        with shelve.open(self.file) as db:
            if mmbr_id not in db:
                db[mmbr_id] = self.standart
            db_id = db[mmbr_id]
            count = db_id.count(base.upper())
            if count == 0:
                db_id.append(base.upper())
                add1 = ', '.join(db_id)
                added = 'Валюты в вашем списке:\n'+add1
            else:
                added = 'Валюта уже есть в списке'
            db[mmbr_id] = db_id
        return added

    def exchange_del(self, base, mmbr_id):
        """Remove currency `base` from the member's list; return a status message."""
        mmbr_id = str(mmbr_id)
        with shelve.open(self.file) as db:
            if mmbr_id not in db:
                db[mmbr_id] = self.standart
            db_id = db[mmbr_id]
            try:
                db_id.remove(base.upper())
                remove = ', '.join(db_id)
                removed = 'Валюты в вашем списке:\n'+remove
            except ValueError:
                removed = ('Валюты нет в списке')
            db[mmbr_id] = db_id
        return removed
# -*- coding: utf-8 -*-
'''
One string is an anagram of another if the second is simply a rearrangement of the first. For example, 'heart' and 'earth' are anagrams.
The strings 'python' and 'typhon' are anagrams as well.
For the sake of simplicity, we will assume that the two strings in question are of equal length and that they are made up of symbols from the set of 26 lowercase alphabetic characters.
Our goal is to write a boolean function that will take two strings and return whether they are anagrams
'''
import time
def anagram(s1, s2):
    '''
    Quadratic check: strike each matched character off a working copy of s1.
    TC=n*(n+1)/2
    O(n2)
    '''
    if len(s1) != len(s2):
        return False
    remaining = list(s1)
    for ch in s2:
        try:
            # Blank out the first unmatched occurrence of ch in s1's copy.
            remaining[remaining.index(ch)] = None
        except ValueError:
            return False
    return True
def anagram_sort(s1, s2):
    '''
    Sort both strings and compare the results.
    Time Complexity depends on sort function.
    Typically sort function's TC is O(n2) or O(nlogn)
    '''
    if len(s1) != len(s2):
        return False
    return sorted(s1) == sorted(s2)
def anagram_strike_off(s1, s2):
    '''
    O(n) counting check for lowercase a-z strings.

    Bugs fixed relative to the original: the second pass iterated s1
    instead of s2 (so s2 was never inspected), the tally used assignment
    (c[pos]=1) instead of counting so repeated letters were conflated, and
    the final loop indexed the counter array with its own values
    (c[i] for i in c) instead of iterating positions.
    '''
    if len(s1) != len(s2):
        return False
    counts = [0] * 26
    for ch in s1:
        counts[ord(ch) - ord('a')] += 1
    for ch in s2:
        pos = ord(ch) - ord('a')
        counts[pos] -= 1
        # More of this letter in s2 than in s1 -> not an anagram.
        if counts[pos] < 0:
            return False
    # Lengths are equal, so no counter can remain positive here.
    return True
# Exercise all three implementations on a non-anagram pair ("heatt" has a
# doubled 't'), timing the quadratic one.
s1 = "earth"
s2 = "heatt"
start_time = time.time()
op = anagram(s1, s2)
end_time = time.time()
print("anagram_output:", op)
op = anagram_sort(s1, s2)
print("anagram_sort_output:", op)
op = anagram_strike_off(s1, s2)
# Label fixed: it was a copy-paste of the sort label.
print("anagram_strike_off_output:", op)
"""
SOLR module
Generates the required Solr cores
"""
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, StringType
import sys
from pyspark.sql.functions import (
when,
col,
lit,
first,
concat_ws,
collect_set,
flatten,
collect_list,
udf,
)
# Maps each output Solr core field name to the path of its source column in
# the pipeline parquet schema (dots descend into struct fields).
COLUMN_MAPPER = {
    "schedule_key": "schedule.scheduleId",
    "stage": "schedule.stage",
    "stage_label": "schedule.timeLabel",
    "pipeline_id": "pipelineId",
    "pipeline_stable_key": "pipelineId",
    "pipeline_stable_id": "pipelineKey",
    "pipeline_name": "name",
    "procedure_id": "procedure.procedureId",
    "procedure_stable_key": "procedure.procedureId",
    "procedure_stable_id": "procedure.procedureKey",
    "procedure_name": "procedure.name",
    "experiment_level": "procedure.level",
    "parameter_id": "parameter.parameterId",
    "parameter_stable_key": "parameter.parameterId",
    "parameter_stable_id": "parameter.parameterKey",
    "parameter_name": "parameter.name",
    "description": "parameter.description",
    "data_type": "parameter.valueType",
    "required": "parameter.isRequired",
    "annotate": "parameter.isAnnotation",
    "media": "parameter.isMedia",
    "has_options": "parameter.isOption",
    "increment": "parameter.isIncrement",
    "comparable_parameter_group": "parameter.comparableParameterGroup",
}
# NOTE(review): appears unused in this module ("COLUNMS" typo kept because the
# name may be imported elsewhere).
COMPUTED_COLUNMS = [""]
def main(argv):
    """
    Pipeline Solr Core loader.

    Joins the IMPReSS pipeline parquet with observation types, MP/EMAPA/MA
    ontology metadata and category labels, then writes the enriched frame
    to parquet for Solr core generation.

    The only code change vs. the original is the `rlike` pattern, now a raw
    string (same characters at runtime, but non-raw "\\d" is a SyntaxWarning
    on Python 3.12+).

    :param list argv: the list elements should be:
                    [1]: Pipeline parquet path
                    [2]: Observations parquet
                    [3]: Ontology parquet
                    [4]: EMAP-EMAPA tsv
                    [5]: EMAPA metadata csv
                    [6]: MA metadata csv
                    [7]: Output Path
    """
    pipeline_parquet_path = argv[1]
    observations_parquet_path = argv[2]
    ontology_parquet_path = argv[3]
    emap_emapa_tsv_path = argv[4]
    emapa_metadata_csv_path = argv[5]
    ma_metadata_csv_path = argv[6]
    output_path = argv[7]
    spark = SparkSession.builder.getOrCreate()
    pipeline_df = spark.read.parquet(pipeline_parquet_path)
    observations_df = spark.read.parquet(observations_parquet_path)
    ontology_df = spark.read.parquet(ontology_parquet_path)
    emap_emapa_df = spark.read.csv(emap_emapa_tsv_path, header=True, sep="\t")
    # Normalize the EMAP-EMAPA mapping column names to snake_case.
    for col_name in emap_emapa_df.columns:
        emap_emapa_df = emap_emapa_df.withColumnRenamed(
            col_name, col_name.lower().replace(" ", "_")
        )
    emapa_metadata_df = spark.read.csv(emapa_metadata_csv_path, header=True)
    ma_metadata_df = spark.read.csv(ma_metadata_csv_path, header=True)
    # "increment" is both a source struct and a target field name; rename the
    # struct first so COLUMN_MAPPER can reuse the plain name.
    pipeline_df = pipeline_df.withColumnRenamed("increment", "incrementStruct")
    for column, source in COLUMN_MAPPER.items():
        pipeline_df = pipeline_df.withColumn(column, col(source))
    # Incremental parameters report their unit on the Y axis and the
    # increment unit on X; plain parameters only have an X unit.
    pipeline_df = pipeline_df.withColumn(
        "unit_y",
        when(col("incrementStruct").isNotNull(), col("unitName")).otherwise(lit(None)),
    )
    pipeline_df = pipeline_df.withColumn(
        "unit_x",
        when(
            col("incrementStruct").isNotNull(), col("incrementStruct.incrementUnit")
        ).otherwise(col("unitName")),
    )
    pipeline_df = pipeline_df.withColumn(
        "metadata", col("parameter.type") == "procedureMetadata"
    )
    # pipeline_procedure_parameter triple uniquely identifies a parameter.
    pipeline_df = pipeline_df.withColumn(
        "fully_qualified_name",
        concat_ws(
            "_", "pipeline_stable_id", "procedure_stable_id", "parameter_stable_id"
        ),
    )
    observations_df = observations_df.withColumn(
        "fully_qualified_name",
        concat_ws(
            "_", "pipeline_stable_id", "procedure_stable_id", "parameter_stable_id"
        ),
    )
    # One observation_type per parameter is enough for the core.
    observations_df = observations_df.groupBy("fully_qualified_name").agg(
        first(col("observation_type")).alias("observation_type")
    )
    pipeline_df = pipeline_df.join(
        observations_df, "fully_qualified_name", "left_outer"
    )
    # Categorical options: prefer the description when the name is purely
    # numeric (raw-string regex — value unchanged, silences SyntaxWarning).
    pipeline_categories_df = pipeline_df.select(
        "fully_qualified_name",
        when(
            col("option.name").rlike(r"^\d+$") & col("option.description").isNotNull(),
            col("option.description"),
        )
        .otherwise(col("option.name"))
        .alias("name"),
    )
    pipeline_categories_df = pipeline_categories_df.groupBy("fully_qualified_name").agg(
        collect_set("name").alias("categories")
    )
    pipeline_df = pipeline_df.join(
        pipeline_categories_df, "fully_qualified_name", "left_outer"
    )
    # Aggregate the MP ontology annotations (abnormal/increased/decreased
    # outcomes plus top-level and intermediate term rollups).
    pipeline_mp_terms_df = pipeline_df.select(
        "fully_qualified_name", "parammpterm.selectionOutcome", "termAcc"
    ).where(col("termAcc").startswith("MP"))
    pipeline_mp_terms_df = pipeline_mp_terms_df.join(
        ontology_df, col("id") == col("termAcc")
    )
    uniquify = udf(_uniquify, ArrayType(StringType()))
    pipeline_mp_terms_df = pipeline_mp_terms_df.groupBy("fully_qualified_name").agg(
        collect_set("id").alias("mp_id"),
        collect_set("term").alias("mp_term"),
        uniquify(flatten(collect_list("top_level_ids"))).alias("top_level_mp_id"),
        uniquify(flatten(collect_list("top_level_terms"))).alias("top_level_mp_term"),
        uniquify(flatten(collect_list("top_level_synonyms"))).alias(
            "top_level_mp_term_synonym"
        ),
        uniquify(flatten(collect_list("intermediate_ids"))).alias("intermediate_mp_id"),
        uniquify(flatten(collect_list("intermediate_terms"))).alias(
            "intermediate_mp_term"
        ),
        collect_set(
            when(col("selectionOutcome") == "ABNORMAL", col("termAcc")).otherwise(
                lit(None)
            )
        ).alias("abnormal_mp_id"),
        collect_set(
            when(col("selectionOutcome") == "ABNORMAL", col("term")).otherwise(
                lit(None)
            )
        ).alias("abnormal_mp_term"),
        collect_set(
            when(col("selectionOutcome") == "INCREASED", col("termAcc")).otherwise(
                lit(None)
            )
        ).alias("increased_mp_id"),
        collect_set(
            when(col("selectionOutcome") == "INCREASED", col("term")).otherwise(
                lit(None)
            )
        ).alias("increased_mp_term"),
        collect_set(
            when(col("selectionOutcome") == "DECREASED", col("termAcc")).otherwise(
                lit(None)
            )
        ).alias("decreased_mp_id"),
        collect_set(
            when(col("selectionOutcome") == "DECREASED", col("term")).otherwise(
                lit(None)
            )
        ).alias("decreased_mp_term"),
    )
    pipeline_df = pipeline_df.join(
        pipeline_mp_terms_df, "fully_qualified_name", "left_outer"
    )
    # Embryo anatomy: map EMAP accessions to EMAPA ids, names, and top levels.
    pipeline_df = pipeline_df.join(
        emap_emapa_df.alias("emap_emapa"),
        col("emap_id") == col("termAcc"),
        "left_outer",
    )
    pipeline_df = pipeline_df.withColumn("embryo_anatomy_id", col("emapa_id"))
    pipeline_df = pipeline_df.drop(*emap_emapa_df.columns)
    emapa_metadata_df = emapa_metadata_df.select("acc", col("name").alias("emapaName"))
    pipeline_df = pipeline_df.join(
        emapa_metadata_df, col("embryo_anatomy_id") == col("acc"), "left_outer"
    )
    pipeline_df = pipeline_df.withColumn("embryo_anatomy_term", col("emapaName"))
    pipeline_df = pipeline_df.drop(*emapa_metadata_df.columns)
    pipeline_df = pipeline_df.join(
        ontology_df, col("embryo_anatomy_id") == col("id"), "left_outer"
    )
    pipeline_df = pipeline_df.withColumn(
        "top_level_embryo_anatomy_id", col("top_level_ids")
    )
    pipeline_df = pipeline_df.withColumn(
        "top_level_embryo_anatomy_term", col("top_level_terms")
    )
    pipeline_df = pipeline_df.drop(*ontology_df.columns)
    # Adult mouse anatomy: MA accessions, names and top-level rollups.
    pipeline_df = pipeline_df.withColumn(
        "mouse_anatomy_id",
        when(col("termAcc").startswith("MA:"), col("termAcc")).otherwise(lit(None)),
    )
    ma_metadata_df = ma_metadata_df.withColumnRenamed("name", "maName")
    pipeline_df = pipeline_df.join(
        ma_metadata_df, col("mouse_anatomy_id") == col("curie"), "left_outer"
    )
    pipeline_df = pipeline_df.withColumn("mouse_anatomy_term", col("maName"))
    pipeline_df = pipeline_df.drop(*ma_metadata_df.columns)
    pipeline_df = pipeline_df.join(
        ontology_df, col("mouse_anatomy_id") == col("id"), "left_outer"
    )
    pipeline_df = pipeline_df.withColumn(
        "top_level_mouse_anatomy_id", col("top_level_ids")
    )
    pipeline_df = pipeline_df.withColumn(
        "top_level_mouse_anatomy_term", col("top_level_terms")
    )
    # Report rows missing a parameter_stable_id, then drop them.
    missing_parameter_information_df = pipeline_df.where(
        col("parameter_stable_id").isNull()
    )
    missing_parameter_rows = missing_parameter_information_df.collect()
    if len(missing_parameter_rows) > 0:
        print("MISSING PARAMETERS")
        for missing in missing_parameter_rows:
            print(missing.asDict())
    pipeline_df = pipeline_df.where(col("parameter_stable_id").isNotNull())
    pipeline_df = pipeline_df.drop(*ontology_df.columns)
    pipeline_df.write.parquet(output_path)
def _uniquify(list_col):
return list(set(list_col))
if __name__ == "__main__":
    # Log the interpreter version (useful in Spark driver logs), then exit
    # with whatever main() returns.
    print(sys.version)
    sys.exit(main(sys.argv))
|
# Copyright (c) 2016 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from dateutil import tz
import functools
from oslo_utils import uuidutils
from tempest.lib.common.utils import test_utils
from watcher_tempest_plugin.tests.client_functional.v1 import base
class AuditTests(base.TestCase):
    """Functional tests for audit."""
    # Goal/strategy name used when creating the audit template.
    dummy_name = 'dummy'
    # Columns expected in `watcher audit list` output.
    list_fields = ['UUID', 'Name', 'Audit Type', 'State', 'Goal', 'Strategy']
    # Extra columns expected when `--detail` is requested.
    detailed_list_fields = list_fields + ['Created At', 'Updated At',
                                         'Deleted At', 'Parameters',
                                         'Interval', 'Audit Scope',
                                         'Next Run Time', 'Hostname']
    # Random template name; the 'a' prefix keeps it starting with a letter.
    audit_template_name = 'a' + uuidutils.generate_uuid()
    audit_uuid = None
    @classmethod
    def setUpClass(cls):
        # Create one audit template and one audit shared by every test in
        # this class, then wait (up to 10 minutes, polling every 2s) for the
        # audit to complete.
        raw_output = cls.watcher('audittemplate create %s dummy -s dummy'
                                 % cls.audit_template_name)
        template_output = cls.parse_show_as_object(raw_output)
        audit_raw_output = cls.watcher(
            'audit create -a %s' % template_output['Name'])
        audit_output = cls.parse_show_as_object(audit_raw_output)
        cls.audit_uuid = audit_output['UUID']
        audit_created = test_utils.call_until_true(
            func=functools.partial(cls.has_audit_created, cls.audit_uuid),
            duration=600,
            sleep_for=2)
        if not audit_created:
            raise Exception('Audit has not been succeeded')
    @classmethod
    def tearDownClass(cls):
        # Delete in dependency order: the generated action plan first, then
        # the audit, then its template.
        output = cls.parse_show(
            cls.watcher('actionplan list --audit %s' % cls.audit_uuid))
        action_plan_uuid = list(output[0])[0]
        cls.watcher('actionplan delete %s' % action_plan_uuid)
        cls.watcher('audit delete %s' % cls.audit_uuid)
        cls.watcher('audittemplate delete %s' % cls.audit_template_name)
    def test_audit_list(self):
        raw_output = self.watcher('audit list')
        self.assert_table_structure([raw_output], self.list_fields)
    def test_audit_detailed_list(self):
        raw_output = self.watcher('audit list --detail')
        self.assert_table_structure([raw_output], self.detailed_list_fields)
    def test_audit_show(self):
        audit = self.watcher('audit show ' + self.audit_uuid)
        self.assertIn(self.audit_uuid, audit)
        self.assert_table_structure([audit], self.detailed_list_fields)
    def test_audit_update(self):
        audit_raw_output = self.watcher('audit update %s add interval=2'
                                        % self.audit_uuid)
        audit_output = self.parse_show_as_object(audit_raw_output)
        assert int(audit_output['Interval']) == 2
class AuditTestsV11(AuditTests):
    """This class tests v1.1 of Watcher API"""
    api_version = 1.1
    # API 1.1 adds Start Time / End Time to the detailed columns.
    detailed_list_fields = AuditTests.list_fields + [
        'Created At', 'Updated At', 'Deleted At', 'Parameters', 'Interval',
        'Audit Scope', 'Next Run Time', 'Hostname', 'Start Time', 'End Time']
    def test_audit_detailed_list(self):
        raw_output = self.watcher('audit list --detail')
        self.assert_table_structure([raw_output], self.detailed_list_fields)
    def test_audit_show(self):
        audit = self.watcher('audit show ' + self.audit_uuid)
        self.assertIn(self.audit_uuid, audit)
        self.assert_table_structure([audit], self.detailed_list_fields)
    def test_audit_update(self):
        # The CLI accepts a local wall-clock timestamp; the API echoes it
        # back converted to UTC, so compare against the UTC rendering.
        local_time = datetime.now(tz.tzlocal())
        local_time_str = local_time.strftime("%Y-%m-%dT%H:%M:%S")
        utc_time = (local_time - local_time.utcoffset())
        utc_time_str = utc_time.strftime("%Y-%m-%dT%H:%M:%S")
        audit_raw_output = self.watcher(
            'audit update {0} replace end_time="{1}"'.format(self.audit_uuid,
                                                             local_time_str))
        audit_output = self.parse_show_as_object(audit_raw_output)
        assert audit_output['End Time'] == utc_time_str
class AuditTestsV12(AuditTestsV11):
    """This class tests v1.2 of Watcher API"""
    api_version = 1.2
    @classmethod
    def setUpClass(cls):
        # Same fixture as AuditTests.setUpClass, except the audit is created
        # with --force (supported from API 1.2 onwards).
        raw_output = cls.watcher('audittemplate create %s dummy -s dummy'
                                 % cls.audit_template_name)
        template_output = cls.parse_show_as_object(raw_output)
        audit_raw_output = cls.watcher(
            'audit create --force -a %s' % template_output['Name'])
        audit_output = cls.parse_show_as_object(audit_raw_output)
        cls.audit_uuid = audit_output['UUID']
        audit_created = test_utils.call_until_true(
            func=functools.partial(cls.has_audit_created, cls.audit_uuid),
            duration=600,
            sleep_for=2)
        if not audit_created:
            raise Exception('Audit has not been succeeded')
class AuditActiveTests(base.TestCase):
    """Functional tests that create and delete their own audits per test."""
    list_fields = ['UUID', 'Name', 'Audit Type', 'State', 'Goal', 'Strategy']
    detailed_list_fields = list_fields + ['Created At', 'Updated At',
                                         'Deleted At', 'Parameters',
                                         'Interval', 'Audit Scope']
    # Random template name; the 'a' prefix keeps it starting with a letter.
    audit_template_name = 'a' + uuidutils.generate_uuid()
    @classmethod
    def setUpClass(cls):
        # Only the template is shared; each test creates its own audit.
        cls.watcher('audittemplate create %s dummy -s dummy'
                    % cls.audit_template_name)
    @classmethod
    def tearDownClass(cls):
        cls.watcher('audittemplate delete %s' % cls.audit_template_name)
    def _create_audit(self):
        # Create an audit from the shared template and return its UUID.
        return self.parse_show_as_object(
            self.watcher('audit create -a %s'
                         % self.audit_template_name))['UUID']
    def _delete_audit(self, audit_uuid):
        # Wait for the audit to finish, then delete its action plan and
        # the audit itself (dependency order).
        self.assertTrue(test_utils.call_until_true(
            func=functools.partial(
                self.has_audit_created, audit_uuid),
            duration=600,
            sleep_for=2
        ))
        output = self.parse_show(
            self.watcher('actionplan list --audit %s' % audit_uuid))
        action_plan_uuid = list(output[0])[0]
        self.watcher('actionplan delete %s' % action_plan_uuid)
        self.watcher('audit delete %s' % audit_uuid)
    def test_create_oneshot_audit(self):
        audit = self.watcher('audit create -a %s' % self.audit_template_name)
        audit_uuid = self.parse_show_as_object(audit)['UUID']
        self.assert_table_structure([audit], self.detailed_list_fields)
        self._delete_audit(audit_uuid)
    def test_delete_oneshot_audit(self):
        audit_uuid = self._create_audit()
        self.assertTrue(test_utils.call_until_true(
            func=functools.partial(
                self.has_audit_created, audit_uuid),
            duration=600,
            sleep_for=2
        ))
        raw_output = self.watcher('audit delete %s' % audit_uuid)
        self.assertOutput('', raw_output)
        output = self.parse_show(
            self.watcher('actionplan list --audit %s' % audit_uuid))
        action_plan_uuid = list(output[0])[0]
        self.watcher('actionplan delete %s' % action_plan_uuid)
    def test_continuous_audit(self):
        # A CONTINUOUS audit keeps running on its interval; it must be
        # cancelled before it can be deleted, and it may have produced
        # several action plans.
        audit = self.watcher('audit create -a %s -t CONTINUOUS -i 600'
                             % self.audit_template_name)
        audit_uuid = self.parse_show_as_object(audit)['UUID']
        self.assert_table_structure([audit], self.detailed_list_fields)
        self.assertTrue(test_utils.call_until_true(
            func=functools.partial(
                self.has_audit_created, audit_uuid),
            duration=600,
            sleep_for=2
        ))
        audit_state = self.parse_show_as_object(
            self.watcher('audit show %s' % audit_uuid))['State']
        if audit_state == 'ONGOING':
            self.watcher('audit update %s replace state=CANCELLED'
                         % audit_uuid)
        raw_output = self.watcher('audit delete %s' % audit_uuid)
        self.assertOutput('', raw_output)
        outputs = self.parse_listing(
            self.watcher('actionplan list --audit %s' % audit_uuid))
        for actionplan in outputs:
            self.watcher('actionplan delete %s' % actionplan['UUID'])
|
import random
from enum import Enum
import string
import logging
logger = logging.getLogger('hearthstone')
class Card:
    """A single Hearthstone card: minion, spell, or hero power.

    Card attributes are looked up by name in CARD_DB; every instance also
    gets a random unique id (`cid`), so two copies of the same card compare
    unequal.

    Fix relative to the original: `init_card` used to mutate the shared
    CARD_DB entry in place (adding a 'name' key); it now copies the entry.
    """
    CARD_DB = {
        'The Coin':
            {'mana_cost': 0, 'spell_play_effect': 'this_turn_mana+1', 'is_spell': True, 'is_minion': False},
        'Mage_Hero_Fireblast':
            {'attack': 1, 'mana_cost': 2, 'heropower': True, 'is_minion': False},
        'Sheep':
            {'attack': 1, 'health': 1, 'collectible': False},
        'Mirror Image 0/2 Taunt':
            {'attack': 0, 'health': 2, 'taunt': True, 'collectible': False},
        'Mirror Image':
            {'mana_cost': 1, 'is_spell': True, 'is_minion': False,
             'spell_play_effect': 'summon two 0/2 taunt minions'},
        'Mana Wyrm':
            {'attack': 1, 'health': 3, 'mana_cost': 1, 'last_played_card_effect': 'cast_spell_attack+1',
             },  # effect is checked after every move
        'Bloodfen Raptor':
            {'attack': 3, 'health': 2, 'mana_cost': 2},
        'Bluegill Warriors':
            {'attack': 2, 'health': 1, 'mana_cost': 2, 'charge': True},
        'River Crocolisk':
            {'attack': 2, 'health': 3, 'mana_cost': 2},
        'Magma Rager':
            {'attack': 5, 'health': 1, 'mana_cost': 3},
        'Wolfrider':
            {'attack': 3, 'health': 1, 'mana_cost': 3, 'charge': True},
        'Chillwind Yeti':
            {'attack': 4, 'health': 5, 'mana_cost': 4},
        'Fireball':
            {'mana_cost': 4, 'is_spell': True, 'is_minion': False, 'spell_play_effect': 'damage_to_a_target_6',
             'spell_require_target': True, 'spell_target_can_be_hero': True},
        'Oasis Snapjaw':
            {'attack': 2, 'health': 7, 'mana_cost': 4},
        'Polymorph':
            {'mana_cost': 4, 'is_spell': True, 'is_minion': False, 'spell_play_effect': 'transform_to_a_1/1sheep',
             'spell_require_target': True, 'spell_target_can_be_hero': False},
        'Stormwind Knight':
            {'attack': 2, 'health': 5, 'mana_cost': 4, 'charge': True},
        'Silvermoon Guardian':
            {'attack': 3, 'health': 3, 'divine': True, 'mana_cost': 4}
    }
    # cidx is a string index for each kind of card
    # idx 0 is reserved for endturn action
    name2cidx_dict = {name: (idx + 1) for idx, name in enumerate(CARD_DB)}
    cidx2name_dict = {(idx + 1): name for idx, name in enumerate(CARD_DB)}
    # number of all different cards (including Heropower and EndTurn)
    # CARD_DB has no EndTurn card. That's why we need to plus 1 in the return.
    all_diff_cards_size = len(CARD_DB) + 1

    def __init__(self, name=None, attack=None, mana_cost=None, health=None, heropower=False, divine=False, taunt=False,
                 used_this_turn=True, deterministic=True, is_spell=False, is_minion=True, charge=False, summon=None,
                 zone='DECK', spell_play_effect=None, last_played_card_effect=None, spell_require_target=False,
                 spell_target_can_be_hero=False, collectible=True):
        # cid is a random string generated to be unique for each card instance
        self.cid = ''.join(random.sample(string.printable[:-6], k=30))
        self.cidx = self.name2cidx_dict[name]
        self.name = name
        self.mana_cost = mana_cost
        self.heropower = heropower
        self.used_this_turn = used_this_turn
        self.deterministic = deterministic  # whether the card effect is deterministic
        self.collectible = collectible      # whether the card can be constructed in the deck
        # or must be summoned by other cards
        # minion
        self.is_minion = is_minion
        self.attack = attack
        self.health = health
        self.charge = charge
        self.summon = summon
        self.divine = divine
        self.taunt = taunt
        # self.zone = zone
        # spell
        self.is_spell = is_spell
        self.spell_play_effect = spell_play_effect
        self.spell_require_target = spell_require_target
        self.spell_target_can_be_hero = spell_target_can_be_hero
        # miscellaneous effects
        self.last_played_card_effect = last_played_card_effect

    def __eq__(self, other):
        # Identity is by instance id, not by card name.
        if isinstance(other, Card):
            return self.cid == other.cid
        else:
            return False

    def __lt__(self, other):
        return self.name < other.name

    @staticmethod
    def init_card(name):
        """Instantiate a card by its CARD_DB name."""
        # Copy the DB entry before adding 'name' — the original mutated the
        # shared CARD_DB dict in place.
        card_args = dict(Card.CARD_DB[name])
        card_args['name'] = name
        return Card(**card_args)

    @staticmethod
    def find_card(card_list, card):
        """ return the card with the same cid in the card list (None if absent)"""
        for c in card_list:
            if card == c:
                return c

    @staticmethod
    def find_card_idx(card_list, card):
        """ return the index of the card with the same cid in the card list (None if absent)"""
        for i, c in enumerate(card_list):
            if card == c:
                return i

    def __repr__(self):
        if self.is_spell:
            return "SPELL:{0}({1})".format(self.name, self.mana_cost)
        elif self.is_minion:
            if self.divine and self.taunt:
                return "MINION:{0}({1}, {2}, {3}, divine/taunt)".format(self.name, self.mana_cost, self.attack,
                                                                       self.health)
            elif self.divine and not self.taunt:
                return "MINION:{0}({1}, {2}, {3}, divine)".format(self.name, self.mana_cost, self.attack, self.health)
            elif not self.divine and self.taunt:
                return "MINION:{0}({1}, {2}, {3}, taunt)".format(self.name, self.mana_cost, self.attack, self.health)
            else:
                return "MINION:{0}({1}, {2}, {3})".format(self.name, self.mana_cost, self.attack, self.health)
class HeroClass(Enum):
    """Hero classes supported by the simulator."""
    MAGE = 1
    WARRIOR = 2
class DeckInsufficientException(Exception):
    """ Throw when deck is insufficient to draw cards. """
    def __init__(self, k, deck_remain_size):
        # Build the message once, store it on the exception (the original
        # never called super().__init__, so str(exc) was empty) and log it
        # on the same 'hearthstone' logger this module configures.
        msg = "deck is insufficient to be drawn. k={0}, deck size={1}".format(k, deck_remain_size)
        super(DeckInsufficientException, self).__init__(msg)
        logging.getLogger('hearthstone').info(msg)
class Deck:
    """A draw pile of Card instances built from a fixed list of card names."""

    def __init__(self, fix_deck):
        self.indeck = []
        if fix_deck:
            for card_name in fix_deck:
                self.indeck.append(Card.init_card(card_name))
            logger.info("create fix deck (%d): %r" % (self.deck_remain_size, self.indeck))
        else:
            # random deck
            pass

    def draw(self, k=1):
        """ Draw a number of cards """
        # Not enough cards left: empty the deck and signal the caller.
        if k > self.deck_remain_size:
            self.indeck = []
            raise DeckInsufficientException(k, self.deck_remain_size)
        # sample: draw without replacement
        chosen = random.sample(range(self.deck_remain_size), k=k)
        drawn_cards = [card for pos, card in enumerate(self.indeck) if pos in chosen]
        self.indeck = [card for pos, card in enumerate(self.indeck) if pos not in chosen]
        return drawn_cards

    @property
    def deck_remain_size(self):
        return len(self.indeck)
|
from pan_dag import *
from scipy.special import comb
def corr_worker(X_dag, X_positive, corr_method, i, j):
    """
    Calculate the correlation between the i-th and j-th genes in
    X_dag using only the cells that have positive expression in
    both genes.

    Parameters
    ----------
    X_dag: `numpy.ndarray`
        Dense cells-by-genes expression matrix for one DAG node.
    X_positive: `numpy.ndarray` of `bool`
        Same shape as X_dag; True where expression is nonzero.
    corr_method: `str`
        Either 'pearson' or 'spearman'.
    i, j: `int`
        Column (gene) indices to correlate.

    Returns
    -------
    (i, j, corr, corr_score); degenerate results (non-finite values or
    |corr| == 1, which indicates too few / collinear points) are
    reported as no correlation (corr=0, score=1).
    """
    both_positive = np.logical_and(X_positive[:, i],
                                   X_positive[:, j])
    if corr_method == 'pearson':
        corr, corr_score = pearsonr(
            X_dag[both_positive, i], X_dag[both_positive, j]
        )
    elif corr_method == 'spearman':
        corr, corr_score = spearmanr(
            X_dag[both_positive, i], X_dag[both_positive, j],
            nan_policy='omit'
        )
    else:
        # Bug fix: this is a module-level function, so there is no
        # `self` here; referring to self.corr_method raised a
        # NameError instead of the intended ValueError.
        raise ValueError('Invalid correlation method {}'
                         .format(corr_method))
    if not np.isfinite(corr) or not np.isfinite(corr_score) or \
       abs(corr) == 1.:
        corr = 0.
        corr_score = 1.
    return i, j, corr, corr_score
class CorrelationDAG(PanDAG):
    """
    PanDAG subclass that annotates every node of the sample DAG with a
    feature-by-feature correlation matrix plus significance scores,
    with optional permutation-based FDR control.
    """
    def __init__(
        self,
        dag_method='agg_ward',
        corr_method='pearson',
        min_leaves=100,
        sketch_size='auto',
        sketch_method='auto',
        reduce_dim=None,
        permute=False,
        n_jobs=1,
        verbose=False,
    ):
        """
        Initializes correlation DAG object.

        Parameters
        ----------
        dag_method: `str`
            Clustering method forwarded to the parent PanDAG.
        corr_method: `str`
            'pearson' or 'spearman'; consumed by corr_worker().
        min_leaves: `int`
            Nodes with fewer leaves than this are skipped entirely.
        permute: `bool`
            If True, a permutation pass estimates a null score
            distribution used for the FDR cutoff search.
        n_jobs: `int`
            Number of joblib workers for per-pair correlation jobs.
        """
        super(CorrelationDAG, self).__init__(
            dag_method, sketch_size, sketch_method,
            reduce_dim, verbose
        )
        self.corr_method = corr_method
        self.permute = permute
        self.min_leaves = min_leaves
        self.n_jobs = n_jobs
        # Score distributions from the permuted (null) and real passes;
        # populated by fill_correlations() only when self.permute is True.
        self.null_scores = None
        self.real_scores = None
        self.verbose = verbose
        self.features = None
        # Items that need to be populated in self.fill_correlations().
        # NOTE(review): as written, fill_correlations() populates
        # node.correlations / node.corr_scores on each DAG node but never
        # these two attributes, yet stack_correlations() and
        # stack_corr_scores() guard on self.correlations being non-None
        # -- confirm whether that guard can ever pass.
        self.correlations = None
        self.corr_scores = None
    def fill_correlations(self, X, permute_step=False):
        """
        Compute and store a correlation matrix and significance scores
        at every sufficiently large node of the DAG.

        Parameters
        ----------
        X: `numpy.ndarray` or `scipy.sparse.csr_matrix`
            Matrix with rows corresponding to all of the samples that
            define the DAG and columns corresponding to features that
            define the correlation matrices.
        permute_step: `bool`
            When True, features are shuffled within each cell so the
            resulting scores form a null distribution; node matrices
            are not stored in that case.
        """
        n_samples, n_features = X.shape
        # Upper triangle (k=1) holds the unique feature pairs; the lower
        # triangle plus diagonal (k=0) is masked out below.
        triu_idx = np.triu_indices(X.shape[1], 1)
        tril_idx = np.tril_indices(X.shape[1], 0)
        if self.permute:
            dist_scores = []
            # Cap the stored score distribution at roughly 1e7 samples.
            dist_sample_size = comb(n_features, 2) * len(self.nodes)
            dist_sample_p = 1e7 / dist_sample_size
            if dist_sample_p < 1. and self.verbose:
                print('Downsampling to {}% for distribution estimate'
                      .format(dist_sample_p * 100.))
                sys.stdout.flush()
        for node_idx, node in enumerate(self.nodes):
            if self.verbose:
                print('Filling node {} out of {}'
                      .format(node_idx, len(self.nodes)))
                sys.stdout.flush()
            # Skip nodes that are too small to give stable correlations.
            if node.n_leaves < self.min_leaves:
                node.correlations = None
                node.corr_scores = None
                continue
            # assumes X is sparse here (toarray) -- TODO confirm callers
            # never pass a dense ndarray.
            X_dag = X[node.sample_idx].toarray()
            X_positive = X_dag != 0
            # NOTE(review): X_binary is never used below -- dead code?
            X_binary = X_dag * 1
            if permute_step:
                # Shuffle features independently within each cell to
                # destroy gene-gene correlation while keeping each
                # cell's expression values.
                for cell_idx in range(X_dag.shape[0]):
                    perm_idx = np.random.permutation(n_features)
                    X_dag[cell_idx] = X_dag[cell_idx][perm_idx]
            # Calculate correlation of positive cells.
            # First pass: cheap all-pairs Spearman used as a prefilter.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                corr, corr_scores = spearmanr(
                    X_dag, nan_policy='omit'
                )
            # Force "not significant" (score 1) for NaNs, the lower
            # triangle/diagonal, perfect correlations, and weak ones.
            corr_scores[np.isnan(corr)] = 1.
            corr_scores[np.isnan(corr_scores)] = 1.
            corr_scores[tril_idx] = 1.
            abs_corr = np.abs(corr)
            corr_scores[abs_corr == 1.] = 1.
            corr_scores[abs_corr < 0.25] = 1.
            del abs_corr
            # NOTE(review): tril_idx is a 2-tuple of index arrays, so
            # len(tril_idx) == 2; this looks like it was meant to be the
            # number of feature pairs (comb(n_features, 2)) as used in
            # significant() -- TODO confirm the intended Bonferroni
            # factor.
            cutoff = 0.01 / (len(tril_idx) * len(self.nodes))
            # Second pass: recompute surviving pairs with the configured
            # method restricted to cells positive in both features.
            results = Parallel(n_jobs=self.n_jobs,
                               backend='loky') (
                delayed(corr_worker)(
                    X_dag, X_positive, self.corr_method, i, j
                )
                for i in range(n_features)
                for j in range(n_features)
                if corr_scores[i, j] < cutoff
            )
            for i, j, corr_ij, score_ij in results:
                corr[i, j] = corr_ij
                corr_scores[i, j] = score_ij
            if self.permute:
                # Add significance scores to the distribution.
                dist_samples = corr_scores[triu_idx].flatten()
                if dist_sample_p < 1.:
                    dist_sample_idx = np.random.choice(
                        len(dist_samples),
                        size=int(dist_sample_p * len(dist_samples)),
                        replace=False,
                    )
                    dist_samples = dist_samples[dist_sample_idx]
                dist_scores.append(dist_samples)
            # Only the real (unpermuted) pass stores matrices on nodes.
            if not permute_step:
                node.correlations = corr
                node.corr_scores = corr_scores
        if self.permute:
            if permute_step:
                self.null_scores = np.concatenate(dist_scores)
            else:
                self.real_scores = np.concatenate(dist_scores)
    def sig_bsearch(self, n_features):
        """
        Binary-search a score cutoff whose empirical FDR -- the fraction
        of sub-cutoff scores that come from the permuted null -- is ~5%.

        NOTE(review): n_features is unused here -- confirm whether it
        was meant to factor into the search.
        """
        FLOAT_MIN = np.nextafter(0, 1)
        low_cutoff = FLOAT_MIN
        high_cutoff = 1
        # Start at the geometric midpoint of the representable range.
        cutoff = np.exp(np.log(FLOAT_MIN) / 2)
        n_iter = 0
        max_iter = 100000
        while True:
            n_real = float(np.sum(self.real_scores < cutoff))
            n_fake = float(np.sum(self.null_scores < cutoff))
            if n_real + n_fake == 0:
                pct_fake = 0
            else:
                pct_fake = n_fake / (n_real + n_fake)
            if pct_fake < 0.05:
                low_cutoff = cutoff
            elif pct_fake > 0.05:
                high_cutoff = cutoff
            else:
                # Hit 5% exactly.
                return cutoff
            cutoff = (high_cutoff + low_cutoff) / 2.
            if low_cutoff >= high_cutoff:
                # Interval collapsed; return the midpoint we have.
                return cutoff
            n_iter += 1
            if n_iter >= max_iter:
                break
        if n_iter >= max_iter:
            warnings.warn('Exceeded {} iterations in FDR binary search'
                          .format(max_iter),
                          RuntimeWarning)
        return cutoff
    def significant(self, n_features):
        """
        Choose a significance cutoff (permutation FDR search when
        self.permute, Bonferroni over all pairs and nodes otherwise)
        and blank out (set to 1) all node scores at or above it.
        """
        if self.permute:
            if self.null_scores is None:
                raise NotImplementedError(
                    'Need to perform permutation run before calling this'
                    ' method.'
                )
            cutoff = self.sig_bsearch(n_features)
        else:
            cutoff = 0.01 / (comb(n_features, 2) * len(self.nodes))
        if self.verbose:
            # NOTE(review): real_scores / null_scores are only populated
            # when self.permute is True; with permute=False and
            # verbose=True this block would fail on None -- verify.
            n_real = float(np.sum(self.real_scores < cutoff))
            n_fake = float(np.sum(self.null_scores < cutoff))
            if n_real + n_fake == 0:
                pct_fake = 0
            else:
                pct_fake = n_fake / (n_real + n_fake)
            print('Using {} as significance score cutoff, {}% FDR'
                  .format(cutoff, pct_fake * 100))
            sys.stdout.flush()
        for node_idx, node in enumerate(self.nodes):
            if node.corr_scores is not None:
                not_sig = node.corr_scores >= cutoff
                node.corr_scores[not_sig] = 1.
    def stack_correlations(self):
        """
        Stack the correlation matrices across all nodes in DAG.

        Returns
        -------
        stacked: numpy.ndarray
            A `(n_feature, n_feature, n_node)` array with the matrices
            stacked along the third dimension.
        """
        # NOTE(review): see __init__ -- self.correlations does not appear
        # to be populated anywhere in this class, so this guard may
        # always raise as written.
        if self.correlations is None:
            raise NotImplementedError('Need to call fit() before calling this'
                                      ' method.')
        corr_list = []
        for i, node in enumerate(self.nodes):
            if node.n_leaves < self.min_leaves:
                continue
            corr_list.append(node.correlations)
        return np.stack(corr_list, axis=2)
    def stack_corr_scores(self):
        """
        Stack the significance score matrices across all nodes in DAG.

        Returns
        -------
        stacked: numpy.ndarray
            A `(n_feature, n_feature, n_node)` array with the matrices
            stacked along the third dimension.
        """
        # NOTE(review): same self.correlations guard concern as in
        # stack_correlations().
        if self.correlations is None:
            raise NotImplementedError('Need to call fit() before calling this'
                                      ' method.')
        score_list = []
        for i, node in enumerate(self.nodes):
            if node.corr_scores is not None:
                score_list.append(node.corr_scores)
        return np.stack(score_list, axis=2)
    def collapse_correlations(self):
        """
        Take strongest correlations across all nodes in DAG.

        For each feature pair, keep the value with the largest absolute
        magnitude across nodes (the per-pair minimum replaces the
        maximum when the minimum's magnitude is larger, i.e. a strong
        negative correlation wins over a weaker positive one).

        Returns
        -------
        strongest: numpy.ndarray
            An upper triangular matrix with the stongest correlation values.
        """
        corr = self.stack_correlations()
        strongest = corr.max(axis=2)
        min_corr = corr.min(axis=2)
        abs_corr = np.abs(corr).max(axis=2)
        min_strongest = abs_corr == np.abs(min_corr)
        strongest[min_strongest] = min_corr[min_strongest]
        return strongest
    def collapse_corr_scores(self):
        """
        Take lowest significance score across all nodes in DAG.

        Returns
        -------
        most_sig: numpy.ndarray
            An upper triangular matrix with the log10 of the significance
            scores and zeros on the diagonal.
        """
        corr_scores = self.stack_corr_scores()
        most_sig = corr_scores.min(axis=2)
        # Clamp sub-denormal scores before taking log10 to avoid -inf.
        FLOAT_MIN = np.nextafter(0, 1)
        positive_idx = most_sig >= FLOAT_MIN
        most_sig[most_sig < FLOAT_MIN] = np.log10(FLOAT_MIN)
        most_sig[positive_idx] = np.log10(most_sig[positive_idx])
        np.fill_diagonal(most_sig, 0)
        return most_sig
    def fit(self, X, y=None, features=None):
        """
        Constructs DAG according to `self.dag_method` and populates
        the DAG with correlation matrices.

        Parameters
        ----------
        X: `numpy.ndarray` or `scipy.sparse.csr_matrix`
            Matrix with rows corresponding to all of the samples that
            define the DAG and columns corresponding to features that
            define the correlation matrices.
        y
            Ignored
        features: `numpy.ndarray` of `str`
            A list of strings with feature labels.

        Returns
        -------
        self
        """
        # NOTE(review): looks like leftover debug output -- consider
        # removing or gating on self.verbose.
        print(X.shape)
        super(CorrelationDAG, self).fit(X, y, features)
        if self.permute:
            if self.verbose:
                print('Calculating null distribution...')
                sys.stdout.flush()
            # Null pass first so significant() can compare against it.
            self.fill_correlations(X, permute_step=True)
        if self.verbose:
            print('Computing correlations and scoring significance...')
            sys.stdout.flush()
        self.fill_correlations(X)
        self.significant(X.shape[1])
        return self
|
from unittest.mock import MagicMock
import api.query_handler as query_handler
from classes import ThreadsManager
from constants import get_default_city_name
from database import CrawlerCacheModel
from tests import BaseTestCase
from tests.data import expected_udm_crawler_data, expected_uqam_crawler_data, expected_concordia_crawler_data
class QueryHandlerTest(BaseTestCase):
    """Integration tests for the /query endpoint (validation, city default, caching)."""
    def test_query_handler_api_with_empty_query(self):
        # A request without a ?query= parameter must be rejected with 422.
        res = self.get('/query')
        self.assertStatus(res, 422)
        self.assertEqual("Query is invalid", res.json['message'])
        self.assertTrue(res.json['has_error'])
    def test_query_handler_api_with_empty_city(self):
        # When no city is given, the handler must fall back to the
        # default city name.
        # NOTE(review): this rebinds a module attribute and never
        # restores it, so the mock leaks into later tests -- consider
        # unittest.mock.patch as a decorator/context manager.
        query_handler._process_query = MagicMock(return_value=[{"data": "test"}])
        res = self.get('/query?query=test2')
        self.assertStatus(res, 200)
        query_handler._process_query.assert_called_with('test2', get_default_city_name())
    def test_query_handler_api_caches_results(self):
        # NOTE(review): this patches a method on the ThreadsManager
        # class itself (not an instance) with no teardown -- same
        # cross-test leak concern as above.
        mocked_thread_manager = ThreadsManager
        mocked_thread_manager.init_thread_pool = MagicMock(return_value=[{"data": "test"}])
        # First request: crawlers (mocked) run and the result is cached.
        res = self.get('/query?query=test2&city=MTL')
        self.assertStatus(res, 200)
        mocked_thread_manager.init_thread_pool.assert_called()
        # check the database
        crawler_record = CrawlerCacheModel.get_cache_crawlers_record_for('test2', 'MTL')
        self.assertEqual(crawler_record.content, [{"data": "test"}])
        # do another request, but this time the cache will be used instead of thread_manager
        mocked_thread_manager.init_thread_pool.reset_mock()
        res = self.get('/query?query=test2&city=MTL')
        self.assertStatus(res, 200)
        self.assertEqual([{"data": "test"}], res.json['data'])
        mocked_thread_manager.init_thread_pool.assert_not_called()
def get_mocked_concordia_crawler():
    """Stand-in for the Concordia crawler; returns canned fixture data."""
    return expected_concordia_crawler_data
def get_mocked_udm_crawler():
    """Stand-in for the UdM crawler; returns canned fixture data."""
    return expected_udm_crawler_data
def get_mocked_uqam_crawler():
    """Stand-in for the UQAM crawler; returns canned fixture data."""
    return expected_uqam_crawler_data
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetParamGUID(param):
    """Return the parameter's GUID as a string, or None when it has no GuidValue attribute."""
    if not hasattr(param, "GuidValue"):
        return None
    return param.GuidValue.ToString()
# Dynamo Python-node body: IN and OUT are injected by Dynamo, and
# UnwrapElement converts Dynamo wrappers back to raw Revit API objects.
params = UnwrapElement(IN[0])
# Mirror the input shape: map over a list input, single value otherwise.
if isinstance(IN[0], list): OUT = [GetParamGUID(x) for x in params]
else: OUT = GetParamGUID(params)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.