index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,900 | d3d90b8ccd0ec449c84ac0316c429b33353f4518 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import unittest
from distutils.version import StrictVersion
import numpy as np
from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
if _HAS_SKLEARN:
import sklearn
from coremltools.converters import sklearn as converter
try:
# scikit-learn >= 0.21
from sklearn.impute import SimpleImputer as Imputer
sklearn_class = sklearn.impute.SimpleImputer
except ImportError:
# scikit-learn < 0.21
from sklearn.preprocessing import Imputer
sklearn_class = sklearn.preprocessing.Imputer
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class ImputerTestCase(unittest.TestCase):
    """
    Unit test class for the scikit-learn Imputer -> Core ML converter.
    """

    @classmethod
    def setUpClass(cls):
        """
        Set up the unit test by loading the dataset and training a model.
        """
        from sklearn.datasets import load_boston

        scikit_data = load_boston()
        # axis parameter deprecated in SimpleImputer >= 0.22, which now imputes
        # only along columns as desired here.
        if _SKLEARN_VERSION >= StrictVersion("0.22"):
            scikit_model = Imputer(strategy="most_frequent")
        else:
            scikit_model = Imputer(strategy="most_frequent", axis=0)
        # Inject a NaN so the imputer actually has something to impute.
        scikit_data["data"][1, 8] = np.NaN
        # Use a single feature column as the model input.
        input_data = scikit_data["data"][:, 8].reshape(-1, 1)
        scikit_model.fit(input_data, scikit_data["target"])
        # Save the data and the model on the class for the test methods.
        # (setUpClass receives the class object, conventionally named `cls`.)
        cls.scikit_data = scikit_data
        cls.scikit_model = scikit_model

    def test_conversion(self):
        """Convert a fitted imputer and sanity-check the resulting spec."""
        spec = converter.convert(self.scikit_model, "data", "out").get_spec()
        self.assertIsNotNone(spec)
        # Test the model class
        self.assertIsNotNone(spec.description)
        # Test the interface
        self.assertTrue(spec.pipeline.models[-1].HasField("imputer"))

    def test_conversion_bad_inputs(self):
        """Conversion must raise for untrained or wrong-type models."""
        # Error on converting an untrained model
        with self.assertRaises(Exception):
            model = Imputer()
            spec = converter.convert(model, "data", "out")
        # Check the expected class during conversion.
        with self.assertRaises(Exception):
            from sklearn.linear_model import LinearRegression

            model = LinearRegression()
            spec = converter.convert(model, "data", "out")
|
8,901 | 7be62ce45f815c4f4cf32df696cc444f92ac6d5c | #Bingo Game
#Anthony Swift
#06/05/2019
'''
A simple bingo game. Player is presented with a randomly generated grid of numbers.
Player is asked to enter the number called out by the caller, each time a number is called out.
A chip ('X') is placed on the grid when the number entered (that has been called) matches a number on the grid.
Player wins if they match 5 numbers in a row on the grid (diagonally, vertically or horizontally).
The bingo grid is generated in line with standard bingo rules:
In the first column (B) - Random numbers are generated between 1 and 15.
In the second column (I) - Random numbers are generated between 16 and 30.
In the third column (N) - Random numbers are generated between 31 and 45 (with a free chip in the middle).
In the fourth column (G) - Random numbers are generated between 46 and 60.
In the fifth column (O) - Random numbers are generated between 61 and 75.
'''
import random
#Welcome player to the game
def welcome():
    """Greet the player at the start of the game."""
    greeting = "\nWelcome to the Bingo Game."
    print(greeting)
#Initialise the bingo grid
def initialise_grid():
    """Create and return an empty 5x5 bingo grid of '' cells."""
    return [['' for _ in range(5)] for _ in range(5)]
#Randomly generates numbers in the first column (B) of the bingo grid
#Ensures the numbers are between 1 and 15
def generate_b_column(grid):
    """Fill column 0 (B) of the grid with 5 unique numbers in 1-15.

    Args:
        grid: 5x5 bingo grid (list of lists), modified in place.
    Returns:
        The same grid with column 0 populated.
    """
    # random.sample guarantees uniqueness without rejection-sampling loops.
    b_nums = random.sample(range(1, 16), 5)
    for row in range(5):
        grid[row][0] = b_nums[row]
    return grid
#Randomly generates numbers in the second column (I) of the bingo grid
#Ensures the numbers are between 16 and 30
def generate_i_column(grid):
    """Fill column 1 (I) of the grid with 5 unique numbers in 16-30.

    Args:
        grid: 5x5 bingo grid (list of lists), modified in place.
    Returns:
        The same grid with column 1 populated.
    """
    # random.sample guarantees uniqueness without rejection-sampling loops.
    i_nums = random.sample(range(16, 31), 5)
    for row in range(5):
        grid[row][1] = i_nums[row]
    return grid
#Randomly generates numbers in the third column (N) of the bingo grid
#Ensures the numbers are between 31 and 45
#Places a chip in the middle position of the grid as this is a free move.
def generate_n_column(grid):
    """Fill column 2 (N) with 4 unique numbers in 31-45 plus a free 'X' chip.

    The centre cell (row 2) is the traditional free square and gets the
    chip marker instead of a number.

    Args:
        grid: 5x5 bingo grid (list of lists), modified in place.
    Returns:
        The same grid with column 2 populated.
    """
    # Draw 5 unique numbers, then replace the middle one with the free chip.
    n_nums = random.sample(range(31, 46), 5)
    n_nums[2] = "X"
    for row in range(5):
        grid[row][2] = n_nums[row]
    return grid
#Randomly generates numbers in the fourth column (G) of the bingo grid
#Ensures the numbers are between 46 and 60
def generate_g_column(grid):
    """Fill column 3 (G) of the grid with 5 unique numbers in 46-60.

    Args:
        grid: 5x5 bingo grid (list of lists), modified in place.
    Returns:
        The same grid with column 3 populated.
    """
    # random.sample guarantees uniqueness without rejection-sampling loops.
    g_nums = random.sample(range(46, 61), 5)
    for row in range(5):
        grid[row][3] = g_nums[row]
    return grid
#Randomly generates numbers in the fifth column (O) of the bingo grid
#Ensures the numbers are between 61 and 75
def generate_o_column(grid):
    """Fill column 4 (O) of the grid with 5 unique numbers in 61-75.

    Args:
        grid: 5x5 bingo grid (list of lists), modified in place.
    Returns:
        The same grid with column 4 populated.
    """
    # random.sample guarantees uniqueness without rejection-sampling loops.
    o_nums = random.sample(range(61, 76), 5)
    for row in range(5):
        grid[row][4] = o_nums[row]
    return grid
#Asks player to enter number called by the caller
def enter_number_called():
    """Prompt the player for the called number and return it as an int.

    Re-prompts on non-numeric input instead of crashing with ValueError.
    """
    print("\n")
    while True:
        try:
            return int(input("Please enter the number called: "))
        except ValueError:
            print("Please enter a whole number.")
#If the number entered by player matches a number on the grid
#A chip (X) is placed on the grid where the number matches
def place_chips(num_called, grid):
    """Place an 'X' chip on every grid cell equal to the called number.

    Generalised from the fixed 5x5 loop: works for any rectangular grid.

    Args:
        num_called: number the caller announced.
        grid: bingo grid (list of lists), modified in place.
    Returns:
        The same grid with matching cells replaced by 'X'.
    """
    for row in grid:
        for col, value in enumerate(row):
            if value == num_called:
                row[col] = 'X'
    return grid
#Checks to see if the player has 5 chips (X's) in a row horizontally on the grid
#Lets the player know if they have won.
def check_horizontal_win(grid, win):
    """Return True (and announce) if any row holds five chips; else pass win through."""
    for row_idx in range(5):
        if all(grid[row_idx][col] == 'X' for col in range(5)):
            print("You have won! BINGO!! ")
            win = True
    return win
#Checks to see if the player has 5 chips (X's) in a row vertically on the grid
#Lets the player know if they have won.
def check_vertical_win(grid, win):
    """Return True (and announce) if any column holds five chips; else pass win through."""
    for col in range(5):
        if all(grid[row][col] == 'X' for row in range(5)):
            print("You have won! BINGO!! ")
            win = True
    return win
#Checks to see if the player has 5 chips (X's) in a row diagonally left on the grid
#Lets the player know if they have won.
def check_diagonal_left_win(grid, win):
    """Return True (and announce) on a full top-left -> bottom-right diagonal."""
    if all(grid[i][i] == 'X' for i in range(5)):
        print("You have won! BINGO!! ")
        win = True
    return win
#Checks to see if the player has 5 chips (X's) in a row diagonally right on the grid
#Lets the player know if they have won.
def check_diagonal_right_win(grid, win):
    """Return True (and announce) on a full top-right -> bottom-left diagonal."""
    if all(grid[i][4 - i] == 'X' for i in range(5)):
        print("You have won! BINGO!! ")
        win = True
    return win
#Prints the grid
def print_grid(grid):
    """Display the bingo board, one row per line."""
    print("\n")
    print("Bingo Board:")
    print("\n")
    for row in grid:
        print(row)
#The main function
def main():
    """Run one game of bingo: build the board, then loop until the player wins."""
    win = False
    welcome()
    # Build the board column by column, each with its standard number range
    # (B: 1-15, I: 16-30, N: 31-45 + free chip, G: 46-60, O: 61-75).
    grid = initialise_grid()
    grid = generate_b_column(grid)
    grid = generate_i_column(grid)
    grid = generate_n_column(grid)
    grid = generate_g_column(grid)
    grid = generate_o_column(grid)
    print_grid(grid)
    # Keep taking called numbers until any of the four win checks sets the flag.
    while win == False:
        num_called = enter_number_called()
        grid = place_chips(num_called,grid)
        win = check_horizontal_win(grid, win)
        win = check_vertical_win(grid, win)
        win = check_diagonal_left_win(grid, win)
        win = check_diagonal_right_win(grid, win)
        print_grid(grid)

# Start the game when the module is executed.
main()
|
8,902 | b0a354d82880c5169293d1229206470c1f69f24f | '''
ESTMD
Tool to isolate targets from video. You can generate appropriate videos
using module Target_animation.
__author__: Dragonfly Project 2016 - Imperial College London
({anc15, cps15, dk2015, gk513, lm1015,zl4215}@imperial.ac.uk)
CITE
'''
import os
from copy import deepcopy
import cv2
import numpy as np
from scipy import signal
from Helper.BrainModule import BrainModule
class Estmd(BrainModule):
    """
    With this class we set parameters and extract targets from movie.
    Main engine of the class is the process_frame method, to store a frame and
    then detect movement in the frame compared to stored frames in frame_history.
    This is called in its step method which then returns all nonzero values.
    """

    @staticmethod
    def rtc_exp(t_s, x):
        # Elementwise exp(-t_s / x) for the positive entries of x; other
        # entries pass through.  NOTE: x is modified in place and returned.
        x[x > 0] = 1 / x[x > 0]
        x[x > 0] = np.exp(-t_s * x[x > 0])
        return x

    def __init__(self,
                 run_id,
                 input_dimensions=(640, 480),
                 preprocess_resize=True,
                 resize_factor=0.1,
                 threshold=0.1,
                 time_step=0.001,
                 LMC_rec_depth=12,
                 H_filter=None,
                 b=None,
                 a=None,
                 CSKernel=None,
                 b1=None,
                 a1=None,
                 gain=50
                 ):
        """Configure the ESTMD filter chain.

        Args:
            run_id (): Identifier forwarded to BrainModule (used for outputs).
            input_dimensions (): (width, height) of incoming frames.
            preprocess_resize (): Resize before (True) or after (False) filtering.
            resize_factor (): Spatial scale factor applied to frames.
            threshold (): Final activations below this value are zeroed.
            time_step (): Simulation time step used by the RTC filter.
            LMC_rec_depth (): Number of recent frames kept for temporal filtering.
            H_filter, b, a, CSKernel, b1, a1 (): Optional filter coefficients;
                None selects the hard-coded defaults below.
            gain (): Multiplier applied before the final tanh squashing.
        """
        BrainModule.__init__(self, run_id)
        # Set H_filter (spatial kernel applied to both channels near the end).
        if H_filter is None:
            self.H_filter = np.array([[-1, -1, -1, -1, -1],
                                      [-1, 0, 0, 0, -1],
                                      [-1, 0, 2, 0, -1],
                                      [-1, 0, 0, 0, -1],
                                      [-1, -1, -1, -1, -1]])
        else:
            self.H_filter = H_filter
        # Set b (numerator of the temporal IIR filter over frame history).
        if b is None:
            self.b = [0.0, 0.00006, -0.00076, 0.0044,
                      -0.016, 0.043, -0.057, 0.1789, -0.1524]
        else:
            self.b = b
        # Set a (denominator of the temporal IIR filter).
        if a is None:
            self.a = [1.0, -4.333, 8.685, -10.71, 9.0, -5.306,
                      2.145, -0.5418, 0.0651]
        else:
            self.a = a
        # Set CSKernel (center-surround antagonism kernel).
        if CSKernel is None:
            self.CSKernel = np.array([[-1.0 / 9.0, -1.0 / 9.0, -1.0 / 9.0],
                                      [-1.0 / 9.0, 8.0 / 9.0, -1.0 / 9.0],
                                      [-1.0 / 9.0, -1.0 / 9.0, -1.0 / 9.0]])
        else:
            self.CSKernel = CSKernel
        # Set b1 (numerator of the OFF-channel delay filter).
        if b1 is None:
            self.b1 = [1.0, 1.0]
        else:
            self.b1 = b1
        # Set a1 (denominator of the OFF-channel delay filter).
        if a1 is None:
            self.a1 = [51.0, -49.0]
        else:
            self.a1 = a1
        self.pre_resize = preprocess_resize
        self.resize_factor = resize_factor
        self.input_dimensions = input_dimensions
        self.output_dimensions = (int(self.input_dimensions[0] * self.resize_factor),
                                  int(self.input_dimensions[1] * self.resize_factor))
        self.frame_history = []
        self.LMC_rec_depth = LMC_rec_depth
        # Simulation clock: t starts at T0 and advances by dt each frame.
        self.dt = self.t = self.T0 = time_step
        self.threshold = threshold
        self.gain = gain
        # One list of (y, x, value) triples per processed frame (see step()).
        self.result_values = []

    def get_video(self, fps, directory=None, name="estmd_output.avi", run_id_prefix=True, cod="MJPG"):
        """
        Returns a video of processed frames after processing through step
        Args:
            fps (): Frame rate
            cod (): Codec of output
            run_id_prefix (): Prefix with run_id?
            name (): Output file name
            directory (): Output file directory
        """
        path = self.get_full_output_name(name, directory, run_id_prefix)
        # OpenCV 2.x fourcc API (newer OpenCV uses cv2.VideoWriter_fourcc).
        codec = cv2.cv.CV_FOURCC(cod[0], cod[1], cod[2], cod[3])
        video = cv2.VideoWriter(path, codec, fps, self.output_dimensions, isColor=0)
        print "ESTMD outputting at: ", self.output_dimensions
        for values in self.result_values:
            # Rebuild each sparse frame from its stored (y, x, value) triples.
            frame = np.zeros(self.output_dimensions[::-1])
            for v in values:
                ycord, xcord, pixel = v
                frame[ycord, xcord] = pixel
            # Scale [0, 1] activations to 8-bit grayscale for the codec.
            frame = (frame * 255.0).astype('u1')
            video.write(frame)
        video.release()
        cv2.destroyAllWindows()
        print "Saved ESTMD output video to " + path
        return

    def process_frame(self, downsize):
        """
        The engine of the class.
        Applies concepts from paper:
        'Discrete Implementation of Biologically Inspired Image Processing for
        Target Detection' by K. H., S. W., B. C. and D. C. from
        The University of Adelaide, Australia.
        """
        # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):
        #     downsize = np.array(downsize)
        if type(downsize) != np.ndarray:
            raise TypeError
        if not downsize.any():
            raise ValueError
        if self.pre_resize:
            downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)
        self.frame_history.append(downsize)
        # Remove no longer needed frames from memory
        self.frame_history = self.frame_history[-(self.LMC_rec_depth):]
        # Temporal IIR filtering across the recent frame history (LMC stage);
        # keep only the filtered value for the newest frame.
        downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]
        # Center surround antagonism kernel applied.
        downsize = cv2.filter2D(downsize, -1, self.CSKernel)
        # RTC filter: split into half-wave rectified ON (pos) / OFF (neg) channels.
        u_pos = deepcopy(downsize)
        u_neg = deepcopy(downsize)
        u_pos[u_pos < 0] = 0
        u_neg[u_neg > 0] = 0
        u_neg = -u_neg
        # On first step, instead of computing just save the images.
        if self.t == self.T0:
            self.v_pos_prev = deepcopy(u_pos)
            self.v_neg_prev = deepcopy(u_neg)
            self.u_pos_prev = deepcopy(u_pos)
            self.u_neg_prev = deepcopy(u_neg)
        # Do everything for pos == ON.
        # Fast time constant (0.001) where the signal rises, slow (0.1) where it falls.
        tau_pos = u_pos - self.u_pos_prev
        tau_pos[tau_pos >= 0] = 0.001
        tau_pos[tau_pos < 0] = 0.1
        mult_pos = self.rtc_exp(self.dt, tau_pos)
        v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev
        self.v_pos_prev = deepcopy(v_pos)
        # Do everything for neg == OFF.
        tau_neg = u_neg - self.u_neg_prev
        tau_neg[tau_neg >= 0] = 0.001
        tau_neg[tau_neg < 0] = 0.1
        mult_neg = self.rtc_exp(self.dt, tau_neg)
        v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev
        self.v_neg_prev = deepcopy(v_neg)
        # keep track of previous u.
        self.u_pos_prev = deepcopy(u_pos)
        self.u_neg_prev = deepcopy(u_neg)
        # Subtract v from u to give the output of each channel.
        out_pos = u_pos - v_pos
        out_neg = u_neg - v_neg
        # Now apply yet another filter to both parts.
        out_pos = cv2.filter2D(out_pos, -1, self.H_filter)
        out_neg = cv2.filter2D(out_neg, -1, self.H_filter)
        out_pos[out_pos < 0] = 0
        out_neg[out_neg < 0] = 0
        if self.t == self.T0:
            self.out_neg_prev = deepcopy(out_neg)
        # Delay off channel.
        out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]
        self.out_neg_prev = out_neg
        # Multiply ON by the delayed OFF channel to correlate dark/bright edges.
        downsize = out_neg * out_pos
        # Show image.
        downsize *= self.gain
        downsize = np.tanh(downsize)
        # Threshold.
        downsize[downsize < self.threshold] = 0
        if not self.pre_resize:
            downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)
        self.t += self.dt
        return downsize

    def step(self, frame):
        """
        Process a given frame and return all nonzero values
        as (y, x, value) triples; the list is also appended to result_values.
        """
        result = []
        frame = self.process_frame(frame)
        ycords, xcords = frame.nonzero()
        for i in xrange(len(ycords)):
            result.append((ycords[i], xcords[i], frame[ycords[i], xcords[i]]))
        self.result_values.append(result)
        return result
|
8,903 | 08a0ab888886184f7447465508b6494b502821ea | #!/usr/bin/env python
# coding: utf-8
import os
# Pin TensorFlow to the first GPU only.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
# Print environment details for reproducibility of training runs.
print(tf.__version__)
print(tf.keras.__version__)
print(tf.__path__)
import numpy as np
from tqdm import tqdm, tqdm_notebook
from utils import emphasis
import tensorflow.keras.backend as K
from tensorflow.keras.utils import Sequence
import librosa
import librosa.display
# NOTE(review): tf.test.is_gpu_available is the TF1-era API name.
print(tf.test.is_gpu_available())
# ## SRCNN
class SubPixel1D(tf.keras.layers.Layer):
    """One-dimensional subpixel (pixel-shuffle) upsampling layer."""

    def __init__(self, r=2):
        # r: upsampling factor; the input's channel dimension must equal r
        # (see the (batch, width, r) assumption in call()).
        super(SubPixel1D, self).__init__()
        self.r = r

    def call(self, I):
        """One-dimensional subpixel upsampling layer
        Calls a tensorflow function that directly implements this functionality.
        We assume input has dim (batch, width, r)
        """
        # NOTE(review): tf.batch_to_space_nd is the TF1 name; TF2 renamed it
        # to tf.batch_to_space.
        X = tf.transpose(I, [2,1,0])  # (r, w, b)
        X = tf.batch_to_space_nd(X, [self.r], [[0,0]])  # (1, r*w, b)
        X = tf.transpose(X, [2,1,0])
        return X
# Build the U-Net-style 1D generator: strided-conv encoder, dropout
# bottleneck, subpixel-upsampling decoder with skip connections.
noisy = tf.keras.layers.Input(shape=(None, 1))
x_input = noisy
x = x_input
# Alternative depth/width configurations kept for reference:
# B = 8
# n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
# kernel_sizes = [65, 33, 17, 9, 9, 9, 9, 9]
B = 4
n_filters = [128, 256, 512, 512]
kernel_sizes = [65, 33, 17, 9]
# B = 3
# n_filters = [128, 256, 512]
# kernel_sizes = [65, 33, 17]
# B = 3
# n_filters = [64, 128, 256]
# kernel_sizes = [65, 33, 17]
# Downsampling Layers
encoder_features = []
for k, n_filter, kernel_size in zip(range(B), n_filters, kernel_sizes):
    x = tf.keras.layers.Conv1D(filters = n_filter,
                               kernel_size = kernel_size,
                               strides = 2,
                               padding = 'same',
                               kernel_initializer = 'Orthogonal')(x)
    # x = tf.keras.layers.PReLU()(x)
    x = tf.keras.layers.LeakyReLU(0.2)(x)
    # Saved for the decoder's skip connections.
    encoder_features.append(x)
# Bottleneck Layer
x = tf.keras.layers.Conv1D(filters = 512,
                           kernel_size = 9,
                           strides = 2,
                           padding = 'same',
                           kernel_initializer = 'Orthogonal')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x)
# x = tf.keras.layers.PReLU()(x)
x = tf.keras.layers.LeakyReLU(0.2)(x)
# Upsampling Layer
# Mirror the encoder in reverse, concatenating each matching encoder feature.
for k, n_filter, kernel_size, enc in reversed(list(zip(range(B),
                                                       n_filters,
                                                       kernel_sizes,
                                                       encoder_features))):
    x = tf.keras.layers.Conv1D(filters = 2 * n_filter,
                               kernel_size = kernel_size,
                               strides = 1,
                               padding = 'same',
                               kernel_initializer = 'Orthogonal')(x)
    x = tf.keras.layers.Dropout(rate=0.5)(x)
    # x = tf.keras.layers.PReLU()(x)
    x = tf.keras.layers.ReLU()(x)
    # Doubles the temporal resolution by folding channels into width.
    x = SubPixel1D()(x)
    x = tf.keras.layers.Concatenate(axis=2)([x, enc])
# Final Conv Layer
x = tf.keras.layers.Conv1D(filters = 2,
                           kernel_size = 9,
                           strides = 1,
                           padding = 'same')(x)
x = SubPixel1D()(x)
# Residual connection: the network predicts a correction to the input.
x_final = tf.keras.layers.Add()([x, x_input])
G = tf.keras.models.Model(inputs = [noisy], outputs = [x_final])
# Train Model
# Initialize Model
optim = tf.keras.optimizers.Adam(lr=1e-4)
def G_loss(true, fake):
    """RMS waveform loss: per-example root of mean squared error (with eps), batch-averaged."""
    squared_error = (fake - true) ** 2
    per_example_rms = K.sqrt(K.mean(squared_error + 1e-6, axis=[1, 2]))
    return K.mean(per_example_rms, axis=0)
def G_LSD_loss(y_clean, y_noisy):
    """Log-spectral distance between the clean and generated waveforms.

    Both inputs are squeezed to (batch, samples), transformed with a
    2048-point STFT (hop 1024), and compared in log-power space.
    """
    y_clean = tf.squeeze(y_clean)
    y_noisy = tf.squeeze(y_noisy)
    D_clean = tf.signal.stft(signals = y_clean,
                             frame_length = 2048,
                             frame_step = 1024)
    D_noisy = tf.signal.stft(signals = y_noisy,
                             frame_length = 2048,
                             frame_step = 1024)
    # Log power spectra; the 1e-6 floor avoids log(0).
    D_clean_log = K.log(K.abs(D_clean) ** 2 + 1e-6)
    D_noisy_log = K.log(K.abs(D_noisy) ** 2 + 1e-6)
    return K.mean(K.sqrt(K.mean((D_clean_log - D_noisy_log) ** 2, axis = [2])), axis = [0, 1])
# Compile the generator with the log-spectral-distance loss.
G.compile(loss = G_LSD_loss,
          optimizer = optim)
G.summary()
# tf.keras.utils.plot_model(G, to_file='./generator.png', show_shapes=True)
# Training
class data_sequence(Sequence):
    """Keras Sequence yielding (noisy, clean) batches from serialized .npy pairs.

    Each file in data_path holds a 2-row array: row 0 is the clean signal,
    row 1 the noisy signal.
    """

    def __init__(self, data_path, batch_size=64):
        self.filenames = [os.path.join(data_path, name) for name in os.listdir(data_path)]
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches, counting the final partial batch.
        return int(np.ceil(len(self.filenames) / float(self.batch_size)))

    def on_epoch_end(self):
        # Reshuffle so batches differ between epochs.
        np.random.shuffle(self.filenames)

    def __getitem__(self, idx):
        start = idx * self.batch_size
        stop = min(len(self.filenames), (idx + 1) * self.batch_size)
        noisy_batch = []
        clean_batch = []
        for path in self.filenames[start:stop]:
            pair = np.load(path)
            clean_batch.append(pair[0].reshape(-1, 1).astype('float32'))
            noisy_batch.append(pair[1].reshape(-1, 1).astype('float32'))
        return np.array(noisy_batch), np.array(clean_batch)
train_data_path = '../dataset/serialized_train_data'
val_data_path = '../dataset/serialized_val_data'
# Save only the best validation weights; log every batch to TensorBoard.
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath='./model/weights_LSD.hdf5',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=True),
    tf.keras.callbacks.TensorBoard(log_dir='./logs/LSD', update_freq='batch'),
    # tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-8),
]
# Train with multiprocessing workers feeding batches from the Sequence.
G.fit_generator(generator = data_sequence(train_data_path, 64),
                validation_data = data_sequence(val_data_path, 2),
                steps_per_epoch = 3325 // 64,
                verbose = 1,
                epochs = 400,
                callbacks = callbacks,
                max_queue_size = 10,
                use_multiprocessing = True,
                workers = 6,
                initial_epoch = 0)
|
8,904 | 63a7225abc511b239a69f625b12c1458c75b4090 | import threading
import serial
import time
# Serial link to the BNO compass board (adjust port/baud for your wiring).
bno = serial.Serial('/dev/ttyUSB0', 115200, timeout=.5)
# Last heading read from the sensor; -1.0 means "no reading yet".
compass_heading = -1.0
def readBNO():
    """Poll the BNO sensor once and update the global compass_heading.

    Sends the 'g' command, reads one line back and parses the heading.
    Any failure leaves the previous heading value in place.
    """
    global compass_heading
    try:
        bno.write(b'g')
        response = bno.readline().decode()
        if response != '':
            compass_heading = float(response.split('\r')[0])
    except (OSError, UnicodeDecodeError, ValueError):
        # Narrowed from a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt.  OSError covers serial I/O failures (pyserial's
        # SerialException subclasses it); ValueError covers unparseable floats.
        pass
def readContinuous():
    """Background loop: poll the sensor roughly 10 times per second, forever."""
    while True:
        readBNO()
        time.sleep(.1)
# Run the polling loop in the background so callers can just read the latest
# heading.  daemon=True lets the process exit cleanly even though the loop
# never returns (a non-daemon thread would keep the interpreter alive).
bno_thread = threading.Thread(target=readContinuous, daemon=True)
bno_thread.start()
def get_heading():
    """Return the most recent compass heading (degrees); -1.0 if none read yet."""
    return compass_heading
if __name__ == '__main__':
    # Simple smoke test: print the latest heading ten times a second.
    while True:
        print(get_heading())
        time.sleep(.1)
8,905 | 477d1629c14609db22ddd9fc57cb644508f4f490 | #!/usr/bin/env python
from django.contrib import admin
from models import UserProfile, AuditTrail
class UserProfileAdmin(admin.ModelAdmin):
    # Show every UserProfile field as a column in the admin change list.
    list_display = [i.name for i in UserProfile._meta.fields]

admin.site.register(UserProfile, UserProfileAdmin)
class AuditTrailUserAdmin(admin.ModelAdmin):
    # Columns, sidebar filters and free-text search for audit entries.
    list_display = ('id', 'date', 'user', 'level', 'message')
    list_filter = ('level', 'date', 'user__username')
    # Audit records are immutable from the admin: every field is read-only.
    readonly_fields = [i.name for i in AuditTrail._meta.fields]
    search_fields = (u'user__username', u'message',)

admin.site.register(AuditTrail, AuditTrailUserAdmin)
|
8,906 | fa5468741e9884f6c8bcacaf9d560b5c93ee781a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#from setup_env import *
#from mmlibrary import *
from astropy.coordinates import SkyCoord
import astropy.units as u
from mmlibrary import *
import numpy as np
import lal
from scipy.special import logsumexp
import cpnest, cpnest.model
# Test object: GW170817
#GW = SkyCoord('13h07m05.49s', '23d23m02.0s', unit=(u.hourangle, u.deg))
# Luminosity distance (Mpc) and its uncertainty for the event.
DL=33.4
dDL=3.34
# Sky position of the event.
GW = SkyCoord(ra = '13h07m05.49s', dec = '23d23m02.0s',
              unit=('hourangle','deg'))
def Mstar(omega):
    """Cut-off (characteristic) magnitude of the Schechter function."""
    fiducial_magnitude = -20.47
    return fiducial_magnitude + 5.0 * np.log10(omega.h)
def Schechter_unnormed(M, omega, alpha):
    """Unnormalised Schechter function at absolute magnitude M."""
    luminosity_ratio = 10 ** (-0.4 * (M - Mstar(omega)))
    return luminosity_ratio ** (alpha + 1.0) * np.exp(-luminosity_ratio)
def normalise(omega, alpha, Mmin = -30,Mmax = -10):
    """Riemann-sum normalisation constant for the Schechter function.

    (TODO from original author: this has an analytic closed form.)
    """
    M = np.linspace(Mmin, Mmax, 100)
    dM = np.diff(M)[0]
    # Schechter_unnormed is built from elementwise NumPy operations, so it
    # accepts the whole magnitude grid at once instead of one point per call.
    return np.sum(Schechter_unnormed(M, omega, alpha=alpha)) * dM
def Schechter(M, omega, alpha=-1.07):
    """Normalised Schechter function."""
    norm = normalise(omega, alpha=alpha)
    return Schechter_unnormed(M, omega, alpha=alpha) / norm
def Mthreshold(DL, mth=27.0):
    """Absolute-magnitude detection threshold at luminosity distance DL (Mpc)."""
    distance_modulus = 5.0 * np.log10(1e5 * DL)
    return mth - distance_modulus
def mabs(m, DL):
    """Convert apparent magnitude m to absolute magnitude at distance DL (Mpc)."""
    mu = 5.0 * np.log10(1e5 * DL)
    return m - mu
def HubbleLaw(D_L, omega):  # First-order only; flagged for review by the author
    """Approximate recession redshift for luminosity distance D_L (first order)."""
    c_over_H0 = 3e3  # original author questions this constant ("Sicuro del numero?")
    return D_L * omega.h / c_over_H0
def gaussian(x, x0, sigma):
    """Normalised Gaussian density with mean x0 and standard deviation sigma."""
    z = (x - x0) / sigma
    return np.exp(-0.5 * z * z) / (sigma * np.sqrt(2 * np.pi))
class completeness(cpnest.model.Model):
    """CPNest model: joint inference of GW redshift and cosmology (h, om, ol)
    using a galaxy catalog with a Schechter luminosity prior."""

    def __init__(self, catalog):
        # Sampled parameters and their prior bounds.
        self.names=['z', 'h', 'om', 'ol']
        self.bounds=[[0.001,0.012],
                     [0.5,1.],
                     [0.04,1.],
                     [0.,1.]]
        # Mutable LAL cosmology object, updated in-place at each prior call.
        self.omega = lal.CreateCosmologicalParameters(0.7,0.5,0.5,-1.,0.,0.)
        self.catalog = catalog

    def log_prior(self, x):
        # Check finiteness and the selection step function theta(M - Mth).
        if not(np.isfinite(super(completeness, self).log_prior(x))):
            return -np.inf
        else:
            # Update the cosmological parameters with the sampled values.
            self.omega.h = x['h']
            self.omega.om = x['om']
            self.omega.ol = x['ol']
            zgw = x['z']
            logP = 0.0
            for zi,mi in zip(self.catalog['z'],self.catalog['Bmag']):
                DL = lal.LuminosityDistance(self.omega, zi)
                Mabsi = mabs(mi,DL)
                if Mthreshold(DL) < Mabsi:
                    # Galaxy fainter than the detection threshold: reject.
                    return -np.inf
                else:
                    # Prior computation: each coordinate is weighted by the
                    # GW 'banana' coordinate probabilities, as is z.
                    # Temporarily this is assumed Gaussian around one event.
                    logP += np.log(Schechter(Mabsi, self.omega))
                    #log_P_RA = np.log(gaussian(x['ra'],Gal.ra.rad,Gal.ra.rad/100.))
                    #log_P_DEC = np.log(gaussian(x['dec'],Gal.dec.rad,Gal.dec.rad/100.))
                    logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
            return logP

    # PROBLEM (original author): how should the delta(ra, dec) terms be introduced?
    def log_likelihood(self, x):
        logL = 0.0
        zgw = x['z']
        # Distance likelihood against the measured DL +/- dDL ...
        logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,dDL))
        # ... plus a redshift term marginalised over the catalog galaxies.
        logL += logsumexp([gaussian(zgw, zgi, zgi/10.0) for zgi in self.catalog['z']])
        #logL += np.log(gaussian(x['ra'],GW.ra.rad,GW.ra.rad/10.))
        #logL += np.log(gaussian(x['dec'],GW.dec.rad,GW.dec.rad/10.))
        return logL
if __name__ == '__main__':
    # Galaxies in a sky box around the event, thinned by a factor of 100.
    Gal_cat = GalInABox([190,200],[-25,-15], u.deg, u.deg, catalog='GLADE')[::100]
    M = completeness(Gal_cat)
    # Nested sampling over the completeness model.
    job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)
    job.run()
    # GLADE galaxy catalog
|
8,907 | 892c363c247177deb3297af84a93819a69e16801 | from EdgeState import EdgeState
from rest_framework import serializers
from dzTrafico.BusinessLayer.TrafficAnalysis.TrafficAnalyzer import TrafficAnalyzer, VirtualRampMetering
from dzTrafico.BusinessEntities.Location import LocationSerializer
from dzTrafico.BusinessLayer.SimulationCreation.NetworkManager import NetworkManager
class Sink(object):
    """A monitored stretch of road: a group of nodes controlled together.

    Class attributes are shared by all sinks: the id counter, the traffic
    analyzer used for control decisions, and the list of known incidents.
    """
    id = 0
    trafficAnalyzer = None
    incidents = []

    def __init__(self):
        # Assign a unique sequential id to this sink.
        self.id = Sink.id
        Sink.id += 1
        self.nodes = []
        #print "--------nodes----------"
        #print len(nodes)

    def add_node(self, node):
        self.nodes.append(node)

    def get_sensors(self):
        # Flatten the sensors of every node into a single list.
        sensors = []
        for node in self.nodes:
            for sensor in node.sensors:
                sensors.append(sensor)
        return sensors

    def change_lane(self):
        # Trigger lane-change control on every node that has it activated.
        for node in self.nodes:
            if node.LC_is_activated:
                node.change_lane()

    def incident_change_lane(self):
        # Lane-change control variant used while a node is congested.
        for node in self.nodes:
            if node.isCongested:
                node.incident_change_lane()

    def update_vsl(self):
        """Recompute variable speed limits; return a list of {'id', 'vsl'} dicts."""
        vsl = []
        index = 1
        for node in self.nodes:
            if node.VSL_is_activated:
                Sink.trafficAnalyzer.update_vsl(self, node)
                vsl_node = dict()
                vsl_node["id"] = index
                vsl_node["vsl"] = node.get_current_vsl()
                vsl.append(vsl_node)
            # NOTE(review): index advances per node (ids track node position)
            # — confirm against the consumer of this list.
            index += 1
        return vsl

    def deactivate_vsl(self):
        for node in self.nodes:
            node.deactivate_VSL()

    def deactivate_lc(self):
        for node in self.nodes:
            node.deactivate_LC()

    def set_sumo_LC_Model(self, mode):
        # Push the SUMO lane-change model mode down to LC-active nodes.
        for node in self.nodes:
            if node.LC_is_activated:
                node.set_sumo_LC_Model(mode)

    def read_traffic_state(self):
        """Scan all nodes, run congestion detection/clearing, return EdgeState list."""
        traffic_state = []
        for node in self.nodes:
            congested_lanes = node.check_congested_lanes()
            congestion_detected = len(congested_lanes) > 0
            if congestion_detected:
                # Only treat it as congestion if it matches a known incident edge.
                for incident in Sink.incidents:
                    print "incident ===> ", incident.edge.getID()
                    congestion_detected = node.edge.getID() == incident.edge.getID()
                    if congestion_detected:
                        congested_lanes = [incident.lane]
                        break
            if congestion_detected and not TrafficAnalyzer.isCongestionDetected:
                print "--------notify_congestion_detected----------"
                print node.edge.getID()
                print congested_lanes
                node.isCongested = True
                node.set_congested_lanes(congested_lanes)
                if TrafficAnalyzer.isLCControlActivated:
                    node.close_incident_lanes()
                Sink.trafficAnalyzer.notify_congestion_detected(self, node, congested_lanes)
            elif TrafficAnalyzer.congestionExists and node.isCongested and TrafficAnalyzer.isLCControlActivated:
                # Congestion was active: clear it once the node has discharged.
                if node.check_if_discharged():
                    Sink.trafficAnalyzer.clear_congestion()
                    node.isCongested = False
            # Serialize the edge geometry and current state for this node.
            edge_coords = dict()
            start, end = NetworkManager.get_edge_coords(node.edge)
            edge_coords["start"] = LocationSerializer(start).data
            edge_coords["end"] = LocationSerializer(end).data
            traffic_state.append(
                EdgeState(
                    node.edge.getID(),
                    edge_coords,
                    node.get_current_speed(),
                    node.get_current_vsl(),
                    node.get_current_density(),
                    node.VSL_is_activated,
                    congestion_detected
                )
            )
        return traffic_state

    def get_node_by_edgeID(self, edge_id):
        # Linear search by SUMO edge id; None when the edge is not in this sink.
        for node in self.nodes:
            if node.edge.getID() == edge_id:
                return node
        return None

    def get_LC_recommendations(self):
        """Serialize per-lane lane-change recommendations for LC-active nodes."""
        lc_recommendations = []
        index = VirtualRampMetering.num_vsl_controlled_sections + 1
        for node in self.nodes:
            lanes = []
            if node.LC_is_activated:
                # Emit recommendations ordered by lane number.
                for i in range(0,len(node.recommendations)):
                    for r in node.recommendations:
                        if r.lane == i:
                            lanes.append(
                                NodeLanesRcmd(
                                    r.lane,
                                    r.recommendation
                                )
                            )
                lc_recommendations.extend(
                    [
                        NodeLCRcmd(
                            index,
                            lanes
                        )
                    ]
                )
            index += 1
        nodeLCRcmdSerializer = NodeLCRcmdSerializer(lc_recommendations, many=True)
        return nodeLCRcmdSerializer.data
class NodeLCRcmd(object):
    """Lane-change recommendation bundle for a single node."""

    def __init__(self, id, lanes):
        self.lanes = lanes
        self.id = id
class NodeLanesRcmd(object):
    """Recommendation value for one specific lane."""

    def __init__(self, lane, recommendation):
        self.recommendation = recommendation
        self.lane = lane
class NodeLanesRcmdSerializer(serializers.Serializer):
    # DRF serializer for NodeLanesRcmd: plain integer fields.
    lane = serializers.IntegerField()
    recommendation = serializers.IntegerField()
class NodeLCRcmdSerializer(serializers.Serializer):
    # DRF serializer for NodeLCRcmd: node id plus nested per-lane entries.
    id = serializers.IntegerField()
    lanes = NodeLanesRcmdSerializer(many=True)
|
8,908 | a81ee0a855c8a731bafe4967b776e3f93ef78c2a | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.cbook as cbook
import Image
from matplotlib import _png
from matplotlib.offsetbox import OffsetImage
import scipy.io
import pylab
#for question 1 (my data)
def resample(ms, srate):
    """Convert a duration in milliseconds to a sample count at srate Hz."""
    seconds = ms / 1000
    return int(round(seconds * srate))
def formatdata(data, Params):
    """Build per-condition mean traces from the TrialsMTX data structure.

    Args:
        data: object array where data[0, k] is a (trials x samples) matrix
            for condition k.
        Params: dict whose "conditions" list names each condition in order.

    Returns:
        dict mapping condition name -> {'data': trial-mean trace,
        'cmax': trace maximum, 'cmin': trace minimum}.
    """
    mndata = dict()
    for k, condition in enumerate(Params["conditions"]):
        # Compute the trial mean once and reuse it (original recomputed it,
        # and also created an unused `alltrials` array).
        conditionmean = data[0, k].mean(axis=0)
        mndata[condition] = {
            'data': conditionmean,
            'cmax': conditionmean.max(),
            'cmin': conditionmean.min(),
        }
    return mndata
def traces(mndata,Params,srate,imagepath):
    """
    plots traces of high gamma data for the trial duration. separated by condition, with brain & elec position

    Args:
        mndata: dict from formatdata(): condition -> {'data', 'cmax', 'cmin'}.
        Params: dict of timing parameters in ms ('st', 'en', 'plot', 'bl_st',
            'bl_en') and the 'conditions' list.
        srate: sampling rate in Hz.
        imagepath: path to the electrode-location brain image.
    Returns:
        (figure, (trace_axes, image_axes)) tuple.
    """
    #plot high gamma traces
    #data should be bandpassed (todo)
    #resample to srate
    # Convert all millisecond parameters into sample counts.
    st = resample(Params["st"],srate)
    en = resample(Params["en"],srate)
    bl_en = resample(Params["bl_en"],srate)
    bl_st = resample(Params["bl_st"],srate)
    plot_tp = resample(Params["plot"],srate)
    cue = resample(500,srate)
    colors = ['red','orange','green','blue']
    x = np.array(range(st,en+1))
    f, (ax,ax2) = plt.subplots(1,2, sharex = False)
    # Reference lines: zero level, trial onset, and the shaded cue window.
    ax.axhline(y = 0,color = 'k',linewidth=2)
    ax.axvline(x = 0,color='k',linewidth=2)
    ax.axvline(x = cue,color = 'gray',linewidth = 2)
    ax.axvline(x = cue+cue,color = 'gray',linewidth = 2)
    ax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')
    # One mean trace per experimental condition.
    for j in range(len(Params["conditions"])):
        condition = Params['conditions'][j]
        y = mndata[condition]['data']
        ax.plot(x,y, label = condition,linewidth = 2,color = colors[j])
    ax.set_ylim((-30,85))
    ax.set_xlim(st,en)
    ax.legend()
    ax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)
    ax.xaxis.set_ticks(range(st,en,plot_tp))
    ax.set_xlabel("time (ms)")
    ax.set_ylabel("% change baseline")
    ax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)
    #plot brain with elec location
    #brain = plt.imread(imagepath)
    #aa = pylab.mean(brain,2)
    #ax2.imshow(aa)
    #a2.gray()
    #brain = Image.open(imagepath)
    #ax2.set_axis_off()
    #im = plt.imshow(brain, origin = 'lower')
    #brain = _png.read_png(imagepath)
    #imagebox = OffsetImage(brain,zoom =5)
    #ab = AnnotationBbox(imagebox,)
    im = Image.open(imagepath)
    ax2.imshow(im,aspect = 'auto',origin = 'lower')
    ax2.set_xlim((0,750))
    ax2.set_title('Electrode Location',fontsize = 18)
    return f, (ax, ax2)
#for question 2 (stocks data)
def readdata(filename):
    """Load a two-column text file (int date, float value), skipping one header row."""
    record_type = np.dtype([('date', 'int'), ('val', '<f8')])
    return np.loadtxt(filename, dtype=record_type, skiprows=1)
def plotstocksdata(datadict, formats):
    """Plot Yahoo/Google stock values against NY monthly high temperature.

    Args:
        datadict: dict with keys "yahoo", "google", "nytmp"; each value a
            structured array with 'date' (MJD) and 'val' fields (see readdata).
        formats: dict with the same keys; values are matplotlib format strings.
    """
    # Stocks on the left axis.
    f = plt.figure()
    ax1 = plt.subplot(111)
    data = datadict["yahoo"]
    yahoo = ax1.plot(data['date'], data['val'], formats["yahoo"], label='Yahoo Stock Value', linewidth=1.5)
    data = datadict["google"]
    google = ax1.plot(data['date'], data['val'], formats["google"], label='Google Stock Value', linewidth=1.5)
    # Temperature on a twin right-hand axis.
    ax2 = ax1.twinx()
    data = datadict["nytmp"]
    nytmp = ax2.plot(data['date'], data['val'], formats["nytmp"], label='NY Mon. High Temp', linewidth=1.5)
    ax1.set_xlabel('Date (MJD)')
    ax1.set_ylabel('Value (Dollars)')  # fixed: label was missing its closing paren
    ax1.set_ylim((-20, 765))
    ax1.yaxis.set_minor_locator(plt.MultipleLocator(20))
    ax1.set_xlim((48800, 55600))
    ax1.xaxis.set_minor_locator(plt.MultipleLocator(200))
    # (removed a duplicated ax2.set_ylim call present in the original)
    ax2.set_ylim((-150, 100))
    ax2.set_ylabel('Temperature ($^\circ$F)')
    ax2.yaxis.set_minor_locator(plt.MultipleLocator(10))
    plt.title('New York Temperature, Google, and Yahoo!', fontname='serif', fontsize=18)
    # Combine the handles from both axes into a single legend.
    plts = yahoo + google + nytmp
    labels = [l.get_label() for l in plts]
    ax1.legend(plts, labels, loc=(0.025, 0.5), frameon=False, prop={'size': 11}, markerscale=2)
    plt.show()
def answer_hw():
    """Run both homework questions: ECoG trace plotting and stock-data plotting."""
    #QUESTION 1
    #load data
    #dataDir = "/Users/matar/Documents/Courses/PythonClass/HW2/data/"
    dataDir = "data/" #ISAAC EDIT
    imagepath = dataDir + 'e37.png'
    matdata = scipy.io.loadmat(dataDir+'TrialsMTX',struct_as_record = True)
    data = matdata["TrialsMTX"]['data'][0,0]
    #define parameters
    Params={"f1":70, "f2": 150, "st" :-250, "en":3000, "plot":250, "bl_st" : -250, "bl_en":0, "caxis":200, "conditions":['20','40','60','80']}
    subjdata = scipy.io.loadmat(dataDir+"subj_globals")
    srate = subjdata["srate"][0,0]
    #format data
    mndata = formatdata(data, Params)
    print '-'*40
    print "question 1 : plotting traces"
    print '-'*40
    traces(mndata,Params,srate,imagepath)
    #ideally would like to separate the traces func from the brain image, but can't figure out how to plot 2 funcs as subplots of same image
    #QUESTION 2
    formats = {'google' : 'b', 'nytmp' : 'r--', 'yahoo' :'purple'}
    #dataDir = "/Users/matar/Documents/Courses/PythonClass/HW2/hw2_data/"
    dataDir = "hw2_data/" #ISAAC EDIT
    datadict = {'nytmp': readdata(dataDir+'ny_temps.txt'), 'google': readdata(dataDir+'google_data.txt'), 'yahoo': readdata(dataDir+'yahoo_data.txt')}
    print '-'*40
    print "question 2 : plotting stock data"
    print '-'*40
    plotstocksdata(datadict,formats)
8,909 | 5750fd4b59f75ea63b4214ee66b23602ed4d314d | # Copyright 2021 Yegor Bitensky
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DiceEmptyInialItemsError(Exception):
    """Raised when a dice is created with neither "faces_count" nor
    "faces_items".

    Fixed the user-facing message: "argsuments" typo and broken grammar.
    (The class name's "Inial" typo is kept: it is the public interface.)
    """
    def __init__(self):
        super().__init__(
            "To create a dice, either \"faces_count\" "
            "or \"faces_items\" argument needs to be passed."
        )
class DiceWrongFacesCountTypeError(Exception):
    """Raised when "faces_count" is not an int.

    Fixed the user-facing message: "argsument" typo and "need" -> "needs".
    """
    def __init__(self):
        super().__init__("Dice \"faces_count\" argument type needs to be \"int\".")
class DiceWrongFacesCountError(Exception):
    """Raised when "faces_count" is below the allowed minimum.

    Fixed the user-facing message: "argsument" typo and "need" -> "needs".
    """
    def __init__(self, min_count):
        super().__init__(f"Dice \"faces_count\" argument needs to be greater or equal to {min_count}.")
class DiceWrongFacesItemsTypeError(Exception):
    """Raised when "faces_items" is not iterable.

    Fixed the user-facing message: "argsument" typo and "need" -> "needs".
    """
    def __init__(self):
        super().__init__("Dice \"faces_items\" argument needs to be iterable.")
class DiceWrongFacesItemsCountError(Exception):
    """Raised when "faces_items" has fewer items than the allowed minimum.

    Fixed the user-facing message grammar: "need" -> "needs".
    """
    def __init__(self, min_count):
        super().__init__(f"Dice \"faces_items\" count needs to be greater or equal to {min_count}.")
class DiceBoxWrongItemAdditionError(Exception):
    """Raised when something other than a Dice instance is added to a box."""

    def __init__(self):
        message = "Dice instance expected."
        super().__init__(message)
|
8,910 | 338bf2406c233d857e1a688391161d58e1dab23c | from __future__ import annotations
from VersionControl.Branch import Branch
from Branches.Actions.Actions import Actions
from VersionControl.Git.Branches.Develop.Init import Init
class Develop(Branch):
    """Handler for the git 'develop' branch; only INIT is implemented."""

    def process(self):
        # Guard clause: anything other than INIT is unsupported.
        if self.action is not Actions.INIT:
            raise NotImplementedError
        self.start_message('Develop Init')
        Init(self.state_handler, self.config_handler).process()
|
8,911 | 067e0129b1a9084bbcee28d1973504299b89afdb | import json
import os
from django.conf import settings
from django.db import models
from jsonfield import JSONField
class Word(models.Model):
    """A dictionary word with its transcription and the raw payload fetched
    from the OD (presumably Oxford Dictionaries) API."""
    value = models.CharField(
        max_length=50,
        verbose_name='Слово'
    )
    spelling = models.CharField(
        max_length=250,
        verbose_name='Транскрипция'
    )
    # Raw article data as returned by the OD service, stored verbatim.
    raw_od_article = JSONField(
        verbose_name='Сырые данные с OD'
    )
    # Visibility flag (soft delete).
    is_active = models.BooleanField(
        default=True,
        verbose_name='Используется'
    )
    def __str__(self):
        return self.value
    class Meta:
        ordering = ["value"]
        verbose_name = "Слово"
        verbose_name_plural = "Слова"
class Meaning(models.Model):
    """An additional meaning (definition) of a Word, with optional examples."""
    word = models.ForeignKey(
        Word,
        on_delete=models.CASCADE,
        verbose_name='Слово'
    )
    value = models.TextField(
        verbose_name='Значение'
    )
    # Display order within the word's meanings (see Meta.ordering).
    order = models.PositiveIntegerField(
        verbose_name="Порядок",
        default=0
    )
    # Usage examples; optional, stored as JSON.
    examples = JSONField(
        null=True,
        blank=True
    )
    def __str__(self):
        # Defensive: value is a TextField and normally not None.
        if self.value is None:
            return ''
        return self.value[:20]
    class Meta:
        ordering = ["order"]
        verbose_name = "Доп. значение"
        verbose_name_plural = "Доп. значения"
class Pronunciation(models.Model):
    """An audio pronunciation for a Word, plus the raw OD metadata it came with."""
    word = models.ForeignKey(
        Word,
        on_delete=models.CASCADE,
        verbose_name='Слово'
    )
    audio = models.FileField(
        upload_to='media/audio',
        verbose_name='Произношение'
    )
    # Raw metadata from the OD service; optional.
    raw_od_data = JSONField(
        verbose_name='Сырые данные с OD',
        blank=True,
        null=True
    )
    # Visibility flag (soft delete).
    is_active = models.BooleanField(
        default=True,
        verbose_name='Используется'
    )
    def __str__(self):
        return "Произношение {}".format(self.word)
    class Meta:
        verbose_name = "Произношение"
        verbose_name_plural = "Произношения"
class PronunciationMeta(object):
    """Lightweight attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
class WordLearningState(models.Model):
    """Per-user learning progress for a single Word, plus helpers that pair
    locally cached Forvo pronunciation metadata with cached sound files."""
    word = models.ForeignKey(
        Word,
        on_delete=models.CASCADE,
        verbose_name='Слово'
    )
    user = models.ForeignKey(
        "auth.User",
        on_delete=models.CASCADE,
        verbose_name='Пользователь'
    )
    is_user_know_meaning = models.BooleanField(
        default=False,
        verbose_name='Выучил значение'
    )
    is_user_know_pronunciation = models.BooleanField(
        default=False,
        verbose_name='Выучил произношение'
    )
    usage_count = models.PositiveIntegerField(
        default=0,
        verbose_name='Количество показов'
    )
    last_usage_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата последнего показа'
    )
    # Forvo id of the user's preferred pronunciation (0 = none chosen).
    preferred_pronunciation = models.PositiveIntegerField(
        default=0,
        verbose_name='forvo id препочтительного произношения',
    )
    training_session = models.BooleanField(
        default=False,
        blank=False,
        verbose_name='Сеанс обучения'
    )
    def _get_pronunciations_meta(self, word_str):
        """Load cached Forvo metadata from media/forvo/<word>.json.

        Returns the parsed dict, or None when no cache file exists.
        """
        forvo_meta_path = os.path.join(
            settings.BASE_DIR, 'media', 'forvo', '{}.json'.format(word_str)
        )
        if not os.path.exists(forvo_meta_path):
            return
        with open(forvo_meta_path, 'r') as f:
            data = json.load(f)
        return data
    def _get_sounds(self, word_str):
        """Return MEDIA_URL paths of the cached .mp3 files for word_str,
        sorted by file name; empty list when the directory is missing."""
        ret = []
        sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds', word_str)
        print(sounds_path)  # NOTE(review): debug print left in; consider removing
        if not os.path.exists(sounds_path):
            return []
        items = list(os.listdir(sounds_path))
        items.sort()
        for item in items:
            if item.endswith('.mp3'):
                ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds', word_str, item))
        return ret
    def get_pronunciations(self):
        """Pair Forvo metadata entries (US English only) with cached sound
        files, returning at most 4 dicts.

        The user's preferred pronunciation is flagged 'best'; if none of the
        returned entries matches, the first one is flagged instead.
        NOTE(review): pairing assumes metadata order matches the sorted file
        order in _get_sounds -- confirm the cache is written that way.
        """
        word = self.word
        forvo_meta = self._get_pronunciations_meta(word.value)
        if not forvo_meta:
            return []
        ret = []
        ct = 0
        sounds = self._get_sounds(word.value)
        slen = len(sounds)
        prefered_detected = False
        for item in forvo_meta.get('items') or []:
            # Keep only US English pronunciations.
            if item.get('code', '') != 'en' or item.get(
                'country', '') != 'United States':
                continue
            # Stop once we run out of cached sound files.
            if ct > slen-1:
                break
            sound_file = sounds[ct]
            is_best = self.preferred_pronunciation == item['id']
            if is_best:
                prefered_detected = True
            ret.append({
                'id': item['id'],
                'by': item['username'],
                'sex': item['sex'],
                'src': sound_file,
                'best': is_best
            })
            ct += 1
            if ct == 4:
                break
        if ret and not prefered_detected:
            ret[0]['best'] = True
        return ret
    def __str__(self):
        return "Статистика слова {}".format(self.word)
    class Meta:
        verbose_name = "Статистика"
        verbose_name_plural = "Статистика"
|
class Tienda:
    """A simple store holding a mutable list of products.

    Bug fix: the constructor previously used a mutable default argument
    (lista_productos=[]), so every Tienda created without an explicit list
    shared ONE list -- adding a product to one store added it to all.
    """

    def __init__(self, nombre_tienda, lista_productos=None):
        self.nombre_tienda = nombre_tienda
        # Fresh list per instance unless the caller supplies one.
        self.lista_productos = [] if lista_productos is None else lista_productos

    def __str__(self):
        return f"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n"

    def anhadir_producto(self, producto_nuevo):
        """Append a product, echo its info, and return self for chaining."""
        self.lista_productos.append(producto_nuevo)
        print("# # # # # # # PRODUCTO ANHADIDO # # # # # # #")
        producto_nuevo.producto_info()
        return self

    def vender_producto(self, id):
        """Remove (sell) the product at index `id` and echo its info."""
        print("\n# # # # # # # PRODUCTO VENDIDO # # # # # # #")
        self.lista_productos.pop(id).producto_info()
        return self

    def inflacion(self, porcentaje_incremento):
        """Raise every product's price by the given percentage."""
        a = 0
        for pro in self.lista_productos:
            a += 1
            print(f"=================Producto 0{a}:=================")
            pro.producto_info()
            print("AUMENTA su precio a: ")
            pro.actualizar_precio(porcentaje_incremento, True).producto_info()
        return self

    def descuentazo(self, categoria, descuentazo_porcentaje):
        """Discount every product of `categoria` by the given percentage."""
        a = 0
        for product in self.lista_productos:
            a += 1
            if product.cat_producto == categoria:
                print(f"=================Producto 0{a}:=================")
                product.producto_info()
                print("Se REMATA, y su nuevo precio de remate es: ")
                product.actualizar_precio(descuentazo_porcentaje, False).producto_info()
        print(f"Descuento de precios a toda la categoria {categoria}, realizado")
        return self
#########################################################
##### coso = Tienda("VERDULERIA")
##### print(coso)
##### print("anhadir_P")
##### pera = ("PERA", 1000, "FRUTAS")
##### coco = ("COCO", 1511, "FRUTAS")
##### coso.anhadir_producto(pera)
##### coso.anhadir_producto(coco)
##### print(coso)
##### print("#############################")
##### coso.vender_producto(1) |
8,913 | 031727fa42b87260abb671518b2baeff1c9524f9 | #C:\utils\Python\Python27\python.exe incompletosClean.py incompletos\inc.dat incompletos\out.dat
import sys
import os
import os.path

# Extract the text of every '<td><p>...</p></td>' cell from each HTML file
# listed in sys.argv[1]; one ';'-terminated, comma-separated record per file,
# written to sys.argv[2].
# Improvements: files are now opened with `with` (closed even on error), and
# the unused bfTmp/bufferTmp temporaries were removed.
lsOutTmp = []

fileNameIn = sys.argv[1]  # file listing the HTML files to scan, one per line
fileNameOu = sys.argv[2]  # destination for the extracted records

with open(fileNameIn) as fo:
    InFileName = [x.replace('\n', '') for x in fo.readlines()]

for bfMatFile in InFileName:
    if os.path.isfile(bfMatFile):
        with open(bfMatFile) as fo:
            lsHTMLName = [x.replace('\n', '') for x in fo.readlines()]
        bfRow = ''
        for rowHTML in lsHTMLName:
            iPosic = rowHTML.find('<td><p>')
            if iPosic > 0:
                bfRowPart = rowHTML[iPosic + len('<td><p>'):]
                # Cell text up to the closing tag; spaces become commas.
                bfRow += ((bfRowPart[:bfRowPart.index('</p></td>')] + ',').replace(' ', ',')).strip()
        if bfRow != '':
            # Drop the trailing comma and terminate the record with ';'.
            lsOutTmp.append(bfRow[:-1] + ';')

with open(fileNameOu, 'w') as fo:
    fo.write('\n'.join(lsOutTmp))
|
8,914 | 74eea67b8640a03e616bebdadba49891017b921d | from collections import Counter, defaultdict
import pandas as pd
from glob import glob
import subsamplex
# Count, per biome, how many samples each gene appears in (prevalence),
# after subsampling every sample down to exactly 1 million reads.
files = glob('outputs.txt/*.unique.txt.gz')
files.sort()
# NOTE(review): pd.read_table / squeeze= are deprecated in recent pandas;
# this script targets an older pandas API.
biome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)
duplicates = set(line.strip() for line in open('cold/duplicates.txt'))
counts = defaultdict(Counter)
skipped = 0
for i,fname in enumerate(files):
    # Sample name is the file stem: outputs.txt/<sample>.unique.txt.gz
    sample = fname.split('/')[1].split('.')[0]
    if sample in duplicates:
        skipped += 1
        if skipped % 100 == 99:
            print(f'Skip {skipped}')
        continue
    f = pd.read_table(fname, index_col=0, squeeze=True)
    # Skip shallow samples: at least 1M reads needed to subsample fairly.
    if f.sum() < 1_000_000:
        skipped += 1
        if skipped % 100 == 99:
            print(f'Skip {skipped}')
        continue
    # Subsample in place to exactly 1M reads; keep genes still observed.
    f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000*1000)
    f = f[f>0]
    counts[biome[sample]].update(f.index)
    if i % 100 == 99:
        print("Done {}/{}".format(i+1, len(files)))
# genes x biomes prevalence matrix; NaN -> 0 where a biome lacks a gene.
recounts = pd.DataFrame({k:pd.Series(v) for k, v in counts.items()})
recounts.fillna(0, inplace=True)
used_total = recounts.sum(1)
recounts['all'] = used_total
recounts = recounts.astype(int)
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
# Replace integer gene indices with gene names for the TSV output.
names = [line.strip() for line in open('cold/derived/GMGC10.headers')]
recounts.set_index('index', inplace=True)
recounts.index = recounts.index.map(names.__getitem__)
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
|
8,915 | ba7db49ca7956fdc055702ffccba769485fd0046 | import os
import location
import teamList
import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
##adapted from code from this website:
## https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4
# Summarise one team's season (penalties and expected-points-from-penalties
# columns) split by wins/losses, and render the summary as a table image.
year = "18-19"
team = "ARI"
seasonReportRaw = pd.read_csv("Data/" + year + " " + team + "/" + team + "_SeasonRaw.csv")
# Combined penalty expected-points column -- presumably the sum of the DHP /
# DEP / DOP components; confirm column semantics against the data source.
seasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw['tEPDEP'] + seasonReportRaw['tEPDOP']
# Partition the season's games into home/away wins and losses for this team.
# NOTE(review): strict </> comparisons drop tied games, and the 16-game season
# is hardcoded below; winCount == 0 or 16 would divide by zero.
homeWins = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] > seasonReportRaw["Away Score"])]
awayWins = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] > seasonReportRaw["Home Score"])]
homeLosses = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] < seasonReportRaw["Away Score"])]
awayLosses = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] < seasonReportRaw["Home Score"])]
winCount = homeWins["Home Team"].count() + awayWins["Away Team"].count()
# Penalties: season / win / loss totals and per-game averages.
PenaltiesSeasonTotal = seasonReportRaw["tPEN(#)"].sum()
PenaltiesSeasonAverage = PenaltiesSeasonTotal / 16
PenaltiesWinTotal = homeWins["tPEN(#)"].sum() + awayWins["tPEN(#)"].sum()
PenaltiesWinAverage = PenaltiesWinTotal / winCount
PenaltiesLossTotal = homeLosses["tPEN(#)"].sum() + awayLosses["tPEN(#)"].sum()
PenaltiesLossAverage = PenaltiesLossTotal / (16-winCount)
# Combined EPC and its three components, same split as above.
EPCSeasonTotal = seasonReportRaw["tEPPfP"].sum()
EPCSeasonAverage = EPCSeasonTotal / 16
EPCWinTotal = homeWins["tEPPfP"].sum() + awayWins["tEPPfP"].sum()
EPCWinAverage = EPCWinTotal / winCount
EPCLossTotal = homeLosses["tEPPfP"].sum() + awayLosses["tEPPfP"].sum()
EPCLossAverage = EPCLossTotal / (16-winCount)
EPCDHPSeasonTotal = seasonReportRaw["tEPDHP"].sum()
EPCDHPSeasonAverage = EPCDHPSeasonTotal / 16
EPCDHPWinTotal = homeWins["tEPDHP"].sum() + awayWins["tEPDHP"].sum()
EPCDHPWinAverage = EPCDHPWinTotal / winCount
EPCDHPLossTotal = homeLosses["tEPDHP"].sum() + awayLosses["tEPDHP"].sum()
EPCDHPLossAverage = EPCDHPLossTotal / (16-winCount)
EPCDEPSeasonTotal = seasonReportRaw["tEPDEP"].sum()
EPCDEPSeasonAverage = EPCDEPSeasonTotal / 16
EPCDEPWinTotal = homeWins["tEPDEP"].sum() + awayWins["tEPDEP"].sum()
EPCDEPWinAverage = EPCDEPWinTotal / winCount
EPCDEPLossTotal = homeLosses["tEPDEP"].sum() + awayLosses["tEPDEP"].sum()
EPCDEPLossAverage = EPCDEPLossTotal / (16-winCount)
EPCOPSeasonTotal = seasonReportRaw["tEPDOP"].sum()
EPCOPSeasonAverage = EPCOPSeasonTotal / 16
EPCOPWinTotal = homeWins["tEPDOP"].sum() + awayWins["tEPDOP"].sum()
EPCOPWinAverage = EPCOPWinTotal / winCount
EPCOPLossTotal = homeLosses["tEPDOP"].sum() + awayLosses["tEPDOP"].sum()
EPCOPLossAverage = EPCOPLossTotal / (16-winCount)
# Table rows: first element of each data row is its label; popped below.
headerRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win', 'Loss Total','Per Loss']
penaltiesRow = ['Penalties',PenaltiesSeasonTotal,PenaltiesSeasonAverage,PenaltiesWinTotal,PenaltiesWinAverage,PenaltiesLossTotal,PenaltiesLossAverage]
EPCRow = ['EPC',EPCSeasonTotal,EPCSeasonAverage,EPCWinTotal,EPCWinAverage,EPCLossTotal,EPCLossAverage]
EPCDHPRow = ['EPCDHP',EPCDHPSeasonTotal,EPCDHPSeasonAverage,EPCDHPWinTotal,EPCDHPWinAverage,EPCDHPLossTotal,EPCDHPLossAverage]
EPCDEPRow = ['EPCDEP',EPCDEPSeasonTotal,EPCDEPSeasonAverage,EPCDEPWinTotal,EPCDEPWinAverage,EPCDEPLossTotal,EPCDEPLossAverage]
EPCOPRow = ['EPCOP',EPCOPSeasonTotal,EPCOPSeasonAverage,EPCOPWinTotal,EPCOPWinAverage,EPCOPLossTotal,EPCOPLossAverage]
fig_background_color = 'white'
fig_border = 'black'
data = [headerRow,penaltiesRow,EPCRow,EPCDHPRow,EPCDEPRow,EPCOPRow]
# Pop the headers from the data array
# (NOTE: pop mutates headerRow/penaltiesRow/... in place.)
column_headers = data.pop(0)
row_headers = [x.pop(0) for x in data]
# Table data needs to be non-numeric text. Format the data
# while I'm at it.
cell_text = []
for row in data:
    cell_text.append([f'{x:1.2f}' for x in row])
# Get some lists of color specs for row and column headers
rcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))
ccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))
# Create the figure. Setting a small pad on tight_layout
# seems to better regulate white space. Sometimes experimenting
# with an explicit figsize here can produce better outcome.
plt.figure(linewidth=2,
           edgecolor=fig_border,
           facecolor=fig_background_color,
           tight_layout={'pad':1},
           figsize=(4.5,1.75)
          )
# Add a table at the bottom of the axes
the_table = plt.table(cellText=cell_text,
                      rowLabels=row_headers,
                      rowColours=rcolors,
                      rowLoc='right',
                      colColours=ccolors,
                      colLabels=column_headers,
                      loc='center')
# Scaling is the only influence we have over top and bottom cell padding.
# Make the rows taller (i.e., make cell y scale larger).
the_table.scale(1, 1.1)
# Hide axes
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Hide axes border
plt.box(on=None)
# Force the figure to update, so backends center objects correctly within the figure.
# Without plt.draw() here, the title will center on the axes and not the figure.
plt.draw()
# Create image. plt.savefig ignores figure edge and face colors, so map them.
fig = plt.gcf()
plt.savefig('pyplot-table-demo.png',
            edgecolor=fig.get_edgecolor(),
            facecolor=fig.get_facecolor(),
            dpi=175
            )
# Pay calculation: hours per day * days per week * pay periods, times rate.
work_hours = 8
work_days = 5
pay_periods = 2
total = work_hours * work_days * pay_periods
rate = 17
pay = rate * total
print(pay)

# Basic value types.
name = "josh"               # str
age = 30                    # int
weight = 160.5              # float
kill_streak = [3, 5, 1, 9]  # list; lists may contain sub-lists, e.g. [90.9]

# Ranges materialised as lists.
players = list(range(1, 10))
odds = list(range(1, 10, 2))
print(odds)
print(type(name), type(age), type(weight), type(kill_streak))
# Introspection helpers: dir(str), help(str.upper), dir(__builtins__)

# Mean of the kill streak.
kill_streak_sum = sum(kill_streak)
length = len(kill_streak)
mean = kill_streak_sum / length
print(mean)

student_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]
tens = student_grades.count(10)
print(tens)

# Dictionary mapping names to ages.
family = {"josh": 30, "jess": 31, "bailey": 1.5}
age_sum = sum(family.values())
family_size = len(family)
average_age = age_sum / family_size
print(average_age)

# Tuples: like lists, but immutable.
palette_one = ("#f1f1f1", "#333333", "#4287f5")
palette_two = ("#f5f5f5", "#454545", "#6dd46a")
palette_three = ("#f0fff0", "#c7c7c7", "#725fb0")
palettes = (palette_one, palette_two, palette_three)
color_codes = palettes
temperature_data = {"morning": (3.1, 2.0, 4.9), "noon": (1.2, 0.9, 3.4), "evening": (0.2, 0.1, 1.0)}
day_temperatures = temperature_data
|
8,917 | 75958b48a3372b56e072a0caa468171ab6b99eb6 | #!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2  # max Worldbank result pages fetched per indicator
COLLECTION = 'indicators'  # Mongo collection name, also used as the URL prefix
# SECURITY NOTE(review): database credentials are hardcoded and committed --
# move them to environment variables or a secrets store.
DB_CONFIG = {
    'dbuser': 'z5113243',
    'dbpassword': 'badpassword01',
    'mlab_inst': 'ds239071',
    'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
    app,
    title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
    description='In this assignment, we\'re asked to develop ' \
    'a Flask-Restplus data service that allows a client to ' \
    'read and store some publicly available economic indicator ' \
    'data for countries around the world, and allow the consumers ' \
    'to access the data through a REST API.'
)
# Request body schema for POST /indicators.
indicator_model = api.model(COLLECTION, {
    'indicator_id': fields.String(required=True,
        title='An Indicator ',
        description='http://api.worldbank.org/v2/indicators',
        example='NY.GDP.MKTP.CD'),
})
# Query-string parser for the top<k>/bottom<k> filter (see query_to_index).
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
    'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
    """Connect to the mlab-hosted MongoDB instance and return the named DB."""
    uri = f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
    client = MongoClient(uri)
    return client[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
    """Build the Worldbank all-countries endpoint URL for one indicator page."""
    base = 'http://api.worldbank.org/v2/countries/all/indicators/'
    query = f'{indicator}?date={date}&format={fmt}&page={page}'
    return base + query
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=None, max_pages=MAX_PAGE_LIMIT):
    """Fetch up to max_pages pages of Worldbank data for `indicator`.

    Returns the accumulated list of entry dicts, or the string
    'Invalid indicator' when the API rejects the indicator id.

    Fix: prevRes previously used a mutable default argument ([]); although it
    was never mutated in place, the None-sentinel idiom removes the hazard.
    """
    if prevRes is None:
        prevRes = []
    response = requests.get(api_url(indicator=indicator, page=page)).json()
    # An invalid indicator yields a single 'message' element instead of data.
    if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
        return 'Invalid indicator'
    # Stop at the page cap or when the API says this is the last page.
    if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
        return prevRes+response[1]
    return get_indicator_data(
        indicator=indicator,
        page=response[0]['page']+1,
        prevRes=prevRes+response[1],
        max_pages=max_pages,
    )
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
    """Reduce a raw Worldbank record to the country/date/value fields."""
    country_name = indicator_data['country']['value']
    return {
        'country': country_name,
        'date': indicator_data['date'],
        'value': indicator_data['value'],
    }
# Transform top<k>/bottom<k> queries to array slices
def query_to_index(query, arr_size):
    """Convert a 'top<k>' / 'bottom<k>' query string into a slice over a
    descending-sorted list of length arr_size.

    Any non-matching (or missing) query yields the whole-array slice.

    Fix: the original used a bare `except:` around the regex as control flow
    (a non-matching query raised AttributeError) and carried an unreachable
    `else` branch; this version tests the match explicitly.
    """
    match = re.fullmatch(r'(top|bottom)(\d+)', query or '')
    if not match:
        return slice(arr_size)
    order = match.group(1)
    length = int(match.group(2))
    if order == 'top':
        return slice(length)
    return slice(arr_size - length, arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
    """Collection-level endpoints: import a collection (POST), list all (GET)."""
    @api.doc(description='[Q1] Import a collection from the data service.')
    @api.response(200, 'Successfully retrieved collection.')
    @api.response(201, 'Successfully created collection.')
    @api.response(400, 'Unable to create / retrieve collection.')
    @api.expect(indicator_model)
    def post(self):
        body = request.json
        # Indicator hasn't been specified in body (400)
        # NOTE(review): a body missing the key entirely raises KeyError (500);
        # body.get('indicator_id') may have been intended.
        if not body['indicator_id']:
            return { 'message': 'Please specify an indicator.' }, 400
        # Retrieve indicator from database (200)
        existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
        if existing_collection:
            return {
                'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
                'collection_id': str(existing_collection['_id']),
                'creation_time': str(existing_collection['creation_time']),
                'indicator': existing_collection['indicator'],
            }, 200
        # From now onwards we need to obtain data from the Worldbank API
        indicator_data = get_indicator_data(body['indicator_id'])
        # Valid indicator hasn't been specified (400)
        if indicator_data == 'Invalid indicator':
            return { 'message': 'Please specify a valid indicator.' }, 400
        # Create and retrieve indicator from Worldbank API (201)
        collection = {
            'indicator': indicator_data[0]['indicator']['id'],
            'indicator_value': indicator_data[0]['indicator']['value'],
            'creation_time': datetime.datetime.utcnow(),
            'entries': [format_collection_entry(entry) for entry in indicator_data],
        }
        created_collection = db[COLLECTION].insert_one(collection)
        return {
            'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
            'collection_id': str(created_collection.inserted_id),
            'creation_time': str(collection['creation_time']),
            'indicator': collection['indicator'],
        }, 201
    @api.doc(description='[Q3] Retrieve the list of available collections.')
    @api.response(200, 'Successfully retreieved collections.')
    @api.response(400, 'Unable to retreive collections.')
    def get(self):
        try:
            collections = db[COLLECTION].find()
        except:
            # NOTE(review): bare except hides unrelated errors; prefer a
            # specific pymongo exception type.
            return { 'message': 'Unable to retrieve collections.' }, 400
        return [{
            'location': f'/{COLLECTION}/{str(doc["_id"])}',
            'collection_id': str(doc['_id']),
            'creation_time': str(doc['creation_time']),
            'indicator': doc['indicator'],
        } for doc in collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
    """Single-collection endpoints: delete (DELETE) and fetch (GET)."""
    @api.doc(description='[Q2] Deleting a collection with the data service.')
    @api.response(200, 'Successfully removed collection.')
    @api.response(404, 'Unable to find collection.')
    @api.response(400, 'Unable to remove collection.')
    def delete(self, collection_id):
        # Check if collection exists
        # NOTE(review): a malformed id makes ObjectId() raise before the 404
        # path is reached -- confirm whether that should be caught.
        if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
            return { 'message': 'Unable to find collection.' }, 404
        # Remove collection from db
        try:
            db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
        except:
            return { 'message': 'Unable to remove collection.' }, 400
        return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200
    @api.doc(description='[Q4] Retrieve a collection.')
    @api.response(200, 'Successfully retreived collection.')
    @api.response(404, 'Unable to retreive collection.')
    def get(self, collection_id):
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except:
            return { 'message': 'Unable to find collection' }, 404
        # NOTE(review): find_one returning None (id well-formed but absent)
        # falls through and raises TypeError below instead of returning 404.
        return {
            'collection_id': str(collection['_id']),
            'indicator': collection['indicator'],
            'indicator_value': collection['indicator_value'],
            'creation_time': str(collection['creation_time']),
            'entries': collection['entries'],
        }, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
    """Look up a single indicator value by collection, country and year."""
    @api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
    @api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
    @api.response(400, 'Unable to retrieve indicator entry.')
    @api.response(404, 'Unable to find collection.')
    def get(self, collection_id, year, country):
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except:
            return { 'message': 'Unable to find collection' }, 404
        # Create a filtered list containing entries that match params
        filtered_entries = [
            entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
        ]
        if len(filtered_entries) == 0:
            return {'message': 'Unable to find specific indicator entry ' \
                f'for country=\'{country}\' and year=\'{year}\'.'}, 400
        # Only the first match is returned (country+year should be unique).
        return {
            'collection_id': str(collection['_id']),
            'indicator': collection['indicator'],
            **filtered_entries[0],
        }, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
    """Year slice of a collection, optionally limited to top/bottom k values."""
    @api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
    @api.response(200, 'Successfully retreived economic indicator values.')
    @api.response(404, 'Unable to find collection.')
    @api.expect(parser)
    def get(self, collection_id, year):
        query = request.args.get('q')
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except:
            return { 'message': 'Unable to find collection' }, 404
        filtered_entries = [
            entry for entry in collection['entries'] if entry['date'] == year
        ]
        # No query -> return the full, unsorted year slice.
        if not query:
            return {
                'indicator': collection['indicator'],
                'indicator_value': collection['indicator_value'],
                'entries': filtered_entries,
            }, 200
        # Sort descending by value, then slice per the top<k>/bottom<k> query.
        # NOTE(review): entries whose 'value' is None make this sort raise
        # TypeError on Python 3 -- confirm upstream data never contains nulls.
        return {
            'indicator': collection['indicator'],
            'indicator_value': collection['indicator_value'],
            'entries': sorted(
                filtered_entries,
                key=lambda k: k['value'],
                reverse=True
            )[query_to_index(query, len(filtered_entries))],
        }, 200
if __name__ == '__main__':
    # Connect to the mlab-hosted MongoDB, then start the Flask dev server.
    # NOTE(review): under a WSGI server this branch never runs, leaving the
    # module-level `db` as None.
    db = mlab_client(
        dbuser=DB_CONFIG['dbuser'],
        dbpassword=DB_CONFIG['dbpassword'],
        mlab_inst=DB_CONFIG['mlab_inst'],
        dbname=DB_CONFIG['dbname']
    )
    app.run(debug=DEBUG)
8,918 | 9bf4725c054578aa8da2a563f67fd5c72c2fe831 | #coding=utf8
# Python 2 demo: a UTF-8-encoded unicode string compares equal to a byte-string
# literal because the source file is declared utf-8 (#coding=utf8), so the
# literal below is already the UTF-8 byte sequence.
uu=u'中国'
s = uu.encode('utf-8')
# True in Python 2: both sides are the same UTF-8 bytes.
if s == '中国' :
    print 11111
# Prints the UTF-8 bytes of the phrase.
print u"一次性还本息".encode('utf-8')
|
8,919 | 1bdc1274cceba994524442c7a0065498a9c1d7bc | #Adds states to the list
# Map full state names to their postal abbreviations.
# ('Flordia' misspelling is kept: it is a dict key used consistently below.)
states = {
    'Oregon': 'OR',
    'Flordia': 'FL',
    'California': 'CA',
    'New York': 'NY',
    'Michigan': 'MI',
}
# Map state abbreviations to a city in that state.
cities = {
    'CA': 'San Fransisco',
    'MI': 'Detroit',
    'FL': 'Jacksonville',
}
cities['NY'] = 'New York'
cities['OR'] = 'PortLand'

# Print cities.
print('-' * 10)
print("NY State has:", cities['NY'])
print("OR State has : ", cities['OR'])
# Print states. (Fixed "abreviation" typo in the printed message.)
print('-' * 10)
print("Michigan's abbreviation is: ", states['Michigan'])
print("Flordia's abbreviation is :", states['Flordia'])
print('-' * 10)
print("Michigan has : ", cities[states['Michigan']])
print("Flordia has: ", cities[states['Flordia']])
print('-' * 10)
# Iterate .items() directly: wrapping it in list() only built a throwaway
# copy (the dicts are not modified during iteration).
for state, abbrev in states.items():
    print(f"{state} is abbreviated {abbrev}")
print('-' * 10)
for abbrev, city in cities.items():
    print(f"{abbrev} has the city {city} ")
print('-' * 10)
for state, abbrev in states.items():
    # Fixed missing space: previously printed e.g. "Michiganstate is ...".
    print(f"{state} state is abbreviated {abbrev}")
    print(f"and has city {cities[abbrev]}")
# Carefully acquire a state that may not be there.
print('-' * 10)
|
8,920 | b6e28f29edd0c4659ab992b45861c4c31a57e7fd | import os
import pytest
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def create_gecko_driver():
    """Launch Firefox using the geckodriver binary located in ~/bin."""
    driver_path = os.path.join(os.getenv('HOME'), 'bin', 'geckodriver')
    return Firefox(executable_path=driver_path)
@pytest.fixture
def driver(request):
    """Provide a Firefox WebDriver, guaranteeing it is quit after the test."""
    browser = create_gecko_driver()
    request.addfinalizer(browser.quit)
    return browser
def test_successful_login(driver: WebDriver):  # type hint for IDE
    """Log in to the litecart admin panel and wait for the admin sidebar.

    Requires a litecart instance at localhost:8080 with admin/admin
    credentials.
    """
    driver.get("http://localhost:8080/litecart/admin/login.php")
    # find_element_by_name() was removed in Selenium 4; use the By locator
    # API (By is already imported at the top of this file).
    driver.find_element(By.NAME, "username").send_keys('admin', Keys.TAB)
    driver.find_element(By.NAME, "password").send_keys('admin', Keys.ENTER)
    # Successful login is signalled by the sidebar element appearing.
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'sidebar')))
|
8,921 | 63093190ee20e10698bd99dcea94ccf5d076a006 | species(
label = 'C=C([CH]C)C(=C)[CH]C(24182)',
structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),
E0 = (249.687,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C1([CH]C)CC1=CC(25275)',
structure = SMILES('[CH2]C1([CH]C)CC1=CC'),
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=CC)C(C)=[C]C(25412)',
structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(C)=CC(25413)',
structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH2]C(=CC)[C](C)C=C(24605)',
structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),
E0 = (216.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2][C](C=C)C(C)=CC(24606)',
structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),
E0 = (216.244,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2]C(=CC)[C]1CC1C(25414)',
structure = SMILES('[CH2]C(=CC)[C]1CC1C'),
E0 = (289.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)"""),
)
species(
label = '[CH2][C]1C(=CC)CC1C(25415)',
structure = SMILES('[CH2]C1=C([CH]C)CC1C'),
E0 = (304.572,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)"""),
)
# Singlet methylene -- auto-generated RMG-Py species record.
species(
    label = 'CH2(S)(23)',
    structure = SMILES('[CH2]'),
    # 0 K reference energy (also embedded in the NASA fit below).
    E0 = (419.862,'kJ/mol'),
    modes = [
        # Three vibrational fundamentals; no hindered rotors for CH2.
        HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
    ],
    spinMultiplicity = 1,  # singlet state, per the CH2(S) label
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    # Lennard-Jones collision parameters taken from GRI-Mech.
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    # Two-range NASA polynomial fit (200-1000 K / 1000-3000 K).
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(=C)C([CH2])=CC(25416)',
structure = SMILES('[CH2]C(=C)C([CH2])=CC'),
E0 = (285.713,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),
HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=C([CH]C)C[C]=CC(24184)',
structure = SMILES('[CH2]C(=CC)C[C]=CC'),
E0 = (366.985,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),
HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'CC=C1CCC1=CC(25269)',
structure = SMILES('CC=C1CCC1=CC'),
E0 = (114.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)"""),
)
# Triplet (ground-state) methylene -- auto-generated RMG-Py species record.
species(
    label = 'CH2(19)',
    structure = SMILES('[CH2]'),
    # 0 K reference energy (also embedded in the NASA fit below).
    E0 = (381.563,'kJ/mol'),
    modes = [
        # Three vibrational fundamentals; no hindered rotors for CH2.
        HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
    ],
    spinMultiplicity = 3,  # triplet state (contrast with CH2(S) above)
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    # Lennard-Jones collision parameters taken from GRI-Mech.
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    # Two-range NASA polynomial fit (200-1000 K / 1000-3000 K).
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C([C]=CC)=CC(25417)',
structure = SMILES('[CH2]C([C]=CC)=CC'),
E0 = (334.774,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C1([CH]C)C(=C)C1C(25296)',
structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
# Atomic hydrogen -- auto-generated RMG-Py species record.
species(
    label = 'H(3)',
    structure = SMILES('[H]'),
    # 0 K reference energy (also embedded in the NASA fit below).
    E0 = (211.792,'kJ/mol'),
    # NOTE(review): spinMultiplicity = 1 for a lone H atom looks inconsistent
    # (a doublet would be expected for one unpaired electron) -- confirm
    # against the generating model before relying on it.
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (1.00794,'amu'),
    # Lennard-Jones collision parameters taken from GRI-Mech.
    collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    # Monatomic thermo: Cp0 == CpInf == 20.7862 J/(mol*K) (= 5R/2), with
    # near-zero higher-order NASA coefficients.
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C(=CC)C(=C)C=C(24604)',
structure = SMILES('[CH2]C(=CC)C(=C)C=C'),
E0 = (242.677,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),
HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (107.173,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2]CC(=C)C([CH2])=CC(25418)',
structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),
E0 = (316.814,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),
HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C(CC)C([CH2])=CC(25419)',
structure = SMILES('[CH]=C(CC)C([CH2])=CC'),
E0 = (358.664,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(=C)CC(25420)',
structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),
E0 = (349.41,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),
HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
# --- Species definitions ---
# Machine-generated RMG-Py pressure-dependence input.  Each species() call
# records a label, SMILES structure, ground-state energy E0, statistical-
# mechanics modes (harmonic oscillator frequencies + 1-D hindered rotors,
# where computed), collision/energy-transfer parameters, and a two-range
# NASA polynomial thermo fit.  All numeric values are generator output and
# are left untouched.
species(
    label = '[CH]=C([CH]C)C(C)=CC(25421)',
    structure = SMILES('[CH]C(=CC)C(C)=CC'),
    E0 = (317.373,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
    label = '[CH2][C](C=C)C(=C)CC(24623)',
    structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),
    E0 = (228.159,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)"""),
)
species(
    label = 'C[CH][C]1CCC1=CC(25422)',
    structure = SMILES('C[CH]C1CCC=1[CH]C'),
    E0 = (303.292,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)"""),
)
species(
    label = '[CH2][C]1C(=C)C(C)C1C(25423)',
    structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),
    E0 = (305.852,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
    label = 'C=CC(=C)C(C)=CC(24616)',
    structure = SMILES('C=CC(=C)C(C)=CC'),
    E0 = (91.1774,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
    label = 'C=[C]C(C)C(=C)[CH]C(24183)',
    structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),
    E0 = (369.44,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),
        HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
    label = 'C=C1C(=CC)CC1C(25265)',
    structure = SMILES('C=C1C(=CC)CC1C'),
    E0 = (118.381,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
    label = 'CHCH3(T)(95)',
    structure = SMILES('[CH]C'),
    E0 = (343.893,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
        HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (28.0532,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
    label = '[CH2]C([C]=C)=CC(24774)',
    structure = SMILES('[CH2]C([C]=C)=CC'),
    E0 = (370.8,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),
        HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (80.1277,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
    label = '[CH]=C([CH]C)C(=C)CC(25424)',
    structure = SMILES('[CH]C(=CC)C(=C)CC'),
    E0 = (330.753,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""),
)
species(
    label = 'C=CC(=C)C(=C)CC(24630)',
    structure = SMILES('C=CC(=C)C(=C)CC'),
    E0 = (104.558,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
    label = 'C=C1C(=C)C(C)C1C(25274)',
    structure = SMILES('C=C1C(=C)C(C)C1C'),
    E0 = (122.654,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (108.181,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
# Bath-gas species (inert colliders for the master-equation calculation).
species(
    label = 'N2',
    structure = SMILES('N#N'),
    E0 = (-8.69489,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (28.0135,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
    label = 'Ne',
    structure = SMILES('[Ne]'),
    E0 = (-6.19738,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (20.1797,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
# --- Transition states ---
# One transitionState() stub per reaction channel (TS1-TS30).  Only the
# barrier energy E0 and degeneracy bookkeeping are given; no geometries or
# frequencies are supplied, so rates come from the rate rules attached to
# each reaction() below rather than from TST on these structures.
transitionState(
    label = 'TS1',
    E0 = (291.23,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS2',
    E0 = (462.221,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS3',
    E0 = (538.699,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS4',
    E0 = (497.951,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS5',
    E0 = (380.338,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS6',
    E0 = (399.474,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS7',
    E0 = (350.103,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS8',
    E0 = (722.113,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS9',
    E0 = (343.259,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS10',
    E0 = (380.132,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS11',
    E0 = (705.575,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS12',
    E0 = (537.022,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS13',
    E0 = (257.971,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS14',
    E0 = (716.337,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS15',
    E0 = (466.494,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS16',
    E0 = (454.469,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS17',
    E0 = (430.619,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS18',
    E0 = (503.849,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS19',
    E0 = (393.718,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS20',
    E0 = (361.682,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS21',
    E0 = (350.103,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS22',
    E0 = (380.132,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS23',
    E0 = (375.044,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS24',
    E0 = (274.66,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS25',
    E0 = (463.915,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS26',
    E0 = (257.971,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS27',
    E0 = (714.692,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS28',
    E0 = (375.062,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS29',
    E0 = (258.055,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS30',
    E0 = (257.971,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
# --- Reaction channels ---
# Each reaction() links reactant/product species (by label) to one of the
# transition states above and carries the RMG rate-rule estimate for the
# high-pressure-limit kinetics.  The triple-quoted comment strings are part
# of the Arrhenius data (provenance of the estimate) and must not be edited.
reaction(
    label = 'reaction1',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],
    transitionState = 'TS1',
    kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission
Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
    label = 'reaction2',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2]C1([CH]C)CC1=CC(25275)'],
    transitionState = 'TS2',
    kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
    label = 'reaction3',
    reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS3',
    kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
    label = 'reaction4',
    reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS4',
    kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction5',
    reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS5',
    kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction6',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2]C(=CC)[C](C)C=C(24605)'],
    transitionState = 'TS6',
    kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction7',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2][C](C=C)C(C)=CC(24606)'],
    transitionState = 'TS7',
    kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction8',
    reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS8',
    kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction9',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2]C(=CC)[C]1CC1C(25414)'],
    transitionState = 'TS9',
    kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
    label = 'reaction10',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2][C]1C(=CC)CC1C(25415)'],
    transitionState = 'TS10',
    kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
    label = 'reaction11',
    reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS11',
    kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
# NOTE(review): label 'reaction23' is reused later in this file (the channel
# through TS23); this entry uses TS12 and 'reaction12' never appears —
# presumably an artifact of RMG's network-level numbering.  Left unchanged
# because labels may be cross-referenced by other generated files; confirm
# before renaming.
reaction(
    label = 'reaction23',
    reactants = ['C=C([CH]C)C[C]=CC(24184)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS12',
    kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
    label = 'reaction13',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['CC=C1CCC1=CC(25269)'],
    transitionState = 'TS13',
    kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
    label = 'reaction14',
    reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS14',
    kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction15',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],
    transitionState = 'TS15',
    kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
    label = 'reaction16',
    reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS16',
    kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction17',
    reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS17',
    kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction18',
    reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS18',
    kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction19',
    reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS19',
    kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction20',
    reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS20',
    kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction21',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2][C](C=C)C(=C)CC(24623)'],
    transitionState = 'TS21',
    kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction22',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['C[CH][C]1CCC1=CC(25422)'],
    transitionState = 'TS22',
    kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
    label = 'reaction23',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],
    transitionState = 'TS23',
    kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
    label = 'reaction24',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['C=CC(=C)C(C)=CC(24616)'],
    transitionState = 'TS24',
    kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
# NOTE(review): duplicate label 'reaction24' (also used by the preceding
# channel); this entry uses TS25 and 'reaction25' never appears — likely the
# same generator numbering artifact noted above.  Left unchanged.
reaction(
    label = 'reaction24',
    reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS25',
    kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
    label = 'reaction26',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['C=C1C(=CC)CC1C(25265)'],
    transitionState = 'TS26',
    kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
    label = 'reaction27',
    reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS27',
    kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction28',
    reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],
    products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    transitionState = 'TS28',
    kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
    label = 'reaction29',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['C=CC(=C)C(=C)CC(24630)'],
    transitionState = 'TS29',
    kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
    label = 'reaction30',
    reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
    products = ['C=C1C(=C)C(C)C1C(25274)'],
    transitionState = 'TS30',
    kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
# --- Network and job specification ---
# network() declares the unimolecular isomer(s), bimolecular reactant
# channel, and bath-gas composition for pressure-dependence network 4267.
network(
    label = '4267',
    isomers = [
        'C=C([CH]C)C(=C)[CH]C(24182)',
    ],
    reactants = [
        ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),
    ],
    bathGas = {
        'N2': 0.5,
        'Ne': 0.5,
    },
)
# pressureDependence() configures the master-equation solve: an 8x5 T/P grid
# over 300-2000 K and 0.01-100 bar, modified-strong-collision approximation,
# with k(T,P) fitted to a 6x4 Chebyshev polynomial.
pressureDependence(
    label = '4267',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
|
8,922 | 9ae92d6ee4b82f7ed335c47d53567b817140a51c | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import backref
db = SQLAlchemy()
def connect_db(app):
    """Connect to database."""
    # Attach the Flask app to the shared SQLAlchemy object, then bind it.
    db.app = app
    db.init_app(app)
"""Models for Blogly."""
class User(db.Model):
    """A Blogly user."""
    __tablename__= "users"
    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement = True)
    first_name = db.Column(db.String(50), nullable = False)
    last_name = db.Column(db.String(50), nullable = False)
    # Optional URL of the user's avatar image (no length limit).
    image_url = db.Column(db.String)
class Post(db.Model):
    """A blog post authored by a :class:`User`."""

    __tablename__ = "posts"

    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.String(250), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    # BUG FIX: the relationship was previously a bare expression in the class
    # body, so it was never bound to an attribute and the declarative mapper
    # ignored it.  Binding it makes post.user and user.posts work.
    user = db.relationship(User, backref="posts")
|
8,923 | e7ac5c1010330aec81ce505fd7f52ccdeddb76de | import database
import nltk
def pop(i):  # populate the words table
    """Tokenise ``words.txt`` with NLTK, bucket every token by its
    part-of-speech tag into the module-level lists in ``database``, then
    insert one (word, tag, location, count) row per distinct word.

    :param i: index into the hard-coded ``locations`` list; selects the
        location label stored with every row inserted by this call.
    """
    from collections import Counter

    loc = i
    sentencesTrial = []  # running record of the tag sequence encountered
    # fix: the original opened words.txt and never closed it
    with open('words.txt') as word_file:
        lines = word_file.read()
    sentences = nltk.sent_tokenize(lines)
    locations = ["Castle","Beach","Beach","Ghost Town","Ghost Town","Haunted House","Jungle","Carnival", "Ghost Town", "Highway", "Castle", "Pyramid","Beach","Beach","Carnival", "Highway", "Castle" ,"Jungle" ]
    for sentence in sentences:
        for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
            if pos == 'NN':
                database.nouns.append(word.lower())
                sentencesTrial.append("NN")
            elif pos == 'NNS':
                database.nounsplural.append(word.lower())
                sentencesTrial.append("NNS")
            elif pos == 'NNP':
                database.propernounS.append(word.lower())
                sentencesTrial.append("NNP")
            elif pos == 'NNPS':
                database.propernounP.append(word.lower())
                sentencesTrial.append("NNPS")
            elif pos == 'JJ':
                database.adjective.append(word.lower())
                sentencesTrial.append("JJ")
            elif pos in ('VB', 'VBG', 'VBN'):
                database.verbs.append(word.lower())
                sentencesTrial.append("VB")
            elif pos == 'VBD':
                database.verbpast.append(word.lower())
                sentencesTrial.append("VBD")
            elif pos in ('VBZ', 'VBP'):
                database.verb3person.append(word.lower())
                sentencesTrial.append("VBZ")
            elif pos in ('RB', 'RBR', 'RBS'):
                # BUG FIX: the original stored the un-lowered adverb and
                # appended "RB".lower() (i.e. "rb") as the tag, unlike
                # every other branch.
                database.adverb.append(word.lower())
                sentencesTrial.append("RB")
            else:
                if word in (',', '.'):
                    database.useless.append(word)
                    sentencesTrial.append(word)
                else:
                    database.unUsedWords.append(word.lower())
                # NOTE(review): preserved from the original — the first
                # unrecognised token aborts the rest of the sentence.
                break

    def insert_counts(words, tag):
        # One row per distinct word with its occurrence count. Counter
        # replaces the original O(n^2) list.index() bookkeeping while
        # keeping first-seen insertion order.
        for word, count in Counter(words).items():
            database.cursor.execute(
                "INSERT INTO words VALUES (?, ?, ?, ?)",
                (word, tag, locations[loc], count))

    insert_counts(database.nouns, 'NN')
    insert_counts(database.nounsplural, 'NNS')
    insert_counts(database.propernounS, 'NNP')
    insert_counts(database.propernounP, 'NNPS')
    insert_counts(database.adjective, 'JJ')
    insert_counts(database.verbs, 'VB')
    insert_counts(database.verbpast, 'VBD')
    insert_counts(database.verb3person, 'VBZ')
    insert_counts(database.adverb, 'RB')
    insert_counts(database.useless, 'PU')
    insert_counts(database.unUsedWords, 'US')
def pop2():  # populate the monster and characters table
    """Seed the static game data: monsters (with location, culprit and
    evidence), the main characters with their catchphrases, and the
    per-location weather options.
    """
    # (monster, location, culprit, evidence)
    monsters = [
        ('Knight', 'Castle', 'Old Man Jenkins', 'Picture'),
        ('Vampire', 'Castle', 'Andrew the Tour', 'Vampire Make Up and fake blood'),
        ('Shadow', 'Castle', 'Frank the Janitor', 'Black paint'),
        ('Ghost Pirate', 'Beach', 'Bill the Lifeguard', 'Pirate Costume'),
        ('Seaweed Monster', 'Beach', 'Old Fisherman Joe', 'Seaweed'),
        ('Shark', 'Beach', 'The Mayor', 'Shark fins'),
        ('Cowboy Ghost', 'Ghost Town', 'Jerry the Businessman ', 'Cowboy hat'),
        ('Miner Ghost', 'Ghost Town', 'Gold Hunter Phil', 'Dusty shoes'),
        ('Headless Horse Man', 'Ghost Town', 'Envirnmentalist Paddy', 'Drawing of rig to appear headless'),
        ('Francinstein', 'Haunted House', 'Sir Godfree', 'Green paint'),
        ('Zombie', 'Haunted House', 'The Waiter', 'Zombie Make Up and fake boy parts'),
        ('Ghost', 'Haunted House', 'Jimmy', 'Glow in the dark paint on cloths'),
        ('Ape Man', 'Jungle', 'Explorer Fred', 'Ape Costume'),
        ('Animal Ghosts', 'Jungle', 'Environmentalist Jennie', 'Scratch Marks'),
        ('Pterodactyl', 'Jungle', 'Tour Guide Bill', 'Book on flight'),
        ('Clown Ghost', 'Carnival', 'Ring Master', 'Old Clown Costumes'),
        ('Zombie', 'Carnival', 'Blind Knife Thrower', 'Eye tests saying he is not blind'),
        ('Animals', 'Carnival', 'Worlds Strongest Man', 'Scratch marks'),
        ('Ghost Car', 'Highway', 'Old Town Mayor', 'Car ownership documents'),
        ('White Lady Ghost', 'Highway', 'Miss Anderson', 'White Dress'),
        ('Aliens', 'Highway', 'Conspiracy Tom', 'Fake Space ship blueprint'),
        ('Mummy', 'Pyramid', 'Museum Curator Petterson ', 'Bandages'),
        ('Sand Man', 'Pyramid', 'Ramesh the Tour Guide', 'Sand'),
        ('Sphynx', 'Pyramid', 'Tour Guide Bob', 'scratch marks'),
    ]
    database.cursor.executemany(
        "INSERT INTO monsters VALUES (?, ?, ?, ?)", monsters)

    # (character, catchphrase)
    characters = [
        ('Scooby Doo', 'Scooby Dooby Doo'),
        ('Shaggy', 'Zoinks!'),
        ('Fred', 'Lets Split up and look for clues'),
        ('Velma', 'My glasses. I cant find my glasses'),
        ('Daphne', 'Do you want a Scooby Snack'),
    ]
    database.cursor.executemany(
        "INSERT INTO characters VALUES (?, ?)", characters)

    # (location, possible weather)
    # BUG FIX: one original row used 'Ghost TOwn', which would never match
    # the 'Ghost Town' key used by every other table.
    weather = [
        ('Castle', 'Stormy'), ('Castle', 'Raining'),
        ('Castle', 'Misty'), ('Castle', 'Dark'),
        ('Beach', 'Sunny'), ('Beach', 'Misty'),
        ('Ghost Town', 'Cloudy'), ('Ghost Town', 'Foggy'),
        ('Haunted House', 'Stormy'), ('Haunted House', 'Misty'),
        ('Jungle', 'Sunny'), ('Jungle', 'Raining'),
        ('Carnival', 'Dark'), ('Carnival', 'Cloudy'), ('Carnival', 'Overcast'),
        ('Highway', 'Overcast'), ('Highway', 'Sunny'),
        ('Pyramid', 'Overcast'), ('Pyramid', 'Sunny'), ('Pyramid', 'Raining'),
    ]
    database.cursor.executemany(
        "INSERT INTO location VALUES (?, ?)", weather)
8,924 | fcdb43e36a4610ca0201a27d82b1a583f1482878 | import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
POWER_PIN = 21
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25
PAUSE = 0.1
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    """Bit-bang one SPI transaction against an MCP3008 ADC.

    Returns the 10-bit conversion result for channel ``adcnum`` (0-7),
    or -1 if the channel number is out of range.
    """
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    # Reset the chip-select line, then assert it to start a transaction.
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)  # bring CS low
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Shift the 5 command bits out MSB-first, one clock pulse per bit.
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    # End the transaction by de-asserting chip select.
    GPIO.output(cspin, True)
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
def spi_setup():
    """Configure the bit-banged SPI pins and the sensor power pin."""
    GPIO.setup(SPIMOSI, GPIO.OUT)
    GPIO.setup(SPIMISO, GPIO.IN)
    GPIO.setup(SPICLK, GPIO.OUT)
    GPIO.setup(SPICS, GPIO.OUT)
    GPIO.setup(POWER_PIN, GPIO.OUT)
def spi_readout(adc_pin):
    """Return the raw 10-bit reading from MCP3008 channel ``adc_pin``."""
    # read the analog pin
    return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)
def power_on():
    """Drive the sensor power pin high."""
    GPIO.output(POWER_PIN, True)
def power_off():
    """Drive the sensor power pin low."""
    GPIO.output(POWER_PIN, False)
def adc_to_temp(readout):
    """Convert a raw 10-bit ADC reading into degrees Celsius.

    Assumes a 3.3 V reference (3300 mV over 1024 steps) and a sensor with
    a 100 mV offset and 10 mV/°C slope, shifted by -40 °C.
    """
    mv_per_step = 3300.0 / 1024.0
    millivolts = readout * mv_per_step
    return (millivolts - 100.0) / 10.0 - 40.0
if __name__ == "__main__":
    # MCP3008 channel assignments.
    HYGROMETER = 0
    TEMP = 1
    LIGHT = 2
    spi_setup()
    # Power the sensors only while sampling the hygrometer.
    power_on()
    time.sleep(PAUSE)
    print("Hygrometer value %d" % spi_readout(HYGROMETER))
    power_off()
    time.sleep(PAUSE)
    temp = adc_to_temp(spi_readout(TEMP))
    print("Temp sensor: %.1f C" % temp)
    time.sleep(PAUSE)
    # Light level as a percentage of the ADC full-scale range.
    light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0
    print("Light level {}% ".format(light_level))
    GPIO.cleanup()
|
8,925 | 71e0137fc02b4f56bdf87cc15c275f5cca1588c4 | from enum import IntEnum
class DaqListType(IntEnum):
    """
    This class describes a daq list type.
    """
    DAQ = 0x01  # data-acquisition direction only
    STIM = 0x02  # stimulation direction only
    DAQ_STIM = 0x03  # both directions (DAQ | STIM)
8,926 | 1c2a862f995869e3241dd835edb69399141bfb64 | import numpy as np
import tensorflow as tf
# Evaluate the saved Keras model on a tab-separated dataset and report the
# mean absolute error (MAE) over all rows.
K_model = tf.keras.models.load_model('K_model.h5')
K_model.summary()

features, labels = [], []
# k_file = open('dataset_20200409.tab')
# fix: the original opened the file and never closed it
with open('ts.tab') as k_file:
    for line in k_file:
        contents = line.rstrip().split("\t")
        label = contents.pop()  # last column is the target value
        labels.append([float(label)])
        features.append([float(v) for v in contents])

# Accumulate the total error and divide once at the end; the original
# divided by len(labels) on every iteration, compounding rounding error.
total_abs_error = 0
for feat, label in zip(features, labels):
    pred = K_model(np.array([feat]).astype(np.float32))
    total_abs_error += abs(pred - label)
MAE = total_abs_error / len(labels)
print(MAE)
|
8,927 | 892d6662e4276f96797c9654d15c96a608d0835a | import itertools
import unittest
from pylev3 import Levenshtein
# (case name, string a, string b, expected edit distance) shared by all
# backend implementations below.
TEST_DATA = [
    ('classic', "kitten", "sitting", 3),
    ('same', "kitten", "kitten", 0),
    ('empty', "", "", 0),
    ('a', "meilenstein", "levenshtein", 4),
    ('b', "levenshtein", "frankenstein", 6),
    ('c', "confide", "deceit", 6),
    ('d', "CUNsperrICY", "conspiracy", 8),
]
# Distance implementations exercised against every dataset entry above.
TEST_FUNCTIONS = [
    # Levenshtein().classic, # too slow
    Levenshtein().recursive,
    Levenshtein().wf,
    Levenshtein().wfi,
    Levenshtein().damerau
]
class Tests(unittest.TestCase):
    """Test case; distance tests are attached to it dynamically below."""
    def test_singleton(self):
        # Two constructions are expected to yield the same instance.
        lev1, lev2 = Levenshtein(), Levenshtein()
        self.assertIs(lev1, lev2)
def _mk_test_fn(fn, a, b, expected):
def _test_fn(self):
self.assertEqual(fn(a, b), expected)
self.assertEqual(fn(b, a), expected)
return _test_fn
# Attach one generated test method per (implementation, dataset) pair to the
# Tests class, named after the case and the backend function.
for lev_fn, data in itertools.product(TEST_FUNCTIONS, TEST_DATA):
    name, a, b, expected = data
    test_fn = _mk_test_fn(lev_fn, a, b, expected)
    setattr(Tests, "test_{}_{}".format(name, lev_fn.__name__), test_fn)
if __name__ == '__main__':
    unittest.main()
|
8,928 | 1576693264a334153c2752ab6b3b4b65daa7c37c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17/02/17 at 11:48 PM
@author: neil
Program description here
Version 0.0.1
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
import sys
# detect python version
# if python 3 do this:
if (sys.version_info > (3, 0)):
import tkinter
import tkinter.simpledialog as tksimpledialog
else:
import Tkinter as tkinter
import tkSimpleDialog as tksimpledialog
# =============================================================================
# Define Class. Methods and Functions
# =============================================================================
class Add_Buttons(object):
    """Attach a row of buttons (NEXT/PREVIOUS/CLOSE/OPTION/UINPUT) to a
    matplotlib figure.

    The outcome of the last click is held in ``self.result`` (1, -1, 0 or a
    button label) and values gathered by UINPUT buttons in ``self.data``.
    """
    def __init__(self, ax=None, **kwargs):
        """
        Adds buttons below a matplotlib axis.

        :param ax: matplotlib axis, the frame to add the buttons to
        :param kwargs: keyword arguments:

            button_labels - list of strings
                            defines the name of each button to be displayed
                            Must be of length 1 or greater
            button_actions - list of strings
                            defines the action of each button. Must be same
                            length as button_labels
                            currently supported actions are:

                            "NEXT" - sets self.result to 1 (move to next plot)
                            "PREVIOUS" - sets self.result to -1
                            "CLOSE" - closes the plot
                            "OPTION" - sets self.result to the button_label
                            "UINPUT" - asks the user for an input via tkinter

            button_params - list of dictionaries (optional)
                            if defined must be same length as button_labels
                            keywords: "close" (with "OPTION": close the plot
                            after the click), "func" (callable run on click),
                            and for "UINPUT": title/comment/name/fmt/minval/
                            maxval.
        """
        # set supported actions (and link to function)
        self.actions = dict(NEXT=self.next,
                            PREVIOUS=self.previous,
                            CLOSE=self.end,
                            OPTION=self.option,
                            UINPUT=self.uinput)
        self.supported_actions = list(self.actions.keys())
        # button widgets and their figure-fraction regions
        self.buttons = []
        self.regions = []
        # result (1, 0, -1, or string)
        self.result = 0
        # storage for values gathered by UINPUT buttons
        self.data = dict()
        # Deal with having no matplotlib axis
        if ax is None:
            self.ax = plt.gca()
        else:
            self.ax = ax
        # load keyword arguments (**kwargs is always a dict, never None,
        # so the original `if kwargs is None` guard was dead code)
        self.button_labels = kwargs.get('button_labels', ['Close'])
        self.num_buttons = len(self.button_labels)
        self.button_actions = kwargs.get('button_actions', ['CLOSE'])
        # BUG FIX: [dict()] * n aliased ONE shared dict across every button;
        # build an independent default dict per button instead.
        dparams = [dict() for _ in range(self.num_buttons)]
        self.button_params = kwargs.get('button_params', dparams)
        # check inputs are correct
        self.validate_inputs()
        # create buttons
        self.create_buttons()

    def validate_inputs(self):
        """Validate button_labels/button_actions/button_params types,
        lengths, and that every action is supported."""
        # Make sure button labels is in correct format
        try:
            self.button_labels = list(self.button_labels)
            for it in self.button_labels:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button labels must be a list of strings")
        # Make sure button actions is in correct format
        # BUG FIX: this loop previously iterated button_labels, so invalid
        # entries in button_actions were never caught here.
        try:
            self.button_actions = list(self.button_actions)
            for it in self.button_actions:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button actions must be a list of strings")
        # Make sure button params is in correct format
        # BUG FIX: previously re-converted button_actions instead of
        # button_params before checking the dictionaries.
        try:
            self.button_params = list(self.button_params)
            for it in self.button_params:
                if type(it) != dict:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button params must be a dictionary")
        # Make sure list are not empty and same length
        if len(self.button_labels) < 1:
            raise ValueError("'button_labels' Must have at least one button "
                             "label in list.")
        if len(self.button_actions) != len(self.button_labels):
            raise ValueError("'button_actions' must be the same length "
                             "as 'button_labels")
        self.num_buttons = len(self.button_labels)
        # Make sure all button actions are supported
        sstr = self.supported_actions[0]
        for it in range(len(self.supported_actions)):
            if it > 0:
                sstr += ', {0}'.format(self.supported_actions[it])
        for it in range(len(self.button_actions)):
            e1 = "Action '{0}' not currently".format(self.button_actions[it])
            e2 = "supported. \n Currently supported actions are: \n"
            if self.button_actions[it] not in self.supported_actions:
                raise ValueError(e1 + e2 + sstr)

    def create_buttons(self, width=0.2):
        """
        Create the row of buttons along the bottom of the figure.

        :param width: float, width of each button as a fraction of the
                      figure width; should be less than 1.0/num_buttons
        """
        b_N, b_length = self.num_buttons, width
        # equal gaps between (and around) the buttons
        b_sep = (1. / (b_N + 1)) * (1 - b_N * b_length)
        for b in range(b_N):
            start = (b + 1) * b_sep + b * b_length
            r = [start, 0.05, b_length, 0.075]
            self.regions.append(r)
        # adjust the figure to leave room for the button row
        plt.subplots_adjust(bottom=0.25)
        # populate buttons
        for b in range(b_N):
            axbutton = plt.axes(self.regions[b])
            button = Button(axbutton, self.button_labels[b])
            button.on_clicked(self.actions[self.button_actions[b]])
            self.buttons.append(button)

    def next(self, event):
        """Event for a "NEXT" button: set self.result to 1."""
        self.result = 1

    def previous(self, event):
        """Event for a "PREVIOUS" button: set self.result to -1."""
        self.result = -1

    def option(self, event):
        """Event for an "OPTION" button: store the clicked button's label in
        self.result, run its optional 'func', and close the plot if its
        params contain close=True."""
        pos = self.button_region(event)
        if pos is not None:
            self.result = self.button_labels[pos]
            close = self.button_params[pos].get('close', False)
            func = self.button_params[pos].get('func', None)
            if func is not None:
                func()
            if close:
                plt.close()

    def uinput(self, event):
        """Event for a "UINPUT" button: pop up a tkinter dialog (typed by
        the 'fmt' param) and store the entered value in self.data['name']."""
        pos = self.button_region(event)
        if pos is not None:
            props = self.button_params[pos]
            title = props.get('title', 'Enter a Value')
            startvalue = props.get('comment', 'Message')
            name = props.get('name', 'x')
            fmt = props.get('fmt', None)
            minval = props.get('minval', None)
            maxval = props.get('maxval', None)
            # hidden root window so only the dialog is shown
            root = tkinter.Tk()
            root.withdraw()
            if fmt == int:
                value = tksimpledialog.askinteger(title, startvalue,
                                                  minvalue=minval,
                                                  maxvalue=maxval)
            elif fmt == float:
                value = tksimpledialog.askfloat(title, startvalue,
                                                minvalue=minval,
                                                maxvalue=maxval)
            else:
                value = tksimpledialog.askstring(title, startvalue)
            self.data[name] = value
            root.destroy()

    def end(self, event):
        """Event for a "CLOSE" button: close the figure."""
        plt.close()

    def button_region(self, event):
        """Return the index of the button whose screen region contains the
        click in ``event``, or None if the click hit no button."""
        if len(self.regions) == 0:
            return None
        # get mouse click location in pixels
        x, y = event.x, event.y
        # get the current canvas width and height (in pixels)
        width = event.canvas.geometry().width()
        height = event.canvas.geometry().height()
        # loop round each button region
        for r, rn in enumerate(self.regions):
            # convert figure-fraction region to pixel corners
            rn1 = [rn[0]*width, rn[1]*height,
                   (rn[0] + rn[2])*width, (rn[1] + rn[3])*height]
            # test whether x, y are in region
            cond1 = (x > rn1[0]) & (x < rn1[2])
            cond2 = (y > rn1[1]) & (y < rn1[3])
            if cond1 and cond2:
                return r
        return None
# =============================================================================
# Start of code
# =============================================================================
# Main code to test the rectangle selector
if __name__ == '__main__':
    import numpy as np
    # Demo: scatter plot with an integer-input button and a closing button.
    # plt.close()
    # fig, frame = plt.subplots(ncols=1, nrows=1)
    # x = np.random.rand(100)
    # y = np.random.rand(100)
    # plt.scatter(x, y, color='k', marker='o', s=20)
    # odict = dict(close=True)
    # a = Add_Buttons(ax=frame,
    #                 button_labels=['A', 'B'],
    #                 button_actions=['OPTION', 'OPTION'],
    #                 button_params=[odict, odict])
    # plt.show()
    # plt.close()
    plt.close()
    fig, frame = plt.subplots(ncols=1, nrows=1)
    x = np.random.rand(100)
    y = np.random.rand(100)
    plt.scatter(x, y, color='k', marker='o', s=20)
    # 'Close' button closes the plot; 'Enter value' asks for an int in [4, 10].
    odict = dict(close=True)
    udict = dict(name='x', fmt=int, title='Enter value',
                 comment='Please enter x in meters.', minval=4, maxval=10)
    a = Add_Buttons(ax=frame,
                    button_labels=['Enter value', 'Close'],
                    button_actions=['UINPUT', 'OPTION'],
                    button_params=[udict, odict])
    plt.show()
    plt.close()
|
8,929 | 592d5074eeca74a5845d26ee2ca6aba8c3d0f989 | from os import listdir
from os.path import isfile, join
from datetime import date
# Python 2 script: compares the number of completed 'dayN.py' kata files
# against the number of days elapsed since the start date.
mypath = '/Users/kachunfung/python/codewars/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
# Strip the .py extension from every filename.
py_removed = [i.replace('.py','') for i in onlyfiles]
# NOTE(review): list.remove() mutates in place and returns None, so this
# variable is always None and is never used afterwards.
file_counter_removed = py_removed.remove('file_counter')
# Highest completed day number (assumes remaining names look like 'dayN').
day_removed = max([int(j.replace('day','')) for j in py_removed])
d0 = date(2016, 11, 7)  # challenge start date
d1 = date.today()
delta = d1 - d0
if day_removed >= delta.days:
    print "Well done!\nYou are %s days ahead.\nKeep up the good work! I am proud of you." % (day_removed - delta.days)
else:
    print "You are %s days behind schedule.\nTry your best and Never give up!" % (delta.days - day_removed)
print "\nYou have completed %s codewars kata since 7th December 2016" % day_removed
|
8,930 | 2d503c93160b6f44fba2495f0ae0cf9ba0eaf9d6 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI layer of the video-downloader main window.

    NOTE: originally generated by pyuic5 from 'main.ui'; prefer regenerating
    from the .ui file over editing layout code here by hand.
    """
    def setupUi(self, MainWindow):
        """Build all widgets, their geometry and the neon button stylesheet."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(500, 251)
        MainWindow.setStyleSheet("/*\n"
"Neon Style Sheet for QT Applications (QpushButton)\n"
"Author: Jaime A. Quiroga P.\n"
"Company: GTRONICK\n"
"Last updated: 24/10/2020, 15:42.\n"
"Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n"
"*/\n"
"QPushButton{\n"
"    border-style: solid;\n"
"    border-color: #050a0e;\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"    background-color: #100E19;\n"
"}\n"
"QPushButton::default{\n"
"    border-style: solid;\n"
"    border-color: #050a0e;\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: #FFFFFF;\n"
"    padding: 2px;\n"
"    background-color: #151a1e;\n"
"}\n"
"QPushButton:hover{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-width: 2px;\n"
"    border-radius: 1px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"}\n"
"QPushButton:pressed{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-width: 2px;\n"
"    border-radius: 1px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))
        self.pushButton_3.setStyleSheet("")
        self.pushButton_3.setObjectName("pushButton_3")
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))
        self.label_2.setObjectName("label_2")
        self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))
        self.pushButton_2.setStyleSheet("")
        self.pushButton_2.setObjectName("pushButton_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))
        self.label_3.setObjectName("label_3")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))
        self.pushButton.setStyleSheet("")
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))
        self.label.setObjectName("label")
        self.comboBox = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))
        self.comboBox.setStyleSheet("background-color: rgb(101, 101, 101);")
        self.comboBox.setObjectName("comboBox")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton_3.setText(_translate("MainWindow", "Download"))
        self.label_2.setText(_translate("MainWindow", "Save location"))
        self.pushButton_2.setText(_translate("MainWindow", "Search"))
        # BUG FIX: user-visible typo "Qualiti" -> "Quality" (also fix in
        # main.ui so regeneration does not reintroduce it).
        self.label_3.setText(_translate("MainWindow", "Quality"))
        self.pushButton.setText(_translate("MainWindow", "Browse"))
        self.label.setText(_translate("MainWindow", "Video URL"))
|
8,931 | 40637c7a5e45d0fe4184478a1be2e08e5040c93b | from colander_validators import (
email,
url)
def test_url():
    """url() returns True for valid URLs; invalid input yields a str
    (presumably an error message) rather than True."""
    assert url("ixmat.us") == True
    assert url("http://bleh.net") == True
    assert type(url("://ixmat.us")) == str
    assert type(url("ixmat")) == str
def test_email():
    """email() returns True for valid addresses; invalid input yields a str
    (presumably an error message) rather than True."""
    assert email("barney@purpledino.com") == True
    assert email("barney.10.WHATDINO@purple.com") == True
    assert type(email("barney")) == str
    assert type(email("barney@dino")) == str
|
8,932 | d2787f17a46cf0db9aeea82f1b97ee8d630fd28a |
from xai.brain.wordbase.adjectives._corporal import _CORPORAL
#class header
class _CORPORALS(_CORPORAL, ):
    """Word-base entry for the plural form of the adjective "corporal"."""
    def __init__(self,):
        _CORPORAL.__init__(self)
        self.name = "CORPORALS"  # upper-case headword
        self.specie = 'adjectives'  # word category within the word base
        self.basic = "corporal"  # basic (singular) form
        self.jsondata = {}  # extra metadata, empty by default
8,933 | 322795bce189428823c45a26477555052c7d5022 | # Author: Andreas Francois Vermeulen
# Print the script identification banner.
print("CrawlerSlaveYoke")
print("CSY-000000023.py")
|
8,934 | e2a38d38d2ab750cf775ed0fbdb56bc6fc7300c4 | from typing import *
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count monotone lattice paths from the top-left to the
        bottom-right corner of an m-wide, n-tall grid, moving only right
        or down.  Uses a single rolling row of the DP table.
        """
        # paths into each cell of the current row; the first row is all 1s
        row = [1] * m
        for _ in range(1, n):
            for col in range(1, m):
                # paths from above (previous value) plus paths from the left
                row[col] += row[col - 1]
        return row[-1]
# Demo: a 7x3 grid has 28 unique paths.
print(Solution().uniquePaths(7, 3))
|
8,935 | 4b647d37d390a4df42f29bbfc7e4bae4e77c5828 | import string
import random
# One-time-pad style demo: read a plaintext file, derive a numeric key of
# matching length, cipher by addition, then decipher by subtraction and
# write the recovered text back to the same file.
# fix: both files were previously opened without being closed on error paths
with open("encryption_file.txt", "r") as file_one_time_pad:
    p_text = file_one_time_pad.read()
print(p_text)
p_text = str.lower(p_text)

alphabets = ['a','b','c','d','e','f','g','h','i','j','k','l','m',
             'n','o','p','q','r','s','t','u','v','w','x','y','z']
temp_key = [21,25,20,15,16,14,10,26,24,9,8,13]

## Getting the numerical values of the text.
## BUG FIX: the original scanned only range(25), so 'z' was never matched,
## and characters outside the alphabet silently desynchronised the numeric
## text from the key length (causing IndexErrors for real text). Non-letter
## characters are now dropped up front.
main_text = [c for c in p_text if c in alphabets]
p_text_numerical = [alphabets.index(c) for c in main_text]

## Generating a key with exactly one element per plaintext letter.
## BUG FIX: every original loop used range(n - 1), dropping the final
## element of the key (and later of the ciphertext).
length = len(p_text_numerical)
main_key = temp_key[:length]
while len(main_key) < length:
    main_key.append(random.choice(temp_key))

print("The main key is :: \n")
print(main_key)
print("The length of p_text_numerical:: \t", len(p_text_numerical))
print("\n")
print("The length of the main_key is :: \t", len(main_key))

## Ciphering algorithm: add the key value to each plaintext value.
cipher_text_numerical = [p + k for p, k in zip(p_text_numerical, main_key)]
print("The cipherred text is :: \n")
print(cipher_text_numerical)

## Deciphering algorithm: subtract the same key, then map back to letters.
decipher_numerical = [c - k for c, k in zip(cipher_text_numerical, main_key)]
print("The decipherred numerical::\n")
print(decipher_numerical)

decipher_text = [alphabets[v] for v in decipher_numerical]
deciphered_one = "".join(decipher_text)

with open("encryption_file.txt", "w") as file_encrypt:
    file_encrypt.write(deciphered_one)

print("The deciphered text is ::\n")
print(decipher_text)
|
8,936 | 24368b6c607c0524f8b52b279a6dce0fde72294b | import typing
import time
import cv2
import os
from .ABC import ABC
from .Exceptions import *
from .Constants import *
class Video(ABC):
    """Converts a video file into ASCII-art frames playable in a terminal.

    Frames are populated into ``self.frames`` by :meth:`convert`; the object
    is then iterable (one ASCII frame per iteration) and :meth:`view` plays
    the frames in the console at the source fps.
    """

    def __init__(self, filename: str, *, scale: float = 1, w_stretch: float = 2, gradient: typing.Union[int, str] = 0, verbose: int = False):
        if not os.path.isfile(filename):  # check to make sure file actually exists
            raise FileNotFound(filename)  # FileNotFound is from .Exceptions
        self.filename = filename
        self.video = cv2.VideoCapture(filename)
        # self.frames is a frames[frame[row[char, char,..], row[],..], frame[],..]
        self.frames = []  # converted frames (populated when convert() is called)
        self.fps = self.video.get(cv2.CAP_PROP_FPS)  # fps of the origin video
        self.width = self.video.get(3)   # float, width of the video
        self.height = self.video.get(4)  # float, height of the video
        # if scale was given as a percentage (out of 100 rather than out of 1)
        if scale > 1:
            scale /= 100
        self.scale = scale  # scale which both dimensions are multiplied by
        # extra width factor: terminal glyphs are taller than they are wide
        self.w_stretch = w_stretch
        # scaled dimensions
        self.scaled_width = int(self.width * self.scale * self.w_stretch)
        self.scaled_height = int(self.height * self.scale)
        # determine the brightness -> character mapping
        if isinstance(gradient, int):
            # BUG FIX: the original check was `0 > gradient > len(gradients)-1`,
            # which can never be true, so out-of-range indices slipped through.
            if gradient < 0 or gradient > (len(gradients) - 1):
                raise IndexError(f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.')
            self.gradient = gradients[gradient]
        else:
            self.gradient = gradient
        self.gradient = tuple(self.gradient)
        self.gradient_len = len(self.gradient)
        self.verbose = verbose  # whether or not to do extra logging
        # iteration state for __iter__/__next__ (end_frame is set by convert())
        self.current_frame = 0
        self.end_frame = None
        # console clear command used while playing frames
        self.clear_cmd = 'cls' if os.name == 'nt' else 'clear'
        if self.verbose:
            print(f'Dimensions: {self.width}x{self.height}')
            print(f'Scale Factor: {self.scale}')
            print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
            print(f'Gradient: \'{"".join(self.gradient)}\'')
            print(f'FPS: {self.fps}')

    def convert(self):
        """Populate self.frames with asciified frames; returns self for chaining."""
        if self.verbose: print('Converting...')
        while True:
            succ, img = self.video.read()  # read frame from video
            if not succ: break  # no more frames
            # resize image to the scale specified in __init__
            img = cv2.resize(img, (self.scaled_width, self.scaled_height,))
            # asciify_img is expected from the ABC base class
            self.frames.append(self.asciify_img(img))
        self.end_frame = len(self.frames)
        if self.verbose: print('Done.')
        return self  # returns self for fluent chaining

    def view(self, *, fps: float = None):
        """Play all converted frames in the console, pacing to fps (defaults to source fps)."""
        spf = 1 / (self.fps if fps is None else fps)  # seconds per frame
        try:
            for frame in self.frames:
                start = time.perf_counter()
                print(frame)
                # BUG FIX: elapsed time was computed as start - now (negative),
                # which made every frame sleep *longer* than one frame period.
                elapsed = time.perf_counter() - start
                time.sleep(max(spf - elapsed, 0))
                os.system(self.clear_cmd)
        except KeyboardInterrupt:
            pass

    def __iter__(self):  # allow iteration over the frames (like in a for loop)
        return self

    def __next__(self):
        # BUG FIX: the original used '>', letting the index run one past the
        # end and raise IndexError instead of StopIteration.
        if self.current_frame >= self.end_frame:
            raise StopIteration
        self.current_frame += 1
        return self.frames[self.current_frame - 1]
|
8,937 | a6bd10723bd89dd08605f7a4abf17ccf9726b3f5 | # pyre-ignore-all-errors
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from wallet.storage import db_session, engine, Base
from wallet.storage.models import User, Account
from wallet.types import RegistrationStatus
from diem_utils.types.currencies import FiatCurrency
def clear_db() -> None:
    """Recreate the schema: drop every table known to Base, then create them anew."""
    metadata = Base.metadata
    metadata.drop_all(bind=engine)
    metadata.create_all(bind=engine)
def setup_fake_data() -> None:
    """Reset the database and seed it with three registered demo users.

    Each user gets identical dummy credentials and a fresh Account; only
    username, admin flag and display name differ, so the per-user fields are
    factored into a table instead of three near-identical literals.
    """
    clear_db()

    # (username, is_admin, first_name, last_name) for each demo account.
    demo_accounts = [
        ("sunmi", True, "First1", "Last1"),
        ("sunyc", False, "First2", "Last2"),
        ("rustie", False, "First3", "Last3"),
    ]
    for username, is_admin, first_name, last_name in demo_accounts:
        db_session.add(
            User(
                username=username,
                registration_status=RegistrationStatus.Registered,
                selected_fiat_currency=FiatCurrency.USD,
                selected_language="en",
                password_salt="123",
                password_hash="deadbeef",
                is_admin=is_admin,
                first_name=first_name,
                last_name=last_name,
                account=Account(),
            )
        )
    try:
        db_session.commit()
    except Exception:
        # Best-effort seeding: undo the partial transaction but do not crash.
        db_session.rollback()
        db_session.flush()
|
8,938 | c4720eb5a42267970d3a98517dce7857c0ba8450 | import datetime
import json
import logging
from grab import Grab
from actions import get_course_gold, get_chat_type, get_indexes, group_chat_id
# logging.basicConfig(
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.DEBUG)
# logger = logging.getLogger(__name__)
# Module-level cache shared by the handlers below: the daily picture URL,
# the date it was fetched on, and the caption to send with it.
results = None  # cached picture URL for the current day (reset by check_date)
timestamp = datetime.datetime.now()
date = timestamp.date()
date_post = date  # date the cached picture was fetched on
caption = None  # caption sent along with the cached picture
def check_date():
    """Invalidate the cached picture once the calendar day has rolled over."""
    global results
    global date_post
    today = datetime.datetime.now().date()
    # noinspection PyTypeChecker
    if date_post is not None and date_post < today:
        results = None
        date_post = None
def get_every_day():
    """Scrape vk.com/skorpw for today's picture; return its URL or None.

    Side effects: on success, sets the module-level ``caption`` and
    ``date_post`` so the result can be cached for the rest of the day.
    (Fixes: removed the unused ``url`` variable, dead commented-out code and
    the redundant result/url_image temporary chain.)
    """
    global caption
    global date_post
    g = Grab()
    g.go("https://vk.com/skorpw",
         user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 '
                    'YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36')
    try:
        # First wall post's image onclick payload; IndexError means no match.
        image = g.doc.select(
            './/*[@id="public_wall"]/*[@id="page_wall_posts"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick')[
            0].text()
        caption = 'Ежа'
        date_post = datetime.datetime.now().date()
        res = json.loads(get_indexes(image))
        return res['temp']['y']
    except IndexError:
        return None
def uid_from_update(update):
    """
    Extract the chat id from update
    :param update: `telegram.Update`
    :return: chat_id extracted from the update, or None if unavailable
    """
    # The id may live in any of these sub-objects depending on the update
    # type; the original four-deep nested try/except ladder flattens to a
    # single loop over the candidate attributes.
    for source in ('message', 'inline_query', 'chosen_inline_result', 'callback_query'):
        holder = getattr(update, source, None)
        if holder is not None:
            try:
                return holder.from_user.id
            except AttributeError:
                continue
    logging.error("No chat_id available in update.")
    return None
def start(bot, update):
    """Handler for /start: greet the user in a direct message."""
    bot.sendMessage(chat_id=uid_from_update(update), text="Приветули")
def get_gold(bot, update):
    """Handler: reply with the current gold exchange rate, in the chat the
    request came from (group chat or direct message)."""
    response = get_course_gold()
    if get_chat_type(update) == "group":
        target = group_chat_id(update)
    else:
        target = uid_from_update(update)
    bot.sendMessage(chat_id=target, text=response,
                    reply_to_message_id=update.message.message_id)
def get_everyday(bot, update):
    """Handler: send today's picture, fetching and caching it if needed.

    The four duplicated send branches collapse to one chat-id resolution plus
    a photo/error split.
    """
    global results
    check_date()  # drop yesterday's cached picture, if any
    if results is None:
        results = get_every_day()
    # Resolve the destination chat once instead of in every branch.
    if get_chat_type(update) == "group":
        target = group_chat_id(update)
    else:
        target = uid_from_update(update)
    if results is not None:
        bot.sendPhoto(chat_id=target, photo=results,
                      reply_to_message_id=update.message.message_id,
                      caption=caption)
    else:
        # BUG FIX: the original also passed caption= to sendMessage, which
        # (unlike sendPhoto) does not accept a caption argument.
        bot.sendMessage(chat_id=target, text="Ошибка, повторите позже",
                        reply_to_message_id=update.message.message_id)
|
8,939 | f6dd5acc75d1a85a996629e22e81cdef316c1dcd | """Test functions for util.mrbump_util"""
import pickle
import os
import sys
import unittest
from ample.constants import AMPLE_PKL, SHARE_DIR
from ample.util import mrbump_util
class Test(unittest.TestCase):
    """Smoke tests for ample.util.mrbump_util using the shared test fixtures."""

    @classmethod
    def setUpClass(cls):
        cls.thisd = os.path.abspath(os.path.dirname(__file__))
        cls.ample_share = SHARE_DIR
        cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')

    def test_final_summary(self):
        pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
        if not os.path.isfile(pkl):
            # Silently skip when the shared fixture is absent.
            return
        # Python 3 needs latin1 to read pickles written under Python 2.
        load_kwargs = {'encoding': 'latin1'} if sys.version_info.major == 3 else {}
        with open(pkl, 'rb') as f:
            d = pickle.load(f, **load_kwargs)
        self.assertIsNotNone(mrbump_util.finalSummary(d))

    def test_topfiles(self):
        summary = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.testfiles_dir, AMPLE_PKL))
        topf = summary.topFiles()
        self.assertEqual(len(topf), 3)
        self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
8,940 | 3431e342c940b0d91f817c3e583728e55e305210 | # import the necessary packages
from .pigear import PiGear
from .camgear import CamGear
from .videogear import VideoGear
__all__ = ["PiGear", "CamGear", "VideoGear"] |
8,941 | adfdd988b7e208229f195308df8d63fd2799046f |
from math import exp
from math import e
import numpy as np
import decimal
import pandas as pd
# Tabulate the Exponential(rate=1.2) pdf on a grid of 10000 points.
pop = []
x = 0
for a in range(1,10001):
    pop.append((1.2)*e**(-1.2*x))
    # BUG FIX: was "x =+0.0001" (i.e. "x = +0.0001"), which pinned x to the
    # same value every iteration instead of stepping along the support.
    x += 0.0001
for k in range(100,10100,100):
    # NOTE(review): exec-created names (S100 ... S10000) are kept for
    # compatibility with the exploration below; a dict keyed by size would
    # be the safer idiom.
    exec(f'S{k} = pop[1:k]')
####################################################################################
import numpy as np
# Draw actual exponential samples of increasing size (overwrites S100..S10000).
for size in np.arange(100,10100,100):
    exec(f'S{size} = np.random.exponential(scale=1.2,size=size)')
len(S10000)
####################################################################################
import numpy as np
#another way to do it
#create a dictionary of samples
dict_samples = {}
for size in np.arange(100,10100,100):
    dict_samples[size]=np.random.exponential(scale=10/12,size=size)
dict_samples[100]
len(dict_samples[200])
1/1.2
# (removed the dead "pos = 100" assignment -- the loop rebinds pos at once)
for pos in np.arange(100,10100,100):
    sample = dict_samples[pos]
    sample_mean = sample.mean()
    print("The mean for sample {} is {}".format(pos,sample_mean))
|
8,942 | 031f668fbf75b54ec874a59f53c60ceca53779cf | from django.urls import path
from . import views
# URL namespace used when reversing, e.g. "orders:orders-checkout".
app_name = 'orders'
urlpatterns = [
    # Checkout page for the current order.
    path('checkout' , views.order_checkout_view , name='orders-checkout') ,
]
|
8,943 | 590baf17d9fdad9f52869fa354112d3aa5f7d5f0 | import requests
import json
import io
import sys
# News source ids to pull from NewsAPI.
# BUG FIX: 'google-news-in' and 'google-news-au' were missing a comma and
# were silently concatenated into the invalid id 'google-news-ingoogle-news-au';
# the duplicated 'engadget' entry (fetched twice) was also removed.
names = [
    'abc-news', 'abc-news-au', 'aftenposten', 'al-jazeera-english', 'ars-technica',
    'associated-press', 'australian-financial-review', 'axios', 'bbc-news', 'bbc-sport',
    'bleacher-report', 'bloomberg', 'breitbart-news', 'business-insider', 'business-insider-uk',
    'buzzfeed', 'cbc-news', 'cbs-news', 'cnbc', 'cnn', 'crypto-coins-news', 'daily-mail',
    'engadget', 'entertainment-weekly', 'espn', 'espn-cric-info', 'financial-post',
    'financial-times', 'football-italia', 'fortune', 'fox-sports', 'fox-news', 'four-four-two',
    'google-news', 'google-news-ca', 'google-news-uk', 'google-news-in', 'google-news-au',
    'hacker-new', 'ign', 'independent', 'mashable', 'metro', 'mirror', 'mtv-news',
    'medical-news-today', 'mtv-news-uk', 'national-geographic', 'msnbc', 'nbc-news', 'news24',
    'new-scientist', 'newsweek', 'news-com-au', 'new-york-magazine', 'next-big-future',
    'nfl-news', 'nhl-news', 'politico', 'polygon', 'recode', 'reuters', 'reddit-r-all', 'rte',
    'techradar', 'the-economist', 'the-globe-and-mail', 'the-guardian-au', 'the-guardian-uk',
    'techcrunch', 'the-hill', 'talksport', 'the-hindu', 'the-irish-times', 'the-lad-bible',
    'the-huffington-post', 'the-new-york-times', 'the-times-of-india', 'the-telegraph',
    'the-verge', 'the-wall-street-journal', 'the-washington-post', 'time', 'usa-today',
    'vice-news', 'wired', 'xinhua-net', 'der-tagesspiegel',
]

OUTPUT_PATH = "/sources/output20180401.json"

# NOTE(review): the API key is hard-coded below; move it to an environment
# variable before committing this anywhere public.
# NOTE(review): the emitted file keeps the original "[ obj , obj , ]" layout,
# whose trailing comma is not strictly valid JSON -- confirm the consumer.
# (Fix: write to the file directly instead of hijacking sys.stdout.)
with open(OUTPUT_PATH, "a+") as out:
    out.write("[\n")
    for name in names:
        url = ('https://newsapi.org/v2/everything?sources=' + name +
               '&pageSize=100&language=en&from=2018-04-01&to=2018-04-01'
               '&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e')
        response = requests.get(url)
        out.write(json.dumps(response.json()) + "\n")
        out.write(",\n")
    out.write("]\n")
#&from=2018-03-28
|
8,944 | 74fae3636b1c1b0b79d0c6bec8698581b063eb9c | from . import by_trips
from . import by_slope
|
8,945 | f26c624e8ae9711eb835e223407256e60dfc6d6e | # Ejercicio 1
# Variables used by the formatted sentences below.
edad = 42
cantante = 'Suzanne Vega'
comida = 'rúcula'
ciudad = 'Barcelona'

print('Pepito')
print('Cumpleaños: 22 de enero')
print(f'Tengo {edad} años')
print(f'Me gusta la música de {cantante}')
print(f'Me gusta cenar {comida}')
print(f'Vivo en {ciudad}')
8,946 | 8fdc9a52b00686e10c97fa61e43ddbbccb64741b | """
OO 05-18-2020
Task
----------------------------------------------------------------------------------------------------------
Your company needs a function that meets the following requirements:
- For a given array of 'n' integers, the function returns the index of the element with the minimum value
in the array. If there is more than one element with the minimum value, the returned index should be
the smallest one.
- If an empty array is passed to the function, it should raise an Exception.
A colleague has written that function, and your task is to design 3 separated unit tests, testing if the
function behaves correctly. The implementation in Python is listed below (Implementations in other
languages can be found in the code template):
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if a[i] < a[min_idx]:
min_idx = i
return min_idx
Another co-worker has prepared functions that will perform the testing and validate returned results with
expectations. Your task is to implement 3 classes that will produce test data and the expected results for
the testing functions. More specifically: function 'get_array()' in 'TestDataEmptyArray' class and
functions 'get_array()' and 'get_expected_result()' in classes 'TestDataUniqueValues' and
'TestDataExactlyTwoDifferentMinimums' following the below specifications:
- get_array() method in class TestDataEmptyArray has to return an empty array.
- get_array() method in class TestDataUniqueValues has to return an array of size at least 2 with all
unique elements, while method get_expected_result() of this class has to return the expected minimum
value index for this array.
- get_array() method in class TestDataExactlyTwoDifferentMinimums has to return an array where there are
exactly two different minimum values, while method get_expected_result() of this class has to return
the expected minimum value index for this array.
"""
def minimum_index(seq):
    """Return the smallest index of the minimum element of *seq*.

    Raises ValueError on an empty sequence.  Ties resolve to the lowest
    index because min() keeps the first of equal candidates.
    """
    if not seq:
        raise ValueError("Cannot get the minimum value index from an empty sequence")
    return min(range(len(seq)), key=seq.__getitem__)
class TestDataEmptyArray(object):
    """Fixture for the empty-input case."""

    @staticmethod
    def get_array():
        """Return an empty array."""
        return list()
class TestDataUniqueValues(object):
    """Fixture: at least two distinct elements; the minimum sits at index 2."""

    @staticmethod
    def get_array():
        """Return an array whose elements are all unique."""
        values = [5, 3, 2]
        return values

    @staticmethod
    def get_expected_result():
        """Index of the minimum value in get_array()."""
        return 2
class TestDataExactlyTwoDifferentMinimums(object):
    """Fixture: exactly two occurrences of the minimum value (2, at indices 2 and 3)."""

    @staticmethod
    def get_array():
        """Return an array containing the minimum value exactly twice."""
        values = [5, 3, 2, 2, 9]
        return values

    @staticmethod
    def get_expected_result():
        """Smallest index of the minimum value in get_array()."""
        return 2
def TestWithEmptyArray():
    """minimum_index must raise ValueError when given an empty sequence."""
    seq = TestDataEmptyArray.get_array()
    try:
        minimum_index(seq)
    except ValueError:
        return  # expected path
    assert False
def TestWithUniqueValues():
    """Validate the fixture (size >= 2, all unique) and the computed index."""
    seq = TestDataUniqueValues.get_array()
    assert len(seq) >= 2
    assert len(set(seq)) == len(seq)
    assert minimum_index(seq) == TestDataUniqueValues.get_expected_result()
def TestWithExactyTwoDifferentMinimums():
    """Validate the fixture (exactly two minimums) and the computed index."""
    seq = TestDataExactlyTwoDifferentMinimums.get_array()
    assert len(seq) >= 2
    ordered = sorted(seq)
    # Smallest two equal, and (if longer) the third strictly larger.
    assert ordered[0] == ordered[1] and (len(ordered) == 2 or ordered[1] < ordered[2])
    assert minimum_index(seq) == TestDataExactlyTwoDifferentMinimums.get_expected_result()
# Run the three hand-written unit tests; any failure raises before "OK".
TestWithEmptyArray()
TestWithUniqueValues()
TestWithExactyTwoDifferentMinimums()
print("OK")
|
8,947 | cd07dd596f760e232db5c0fd8e27360d61bda635 | #!/usr/bin/python3
import i3ipc
if __name__ == "__main__":
    i3 = i3ipc.Connection()
    # Workspace numbers currently in use (num == -1 marks unnumbered ones).
    occupied = [int(w["name"]) for w in i3.get_workspaces() if w["num"] != -1]
    # Switch to the lowest free workspace in 1..15.
    for num in range(1, 16):
        if num not in occupied:
            i3.command("workspace {}".format(num))
            break
|
8,948 | 94130b4962ecff2ea087ab34cf50a084254bf980 | """This module provides the definition of the exceptions that can be raised from the database module."""
class DatabaseError(Exception):
    """Raised when the requested database operation can not be completed."""
class InvalidDictError(Exception):
    """Raised when the object can not be created from the provided dict."""
8,949 | 137f9310256f66ccd9fbe6626659c3c4daea0efc | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Event(models.Model):
    """An event users can attend; field verbose names are in Ukrainian."""
    name = models.CharField('Назва', max_length=200)  # "Name"
    date = models.DateField('Дата')  # "Date"
    address = models.CharField('Адреса', max_length=255, blank=True, null=True)  # "Address", optional
    # Attendance goes through the Atendent join model defined below.
    # NOTE(review): null=True has no effect on a ManyToManyField.
    attendents = models.ManyToManyField(User, through='Atendent', blank=True, null=True)
    description = models.TextField('Опис', blank=True, null=True)  # "Description", optional
    # Python-2 era string representation -- this file targets legacy Django.
    def __unicode__(self):
        return self.name
class Atendent(models.Model):
    """Through model: one user's attendance record for one event."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0.
    user = models.ForeignKey(User)
    event = models.ForeignKey(Event, null=True, blank=True)
    state = models.IntegerField(null=True, blank=True)  # attendance status code; semantics not defined here
8,950 | e6d4d12d47391927364fdc9765c68690d42c5d8d | import pygame
import serial
import time
ser1 = serial.Serial('/dev/ttyACM0', 115200) #Right
ser1.write('?\n')
time.sleep(0.5)
# Identify which port is the right-hand controller from its '?' reply.
# NOTE(review): assumes byte 4 of the readline() reply distinguishes the two
# boards and that the reply arrives within the 0.5 s sleep -- confirm against
# the motor-controller firmware protocol.
if ser1.readline()[4] == 0:
    ser2 = serial.Serial('/dev/ttyACM1', 115200) #Left, negative speeds go forward
else:
    # Ports were swapped: reopen with the roles exchanged.
    ser1 = serial.Serial('/dev/ttyACM1', 115200)
    ser2 = serial.Serial('/dev/ttyACM0', 115200)
def write_spd(write1, write2):
    """Send a speed command to both controllers; the left motor is negated so
    positive arguments mean 'forward' on both sides."""
    ser1.write('sd{}\n'.format(write1))
    ser2.write('sd{}\n'.format(-write2))
speed = 60  # magnitude of the wheel speed command
# NOTE(review): the four counters below are superseded by `state` and are
# never read again.
up = 0
down = 0
left = 0
right = 0
state = {'up':0, 'down':0, 'left':0, 'right':0}  # currently-held arrow keys
# A (tiny) window is required for pygame to receive keyboard events.
scr = pygame.display.set_mode((1,1))
while(True):
    elist = pygame.event.get()
    for event in elist:
        # Raw pygame/SDL1 codes: event.type 2/3 are KEYDOWN/KEYUP, key 27 is
        # Escape and 273-276 are the Up/Down/Right/Left arrows.
        if event.type == 2 and event.dict.get('key') == 27:
            write_spd(0, 0)  # stop the motors before exiting
            quit()
        if event.type == 2:
            if event.dict.get('key') == 273:
                state['up'] = 1
            elif event.dict.get('key') == 274:
                state['down'] = 1
            elif event.dict.get('key') == 275:
                state['right'] = 1
            elif event.dict.get('key') == 276:
                state['left'] = 1
        if event.type == 3:
            if event.dict.get('key') == 273:
                state['up'] = 0
            elif event.dict.get('key') == 274:
                state['down'] = 0
            elif event.dict.get('key') == 275:
                state['right'] = 0
            elif event.dict.get('key') == 276:
                state['left'] = 0
    # Translate held keys into wheel speeds: forward/back drive both wheels,
    # pure left/right spin in place, up+left / up+right arc by stopping one wheel.
    if state['up'] == 1:
        if state['right'] == 1:
            write_spd(0, speed)
        elif state['left'] == 1:
            write_spd(speed, 0)
        else:
            write_spd(speed, speed)
    elif state['left'] == 1:
        write_spd(speed, -speed)
    elif state['right'] == 1:
        write_spd(-speed, speed)
    elif state['down'] == 1:
        write_spd(-speed, -speed)
    else:
        write_spd(0, 0)
|
8,951 | 785dcaf7de68174d84af3459cde02927bc2e10cc | tabela = [[1,-45,-20,0,0,0,0],[0,20,5,1,0,0,9500],[0,0.04,0.12,0,1,0,40],[0,1,1,0,0,1,551]]
colunas = ["Z","A","B","S1","S2","S3","Solução"]
linhas = ["Z","S1","S2","S3"]
n_colunas=7
n_linhas=4
#Inicio do algoritmo
#Buscar o menor numero negativo na linha 0
menor_posicao=-1
menor_valor=0
for coluna in range(0,n_colunas):
if(tabela[0][coluna]<menor_valor):
menor_valor=tabela[0][coluna]
menor_posicao=coluna
#O menor numero negativo na linha 0 esta na coluna menor_posicao, caso nao haja um numero negativo a posicao é -1
solucao_dividida=[]
while(menor_posicao!=-1): #O loop terminara quando nao houver numero negativo na linha Z
#Vamos agora dividir a ultima coluna pelos elementos da coluna i em cada linha
solucao_dividida.clear()
solucao_dividida.append("Vazio")
for linha in range (1,n_linhas):
if(tabela[linha][menor_posicao]==0):
solucao_dividida.append(float("inf"))
else:
solucao_dividida.append(tabela[linha][n_colunas-1]/tabela[linha][menor_posicao])
#Agora iremos procurar a linha com a menor solucao_dividida positiva
if(solucao_dividida[1]>0):
menor_solucao=solucao_dividida[1]
else:
menor_solucao=float("inf")
menor_solucao_posicao=1
for i in range (1,n_linhas):
if(solucao_dividida[i]>0 and solucao_dividida[i]<menor_solucao):
menor_solucao=solucao_dividida[i]
menor_solucao_posicao=i
#Agora vamos pegar o elemento tabela[menor_solucao_posicao][menor_posicao] e dividir a linha menor_solucao_posicao por ele
pivo=tabela[menor_solucao_posicao][menor_posicao]
for coluna in range(0,n_colunas):
if(pivo==0):
tabela[menor_solucao_posicao][coluna]=float("inf")
else:
tabela[menor_solucao_posicao][coluna]=tabela[menor_solucao_posicao][coluna]/pivo
linhas[menor_solucao_posicao]=colunas[menor_posicao] #mudando o cabecalho da tabela
#Agora vamos pegar a linha menor_solucao_posicao e somar nas demais de forma que a coluna menor_posicao seja zerada em todas as linhas
#menos na linha menor_solucao_posicao
for linha in range (0,n_linhas):
if(linha!=menor_solucao_posicao):
if(tabela[menor_solucao_posicao][menor_posicao]==0):
razao=float("inf")
else:
razao=tabela[linha][menor_posicao]/tabela[menor_solucao_posicao][menor_posicao]
for coluna in range (0,n_colunas):
tabela[linha][coluna]=tabela[linha][coluna]-(razao*tabela[menor_solucao_posicao][coluna])
#Buscar o menor numero negativo na linha 0
menor_posicao=-1
menor_valor=0
for coluna in range(0,n_colunas):
if(tabela[0][coluna]<menor_valor):
menor_valor=tabela[0][coluna]
menor_posicao=coluna
#O menor numero negativo na linha 0 esta na coluna menor_posicao, caso nao haja um numero negativo a posicao é -1
#Caso menor_posicao==-1 o while termina
print(tabela)
for i in range (0,n_linhas):
print(linhas[i],"=",tabela[i][n_colunas-1]) |
8,952 | 39affe139eec4cf6877646188839d79ed575235c | from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.app import App
import webbrowser
# NOTE(review): these module-level placeholders are never read in this file.
a=0.0
b="?"
n=0.0
k=""
g=""
class ghetto(GridLayout):
    """Two-column grid of lesson buttons; pressing a button opens that
    class's Zoom meeting in the default web browser."""
    # Each *Callback receives the pressed Button instance as `a` (unused).
    def matCallback(self,a):
        webbrowser.open_new("https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09")
    def biyoCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09")
    def edebCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09")
    def kimyaCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09")
    def tarihCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs")
    def cogCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09")
    def bilisiCallback(self,a):
        webbrowser.open_new("https://us02web.zoom.us/j/3469922894")
    def muzCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09")
    def ingCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09")
    def felCallback(self,a):
        webbrowser.open_new("https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09")
    def __init__(self,**kwargs):
        """Build ten subject buttons and wire each to its Zoom callback."""
        super(ghetto, self).__init__(**kwargs)
        self.cols = 2
        self.btn1 = Button(text='MATEMATİK')
        self.btn1.bind(on_press=self.matCallback)
        self.btn2 = Button(text='KİMYA')
        self.btn2.bind(on_press=self.kimyaCallback)
        self.btn3 = Button(text='BİYOLOJİ')
        self.btn3.bind(on_press=self.biyoCallback)
        self.btn4 = Button(text='FELSEFE')
        self.btn4.bind(on_press=self.felCallback)
        self.btn6 = Button(text='EDEBİYAT')
        self.btn6.bind(on_press=self.edebCallback)
        self.btn7 = Button(text='BİLİŞİM')
        self.btn7.bind(on_press=self.bilisiCallback)
        self.btn5 = Button(text='TARİH')
        self.btn5.bind(on_press=self.tarihCallback)
        self.btn8 = Button(text='MÜZİK')
        self.btn8.bind(on_press=self.muzCallback)
        self.btn9 = Button(text='İNGİLİZCE')
        self.btn9.bind(on_press=self.ingCallback)
        self.btn10 = Button(text='COĞRAFYA')
        self.btn10.bind(on_press=self.cogCallback)
        # Geography is placed first, then buttons 1-9 in numeric order.
        self.add_widget(self.btn10)
        self.add_widget(self.btn1)
        self.add_widget(self.btn2)
        self.add_widget(self.btn3)
        self.add_widget(self.btn4)
        self.add_widget(self.btn5)
        self.add_widget(self.btn6)
        self.add_widget(self.btn7)
        self.add_widget(self.btn8)
        self.add_widget(self.btn9)
class main(App):
    """Kivy application entry point; the root widget is the button grid."""
    def build(self):
        return ghetto()
# Launch the app when executed directly.
if __name__ == "__main__":
    main().run()
|
8,953 | f25db7d797f1f88bd0374d540adcb396e16740a0 | from django.contrib.auth import authenticate
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
import json
from userprofile.models import Profile
from .models import *
# Send a private message
def sendmessage(request):
    """Create a private message from one user to another (POST only)."""
    if request.method != "POST":
        return JsonResponse({
            "status": 0,
            "message": "error method"
        })
    payload = json.loads(request.body)
    Message.objects.create(
        uid_id=payload.get("userid"),
        message=payload.get("message"),
        tuid_id=payload.get("touserid"),
    )
    return JsonResponse({
        "message": "send message success"
    })
# Fetch received private messages
def getmessage(request):
    """Return every message addressed to the given user (POST only)."""
    if request.method != "POST":
        return JsonResponse({
            "status": 0,
            "message": "error method"
        })
    uid = json.loads(request.body).get("userid")
    msglist = [
        {
            "messageid": res.id,
            "userid": res.uid.id,
            "username": res.uid.username,
            "message": res.message,
            "time": res.time,
        }
        for res in Message.objects.filter(tuid_id=uid).all()
    ]
    # safe=False because the top-level JSON value is a list, not a dict.
    return JsonResponse(msglist, safe=False)
# Delete a private message
def deletemessage(request):
    """Delete a single message by its id (POST only)."""
    if request.method != "POST":
        return JsonResponse({
            "status": 0,
            "message": "error method"
        })
    mid = json.loads(request.body).get("messageid")
    Message.objects.filter(id=mid).delete()
    return JsonResponse({
        "message": "delete message success"
    })
8,954 | c2e0f2eda6ef44a52ee4e192b8eb71bde0a69bff | import json
import logging
logger = logging.getLogger(__name__)
from django.db.models import Q
from channels_api.bindings import ResourceBinding
from .models import LetterTransaction, UserLetter, TeamWord, Dictionary
from .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer
class TeamWordBinding(ResourceBinding):
    """Websocket binding for TeamWord: clients may read/subscribe, and may
    create words subject to the letter-ownership checks in has_permission."""
    model = TeamWord
    stream = "teamwords"
    serializer_class = TeamWordSerializer
    def get_queryset(self):
        # Words are visible to the whole team, not just the user's group.
        return TeamWord.objects.filter(user__group__team=self.user.group.team)
    @classmethod
    def group_names(self, instance, action):
        # Broadcast word events to everyone on the owning team.
        return [str(instance.user.group.team)]
    def has_permission(self, user, action, pk):
        """Reject edits; allow creation only of a dictionary word built from
        letters the user owns or has borrowed, and not already played."""
        logger.debug("TW has_permission {} {} {}".format(user, action, pk))
        if action in ['update', 'delete']:
            return False
        if action == 'create':
            payload = json.loads(self.message.content['text'])
            if 'data' not in payload or 'word' not in payload['data']:
                logger.debug("Possibly malicious malformed TeamWord from {}".format(self.user.username))
                return False
            word = payload['data']['word']
            word_letters = set(word.lower())
            if len(word_letters) == 0:
                return False
            user = self.user
            # Letters available to the user: their own plus approved borrows.
            user_letters = set()
            for letter in UserLetter.objects.filter(user=user):
                user_letters.add(letter.letter.lower())
            for letter in LetterTransaction.objects.filter(borrower=user, approved=True):
                user_letters.add(letter.letter.lower())
            if not word_letters.issubset(user_letters):
                return False
            # The word must not have been played by the team already.
            team_words = set()
            for tword in self.get_queryset():
                team_words.add(tword.word)
            if word in team_words:
                return False
            # The word must exist in the Dictionary table.
            try:
                wordObj = Dictionary.objects.get(word=word)
            except Exception as e:
                return False
            return True
        # allow list, retrieve, subscribe
        return True
class UserLetterBinding(ResourceBinding):
    """Websocket binding exposing UserLetter objects (read-only for clients)."""
    model = UserLetter
    stream = "userletters"
    serializer_class = UserLetterSerializer

    def get_queryset(self):
        # The user's own letters plus those of everyone in their group.
        queries = Q(user=self.user)
        for profile in self.message.user.group.profile_set.all():
            queries |= Q(user=profile.user)
        return UserLetter.objects.filter(queries)

    @classmethod
    def group_names(self, instance, action):
        logger.debug(str(instance))
        # Deliver only to the owning user's private channel group.
        return [instance.user.username + "solo"]

    def has_permission(self, user, action, pk):
        logger.debug("UL has_permission {} {} {}".format(user, action, pk))
        # Letters are server-granted: clients may only list/retrieve/subscribe.
        return action not in ('create', 'update', 'delete')
class LetterTransactionBinding(ResourceBinding):
    """Websocket binding for letter borrowing between friends."""
    model = LetterTransaction
    stream = "lettertransactions"
    serializer_class = LetterTransactionSerializer
    def get_queryset(self):
        # A transaction is visible to its borrower and to the letter's owner.
        return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))
    @classmethod
    def group_names(self, instance, action):
        # Send this to only the borrower and lender
        return [instance.borrower.username + "solo", instance.letter.user.username + "solo"]
    def has_permission(self, user, action, pk):
        """Disallow deletes; creates/updates must reference a letter owned by
        someone in the requesting user's friend group."""
        logger.debug("TR has_permission {} {} {}".format(user, action, self.message.content['text']))
        if action == "delete":
            return False
        if action == "create" or action == "update":
            payload = json.loads(self.message.content['text'])
            if 'data' not in payload or 'letter' not in payload['data']:
                logger.debug("Possibly malicious malformed LetterTransaction from {}".format(self.user.username))
                return False
            ul = UserLetter.objects.get(pk=payload['data']['letter'])
            # If this UserLetter is not owned by a friend, permission denied
            if ul.user.profile not in self.user.group.profile_set.all():
                logger.debug("Malicious LetterTransaction creation suspected by {}".format(self.user.username))
                return False
        # allow list, retrieve, subscribe, and legitimate create
        return True
|
8,955 | 35a95c49c2dc09b528329433a157cf313cf59667 | import hashlib
def _hexdigest(algorithm, data):
    """Return the hex digest of *data* (a str, encoded as UTF-8) under *algorithm*.

    Shared implementation for the six public wrappers below, which previously
    repeated the same encode/hexdigest boilerplate per algorithm.
    """
    return hashlib.new(algorithm, data.encode('utf-8')).hexdigest()

def md5_hexdigest(data):
    """MD5 hex digest of a UTF-8 encoded string."""
    return _hexdigest('md5', data)

def sha1_hexdigest(data):
    """SHA-1 hex digest of a UTF-8 encoded string."""
    return _hexdigest('sha1', data)

def sha224_hexdigest(data):
    """SHA-224 hex digest of a UTF-8 encoded string."""
    return _hexdigest('sha224', data)

def sha256_hexdigest(data):
    """SHA-256 hex digest of a UTF-8 encoded string."""
    return _hexdigest('sha256', data)

def sha384_hexdigest(data):
    """SHA-384 hex digest of a UTF-8 encoded string."""
    return _hexdigest('sha384', data)

def sha512_hexdigest(data):
    """SHA-512 hex digest of a UTF-8 encoded string."""
    return _hexdigest('sha512', data)
|
8,956 | 7e3a5e1f19683b1716f3c988dcc1e65fee1cae13 | import sys
sys.stdin = open('magnet.txt', 'r')
from collections import deque
def check(t, d, c):
    # Rotate gear t (1-4) in direction d (1 = clockwise via appendleft(pop),
    # -1 = counter-clockwise via append(popleft)).  c is the gear that
    # triggered this call (0 for the user's initial touch) so the recursion
    # never bounces back to its caller.
    # Each gear is a deque of 8 teeth; index 2 faces the right-hand
    # neighbour and index -2 faces the left-hand neighbour.  A neighbour
    # rotates (in the opposite direction) only when the touching teeth have
    # different polarity, and it is notified BEFORE this gear rotates, so
    # all comparisons use pre-rotation tooth positions.
    if t == 1:
        if m1[2] != m2[-2] and not c:
            check(t + 1, d * (-1), 1)
        if d == 1:
            m1.appendleft(m1.pop())
        else:
            m1.append(m1.popleft())
    elif t == 4:
        if m4[-2] != m3[2] and not c:
            check(t - 1, d * (-1), 4)
        if d == 1:
            m4.appendleft(m4.pop())
        else:
            m4.append(m4.popleft())
    elif t == 2:
        # Propagate right unless the impulse came from gear 3, and left
        # unless it came from gear 1.
        if m2[2] != m3[-2] and (not c or c == 1):
            check(t + 1, d * (-1), 2)
        if m2[-2] != m1[2] and (not c or c == 3):
            check(t - 1, d * (-1), 2)
        if d == 1:
            m2.appendleft(m2.pop())
        else:
            m2.append(m2.popleft())
    else:
        if m3[2] != m4[-2] and (not c or c == 2):
            check(t + 1, d * (-1), 3)
        if m3[-2] != m2[2] and (not c or c == 4):
            check(t - 1, d * (-1), 3)
        if d == 1:
            m3.appendleft(m3.pop())
        else:
            m3.append(m3.popleft())
# Drive the simulation: for each test case read K rotations, apply them,
# and score the gears by their 12-o'clock teeth.
for test_case in range(1, int(input()) + 1):
    m1, m2, m3, m4 = deque(), deque(), deque(), deque()
    K = int(input())
    # Read the four gear rows in order.  The original if/else ladder that
    # located the first still-empty deque reduces to exactly this loop.
    for gear in (m1, m2, m3, m4):
        gear.extend(map(int, input().split()))
    for _ in range(K):
        touch, direction = map(int, input().split())
        check(touch, direction, 0)
    # Score: gear i contributes 2**(i-1) when its top tooth (index 0) is 1.
    result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
    print('#{} {}'.format(test_case, result))
|
8,957 | 53cbc3ca3a34a8aafa97d6964337cfabb1bebac5 | from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
from random import randint
import sys
import pandas as pd
import pickle
import nltk
import os
import numpy as np
import string
# NOTE: Python 2 script (print statements, string.maketrans on str,
# DataFrame.ix). Clusters StackOverflow titles with TF-IDF + LSA + KMeans,
# then answers pairwise "same cluster?" queries from check_index.csv.
# Map every punctuation character to a space.
replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))
# sys.argv[1] is the data directory; sys.argv[2] the output CSV path.
dir_doc = sys.argv[1] + 'docs.txt'
dir_titles = sys.argv[1] + 'title_StackOverflow.txt'
with open(dir_doc) as f:
    docs = f.read().splitlines()
with open(dir_titles) as f:
    titles = f.read().splitlines()
with open('stopwords.txt') as f:
    stopwords = f.read().splitlines()
print "Eliminating stopwords from docs and titles"
# Preprocess: strip punctuation, drop digits inside words, lowercase,
# remove stopwords.
for i in range(len(docs)):
    docs[i] = docs[i].translate(replace_punctuation)
    docs[i] = ' '.join([''.join([c for c in word if not c.isdigit()]) for word in docs[i].split()])
    docs[i] = ' '.join([word.lower() for word in docs[i].split() if word.lower() not in stopwords])
for i in range(len(titles)):
    titles[i] = titles[i].translate(replace_punctuation)
    titles[i] = ' '.join([''.join([c for c in word if not c.isdigit()]) for word in titles[i].split()])
    titles[i] = ' '.join([word.lower() for word in titles[i].split() if word.lower() not in stopwords])
# NOTE(review): 'total' is built but never used.
total = docs + titles
print "Extracting features from the training dataset using a sparse vectorizer"
vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000, min_df=2, stop_words='english', use_idf=True)
vectorizer.fit(titles)
X = vectorizer.transform(titles)
print "n_samples: %d, n_features: %d" % X.shape
print "Performing dimensionality reduction using LSA"
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
# Fixed seeds for reproducibility (randint left for experimentation).
r1 = 1#randint(0,10000)
r2 = 1#randint(0,10000)
true_k = 53
svd = TruncatedSVD(n_components=20, random_state=r1)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
explained_variance = svd.explained_variance_ratio_.sum()
print "Explained variance of the SVD step: {}%".format(int(explained_variance * 100))
km = KMeans(n_clusters=true_k, init='k-means++', n_jobs=-1, max_iter=1000, n_init=100, verbose=False, random_state=r2)
print "Clustering sparse data with %s" % km
km.fit(X)
ids = range(len(titles))
clusters = km.labels_.tolist()
stack = { 'title': titles, 'indexes': ids, 'cluster': clusters }
frame = pd.DataFrame(stack, index = [clusters] , columns = ['title', 'indexes', 'cluster'])
#sort cluster centers by proximity to centroid
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
# Report the top terms and a few sample titles per cluster.
for i in range(true_k):
    print "Cluster %d words:" % i
    for ind in order_centroids[i, :5]: #replace 6 with n words per cluster
        print "\t\t%s" % terms[ind]
    print "Cluster %d titles:" % i
    for ind in range(5):
        print "\t\t[ %d" % frame.ix[i]['indexes'].values.tolist()[ind], "] %s" % frame.ix[i]['title'].values.tolist()[ind]
# Check clusters' distribution
a = frame['cluster'].value_counts() #number of titles per cluster
print a
id_cluster = np.array(frame['cluster'])
dir_check = sys.argv[1] + 'check_index.csv'
with open(dir_check) as f:
    check = f.read().splitlines()
# Skip the CSV header row.
check = check[1:]
output = np.zeros((len(check)))
# Each row is "id,title_id1,title_id2": answer 1 if the two titles landed in
# the same cluster.
for i in range(len(check)):
    word = check[i].split(',')
    id1 = int(word[1])
    id2 = int(word[2])
    output[i] = (id_cluster[id1] == id_cluster[id2])
# NOTE(review): the output file is never closed/flushed explicitly.
f = open(sys.argv[2], 'w')
f.write("ID,Ans\n")
for i in range(len(check)):
    f.write(str(i) + "," + str(int(output[i])) + "\n")
|
8,958 | 6b1970ee2b0d24504f4dea1f2ad22a165101bfbe | # -*- coding=utf-8 -*-
# ! /usr/bin/env python3
"""
抽奖活动-摇一摇活动
"""
import time
import allure
from libs.selenium_libs.common.base import Base
from libs.selenium_libs.page_object.page_activity import PageActivity
from libs.selenium_libs.page_object.page_personal_center import PagePersonalCenter
class LuckDrawActivity(Base):
    """Page flow for the lucky-draw ("shake to win") activity."""

    @allure.step('参加抽奖活动')
    def join_luck_draw_activity(self, driver,activity_name):
        """Navigate to the named lucky-draw activity and record balances.

        Returns (integral_start, coupon_amount_start): the user's points
        balance and coupon count before joining, so the caller can verify
        the deltas after the draw.
        """
        # Open the personal-center page
        self.go_user_center()
        time.sleep(2)
        # Read the current points balance
        integral_start = PagePersonalCenter(driver).get_my_integral()
        # Read the current coupon count
        coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()
        # Open the activities page and wait for it to load
        self.go_activity()
        time.sleep(2)
        # Click the search box
        PageActivity(driver).click_search()
        # Type the activity name into the search box
        PageActivity(driver).input_activity_name(activity_name)
        time.sleep(2)
        # Click the first activity in the search results
        PageActivity(driver).click_activity()
        time.sleep(2)
        return integral_start, coupon_amount_start
        # Draft logic kept from the original author: detect the draw type
        # and verify the balances afterwards (never reached).
        # # 检查参与活动方式
        # time.sleep(1)
        # way = self.driver.find_elements_by_xpath(loc.Activity.loc_luck_draw_rule)[-1].text
        # # 判断抽奖形式
        # if '凯德星会员即可参与抽奖' in way:
        #     print('免费抽奖')
        #     self.click_ele(loc.Activity.loc_draw_immediately)
        #     # 判断奖励类型
        #     # if self.driver.find_element_by_xpath('//div[@class="result_txt"]').text =="恭喜您抽中奖品是积分,立即兑换你想要的礼品吧!":
        #     # 进入个人中心页面
        #     self.go_user_center()
        #     time.sleep(2)
        #     # 获取当前积分
        #     integral_end = PagePersonalCenter.get_my_integral()
        #     # 获取当前卡券数
        #     coupon_amount_end = PagePersonalCenter.get_my_coupon_amount()
        # elif '每次抽奖消耗' in way:
        #     print('消耗积分抽奖')
        #     integral_end = 0
        #     coupon_amount_end = 0
        # else:
        #     print('验证积分抽奖')
        #     integral_end = 0
        #     coupon_amount_end = 0
        # return integral_start, coupon_amount_start, integral_end, coupon_amount_end
        # integral = filter(way.isdigit, way)
|
8,959 | 3accf1c066547c4939c104c36247370b4a260635 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 10:18:35 2019
@author: zehra
"""
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.special import expit
X_train, y_train, X_val, y_val, X_test, y_test = pickle.load(open("data.pkl", "rb"))
id2word, word2id = pickle.load( open("dicts.pkl", "rb") )
y_train = np.float64(np.expand_dims( np.array(y_train), axis=1 ))
y_val = np.float64(np.expand_dims( np.array(y_val), axis=1 ))
y_test = np.float64(np.expand_dims( np.array(y_test), axis=1 ))
learning_rate = 0.1#0.1 #(Total loss = average of per sample loss in the batch)
#Learning rate decay: None (fixed learning rate)
batch_size = 20
#Regularization: None
num_epochs = 300
num_features = np.shape(X_train)[1]
num_samples_train = np.shape(X_train)[0]
#Helper functions:
def sigmoid(x):
    """Logistic function sigma(x) = 1 / (1 + exp(-x)), applied elementwise.

    Delegates to scipy's expit, which is numerically stable for large
    negative inputs where a naive np.exp(-x) would overflow.
    """
    return expit(x)
def LRcost(t, pred):
    """Mean binary cross-entropy between targets *t* and predictions *pred*.

    Both arrays have shape (m, 1). Predictions are clipped into
    [eps, 1 - eps] to avoid log(0): the previous version only patched
    zeros, so pred == 1 with t == 1 produced 0 * (-inf) = nan, and it
    patched them by writing into the caller's array in place. Clipping a
    copy fixes both problems; *pred* is left untouched.
    """
    eps = 10**-10
    p = np.clip(pred, eps, 1.0 - eps)  # np.clip returns a new array
    cost_per_sample = -t*np.log(p) - (1-t)*np.log(1-p)
    avg_cost = np.mean(cost_per_sample)
    return avg_cost
def LRgradient_batch(X, y, pred):
    """Average gradient of the logistic-regression loss w.r.t. theta.

    Computes (1/m) * X^T (pred - y) for a batch of m samples, where X is
    (m, n_features) and y, pred are (m, 1); returns an (n_features, 1) array.
    """
    residual = pred - y
    return np.dot(X.T, residual) / X.shape[0]
#Initializations:
# Per-epoch histories of cost, accuracy and parameters, filled in the loop.
train_cost_history = np.zeros(num_epochs)
val_cost_history = np.zeros(num_epochs)
train_accuracy = np.zeros(num_epochs)
val_accuracy = np.zeros(num_epochs)
test_accuracy = np.zeros(1)
theta_history = np.zeros((num_epochs,num_features))
theta_0_history = np.zeros(num_epochs)
#Parameter initialization: Uniform[-0.5, 0.5]
theta = np.random.uniform(low=-0.5, high=0.5, size=(num_features,1)) # theta: 2000 x 1
theta_0 = np.random.uniform(low=-0.5, high=0.5) #theta_0: scalar
#Training Loop: For epochs = 1, .., 300:
for epoch in range(num_epochs):
    J = 0.0 #Logistic Regression Cost J (scalar)
    # NOTE(review): J accumulates per-batch costs but is never read; the
    # logged training cost below is recomputed on the full training set.
    gradJ = np.zeros(theta.shape) #gradient of theta: 2000 x 1
    gradJ_0 = 0.0 #gradient of theta_0: scalar
    #Mini-batch gradient computation and theta update loop:
    for i in range(0, num_samples_train, batch_size): #For batches of the training set:
        X_i = X_train[i:i+batch_size] #X_i: 20x2000
        y_i = y_train[i:i+batch_size] #y_i: 20x1
        z_i = np.dot(X_i, theta) + theta_0 #z_i: 20x1
        pred_i = sigmoid(z_i) #pred_i: 20x1
        J += LRcost(y_i, pred_i) #Compute logistic regression cost for current batch
        #Compute gradients:
        gradJ = LRgradient_batch(X_i, y_i, pred_i)
        # NOTE(review): gradJ is averaged over the batch but gradJ_0 is a raw
        # sum — confirm whether the bias gradient should also be divided by
        # the batch size.
        gradJ_0 = np.sum(pred_i-y_i)
        #Update the parameters (once per mini-batch):
        theta = theta - learning_rate*gradJ #theta: 2000x1
        theta_0 = theta_0 - learning_rate*gradJ_0 #theta_0: scalar
    #End mini-batch gradient / theta update loop
    #Predict on training set:
    z_train = np.dot(X_train, theta) + theta_0 #z_train: 20,000 x 1
    pred_train = sigmoid(z_train) #pred_train: 20,000 x 1
    pred_train_class = np.zeros(pred_train.shape)
    pred_train_class [ pred_train > 0.5 ] = 1.0 #pred_train_class: 20,000 x 1
    train_cost_history[epoch] = LRcost(y_train, pred_train)
    train_accuracy[epoch] = np.sum(y_train==pred_train_class)/len(y_train)
    print("Epoch: "+str(epoch)+" | Accuracy on training set: "+str(train_accuracy[epoch]))
    #Predict on validation set:
    z_val = np.dot(X_val, theta) + theta_0 #z_val: 5,000 x 1
    pred_val = sigmoid(z_val) #pred_val: 5,000 x 1
    val_cost_history[epoch] = LRcost(y_val, pred_val)
    # Snapshot the parameters so the best epoch can be restored later.
    theta_history[epoch,] = np.squeeze(theta)
    theta_0_history[epoch] = theta_0
    pred_val_class = np.zeros(pred_val.shape)
    pred_val_class [ pred_val > 0.5 ] = 1.0 #pred_val_class: 5,000 x 1
    val_accuracy[epoch] = np.sum(y_val==pred_val_class)/len(y_val)
    print("Epoch: "+str(epoch)+" | Accuracy on validation set: "+str(val_accuracy[epoch]))
#End Training Loop
#Choose the best validation model:
best_val_accuracy = np.amax(val_accuracy)
best_epoch_id = np.argmax(val_accuracy)
best_theta = theta_history [best_epoch_id,]
best_theta = np.expand_dims(best_theta, axis=1)
best_theta_0 = theta_0_history [best_epoch_id,]
#Predict on the test set:
z_test = np.dot(X_test, best_theta) + best_theta_0 #z_test: 25,000 x 1
pred_test = sigmoid(z_test) #pred_test: 25,000 x 1
test_cost = LRcost(y_test, pred_test)
print("Logistic Regression cost on the Test set: "+str(test_cost))
pred_test_class = np.zeros(pred_test.shape)
pred_test_class [ pred_test > 0.5 ] = 1.0 #pred_test_class: 25,000 x 1
test_accuracy = np.sum(y_test==pred_test_class)/len(y_test)
print("LR test accuracy: "+str(test_accuracy))
#Plot Train/Val Accuracy:
plt.plot(train_accuracy)
plt.plot(val_accuracy)
plt.title('Model Accuracy (Learning Rate: '+str(learning_rate)+')')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='lower right')
#axes = plt.gca()
#axes.set_ylim([0.5,1.0])
plt.show()
#Plot Train/Val Cost Function:
plt.figure()
plt.plot(train_cost_history)
plt.plot(val_cost_history)
plt.title('Logistic Regression Cost Function (Learning Rate: '+str(learning_rate)+')')
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='lower right')
plt.show()
|
8,960 | 903d5913025d7d61ed50285785ec7f683047b49a | # -*- encoding: utf-8 -*-
#
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2011 Thomas "Courgette" LÉVEIL <courgette@bigbrotherbot.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from exceptions import ValueError, IOError, Exception, KeyError
import json
import re
import string
import sys
import urllib2
from distutils import version
from types import StringType
## url from where we can get the latest B3 version number
URL_B3_LATEST_VERSION = 'http://master.bigbrotherbot.net/version.json'
# supported update channels
UPDATE_CHANNEL_STABLE = 'stable'
UPDATE_CHANNEL_BETA = 'beta'
UPDATE_CHANNEL_DEV = 'dev'
class B3version(version.StrictVersion):
    """
    Version numbering for BigBrotherBot.
    Compared to version.StrictVersion this class allows version numbers such as :
        1.0dev
        1.0dev2
        1.0a
        1.0a
        1.0a34
        1.0b
        1.0b1
        1.0b3
    And make sure that any 'dev' prerelease is inferior to any 'alpha' prerelease
    """
    # Accepts MAJOR.MINOR[.PATCH] optionally followed by a prerelease tag
    # ('a', 'b' or 'dev') and an optional prerelease number. (This is
    # Python 2 code: raise-with-comma, string.atoi, cmp/__cmp__.)
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? (([ab]|dev)(\d+)?)?$',
                            re.VERBOSE)
    # Ordering of prerelease tags: dev < alpha < beta.
    prerelease_order = {'dev': 0, 'a': 1, 'b': 2}

    def parse (self, vstring):
        # Split the version string into numeric and prerelease components.
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring
        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 6, 7)
        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # A missing patch level defaults to 0, so '1.0' == '1.0.0'.
            self.version = tuple(map(string.atoi, [major, minor]) + [0])
        if prerelease:
            # A missing prerelease number defaults to 0 ('1.0a' == '1.0a0').
            self.prerelease = (prerelease, string.atoi(prerelease_num if prerelease_num else '0'))
        else:
            self.prerelease = None

    def __cmp__ (self, other):
        # Python 2 comparison protocol: negative / zero / positive result.
        if isinstance(other, StringType):
            other = B3version(other)
        compare = cmp(self.version, other.version)
        if compare == 0: # have to compare prerelease
            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!
            if not self.prerelease and not other.prerelease:
                return 0
            elif self.prerelease and not other.prerelease:
                return -1
            elif not self.prerelease and other.prerelease:
                return 1
            elif self.prerelease and other.prerelease:
                # Compare (tag rank, prerelease number) using prerelease_order.
                return cmp((self.prerelease_order[self.prerelease[0]], self.prerelease[1]),
                           (self.prerelease_order[other.prerelease[0]], other.prerelease[1]))
        else: # numeric versions don't match --
            return compare # prerelease stuff doesn't matter
def getDefaultChannel(currentVersion):
    """Infer the update channel from the current B3 version string.

    'dev' and alpha ('a') prereleases map to the dev channel, betas ('b')
    to the beta channel, and anything else — including None or a string
    that does not parse — to the stable channel.
    """
    if currentVersion is None:
        return UPDATE_CHANNEL_STABLE
    # re.IGNORECASE replaces the former mid-pattern '(?i)' inline flag,
    # which is deprecated when not at the start of the pattern and raises
    # an error on Python 3.11+.
    m = re.match(r'^\d+\.\d+(\.\d+)?(?P<prerelease>[ab]|dev)\d*$',
                 currentVersion, re.IGNORECASE)
    if not m:
        return UPDATE_CHANNEL_STABLE
    elif m.group('prerelease').lower() in ('dev', 'a'):
        return UPDATE_CHANNEL_DEV
    elif m.group('prerelease').lower() == 'b':
        return UPDATE_CHANNEL_BETA
def checkUpdate(currentVersion, channel=None, singleLine=True, showErrormsg=False, timeout=4):
"""
check if an update of B3 is available
"""
if channel is None:
channel = getDefaultChannel(currentVersion)
if not singleLine:
sys.stdout.write("checking for updates... \n")
message = None
errorMessage = None
version_info = None
try:
json_data = urllib2.urlopen(URL_B3_LATEST_VERSION, timeout=timeout).read()
version_info = json.loads(json_data)
except IOError, e:
if hasattr(e, 'reason'):
errorMessage = "%s" % e.reason
elif hasattr(e, 'code'):
errorMessage = "error code: %s" % e.code
else:
errorMessage = "%s" % e
except Exception, e:
errorMessage = repr(e)
else:
latestVersion = None
latestUrl = None
try:
channels = version_info['B3']['channels']
except KeyError, err:
errorMessage = repr(err) + ". %s" % version_info
else:
if channel not in channels:
errorMessage = "unknown channel '%s'. Expecting one of '%s'" % (channel, ", '".join(channels.keys()))
else:
try:
latestVersion = channels[channel]['latest-version']
except KeyError:
errorMessage = repr(err) + ". %s" % version_info
if not errorMessage:
try:
latestUrl = version_info['B3']['channels'][channel]['url']
except KeyError:
latestUrl = "www.bigbrotherbot.net"
not singleLine and sys.stdout.write("latest B3 %s version is %s\n" % (channel, latestVersion))
_lver = B3version(latestVersion)
_cver = B3version(currentVersion)
if _cver < _lver:
if singleLine:
message = "*** NOTICE: B3 %s is available. See %s ! ***" % (latestVersion, latestUrl)
else:
message = """
_\|/_
(o o) {version:^21}
+----oOO---OOo-----------------------+
| |
| |
| A newer version of B3 is available |
| |
| {url:^34} |
| |
+------------------------------------+
""".format(version=latestVersion, url=latestUrl)
if errorMessage and showErrormsg:
return "Could not check updates. %s" % errorMessage
elif message:
return message
else:
return None
|
8,961 | 292cfecb701ecc179381d4453063aff532a0e877 | import cv2
# Load the demo image (BGR).
# NOTE(review): cv2.imread returns None on a missing file — no check here.
img = cv2.imread('Chapter1/resources/jacuzi.jpg')
# NOTE(review): imgGrey is computed but never used; Canny below runs on the
# original color image.
imgGrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Canny edge detection with hysteresis thresholds 240 (low) / 250 (high).
imgCanny = cv2.Canny(img,240,250)
cv2.imshow("output",imgCanny)
# Block until a key is pressed so the window stays open.
cv2.waitKey(0)
8,962 | 86ea1c46383b5a8790eb187163107f4100395ef3 | from typing import Set, Dict, Tuple
from flask import Flask, render_template, request
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top_secret'
# Определение константных величин
# Constant definitions: transition table, start state and accepting states.
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S',
                                    ('H', 'b'): 'SE',
                                    ('S', 'b'): 'SE',
                                    ('SE', 'a'): 'SE',
                                    ('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}

def finite_automate(word: str) -> str:
    """Finite-state machine that checks a character string.

    Yields a trace line for every transition (remaining input and current
    state), then a final verdict: whether the string belongs to the language.
    """
    current = INITIAL_STATE
    remaining = word
    while remaining:
        yield f'{remaining} --> {current}'
        current = RULE.get((current, remaining[0]))
        if not current:
            break  # no transition defined for this symbol: reject early
        remaining = remaining[1:]
    yield ('Цепочка принадлежит языку' if current in FINAL_STATE
           else 'Цепочка не принадлежит языку')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the input form; on POST, run the automaton on the given word.

    'res' is a lazy generator of trace lines — it is consumed by the
    template when it iterates over it.
    """
    res = None
    if request.method == 'POST':
        res = finite_automate(request.form['word'])
    return render_template('index.html', res=res)
|
8,963 | 14826b5b121ba2939519492c1e1d8700c32396d2 | from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI
from datetime import datetime
from re import findall
# current_time = datetime.now().strftime("%H:%M:%S %d.%m.%Y")
class ZabbixItem():
    """Helper around the Zabbix API: resolves the extension template and host
    group, creates per-extension hosts and pushes phone metrics via
    zabbix_sender."""

    def __init__(self, user, password, ext_group, ext_template, zabbix_host):
        """
        Open the API session and cache the ids used when creating hosts.
        :param user: Zabbix API user
        :param password: Zabbix API password
        :param ext_group: host group name for extension hosts
        :param ext_template: template name for extension hosts
        :param zabbix_host: Zabbix server address (no scheme)
        """
        self.user = user
        self.password = password
        self.zabbix_host = zabbix_host
        self.zabbix_api = f"http://{zabbix_host}"
        self.connection = self.connection_init()
        self.template_id = self.get_template(ext_template)
        self.group_id = self.get_group(ext_group)

    def connection_init(self):
        '''
        Zabbix connection init
        :return: authenticated ZabbixAPI connection
        '''
        return ZabbixAPI(f"http://{self.zabbix_host}", user=self.user, password=self.password)

    def get_template(self, template_name):
        '''
        Get template id by template name
        :param template_name: template name to look up
        :return: template id as string, or False when not found
        '''
        ext_template = self.connection.do_request("template.get", {
            "filter": {"host": [template_name]},
            "output": "template_id"
        }).get("result")
        if ext_template:
            result = ext_template[0].get("templateid")
        else:
            result = False
        return result

    def get_group(self, group_name):
        """
        Get group Id
        :param group_name: host group name to look up
        :return: group ID as string, or False when not found
        """
        group = self.connection.do_request("hostgroup.get", {
            "filter": {"name": [group_name]},
            "output": "extend"
        }).get("result")
        if group:
            result = group[0].get("groupid")
        else:
            # print("create Group")
            result = False
        return result

    def clear_ping(self, value):
        """
        Extract the integer ping value from raw text such as '50 ms'.

        Fixes two defects of the previous find(" ")-based slicing: a value
        without a space ('50') lost its last digit (find() == -1 sliced to
        [:-1]), and a non-string value — such as the integer default used
        by send_data() — raised an uncaught AttributeError.
        :param value: raw data, '50 ms' as example (non-str accepted)
        :return: integer value, or False when no number can be extracted
        """
        try:
            result = int(str(value).split()[0])
        except (IndexError, ValueError):
            # Empty input -> IndexError; non-numeric token -> ValueError.
            result = False
        return result

    def host_create(self, data):
        '''
        Create host item
        :param data: params dict for the host.create API call
        :return: the API response's "result" payload
        '''
        # JSON-RPC responses are dicts: take "result" directly. The previous
        # "[0]" indexing of the dict raised KeyError on every call.
        return self.connection.do_request('host.create', data).get("result")

    def assign_template_to_host(self, host_id):
        """
        Assign template to host
        :param host_id: host id
        :return: raw API response
        """
        # do_request() takes (method, params); the old keyword-argument call
        # (with a misspelled 'teamplateid') never produced a valid request.
        return self.connection.do_request("template.update", {
            "templateid": self.template_id,
            "hosts": [host_id]
        })

    def send_data(self, data):
        """
        Send one extension's metrics to the server via zabbix_sender.
        :param data: data dict, e.g. {'ext': '1105', 'ip_address':
            '192.168.10.55', 'status': 'OK', 'ping': '5 ms',
            'user': 'Secretary', 'user_agent': 'Cisco/SPA508G-7.4.9a'}
        :return: None
        """
        sender_data = []
        host_id = data.get("ext")
        zbx_sender = ZabbixSender(self.zabbix_host)
        extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get("ip_address"))
        sender_data.append(extension_ip)
        # Missing ping defaults to 10000; clear_ping() accepts the int.
        extension_ping = ZabbixMetric(host_id, "extPhonePing", self.clear_ping(data.get("ping", 10000)))
        sender_data.append(extension_ping)
        extension_status = ZabbixMetric(host_id, "extStatus", data.get("status", ""))
        sender_data.append(extension_status)
        extension_user = ZabbixMetric(host_id, "extUser", data.get("user", ""))
        sender_data.append(extension_user)
        extension_useragent = ZabbixMetric(host_id, "extUserAgent", data.get("user_agent", ""))
        sender_data.append(extension_useragent)
        zbx_sender.send(sender_data)

    def worker(self, data):
        """
        Check host. If extension exists - send new data, otherwise - create
        extension's host in zabbix (with template and group) and send data.
        :param data: dict with data (must contain 'ext')
        :return: host id
        """
        print(data)
        host_raw = self.connection.do_request('host.get', {
            'filter': {'host': data["ext"]},
            'output': ['hostid']
        }).get("result")
        if host_raw:
            host_id = host_raw[0].get("hostid")
        else:
            host_new = self.connection.do_request('host.create', {
                "host": f"{data.get('ext')}",
                "templates": [
                    {"templateid": self.template_id}
                ],
                "groups": [
                    {"groupid": self.group_id}
                ]
            })
            host_id = host_new.get("result").get("hostids")[0]
        self.send_data(data)
        # The docstring promised the host id but the old code returned None.
        return host_id
|
8,964 | 237277e132c8223c6048be9b754516635ab720e2 | # -*- coding: utf-8 -*-
import requests
import json
import boto3
from lxml.html import parse
CardTitlePrefix = "Greeting"

def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """
    Assemble the speechlet JSON fragment: plain-text speech, a simple card
    (title prefixed with CardTitlePrefix), the reprompt text and the
    end-of-session flag.
    """
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'Simple',
        'title': CardTitlePrefix + " - " + title,
        'content': output
    }
    reprompt_speech = {'type': 'PlainText', 'text': reprompt_text}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': {'outputSpeech': reprompt_speech},
        'shouldEndSession': should_end_session
    }
def build_response(session_attributes, speechlet_response):
    """
    Wrap a speechlet response in the top-level Alexa response envelope.
    """
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
def get_welcome_response():
    """Build the welcome/help response that explains what the skill can do."""
    greeting = ("Welcome to the L.A. Board of Supervisors Skill. You can say, "
                "give me recent motions or give me the latest agenda.")
    print(greeting)
    # Spoken when the user replies with something we don't understand.
    reprompt = "I'm sorry - I didn't understand. You should say give me latest motions."
    speechlet = build_speechlet_response("Hello", greeting, reprompt, True)
    return build_response({}, speechlet)
def replace_with_longform_name(name):
    """Expand a county department abbreviation to its full name.

    Unknown abbreviations are returned unchanged.
    """
    longform = {
        "LASD": "Los Angeles County Sheriff's Department",
        "DMH": "Department of Mental Health",
    }
    return longform.get(name, name)
def get_next_motions_response(session):
    """Fetch the Nth most recent board correspondence item and speak it.

    'result_number' in the session attributes is the 1-based cursor into
    the county search API; it is advanced by one on each call so repeated
    "next item" requests walk through the results. The item's URL is stored
    in 'result_url' for the text-me-a-link intent.
    """
    print("Initial session attributes are "+str(session['attributes']));
    if "result_number" not in session['attributes']:
        # First call in this session: start at the newest item.
        print("Second session attributes are "+str(session['attributes']));
        session['attributes']['result_number'] = 1;
        print("Value is "+str(session['attributes']['result_number']));
    print("Final session attributes are "+str(session['attributes']))
    result_number = session['attributes']['result_number'];
    host = "http://api.lacounty.gov";
    # PStart == PEnd: request exactly one result (the cursor's item).
    url = host + "/searchAPIWeb/searchapi?type=bcsearch&database=OMD&" \
        "SearchTerm=1&title=1&content=1&PStart=" + str(result_number) +"&PEnd=" + str(result_number) +"&_=1509121047612"
    response = requests.get(url);
    #print(response.text);
    data = json.loads(response.text)
    alexaResponse = "";
    if(result_number == 1):
        alexaResponse = "Here is the latest correspondence before the L.A. board (both upcoming and past): "
    alexaResponse += str(result_number)+": From the "+replace_with_longform_name(data["results"][0]["department"])+ ", "
    alexaResponse += "on "+data["results"][0]["date"]+", "
    alexaResponse += data["results"][0]["title"]+"... "
    alexaResponse += "You can say text me link or next item"
    # Advance the cursor and remember this item's URL for SMS delivery.
    session['attributes']['result_number'] = result_number + 1;
    session['attributes']['result_url'] = data["results"][0]["url"];
    #text_url_to_number(session);
    reprompt_text = "I'm sorry - I didn't understand. You should say text me link or next item"
    card_title = "LA Board Latest Motions Message";
    greeting_string = alexaResponse;
    return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))
def get_next_agenda_response(session):
    """Scrape the board agendas page for the latest agenda link.

    Stores the absolute agenda URL in session attributes ('result_url') so
    the text-me intent can SMS it, and asks the user to say "text me".
    """
    print("Initial session attributes are "+str(session['attributes']));
    host = "http://bos.lacounty.gov/Board-Meeting/Board-Agendas";
    url = host;
    # lxml.html.parse fetches and parses the page directly from the URL.
    page = parse(url)
    # Divs that contain a "View Agenda" anchor; the first is the newest.
    nodes = page.xpath("//div[a[text()='View Agenda']]");
    latest_agenda_node = nodes[0];
    # The <ul> text is the human-readable agenda heading (e.g. meeting date).
    headline = latest_agenda_node.find("ul").xpath("string()").strip();
    print(headline);
    # The anchor href is site-relative; prefix the host below.
    agenda_url = latest_agenda_node.find("a[@href]").attrib['href'];
    print("http://bos.lacounty.gov"+agenda_url)
    agenda_heading = headline;
    #session['attributes']['result_url']
    session['attributes']['result_url'] = "http://bos.lacounty.gov"+agenda_url;
    card_title = "Agenda";
    greeting_string = "I have a link for the "+agenda_heading+". Say text me and I'll send it to you.";
    reprompt = "Say text me to receive a link to the agenda."
    return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt, False))
def text_url_to_number(session, intent):
    """Text the most recently viewed URL to the user's phone via SNS.

    When the phoneNumber slot carries no value yet, prompt for the number;
    otherwise send the session's stored result URL (falling back to the
    OMD portal) by SMS and confirm, ending the session.
    """
    slot = intent['slots']['phoneNumber']
    # Prompt whenever the slot is unfilled. The previous check additionally
    # required that no number was stored in the session, which made this
    # fall through to slot['value'] and raise KeyError when a number was
    # already stored but the new request carried no slot value.
    if "value" not in slot:
        greeting_string = "Say your nine digit phone number, including the area code"
        card_title = "What's your phone number?"
        reprompt_text = "I didn't understand. Please say your nine digit mobile phone number."
        return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))
    number = slot['value']
    if "result_url" not in session['attributes']:
        # Nothing browsed yet in this session: fall back to the OMD portal.
        session['attributes']['result_url'] = 'http://portal.lacounty.gov/wps/portal/omd'
    url = session['attributes']['result_url']
    session['attributes']['phone_number'] = number
    sns_client = boto3.client('sns')
    # SNS expects an E.164-ish number; prepend the US country code.
    response = sns_client.publish(
        PhoneNumber='1'+str(number),
        Message="Thank you for using the LA Board of Supervisors Skill. Here's your URL: "+url
    )
    # Read the digits back one by one so Alexa speaks them individually.
    greeting_string = "Sent text message to "+ " ".join(number)
    card_title = "Sent motion URL via text message"
    reprompt_text = "I didn't understand. Please say your nine digit mobile phone number."
    return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, True))
def on_session_started(session_started_request, session):
    """Initialize per-session state when a new Alexa session begins."""
    # Every session starts with an empty attribute store.
    session['attributes'] = {}
    log_line = ("on_session_started requestId=" + session_started_request['requestId']
                + ", sessionId=" + session['sessionId'])
    print(log_line)
def handle_session_end_request():
    """Say goodbye and end the session."""
    goodbye = "Thank you for using the County of LA Board of Supervisors Skill. See you next time!"
    speechlet = build_speechlet_response(
        "County of LA Board of Supervisors Skill- Thanks", goodbye, None, True)
    return build_response({}, speechlet)
def on_launch(launch_request, session):
    """Handle a bare skill launch (no intent): greet the user."""
    print("on_launch requestId=" + launch_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    return get_welcome_response()
def on_intent(intent_request, session):
    """Route an incoming IntentRequest to the handler for its intent name.

    Raises ValueError for intents this skill does not implement.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    name = intent['name']
    # Guard-style dispatch; intent names are mutually exclusive.
    if name in ("GetLatestMotionsIntent", "GetNextMotionIntent"):
        return get_next_motions_response(session)
    if name == "GetLatestAgendaIntent":
        return get_next_agenda_response(session)
    if name == "SetPhoneNumberIntent":
        return text_url_to_number(session, intent)
    if name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch on the Alexa request type.

    NOTE(review): the application id is printed but not validated against
    this skill's id — confirm whether verification is required.
    """
    print("Test!")
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # New session: reset the attribute store before handling the request.
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])
    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'])
    elif event['request']['type'] == "SessionEndedRequest":
        return handle_session_end_request()
|
8,965 | 035043460805b7fe92e078e05708d368130e3527 | # ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test create publication target github"""
from os.path import join as opj
# this must with with and without pygithub
from datalad.api import create_sibling_github
from datalad.api import Dataset
from datalad.support.exceptions import MissingExternalDependency
from datalad.tests.utils import with_tempfile
from nose.tools import assert_raises, assert_in, assert_true, assert_false, \
assert_not_in, assert_equal
from nose import SkipTest
try:
import github as gh
except ImportError:
# make sure that the command complains too
assert_raises(MissingExternalDependency, create_sibling_github, 'some')
raise SkipTest
@with_tempfile
def test_invalid_call(path):
    """Bad arguments must fail early: no dataset, then empty credentials.

    @with_tempfile injects *path*, a fresh temporary location.
    """
    # no dataset
    assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
    ds = Dataset(path).create()
    # no user: github should reject the empty credentials
    assert_raises(gh.BadCredentialsException, ds.create_sibling_github, 'bogus', github_user='')
@with_tempfile
def test_dont_trip_over_missing_subds(path):
    """create_sibling_github must not crash on a known-but-uninstalled
    subdataset; failures should come from github credentials, not from
    local state inspection."""
    ds1 = Dataset(opj(path, 'ds1')).create()
    ds2 = Dataset(opj(path, 'ds2')).create()
    subds2 = ds1.install(source=ds2.path, path='subds2')
    assert_true(subds2.is_installed())
    assert_in('subds2', ds1.get_subdatasets())
    # Uninstall: still registered as a subdataset, but no longer present.
    subds2.uninstall(remove_handles=True, remove_history=True)
    assert_in('subds2', ds1.get_subdatasets())
    assert_false(subds2.is_installed())
    # this will deinit the submodule
    ds1.save(files=['subds2'])
    # see if it wants to talk to github (and fail), or if it trips over something
    # before
    assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')
    # inject remote config prior run
    assert_not_in('github', ds1.repo.get_remotes())
    # fail on existing
    ds1.repo.add_remote('github', 'http://nothere')
    assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')
    # talk to github when existing is OK
    assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='', existing='reconfigure')
    # return happy emptiness when all is skipped
    assert_equal(ds1.create_sibling_github('bogus', recursive=True, github_user='', existing='skip'), [])
|
8,966 | 97a059d6d34b924a0512ebe6ff5ab1d5ccc072d5 | # Author: Loren Matilsky
# Date created: 03/02/2019
# Time-latitude / time-radius plotting driver for a single azimuthal
# wavenumber (mval) of a simulation trace.
# NOTE(review): flat script driven entirely by command-line args; it relies
# on globals exported by the star-imported project modules (common,
# cla_util, plotcommon, timey_util) -- e.g. rsun, default_titlesize,
# plot_timey_kwargs_default.
import matplotlib.pyplot as plt
import numpy as np
import sys, os
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
from common import *
from cla_util import *
from plotcommon import *
from timey_util import *

# Set fontsize
fontsize = default_titlesize

# Read command-line arguments (CLAs)
args = sys.argv
clas0, clas = read_clas(args)
dirname = clas0['dirname']
dirname_stripped = strip_dirname(dirname)

# See if magnetism is "on"
magnetism = clas0['magnetism']

# defaults
kwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False, 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None, 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})
kwargs_default.update(plot_timey_kwargs_default)

# check for bad keys
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)

# overwrite defaults
kw = update_dict(kwargs_default, clas)
# add in groupname keys
kw.update(get_quantity_group(kw.groupname, magnetism))
# user may have wanted to change some groupname keys
kw = update_dict(kw, clas)
kw_plot_timey = update_dict(plot_timey_kwargs_default, clas)

# check if we want the real or imaginary vals
if kw.imag:
    take_real = False
else:
    take_real = True

# baseline time unit
time_unit, time_label, rotation, simple_label = get_time_unit(dirname)

# get grid info
di_grid = get_grid_info(dirname)

# choose the dataset type: time-latitude by default, time-radius on request
datatype = 'mertimelat'
dataname = 'mertimelat'
sampleaxis = di_grid['tt_lat']
if kw.rad:
    datatype = 'mertimerad'
    dataname = 'mertimerad'
    sampleaxis = di_grid['rr']/rsun
if kw.mtimerad:
    kw.rad = True
    radlevs = get_slice_levels(dirname)
    datatype = 'mtimerad'
    dataname = 'mtimerad'
    # NOTE(review): duplicate of the get_slice_levels call two lines up
    radlevs = get_slice_levels(dirname)
    sampleaxis = radlevs.radius/rsun
datatype += '_mval%03i' %kw.mval
# 'groupname' is always among the defaults above, so this test always passes
if 'groupname' in kw:
    dataname += '_' + kw.groupname
if not kw.rcut is None:
    dataname += '_rcut%0.3f' %kw.rcut
#dataname += clas0['tag']

# get data
if kw.the_file is None:
    kw.the_file = get_widest_range_file(clas0['datadir'] +\
        datatype + '/', dataname)

# Read in the data
print ('reading ' + kw.the_file)
di = get_dict(kw.the_file)
vals = di['vals']
times = di['times']
iters = di['iters']
qvals_avail = np.array(di['qvals'])
if kw.mtimerad:
    samplevals_avail = di['latvals']
else:
    samplevals_avail = di['samplevals']
iter1, iter2 = get_iters_from_file(kw.the_file)
# express times in the baseline time unit from here on
times /= time_unit

# maybe thin data
if not kw.ntot == 'full':
    print ("ntot = %i" %kw.ntot)
    print ("before thin_data: len(times) = %i" %len(times))
    times = thin_data(times, kw.ntot)
    iters = thin_data(iters, kw.ntot)
    vals = thin_data(vals, kw.ntot)
    print ("after thin_data: len(times) = %i" %len(times))

# these all need to be arrays
kw.qvals = make_array(kw.qvals)
kw.isamplevals = make_array(kw.isamplevals)
if not isall(kw.samplevals):
    kw.samplevals = make_array(kw.samplevals)

# get raw traces of desired variables
# (each requested qval is matched to the nearest available quantity code)
terms = []
for qval in kw.qvals:
    qind = np.argmin(np.abs(qvals_avail - qval))
    if take_real:
        the_term = np.real(vals[:, :, :, qind])
    else:
        the_term = np.imag(vals[:, :, :, qind])
    terms.append(the_term)

# set figure dimensions
sub_width_inches = 7.5
sub_height_inches = 2.0
margin_bottom_inches = 3/8 # space for x-axis label
margin_top_inches = 1
margin_left_inches = 5/8 # space for latitude label
margin_right_inches = 7/8 # space for colorbar
if 'ycut' in clas:
    margin_right_inches *= 2
nplots = len(terms)

# determine desired levels to plot
if not kw.samplevals is None: # isamplevals being set indirectly
    # check for special 'all' option
    if isall(kw.samplevals):
        kw.isamplevals = np.arange(len(samplevals_avail))
    else:
        kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
        for i in range(len(kw.samplevals)):
            kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.samplevals[i]))

# Loop over the desired levels and save plots
for isampleval in kw.isamplevals:
    # NOTE(review): kw.shav is not in the defaults declared above;
    # presumably it comes from plot_timey_kwargs_default -- confirm. If it
    # were truthy, sampleval would stay undefined and the code below fails.
    if not kw.shav:
        sampleval = samplevals_avail[isampleval]
        # set some labels
        axislabel = 'latitude (deg)'
        samplelabel = r'$r/R_\odot$' + ' = %.3f' %sampleval
        position_tag = '_rval%.3f' %sampleval
    if kw.rad:
        axislabel = r'$r/R_\odot$'
        samplelabel = 'lat = ' + lat_format(sampleval)
        position_tag = '_lat' + lat_format(sampleval)

    # Put some useful information on the title
    maintitle = dirname_stripped
    maintitle += '\n' + samplelabel
    maintitle += '\nmval = %03i' %kw.mval
    if kw.navg is None:
        maintitle += '\nt_avg = none'
    else:
        averaging_time = (times[-1] - times[0])/len(times)*kw.navg
        maintitle += '\n' + ('t_avg = %.1f Prot' %averaging_time)

    print('plotting sampleval = %0.3f (i = %02i)' %(sampleval, isampleval))

    # make plot
    fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=sub_width_inches, sub_height_inches=sub_height_inches, margin_left_inches=margin_left_inches, margin_right_inches=margin_right_inches, margin_top_inches=margin_top_inches, margin_bottom_inches=margin_bottom_inches)

    for iplot in range(nplots):
        ax = axs[iplot, 0]
        # pick the fixed sample index on the appropriate axis
        # (assumes vals axes are (time, axis1, axis2) -- TODO confirm)
        if kw.rad:
            field = terms[iplot][:, isampleval, :]
        else:
            field = terms[iplot][:, :, isampleval]
        plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)

        # title the plot
        ax.set_title(kw.titles[iplot], fontsize=fontsize)

        # Turn the x tick labels off for the top strips
        #if iplot < nplots - 1:
        #    ax.set_xticklabels([])

        # Put time label on bottom strip
        if iplot == nplots - 1:
            ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
        # Put ylabel on middle strip
        if iplot == nplots//2:
            ax.set_ylabel(axislabel, fontsize=fontsize)

    fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle, fontsize=fontsize, ha='left', va='bottom')

    # Save the plot
    if clas0['saveplot']:
        # Make appropriate file name to save
        # save the figure
        basename = dataname + '_%08i_%08i' %(iter1, iter2)
        plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
        if take_real:
            realtag = '_real'
        else:
            realtag = '_imag'
        savename = basename + position_tag + realtag + '.png'
        print ("saving", plotdir + '/' + savename)
        plt.savefig(plotdir + '/' + savename, dpi=200)

    # Show the plot if only plotting at one latitude
    if clas0['showplot'] and len(kw.isamplevals) == 1:
        plt.show()
    else:
        plt.close()
    print ("=======================================")
|
8,967 | dfaea1687238d3d09fee072689cfdea392bc78f9 | #-*- coding: utf-8 -*-
import argparse
import pickle
def str2bool(v):
    """Parse a command-line flag string: 'true' or '1' (any case) -> True."""
    normalized = v.lower()
    return normalized == 'true' or normalized == '1'
# Shared argparse parser plus the registry of argument groups attached to it.
arg_lists = []
parser = argparse.ArgumentParser()
def add_argument_group(name):
    """Create a named argument group on the shared parser and register it."""
    group = parser.add_argument_group(name)
    arg_lists.append(group)
    return group
# Network
# NOTE(review): help strings were left empty by the author; sizes suggest
# an RNN language model (cell/hyper/embed/hidden) -- confirm against usage.
net_arg = add_argument_group('Network')
net_arg.add_argument('--num_steps', type=int, default=150, help='')
net_arg.add_argument('--cell_size', type=int, default=700, help='')
net_arg.add_argument('--hyper_size', type=int, default=400, help='')
net_arg.add_argument('--embed_size', type=int, default=128, help='')
net_arg.add_argument('--hidden_size', type=int, default=256, help='')
net_arg.add_argument('--num_layers', type=int, default=2, help='')
net_arg.add_argument('--fast_layers', type=int, default=2, help='')
net_arg.add_argument('--zoneout_c', type=float, default=0.5, help='')
net_arg.add_argument('--zoneout_h', type=float, default=0.9, help='')
net_arg.add_argument('--keep_prob', type=float, default=0.65, help='')
net_arg.add_argument('--input_dim', type=int, default=300, help='')
net_arg.add_argument('--num_glimpse', type=int, default=1, help='')
net_arg.add_argument('--use_terminal_symbol', type=str2bool, default=True, help='Not implemented yet')

# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--task', type=str, default='ptb')
data_arg.add_argument('--batch_size', type=int, default=128)
data_arg.add_argument('--vocab_size', type=int, default=50)
data_arg.add_argument('--input_size', type=int, default=300)
data_arg.add_argument('--min_data_length', type=int, default=5)
data_arg.add_argument('--max_data_length', type=int, default=80)
data_arg.add_argument('--train_num', type=int, default=1000000)
data_arg.add_argument('--valid_num', type=int, default=1000)
data_arg.add_argument('--test_num', type=int, default=1000)

# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--is_train', type=str2bool, default=True, help='')
train_arg.add_argument('--optimizer', type=str, default='rmsprop', help='')
train_arg.add_argument('--max_epoch', type=int, default=200, help='')
train_arg.add_argument('--max_max_epoch', type=int, default=200, help='')
train_arg.add_argument('--max_step', type=int, default=1000000, help='')
train_arg.add_argument('--init_scale', type=float, default=0.002, help='')
train_arg.add_argument('--lr_start', type=float, default=0.01, help='')
train_arg.add_argument('--lr_decay_step', type=int, default=5000, help='')
train_arg.add_argument('--lr_decay_rate', type=float, default= 0.1, help='')
train_arg.add_argument('--max_grad_norm', type=float, default=1.0, help='')
train_arg.add_argument('--checkpoint_secs', type=int, default=300, help='')

# Misc
misc_arg = add_argument_group('Misc')
misc_arg.add_argument('--log_step', type=int, default=2, help='')
misc_arg.add_argument('--num_log_samples', type=int, default=3, help='')
misc_arg.add_argument('--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'], help='')
misc_arg.add_argument('--log_dir', type=str, default='logs')
misc_arg.add_argument('--data_dir', type=str, default='data')
misc_arg.add_argument('--output_dir', type=str, default='outputs')
misc_arg.add_argument('--data_path', type=str, default='/Ujjawal/fast-slow-lstm/data' )
misc_arg.add_argument('--debug', type=str2bool, default=False)
misc_arg.add_argument('--gpu_memory_fraction', type=float, default=1.0)
misc_arg.add_argument('--random_seed', type=int, default=123, help='')
def get_config():
    """Parse the known CLI arguments, silently ignoring unrecognized ones."""
    config, _unparsed = parser.parse_known_args()
    return config
|
8,968 | 3b77f7ea5137174e6723368502659390ea064c5a | import csv
# Scan users.csv for profile names that map to more than one user id and
# print each conflicting (userId, firstSeenId, profileName) triple.
with open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:
    seen = {}
    for userId, profileName in csv.reader(users_csv):
        # 'A Customer' is the anonymous placeholder name, not a real user
        if profileName == 'A Customer':
            continue
        first_id = seen.get(profileName)
        if not first_id:
            seen.setdefault(profileName, userId)
        elif first_id != userId:
            print(f'{userId}, {first_id}, {profileName}')
8,969 | 34c7e6b6bc687bc641b7e3b9c70fd0844af8e340 | """
CONVERT HOURS INTO SECONDS
Write a function that converts hours into seconds.
Examples:
- how_many_seconds(2) -> 7200
- how_many_seconds(10) -> 36000
- how_many_seconds(24) -> 86400
Notes:
- 60 seconds in a minute; 60 minutes in a hour.
- Don't forget to return your answer.
"""
"""
U.P.E.R.
(A) UNDERSTAND:
- Objective:
- Write an algorithm that takes in a single input integer (representing a
given number of hours) and returns a single output (representing the
equivalent number of seconds).
- Expected Inputs:
- Number: 1
- Data Type: integer
- Variable Name: 'hrs_int'
- Expected Outputs:
- Number: 1
- Data Type: integer
- Variable Name: 'secs_int'
- My Examples:
- how_many_seconds(1) -> 3600
- 1 hr * (60 min/1 hr) * (60 sec/1 min) = 3600 secs
- how_many_seconds(5) -> 18000
- 5 hr * (60 min/1 hr) * (60 sec/1 min) = 18000 secs
- how_many_seconds(12) -> 43200
- 12 hr * (60 min/1 hr) * (60 sec/1 min) = 43200 secs
- Edge Cases & Constraints to Consider:
- Can the input be negative?
- No, because time is measured in positive units. The input must be greater than 0.
- Can the input be a floating point number?
- Yes, because the number of hours doesn't need to be whole in order
to find an equivalent number of seconds.
- Can the input be None?
- No, because you cannot convert 'None' number of hours.
(B) PLAN:
(1) Create a function that takes in a single given input, 'hrs_int', and returns a single output, 'secs_int'.
(2) Assign the value of 'None' to two new variables, 'mins_int' and 'secs_int'.
(3) Make sure that a conversion of hours to seconds will NOT occur unless the given input, 'hrs_int', is in fact of either "integer" or "float" data type.
(a) If the given input, 'hrs_int', is a valid argument, proceed with converting the given number of hours into an equivalent number of seconds.
i. Convert the number of hours in 'hrs_int' into an equivalent number of minutes and store that value in the previously declared 'mins_int' variable.
ii. Convert the number of minutes in 'mins_int' into an equivalent number of seconds and store that value in the previously declared 'secs_int' variable.
(b) If the given input, 'hrs_int', is an INVALID argument (i.e. - negative value, not of 'integer' or 'float' data types, null), handle the error with a 'TypeError' exception.
(4) Return the value of 'secs_int'.
"""
# (C) EXECUTE:
# def how_many_seconds(hrs_int):
# mins_int = None
# secs_int = None
# if hrs_int > 0 and hrs_int is not None:
# mins_int = hrs_int * 60 # converts given hours into minutes
# secs_int = mins_int * 60 # converts given minutes into seconds
# else:
# raise TypeError("Invalid input type")
# return secs_int
# (D) REFLECT/REFACTOR:
# Asymptotic Analysis:
# - Time Complexity = O(1)
# - Space Complexity = O(1)
# Can the brute force solution be optimized further?
# - Yes, but only by reducing the total number of lines of code and NOT by
# improving time/space complexity of the solution.
def how_many_seconds(hrs_int):
    """Convert a number of hours into the equivalent number of seconds.

    Args:
        hrs_int: positive number of hours (int or float).

    Returns:
        The number of seconds (hrs_int * 3600).

    Raises:
        TypeError: if hrs_int is None, not a number, or not positive
            (kept as TypeError for backward compatibility with callers).
    """
    # Check for None / non-numeric input explicitly; the original relied on
    # the `>` comparison itself raising TypeError for those cases.
    if hrs_int is None or not isinstance(hrs_int, (int, float)):
        raise TypeError("Invalid input type")
    if hrs_int <= 0:
        raise TypeError("Invalid input type")
    # 60 seconds per minute, 60 minutes per hour
    return hrs_int * 60 * 60
8,970 | 68493acce71060799da8c6cb03f2ddffce64aa92 | import requests, vars
def Cardid(name):
    """Return 1 if a visible card named *name* exists on the Trello board,
    else 0.

    Queries the board URL configured in the project's `vars` module.
    """
    query = {"key": vars.Key, "token": vars.Token, "cards": "visible"}
    execute = requests.request("GET", vars.BoardGetUrl, params=query).json()
    # BUG FIX: initialize before the loop -- the original left cardID
    # unbound (NameError on return) when the board had no cards at all.
    cardID = 0
    for row in execute['cards']:
        if row['name'] == name:
            cardID = 1
            break
    return cardID
|
8,971 | 6f5eda426daf5db84dc205f36ec31e9076acb8ee | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 13:04:32 2018
@author: andrew
"""
import os
import glob
import initialize
import psf
from astropy.io import fits
import filters
import numpy as np
import sys
import MR
from tqdm import tqdm
def sextractor_MR(location, MR_method='swarp', use_config_file=True):
    '''
    runs SExtractor on master residual

    Builds the master residual (residuals/MR.fits) first if it does not
    exist, extracts sources from it and from its negation, appends the
    negative detections to the positive catalog, then filters the result.
    '''
    check_MR = glob.glob("%s/residuals/MR.fits" % (location))
    if check_MR == []:
        print("-> Master residual does not exist, creating it first...")
        if use_config_file == True:
            MR_method = initialize.get_config_value('MR_method')
        MR.MR(location, MR_method)
    master_res = glob.glob("%s/residuals/MR.fits" % (location))
    temp = glob.glob("%s/templates/*.fits" % (location))
    if len(master_res) == 1:
        if len(temp) == 1:
            # BUG FIX: this local used to be named "MR", which shadowed the
            # imported MR module for the whole function and made the
            # MR.MR(...) call above raise UnboundLocalError.
            mr_path = master_res[0]
            template = temp[0]
            temp_name = template.split('/')[-1]
            temp_name = temp_name[:-5]
            MR_hdu = fits.open(mr_path)
            MR_header = MR_hdu[0].header
            saturate = MR_header['SATURATE']
            temp_hdr = fits.getheader(template)
            pixscale = temp_hdr['PIXSCALE']
            MR_hdu.close()
            FWHM = psf.fwhm_template(template)
            config_loc = location + '/configs/default.sex'
            with open(config_loc, 'r') as config:
                data = config.readlines()
            data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
            data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
            print("\n-> SExtracting master residual...")
            with open(config_loc, 'r') as config:
                data = config.readlines()
            # Fixed indices below correspond to lines of the default.sex
            # template written by initialize.create_configs.
            data[51] = "SATUR_LEVEL" + " " + str(saturate) + "\n"
            data[62] = "SEEING_FWHM" + " " + str(FWHM) + "\n"
            data[106] = "PSF_NAME" + " " + location + "/psf/" + temp_name + ".psf" + "\n"
            data[58] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
            data[32] = "WEIGHT_IMAGE" + " " + "%s[1]" % (mr_path) + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
            os.system("sextractor %s[0]> %s/sources/MR_sources.txt -c %s" % (mr_path, location, config_loc))
            # Negate the image and extract again to catch negative residuals.
            temp_hdu_data = fits.PrimaryHDU((fits.getdata(mr_path))*-1, header=fits.getheader(mr_path))
            temp_hdu_mask = fits.ImageHDU(fits.getdata(mr_path, 1))
            temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
            temp_hdu_list.writeto("%s/residuals/MR_neg.fits" % (location))
            os.system("sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s" % (location, location, config_loc))
            append_negative_sources(mr_path, MR=True)
            MR_filter_sources(location)
        else:
            print("-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual")
    else:
        print("-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual")
def sextractor(location):
    '''
    runs SExtractor on all residual images

    For every residuals/*_residual_.fits file: converts the mask in FITS
    extension 1 into a weight map (attached once, tracked via the WEIGHT
    header key), normalizes the residual once (NORM key), then extracts
    sources from the residual and from its negation, appending negative
    detections to the positive catalog. Finally joins and filters all
    per-image catalogs.
    '''
    x = 0
    sources = location + "/sources"
    residuals = location + "/residuals"
    check = os.path.exists(sources)
    check_temp = os.path.exists(sources + '/temp')
    # offset used to strip "<location>/residuals/" from image paths
    length = len(residuals) + 1
    if check == False:
        os.system("mkdir %s" % (sources))
        os.system("mkdir %s/temp" % (sources))
    else:
        if check_temp == False:
            os.system("mkdir %s/temp" % (sources))
    images = glob.glob(residuals + "/*_residual_.fits")
    initialize.create_configs(location)
    config_loc = location + '/configs/default.sex'
    with open(config_loc, 'r') as config:
        data = config.readlines()
        config.close()
    # Fixed indices below correspond to lines of the default.sex template.
    data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
    data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
        config.close()
    print("-> Converting all residual masks into weight maps...\n")
    for r in tqdm(images):
        weight = weight_map(r)
        hdu = fits.open(r, mode='update')
        # NOTE(review): "data" is reused here for pixel data after having
        # held config lines above.
        data = hdu[0].data
        hdr = hdu[0].header
        try:
            # attach the weight map only if not already done
            if hdr['WEIGHT'] == 'N':
                hdr.set('WEIGHT','Y')
                hduData = fits.PrimaryHDU(data, header=hdr)
                hduWeight = fits.ImageHDU(weight)
                hduList = fits.HDUList([hduData, hduWeight])
                hduList.writeto(r, overwrite=True)
        except KeyError:
            # no WEIGHT keyword yet: first time this image is processed
            hdr.set('WEIGHT','Y')
            hduData = fits.PrimaryHDU(data, header=hdr)
            hduWeight = fits.ImageHDU(weight)
            hduList = fits.HDUList([hduData, hduWeight])
            hduList.writeto(r, overwrite=True)
        hdu.close()
        try:
            # normalize each residual exactly once, tracked by NORM
            if fits.getval(r, 'NORM') == 'N':
                fits.setval(r, 'NORM', value='Y')
                MR.normalize(r)
        except KeyError:
            fits.setval(r, 'NORM', value='Y')
            MR.normalize(r)
    print("\n-> SExtracting residual images...")
    for i in images:
        name = i[length:-5]
        data_name = location + '/data/' + name.replace('residual_','') + '.fits'
        FWHM = psf.fwhm(data_name)
        im_hdu = fits.open(data_name)
        im_header = im_hdu[0].header
        saturate = im_header['SATURATE']
        pixscale = im_header['PIXSCALE']
        im_hdu.close()
        with open(config_loc, 'r') as config:
            data = config.readlines()
            config.close()
        data[51] = "SATUR_LEVEL" + " " + str(saturate) + "\n"
        data[62] = "SEEING_FWHM" + " " + str(FWHM) + "\n"
        data[106] = "PSF_NAME" + " " + location + "/psf/" + name[:-9] + ".psf" + "\n"
        data[58] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
        data[32] = "WEIGHT_IMAGE" + " " + "%s[1]" % (i) + "\n"
        with open(config_loc, 'w') as config:
            config.writelines(data)
            config.close()
        os.system("sextractor %s[0]> %s/temp/%s.txt -c %s" % (i, sources, name, config_loc))
        # extract from the negated residual as well, then merge catalogs
        temp_hdu_data = fits.PrimaryHDU((fits.getdata(i))*-1, header=fits.getheader(i))
        temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))
        temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
        temp_hdu_list.writeto("%s/residuals/temp.fits" % (location))
        os.system("sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s" % (location, sources, name, config_loc))
        append_negative_sources(i)
        os.remove("%s/residuals/temp.fits" % (location))
        x += 1
        per = float(x)/float(len(images)) * 100
        print("\t %.1f%% sextracted..." % (per))
    print("-> SExtracted %d images, catalogues placed in 'sources' directory\n" % (len(images)))
    print("-> Filtering source catalogs...\n")
    src_join(location)
    filter_sources(location)
def sextractor_sim(image):
    '''
    Runs SExtractor on a simulated ("fake") image and on its negation,
    then joins and filters the resulting catalogs.
    '''
    location = image.split('/')[:-2]
    location = '/'.join(location)
    sources = location + "/sources"
    check = os.path.exists(sources)
    check_temp = os.path.exists(sources + '/temp')
    if check == False:
        os.system("mkdir %s" % (sources))
        os.system("mkdir %s/temp" % (sources))
    else:
        if check_temp == False:
            os.system("mkdir %s/temp" % (sources))
    initialize.create_configs(location)
    config_loc = location + '/configs/default.sex'
    with open(config_loc, 'r') as config:
        data = config.readlines()
    # Fixed indices correspond to lines of the default.sex template.
    data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
    data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
    print("\n-> SExtracting fake image...")
    name = image.split('/')[-1]
    with open(config_loc, 'r') as config:
        data = config.readlines()
    data[106] = "PSF_NAME" + " " + location + "/psf/" + name[:-5] + ".psf" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
    os.system("sextractor %s[0]> %s/temp/%s.txt -c %s" % (image, sources, name, config_loc))
    # Negate the image and extract again for negative sources.
    temp_hdu_data = fits.PrimaryHDU((fits.getdata(image))*-1, header=fits.getheader(image))
    temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))
    temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
    # BUG FIX: the original passed the literal string "%s/residuals/temp.fits"
    # with no % substitution, so the negated image was written to a bogus
    # relative path and the os.remove below could never find it.
    temp_hdu_list.writeto("%s/residuals/temp.fits" % (location))
    # BUG FIX: the negative catalog used to overwrite the positive one
    # (identical "%s.txt" name); write it to "_2" as sextractor() does.
    os.system("sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s" % (location, sources, name, config_loc))
    os.remove("%s/residuals/temp.fits" % (location))
    src_join(location)
    filter_sources(location)
def sextractor_psf(location):
    """Create PSFEx input catalogs (.cat) for every science image and the
    template that does not already have one.

    Returns the list of image paths that were (re)processed.
    """
    x = 0
    psf_loc = location + "/psf"
    data = location + "/data"
    templates = location + "/templates"
    check = os.path.exists(psf_loc)
    if check == False:
        os.system("mkdir %s" % (psf_loc))
    temps = glob.glob(templates + "/*.fits")
    images = glob.glob(data + "/*_A_.fits")
    for t in temps:
        images.append(t)
    # skip images that already have a .cat in psf/
    cats = glob.glob(location + '/psf/*.cat')
    images_names = [(i.split('/')[-1])[:-5] for i in images]
    cats_names = [(c.split('/')[-1])[:-4] for c in cats]
    imageCats = [im for im in images_names if im not in cats_names]
    images = []
    if temps == []:
        temps.append('')
    # rebuild full paths: the template keeps its own path, science images
    # live under <location>/data/
    for imcats in imageCats:
        if imcats == (temps[0].split('/')[-1])[:-5]:
            images.append(temps[0])
        else:
            images.append(location+'/data/'+imcats+'.fits')
    initialize.create_configs(location)
    config_loc = location + '/configs/psf.sex'
    with open(config_loc, 'r') as config:
        data = config.readlines()
        config.close()
    # NOTE(review): FILTER_NAME is patched at index 19 here but at index 20
    # in sextractor_psf_sim below -- one of the two is probably off by one;
    # confirm against the psf.sex template layout.
    data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.psfex" + "\n"
    data[19] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
        config.close()
    print("\n-> Creating PSF catalogs...")
    if len(temps) == 1:
        for i in images:
            name = i.split('/')[-1][:-5]
            hdu = fits.open(i)
            hdr = hdu[0].header
            pixscale = hdr['PIXSCALE']
            hdu.close()
            with open(config_loc, 'r') as config:
                data = config.readlines()
                config.close()
            data[6] = "CATALOG_NAME" + " " + psf_loc + "/" + name + ".cat" + "\n"
            data[44] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
                config.close()
            os.system("sextractor %s[0] -c %s" % (i, config_loc))
            x += 1
            per = float(x)/float(len(images)) * 100
            print("\t %.1f%% sextracted..." % (per))
        print("-> SExtracted %d images, catalogues placed in 'psf' directory\n" % (len(images)))
    else:
        print("\n-> Error: Problem with number of template images\n")
        sys.exit()
    return images
def sextractor_psf_sim(location, image):
    """Create the PSFEx input catalog (.cat) for a single fake image."""
    psf_loc = location + "/psf"
    data = location + "/data"
    check = os.path.exists(psf_loc)
    # offset used to strip "<location>/data/" from the image path
    length = len(data) + 1
    if check == False:
        os.system("mkdir %s" % (psf_loc))
    initialize.create_configs(location)
    config_loc = location + '/configs/psf.sex'
    with open(config_loc, 'r') as config:
        data = config.readlines()
        config.close()
    # NOTE(review): FILTER_NAME patched at index 20 here but index 19 in
    # sextractor_psf -- confirm which matches the psf.sex template.
    data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.psfex" + "\n"
    data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
        config.close()
    print("\n-> Creating PSF catalog of fake image...")
    name = image[length:-5]
    with open(config_loc, 'r') as config:
        data = config.readlines()
        config.close()
    data[6] = "CATALOG_NAME" + " " + psf_loc + "/" + name + ".cat" + "\n"
    with open(config_loc, 'w') as config:
        config.writelines(data)
        config.close()
    os.system("sextractor %s[0] -c %s" % (image, config_loc))
def weight_map(image):
    """Build a SExtractor weight map from the mask in FITS extension 1.

    Good pixels (mask value 0) get weight 1.0; flagged pixels get 0.0.
    """
    hdu_list = fits.open(image)
    mask = hdu_list[1].data
    # compute before closing so the data are materialized
    weights = (mask == 0).astype(float)
    hdu_list.close()
    return weights
def src_join(location):
    '''
    Merge the per-image catalogs in sources/temp into sources/sources.txt.

    Each catalog is prefixed with its image file name and followed by a
    blank-line separator; catalogs already listed in the master file (per
    filters.get_image_names) are skipped. The temp files are always
    removed, and the temp directory removed once empty.
    '''
    source_loc = location + '/sources'
    temp_source_loc = source_loc + '/temp'
    temp_source_files = glob.glob(temp_source_loc + '/*.txt')
    image_names = filters.get_image_names(location)
    for file in temp_source_files:
        with open(file, 'r') as fl:
            data = fl.readlines()
        # header line: the catalog's image name (path prefix stripped)
        data = [str(file.replace('txt','fits')[len(source_loc)+6:]) + '\n'] + data
        data.append("\n\n\n")
        with open(source_loc + '/sources.txt', 'a+') as s:
            if data[0] not in image_names:
                s.writelines(data)
        os.remove(file)
    try:
        os.rmdir(temp_source_loc)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; only directory-removal failures should be tolerated.
    except OSError:
        print("-> Error: Problem removing temp directory in '/sources'")
def filter_sources(location, mask_sources=False):
    """Filter spurious detections out of the residual source catalogs.

    Applies the spread-model filter, then the divot filter per science
    image; optionally masks the surviving sources in each residual.
    """
    print("\n-> Filtering out non PSF-like sources...")
    filters.spread_model_filter(location)
    print("-> Filtering out diveted detections...")
    for image in glob.glob(location + '/data/*_A_.fits'):
        bad_indices = filters.divot(image)
        filters.update_filtered_sources(location, bad_indices)
    residual_images = glob.glob("%s/residuals/*_residual_.fits" % (location))
    if mask_sources == True:
        for residual in residual_images:
            filters.mask_sources_image(residual)
def MR_filter_sources(location):
    """Filter the master-residual catalog and write the combined totals.

    Prepends the "MR.fits" header line to sources/MR_sources.txt, then
    runs the spread-model and divot filters against residuals/MR.fits.
    """
    catalog_path = "%s/sources/MR_sources.txt" % (location)
    with open(catalog_path, 'r') as MR_src:
        catalog_lines = MR_src.readlines()
    catalog_lines.insert(0, "MR.fits\n")
    with open(catalog_path, 'w+') as MR_src:
        MR_src.writelines(catalog_lines)
    MR_loc = "%s/residuals/MR.fits" % (location)
    print("\n-> Filtering out non PSF-like sources in master residual...")
    filters.spread_model_filter(location, MR=True)
    print("-> Filtering out diveted detections in master residual...")
    bad_indices = filters.divot(MR_loc, MR=True)
    filters.update_filtered_sources(location, bad_indices, MR=True)
    filters.write_total_sources(location)
def append_negative_sources(residual, MR=False):
    '''
    Append the detections from the negative-image catalog ("..._2.txt")
    onto the corresponding positive catalog, then delete the negative
    catalog. Comment lines (starting with '#') are skipped.

    residual -- path like <location>/residuals/<name>.fits
    MR       -- True for the master residual, whose catalogs live directly
                in sources/ with a "_sources" suffix; per-image catalogs
                live in sources/temp/ without the suffix.
    '''
    location = '/'.join(residual.split('/')[:-2])
    name = residual.split('/')[-1].replace('.fits', '')
    # The two cases differ only in the catalog paths, so compute them once
    # instead of duplicating the whole read/append/remove sequence.
    if MR == True:
        neg_path = "%s/sources/%s_sources_2.txt" % (location, name)
        pos_path = "%s/sources/%s_sources.txt" % (location, name)
    else:
        neg_path = "%s/sources/temp/%s_2.txt" % (location, name)
        pos_path = "%s/sources/temp/%s.txt" % (location, name)
    with open(neg_path, 'r') as neg_sources:
        lines = neg_sources.readlines()
    with open(pos_path, 'a') as sources:
        for l in lines:
            if l[0] != '#':
                sources.write(l)
    os.remove(neg_path)
8,972 | 9b7601a5230bfd2370e73a71d141d6de68ade50f | # Generated by Django 2.2.1 on 2020-02-13 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Subject model, renames
    User.name to User.user_name, and normalizes the User.id field."""

    dependencies = [
        ('app01', '0004_auto_20200213_1202'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Subject',
        ),
        migrations.RenameField(
            model_name='user',
            old_name='name',
            new_name='user_name',
        ),
        migrations.AlterField(
            model_name='user',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
8,973 | 6c6a49dfced680fe034cbbc2fa28d57d2aa1273e | import discord
import requests
import math
from keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY
client = discord.Client()

# Constant
DISCORD_TOKEN = GITHUB_DISCORD_TOKEN
FORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY
LIST = ['Verified']  # role names the bot may grant
VERIFIED = 4  # minimum current-season squads K/D required for the role
# Return the current season squad K/D of the fortnite player
def get_ratio(username):
try:
print(username)
link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username
response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY})
if response.status_code == 200:
collection = response.json()
if 'error' in collection:
return "-1"
else:
ratio = collection['stats']['curr_p9']['kd']['value']
return ratio
print("Invalid username")
return "-1"
else:
print("Error parsing data.")
return "-2"
except KeyError:
print("Error finding data. KeyError was returned.")
return "-3"
@client.event
async def on_message(message):
    """Dispatch the bot's chat commands: !patch, !help, !verify."""
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return

    # The command !patch return a link with the lastest patch note
    if message.content.startswith('!patch'):
        await message.channel.send('Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/')

    # The command !help explains the one function
    if message.content.startswith('!help'):
        embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
        embed.set_author(name="Verify Bot Help", icon_url="")
        embed.add_field(name="Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify", value="You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\'t be able to verify you.", inline=False)
        await message.channel.send(embed=embed)

    # The command !verify return attribute a rank according to the K/D of the user
    if message.content.startswith("!verify"):
        # NOTE(review): "list" shadows the builtin, and "roles" computed
        # here is never used (the granted role is re-fetched below).
        for list in LIST:
            roles = discord.utils.get(message.guild.roles, name=list)
            username = '{0.author.display_name}'.format(message)
            # get_ratio returns the K/D or a sentinel string "-1"/"-2"/"-3"
            ratio = float(get_ratio(username))
            msgRatio = str(ratio)
            msgVerified = str(VERIFIED)
            print(ratio)
            if ratio == -1.0:
                # player not found on Fortnite Tracker
                embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
                embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
                embed.add_field(name="Fortnite player **" + message.author.display_name + "** not found.", value="\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.", inline=False)
                await message.channel.send(embed=embed)
            elif ratio == -2.0:
                # Fortnite Tracker returned a non-200 response
                embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
                embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
                embed.add_field(name="Data not found.", value="Fortnite Tracker is down. Please try again shortly.", inline=False)
                await message.channel.send(embed=embed)
            elif ratio == -3.0:
                # stats payload missing the current-season squads entry
                embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
                embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
                embed.add_field(name="No stats found for squad mode in the current season.", value="Play some games and try again.", inline=False)
                await message.channel.send(embed=embed)
            elif ratio > 0 and ratio < VERIFIED:
                print("🚫")
                print("-")
                embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
                embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
                embed.add_field(name=message.author.display_name + " does not have over a " + msgVerified + " K/D.", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
                await message.channel.send(embed=embed)
            elif ratio >= VERIFIED:
                # K/D high enough: announce and grant the Verified role
                print("✅")
                print("-")
                role = discord.utils.get(message.guild.roles, name=LIST[0])
                embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
                embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
                embed.add_field(name=message.author.display_name + " has over a " + msgVerified + " K/D. Verified!", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
                user=message.author
                await message.channel.send(embed=embed)
                await user.add_roles(role)
@client.event
async def on_ready():
    """Log connection details once the bot has connected to Discord."""
    print("-")
    print(f"Logged in as: {client.user.name}")
    print(f"With Client User ID: {client.user.id}")
    print(f"Verified set to: {VERIFIED}")
    print("-")
# Start the bot's event loop (blocks until the process exits).
client.run(DISCORD_TOKEN)
|
8,974 | e3aa38b5d01823ed27bca65331e9c7315238750a | import utils
from problems_2019 import intcode
def run(commands=None):
    """Run the Intcode program, echoing its ASCII output to stdout.

    Pre-seeds the machine with *commands*; once those are exhausted the
    program falls back to interactive mode and reads further commands
    from stdin.
    """
    memory = utils.get_input()[0]
    program = intcode.Program(
        memory,
        initial_inputs=intcode.commands_to_input(commands or []),
        output_mode=intcode.OutputMode.BUFFER,
    )
    while True:
        _, signal = program.run()
        for value in program.yield_outputs():
            try:
                print(chr(value), end='')
            except ValueError:
                # Non-ASCII outputs (e.g. large numeric answers) print as-is.
                print(value)
        if signal == intcode.ReturnSignal.RETURN_AND_HALT:
            return
        if signal == intcode.ReturnSignal.AWAITING_INPUT:
            # Run in interactive mode if more commands are needed.
            program.add_inputs(*intcode.commands_to_input([input()]))
        else:
            raise Exception(f'Unexpected return signal {signal}')
@utils.part
def part_1():
    """Replay a scripted route through the ship.

    The route issues four ``take`` commands (food ration, astrolabe,
    weather machine, ornament) before heading south; any further commands
    are then read interactively by ``run``.
    """
    commands = [
        'south',
        'take food ration',
        'west',
        'north',
        'north',
        'east',
        'take astrolabe',
        'west',
        'south',
        'south',
        'east',
        'north',
        'east',
        'south',
        'take weather machine',
        'west',
        'take ornament',
        'east',
        'north',
        'east',
        'east',
        'east',
        'south',
    ]
    run(commands=commands)
|
8,975 | 14cc048f517efd3dad9960f35fff66a78f68fb45 | from django.test import TestCase
from ..models import FearConditioningData, FearConditioningModule
from ..registry import DataViewsetRegistry, ModuleRegistry
class ModuleRegistryTest(TestCase):
    """Verifies that registering a module wires up its create view and URL."""
    def test_register_module_create_view(self) -> None:
        registry = ModuleRegistry()
        registry.register(FearConditioningModule)
        # Registration should expose a single "add" URL for the module...
        self.assertEqual(
            registry.urls[0].pattern._route,
            "projects/<int:project_pk>/experiments/<int:experiment_pk>/modules/"
            "fear-conditioning/add/",
        )
        # ...backed by the generated create view under a predictable name.
        self.assertEqual(
            registry.urls[0].callback, registry.views["fear_conditioning_create"]
        )
        self.assertEqual(registry.urls[0].name, "fear_conditioning_create")
        self.assertEqual(registry.modules, [FearConditioningModule])
class DataViewsetRegistryTest(TestCase):
    """Verifies that registering a data model creates list and detail routes."""
    def test_register_data_model(self) -> None:
        registry = DataViewsetRegistry()
        registry.register(FearConditioningData)
        self.assertEqual(registry.data_models, [FearConditioningData])
        # List view
        self.assertEqual(
            registry.urls[0].pattern._route,
            "projects/<int:project_pk>/experiments/<int:experiment_pk>/data/"
            "fear-conditioning/",
        )
        self.assertEqual(
            registry.urls[0].callback, registry.views["fear_conditioning_data_list"]
        )
        self.assertEqual(registry.urls[0].name, "fear_conditioning_data_list")
        # Detail view
        self.assertEqual(
            registry.urls[1].pattern._route,
            "projects/<int:project_pk>/experiments/<int:experiment_pk>/data/"
            "fear-conditioning/<int:data_pk>/",
        )
        self.assertEqual(
            registry.urls[1].callback, registry.views["fear_conditioning_data_detail"]
        )
        self.assertEqual(registry.urls[1].name, "fear_conditioning_data_detail")
|
8,976 | bd81f4431699b1750c69b0bbc82f066332349fbd | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render_to_response
from post.models import Post
#def ver_un_post(request, idpost):
# post = Post.objects.get(id=idpost)
#
# return render_to_response("post.html",{"post":post,},)
def home(request):
    """Render the home page with the list of all posts.

    BUG fix: the previous body referenced undefined names (``Curso`` and
    ``posts``), so every request raised NameError.  It now queries the
    imported ``Post`` model and passes the result under the ``posts`` key
    the template expects.
    """
    posts = Post.objects.all()
    return render_to_response("home.html",{"posts":posts},)
|
8,977 | 7ea81f83f556fcc55c9c9d44bcd63c583829fc08 | import re
n = input("電話番号を入力してください>>")
# Loose phone-number shape: 2-4 digit groups, optionally separated by
# hyphens or parentheses, ending in a 3-4 digit group.
pattern = r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}'
if re.search(pattern, n):
    print(n, "は電話番号の形式です")
else:
    print(n, "は電話番号の形式ではありません")
|
8,978 | 34536e3112c8791c8f8d48bb6ffd059c1af38e2f | from django.db.models import manager
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.utils import serializer_helpers
from rest_framework.views import APIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.status import HTTP_200_OK
from .serializers import StockSerializer
from .models import Stock
# Create your views here.
class TestView(APIView):
    """Simple health-check endpoint returning a static payload."""

    def get(self, request, *args, **kwargs):
        # Static body used to verify the API is reachable.
        return Response({"msg": "Test"})
class StockPagination(PageNumberPagination):
    # Default page size; clients may override it via ?page_size=, capped at 500.
    page_size = 20
    page_size_query_param = 'page_size'
    max_page_size = 500
class StockView(APIView):
    """List stocks (optionally filtered by ticker) and create new ones."""

    def get(self, request, *args, **kwargs):
        ticker = request.GET.get('ticker')
        if ticker:
            # Exact-ticker lookup returns the full, unpaginated match list.
            matches = Stock.objects.filter(ticker=ticker)
            return Response(StockSerializer(matches, many=True).data)
        # No filter: return one page of the complete stock list.
        paginator = StockPagination()
        page = paginator.paginate_queryset(Stock.objects.all(), request)
        serializer = StockSerializer(page, many=True, context={'request': request})
        return Response(serializer.data, status=HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        serializer = StockSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors)
        serializer.save()
        return Response(serializer.data)
class MinHeap:
    """Array-backed binary min-heap.

    Index 0 holds a sentinel so the first real element sits at index 1,
    keeping the index arithmetic simple: parent = i >> 1, children are
    i << 1 and (i << 1) + 1.
    """

    def __init__(self):
        # BUG fix: the backing list used to be a *class* attribute, so every
        # MinHeap instance shared one heap.  Make it per-instance.
        self.__heap = [0]

    def insert(self, value):
        """Add *value* to the heap."""
        self.__heap.append(value)
        self.__sift_up()

    def pop(self):
        """Remove and return the smallest element, or None if the heap is empty."""
        if len(self.__heap) == 1:
            return None
        minimum = self.__heap[1]
        if len(self.__heap) == 2:
            self.__heap.pop()
        else:
            # Move the last leaf to the root, then restore the heap property.
            self.__heap[1] = self.__heap.pop()
            self.__sift_down()
        return minimum

    def __sift_up(self):
        # Bubble the freshly appended element up while it beats its parent.
        idx = len(self.__heap) - 1
        parent = idx >> 1
        while idx > 1 and self.__heap[idx] < self.__heap[parent]:
            self.__heap[idx], self.__heap[parent] = self.__heap[parent], self.__heap[idx]
            idx = parent
            parent = idx >> 1

    def __sift_down(self):
        # Push the root down, swapping with its smallest child each step.
        idx = 1
        size = len(self.__heap)
        while idx < size:
            minimum = self.__heap[idx]
            left = idx << 1
            right = left + 1
            swap = None
            if left < size and self.__heap[left] < minimum:
                minimum = self.__heap[left]
                swap = left
            if right < size and self.__heap[right] < minimum:
                swap = right
            if swap is None:
                break
            self.__heap[swap], self.__heap[idx] = self.__heap[idx], self.__heap[swap]
            idx = swap
8,980 | 9f7b1cfcc3c20910201fc67b5a641a5a89908bd1 | import numpy as np, pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
from datetime import timedelta
import sys
DEBUG = False  # when True, each transformer prints its output head/shape
class DailyAggregator(BaseEstimator, TransformerMixin):
    ''' Aggregates time-series values to daily level. '''

    def __init__(self, id_columns, time_column, value_columns ):
        super().__init__()
        # Accept either a single column name or a list of names.
        self.id_columns = id_columns if isinstance(id_columns, list) else [id_columns]
        self.time_column = time_column
        self.value_columns = value_columns if isinstance(value_columns, list) else [value_columns]

    def fit(self, X, y=None):
        """Stateless; nothing to learn."""
        return self

    def transform(self, X):
        """Floor timestamps to midnight and sum the value columns per id/day."""
        X = X.copy()
        X[self.time_column] = X[self.time_column].dt.normalize()
        group_cols = self.id_columns + [self.time_column]
        X = X.groupby(by=group_cols, as_index=False)[self.value_columns].sum()
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(X.head())
            print(X.shape)
        return X
class MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):
    ''' Adds missing time intervals in a time-series dataframe.

    Builds the full grid of (id, time) combinations between the observed
    min and max timestamps at the requested step size and left-joins the
    original data onto it, leaving NaNs where intervals were missing.
    '''
    DAYS = 'days'
    MINUTES = 'minutes'
    HOURS = 'hours'

    def __init__(self, id_columns, time_column, value_columns, time_unit, step_size ):
        super().__init__()
        # Accept either a single column name or a list of names.
        if not isinstance(id_columns, list):
            self.id_columns = [id_columns]
        else:
            self.id_columns = id_columns
        self.time_column = time_column
        if not isinstance(value_columns, list):
            self.value_columns = [value_columns]
        else:
            self.value_columns = value_columns
        self.time_unit = time_unit
        self.step_size = int(step_size)

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        min_time = X[self.time_column].min()
        max_time = X[self.time_column].max()
        if self.time_unit == MissingTimeIntervalFiller.DAYS:
            num_steps = ( (max_time - min_time).days // self.step_size ) + 1
            all_time_ints = [min_time + timedelta(days=x*self.step_size) for x in range(num_steps)]
        elif self.time_unit == MissingTimeIntervalFiller.HOURS:
            time_diff_sec = (max_time - min_time).total_seconds()
            num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1
            # BUG fix: a stray ``num_steps = (max_time - min_time).days + 1``
            # used to overwrite the hourly step count computed above, so the
            # 'hours' mode generated a day-based number of intervals.
            all_time_ints = [min_time + timedelta(hours=x*self.step_size) for x in range(num_steps)]
        elif self.time_unit == MissingTimeIntervalFiller.MINUTES:
            time_diff_sec = (max_time - min_time).total_seconds()
            num_steps = int(time_diff_sec // (60 * self.step_size)) + 1
            all_time_ints = [min_time + timedelta(minutes=x*self.step_size) for x in range(num_steps)]
        else:
            raise Exception(f"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes'].")
        # create df of all time intervals
        full_intervals_df = pd.DataFrame(data = all_time_ints, columns = [self.time_column])
        # get unique id-var values from original input data
        id_cols_df = X[self.id_columns].drop_duplicates()
        # Cross join ids x intervals via a dummy key, then drop the key.
        # BUG fix: ``.drop('foo', 1)`` used the positional ``axis`` argument,
        # which was removed in pandas 2.0 (raises TypeError there).
        full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(foo=1)).drop(columns='foo')
        # merge original data on to this full table
        full_df = full_df.merge(X[self.id_columns + [self.time_column] + self.value_columns],
                                on=self.id_columns + [self.time_column], how='left')
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(full_df.head())
            print(full_df.shape)
        return full_df
class DataPivoter(BaseEstimator, TransformerMixin):
    ''' Pivots a dataframe with a given column '''

    def __init__(self, non_pivoted_columns, pivoting_column, pivoted_columns, fill_na_val):
        super().__init__()
        # Normalise single column names into lists.
        if isinstance(non_pivoted_columns, list):
            self.non_pivoted_columns = non_pivoted_columns
        else:
            self.non_pivoted_columns = [non_pivoted_columns]
        if isinstance(pivoted_columns, list):
            self.pivoted_columns = pivoted_columns
        else:
            self.pivoted_columns = [pivoted_columns]
        self.pivoting_column = pivoting_column
        self.fill_na_val = fill_na_val

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        pivoted = X.pivot_table(
            index=self.non_pivoted_columns,
            aggfunc=sum,
            columns=self.pivoting_column,
            values=self.pivoted_columns,
            fill_value=self.fill_na_val,
        ).reset_index()
        # pivot_table yields a MultiIndex over columns; flatten it so index
        # columns keep their own name and pivoted columns use the level-1 label.
        pivoted.columns = [first if second == '' else second for first, second in pivoted.columns]
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(pivoted.head())
            print(pivoted.shape)
        return pivoted

    def inverse_transform(self, preds_df):
        """Melt a pivoted predictions frame back to long format."""
        return pd.melt(
            preds_df.reset_index(),
            id_vars=self.non_pivoted_columns,
            value_vars=preds_df.columns,
            var_name=self.pivoting_column,
            value_name=self.pivoted_columns[0],
        )
class IndexSetter(BaseEstimator, TransformerMixin):
    ''' Set index '''

    def __init__(self, index_cols, drop_existing):
        self.index_cols = index_cols
        self.drop_existing = drop_existing

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        result = X.copy()
        # Discard (or keep, per drop_existing) the old index, then re-index.
        result.reset_index(drop=self.drop_existing, inplace=True)
        result.set_index(self.index_cols, inplace=True)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(result.head())
            print(result.shape)
        return result
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
    ''' Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
    Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
    '''

    def __init__(self, series_len, num_reps):
        self.series_len = series_len  # length of each sampled window
        self.num_reps = num_reps      # number of windows drawn per input row

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        curr_len = X.shape[1]
        if curr_len < self.series_len:
            # BUG fix: the message contained a garbled literal ("35,741")
            # instead of the actual current length.
            raise Exception(f"Error sampling series. Target length {self.series_len} exceeds current length {curr_len}")
        sampled_data = []
        data_arr = X.values
        for _ in range(self.num_reps):
            for i in range(data_arr.shape[0]):
                # BUG fix: randint's upper bound is exclusive, so ``+ 1`` is
                # required to allow the final window — and the legal
                # curr_len == series_len case previously raised ValueError
                # (randint(0, 0) needs low < high).
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
                sampled_data.append( data_arr[i, rand_idx: rand_idx + self.series_len] )
        idx = list(X.index) * self.num_reps
        col_names = [ f't_{i}' for i in range(self.series_len)]
        sampled_data = pd.DataFrame(sampled_data, columns=col_names, index= idx)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(sampled_data.head())
            print(sampled_data.shape)
        return sampled_data
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
    '''
    Appends a left-right (column-reversed) copy of the data as augmentation.
    '''

    def __init__(self):
        pass

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        mirrored = pd.DataFrame( np.fliplr(X), columns=X.columns, index=X.index )
        combined = pd.concat([X, mirrored], axis=0, ignore_index=True)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(combined.head())
            print(combined.shape)
        return combined
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
    '''
    Trims the length of a series to use latest data points
    '''

    def __init__(self, series_len):
        self.series_len = series_len

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X):
        curr_len = X.shape[1]
        if curr_len < self.series_len:
            raise Exception(f"Error trimming series. Target length {self.series_len} exceeds current length 35,741")
        # Keep only the trailing series_len columns and relabel them t_0..t_{n-1}.
        trimmed = pd.DataFrame(
            X.values[:, -self.series_len:],
            columns=[f't_{i}' for i in range(self.series_len)],
            index=X.index,
        )
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(trimmed.head())
            print(trimmed.shape)
        return trimmed
class DFShuffler(BaseEstimator, TransformerMixin):
    """Randomly permutes the rows of a dataframe (optionally a no-op)."""

    def __init__(self, shuffle = True):
        self.shuffle = shuffle

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X, y=None):
        if self.shuffle == False:
            return X
        # Sampling the full frame without replacement is a row shuffle.
        shuffled = X.sample(frac=1)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(shuffled.head())
            print(shuffled.shape)
        return shuffled
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
    '''Scales history and forecast parts of time-series based on history data'''
    # NOTE(review): scaling parameters are captured in transform() (not fit()),
    # so inverse_transform is only meaningful after a transform call.
    def __init__(self, scaling_len, upper_bound = 5.):
        # scaling_len: number of leading (history) columns used to fit the scaler.
        # upper_bound: cap applied to scaled values to limit forecast blow-up.
        if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
        self.scaling_len = scaling_len
        self.max_scaler = MinMaxScaler()
        self.row_sums = None
        self.upper_bound = upper_bound
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        # Fit the MinMaxScaler on the history window only, then scale all columns.
        curr_len = X.shape[1]
        if curr_len < self.scaling_len:
            msg = f''' Error scaling series.
                Sum of scaling_len {self.scaling_len} should not exceed series length 35,741. '''
            raise Exception(msg)
        df = X if curr_len == self.scaling_len else X[ X.columns[ : self.scaling_len ] ]
        self.row_sums = df.sum(axis=1)
        # Rows whose history sums to zero are dropped entirely (they cannot be
        # scaled meaningfully); note inverse_transform does not restore them.
        df = df[self.row_sums != 0]
        self.max_scaler.fit(df.T)
        # print(X.shape, self.row_sums.shape)
        # sys.exit()
        X_filtered = X[self.row_sums != 0].copy()
        # Transpose so the scaler treats each row (series) as a feature column.
        vals = self.max_scaler.transform(X_filtered.T).T
        # Cap scaled values so the forecast region cannot blow up the scale.
        vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
        X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered.index)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(X.head())
            print(X.shape)
        return X
    def inverse_transform(self, X):
        # Undo the scaling using the scaler fitted during transform().
        return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
    '''Scales history and forecast parts of time-series based on history data'''

    def __init__(self, scaling_len, upper_bound = 5.):
        if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
        self.scaling_len = scaling_len
        self.min_vals = None
        self.max_vals = None
        self.ranges = None
        self.upper_bound = upper_bound

    def fit(self, X, y=None): return self  # parameters are derived in transform

    def transform(self, X, y=None):
        if self.scaling_len < 1:
            msg = f''' Error scaling series.
                scaling_len needs to be at least 2. Given length is {self.scaling_len}. '''
            raise Exception(msg)
        values = X.values
        # Per-row min/max are taken from the leading scaling_len (history) columns only.
        self.min_vals = np.expand_dims(values[:, : self.scaling_len].min(axis=1), axis=1)
        self.max_vals = np.expand_dims(values[:, : self.scaling_len].max(axis=1), axis=1)
        self.ranges = self.max_vals - self.min_vals
        # Guard flat series against division by zero.
        self.ranges = np.where(self.ranges == 0, 1e-5, self.ranges)
        scaled = (values - self.min_vals) / self.ranges
        # Cap scaled values so the forecast region cannot blow up the scale.
        scaled = np.where(scaled < self.upper_bound, scaled, self.upper_bound)
        X = pd.DataFrame(scaled, columns=X.columns, index=X.index)
        if DEBUG:
            print(f'-------after {__class__.__name__ }------------')
            print(X.head())
            print(X.shape)
        return X

    def inverse_transform(self, X):
        """Undo the scaling using the parameters captured in transform."""
        return X * self.ranges + self.min_vals
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
    '''Splits the time series into X (history) and Y (forecast) series'''

    def __init__(self, X_len, Y_len):
        self.X_len = X_len
        self.Y_len = Y_len

    def fit(self, X, y=None): return self  # stateless

    def transform(self, X, y=None):
        curr_len = X.shape[1]
        encode_len = self.X_len
        decode_len = 0 if self.Y_len == 'auto' else self.Y_len
        if curr_len < encode_len + decode_len:
            msg = f''' Error splitting series.
                Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length 35,741. '''
            raise Exception(msg)
        # sklearn pipelines pass a single object along, so return a dict.
        cols = X.columns
        if self.Y_len == 'auto':
            # 'auto': the target window is the same as the history window.
            return { 'X': X[cols[-self.X_len :]], 'Y': X[cols[-self.X_len :]] }
        if self.Y_len == 0:
            return { 'X': X[cols[-self.X_len :]], 'Y': pd.DataFrame() }
        return {
            'X': X[cols[-(self.X_len + self.Y_len): -self.Y_len]],
            'Y': X[cols[-self.Y_len:]],
        }
if __name__ == "__main__":
    # Ad-hoc smoke test: runs the full preprocessing chain over a local
    # parquet history file and prints intermediate shapes.
    # data = pd.read_parquet("wfm_single_q_Internal_daily_history.parquet")
    # data = pd.read_parquet("WFM_200q_Internal_daily_history.parquet")
    # data.rename(columns={ 'queueid': 'seriesid', 'date': 'ts', 'callvolume': 'v',}, inplace=True)
    data = pd.read_parquet("History_series_0028C91B.002795_filled.parquet")
    data.rename(columns={ 'queueid': 'seriesid', 'time': 'ts', 'callvolume': 'v',}, inplace=True)
    data['ts'] = pd.to_datetime(data['ts'])
    data = data[['seriesid', 'ts', 'v']]
    hist_len = 365
    fcst_len = 90
    print("-----------orig data -------------------")
    # print(data.head()); print(data.shape)
    print("-----------after daily agg -------------------")
    agg = DailyAggregator('seriesid', 'ts', 'v')
    data = agg.fit_transform(data)
    # print(data.head()); print(data.shape)
    print("-----------after adding missing intervals -------------------")
    filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)
    data = filler.fit_transform(data)
    # print(data.head()); print(data.shape)
    print("-----------after pivoting -------------------")
    # NOTE(review): DataPivoter's signature is (non_pivoted, pivoting, pivoted,
    # fill); these arguments pivot on 'v' with 'ts' as values, which looks
    # swapped — likely intended as ('seriesid', 'ts', 'v', 0).  Confirm.
    pivoter = DataPivoter('seriesid', 'v', 'ts', 0)
    data = pivoter.fit_transform(data)
    # print(data.head()); print(data.shape)
    print("-----------after indexing -------------------")
    indexer = IndexSetter('seriesid', drop_existing=True)
    data = indexer.fit_transform(data)
    # print(data.head()); print(data.shape)
    print("-----------after sampling -------------------")
    sampler = SubTimeSeriesSampler(series_len=hist_len+fcst_len, num_reps=5)
    data = sampler.fit_transform(data)
    # print(data.head()); print(data.shape)
    print("-----------after shuffling -------------------")
    shuffler = DFShuffler()
    data = shuffler.fit_transform(data)
    print(data.head()); print(data.shape)
    print("-----------after max scaling -------------------")
    scaler = TSMinMaxScaler(scaling_len=hist_len)
    data = scaler.fit_transform(data)
    print(data.head()); print(data.shape)
    print("-----------after X Y split -------------------")
    splitter = TimeSeriesXYSplitter(hist_len, fcst_len)
    data = splitter.fit_transform(data)
    print(data.keys())
    print(data['X'])
    print(data['Y'])
|
8,981 | 4b85479af7d65d208fab08c10afbf66086877329 | import sys
# Number of stones, read from stdin.
n= int(sys.stdin.readline())
# Precomputed winners for small positions.  From the recurrence in sol(), the
# allowed moves take 1, 3 or 4 stones; 'SK'/'CY' label the winning player —
# presumably SK moves first (confirm against the problem statement).
dp = {1:'SK', 2: 'CY', 3:'SK', 4:'SK', 5:'SK',6:'SK'}
def sol(k):
    """Return the winner label for ``k`` stones, memoising results in ``dp``."""
    if k not in dp:
        # Fill dp bottom-up: position i is lost ('CY') exactly when every
        # reachable position (after removing 1, 3 or 4 stones) is a win ('SK').
        for i in range(7, k + 1):
            all_moves_win = dp[i - 1] == 'SK' and dp[i - 3] == 'SK' and dp[i - 4] == 'SK'
            dp[i] = 'CY' if all_moves_win else 'SK'
    return dp[k]
print(sol(n))
|
8,982 | f1c65fc4acafbda59aeea4f2dfca2cf5012dd389 | from pyecharts.charts.pie import Pie
from pyecharts.charts.map import Map
import static.name_map
from pymongo import MongoClient
# HTML page header/footer wrapped around every generated chart body.
html1 = '<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><title>疫情数据可视化</title><script src="/static/echarts/echarts.js"></script><script src="/static/china.js"></script><script src="/static/world.js"></script></head><body>'
html2 = '</body></html>'
# 绘制饼图,返回值为html代码
def make_PieChart(country):
    """Build an HTML page containing a pie chart of epidemic totals.

    ``country`` selects the MongoDB collection: ``'China'`` uses ChinaData,
    anything else uses WorldData (world-wide totals).
    """
    global Data
    Data = []
    # Read every document from the relevant MongoDB collection.
    client = MongoClient()
    db = client.mydb
    tb = db.ChinaData if country == 'China' else db.WorldData
    records = list(tb.find())
    attrs = ['现存确诊', '死亡', '治愈']
    # Sum the three headline counters across all documents.
    totals = {'currentConfirmedCount': 0, 'deadCount': 0, 'curedCount': 0}
    for rec in records:
        for key in totals:
            totals[key] += rec[key]
    values = [totals['currentConfirmedCount'], totals['deadCount'], totals['curedCount']]
    country = '中国' if country == 'China' else '世界'
    # pie.render_embed() yields the chart's HTML body.
    pie = Pie(country + "疫情数据饼图")
    pie.add(
        "",
        attrs,
        values,
        is_label_show=True,
        is_more_utils=True
    )
    return html1 + pie.render_embed() + html2
# 绘制中国地图
def make_ChinaMap(type):
    """Build an HTML page with a China map coloured by the chosen metric."""
    global ChinaData
    ChinaData = []
    # Read all province documents from MongoDB.
    client = MongoClient()
    db = client.mydb
    records = list(db.ChinaData.find())
    allProvinces = []
    values = []
    # Strip administrative suffixes so names match the echarts china map.
    suffixes = ['省', '壮族自治区', '维吾尔自治区', '回族自治区', '自治区', '市']
    for rec in records:
        name = rec['provinceName']
        for suffix in suffixes:
            name = name.replace(suffix, '')
        allProvinces.append(name)
        values.append(rec[type])
    # Map the counter field name to its Chinese display label.
    labels = {
        'currentConfirmedCount': '现存确诊',
        'confirmedCount': '累计确诊',
        'deadCount': '死亡',
        'curedCount': '治愈',
    }
    type = labels.get(type, type)
    # map.render_embed() yields the choropleth's HTML body.
    map = Map("全国疫情" + type + "数据", '中国', width=1200, height=600)
    map.add(type, allProvinces, values, visual_range=[1, 1000], maptype='china', is_visualmap=True, visual_text_color='#000')
    return html1 + map.render_embed() + html2
# 绘制世界地图
def make_WorldMap(type):
    """Build an HTML page with a world map coloured by the chosen metric."""
    global WorldData
    WorldData = []
    # Read all country documents from MongoDB.
    client = MongoClient()
    db = client.mydb
    records = list(db.WorldData.find())
    allCountries = []
    values = []
    for rec in records:
        allCountries.append(rec['provinceName'])
        values.append(rec[type])
    # Translate Chinese country names into the English names world.js expects.
    mapping = static.name_map.name_map
    for a in range(len(allCountries)):
        for eng in mapping.keys():
            if allCountries[a] == mapping[eng]:
                allCountries[a] = eng
    # Map the counter field name to its Chinese display label.
    labels = {
        'currentConfirmedCount': '现存确诊',
        'confirmedCount': '累计确诊',
        'deadCount': '死亡',
        'curedCount': '治愈',
    }
    type = labels.get(type, type)
    # map.render_embed() yields the choropleth's HTML body.
    map = Map("国外疫情" + type + "数据", '国外', width=1200, height=600)
    map.add(type, allCountries, values, visual_range=[1, 100000], maptype='world', is_visualmap=True,
            visual_text_color='#000')
    return html1 + map.render_embed() + html2
|
8,983 | 6eac04bc10ef712ab4e2cde4730950ddcbe42585 | import queue
from enum import IntEnum
from time import sleep
import keyboard
# I know, I copy pasted this horrobly written class
# again...
# and again.. I should really write a proper intcode computer
class IntCodeComputer:
    """Minimal Intcode interpreter (Advent of Code 2019 instruction set).

    Supports opcodes 1-9 and 99, parameter modes 0 (position), 1 (immediate)
    and 2 (relative), and memory that zero-extends on demand.  Run() pauses
    and returns at each output so callers can drive the program step-wise.
    """
    def __init__(self, code):
        # Keep a pristine copy so Run(..., reset=True) can restart the program.
        self.defaultCode = code
        self.runningCode = self.defaultCode.copy()
        self.instructionPointer = 0
        self.outputQueue = queue.Queue()
        self.relativeBase = 0
    def AccessLocation(self, index):
        # Reads past the end of memory zero-extend it first.
        if index >= len(self.runningCode):
            self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])
        return self.runningCode[index]
    def StoreLocation(self, index, value):
        # Writes past the end of memory zero-extend it first.
        if index >= len(self.runningCode):
            self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])
        self.runningCode[index] = value
    def Run(self, inputArray, reset):
        """Execute until the next output value (returned) or halt (None).

        Machine state persists across calls; opcode 3 consumes values from
        ``inputArray`` in order.
        """
        if reset == True:
            self.runningCode = self.defaultCode.copy()
            self.instructionPointer = 0
            self.outputQueue = queue.Queue()
            self.relativeBase = 0
        inputIndex = 0
        while self.instructionPointer < len(self.runningCode):
            # Opcode is the two low digits; parameter modes are the digits above.
            instruction = self.runningCode[self.instructionPointer] % 100;
            aMode = (self.runningCode[self.instructionPointer] // 100) % 10
            bMode = (self.runningCode[self.instructionPointer] // 1000) % 10
            cMode = (self.runningCode[self.instructionPointer] // 10000) % 10
            a = b = c = 0
            # Three-parameter opcodes: add (1), multiply (2), less-than (7), equals (8).
            if instruction == 1 or instruction == 2 or instruction == 7 or instruction == 8:
                a = self.AccessLocation(self.instructionPointer + 1)
                b = self.AccessLocation(self.instructionPointer + 2)
                c = self.AccessLocation(self.instructionPointer + 3)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                if bMode == 0:
                    b = self.AccessLocation(b)
                if bMode == 2:
                    b = self.AccessLocation(b + self.relativeBase)
                if cMode == 2:
                    c = c + self.relativeBase
            # Two-parameter jump opcodes: jump-if-true (5), jump-if-false (6).
            if instruction == 5 or instruction == 6:
                a = self.AccessLocation(self.instructionPointer + 1)
                b = self.AccessLocation(self.instructionPointer + 2)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                if bMode == 0:
                    b = self.AccessLocation(b)
                if bMode == 2:
                    b = self.AccessLocation(b + self.relativeBase)
            if instruction == 1:
                self.StoreLocation(c, a + b)
                self.instructionPointer += 4
            elif instruction == 2:
                self.StoreLocation(c, a * b)
                self.instructionPointer += 4
            elif instruction == 3:
                # Input: store the next provided input value.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 2:
                    a = a + self.relativeBase
                self.StoreLocation(a, inputArray[inputIndex])
                inputIndex += 1
                self.instructionPointer += 2
            elif instruction == 4:
                # Output: pause execution and hand the value back to the caller.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                self.instructionPointer += 2
                return a
            elif instruction == 5:
                if a != 0:
                    self.instructionPointer = b
                else:
                    self.instructionPointer += 3
            elif instruction == 6:
                if a == 0:
                    self.instructionPointer = b
                else:
                    self.instructionPointer += 3
            elif instruction == 7:
                if a < b:
                    self.StoreLocation(c, 1)
                else:
                    self.StoreLocation(c, 0)
                self.instructionPointer += 4
            elif instruction == 8:
                if a == b:
                    self.StoreLocation(c, 1)
                else:
                    self.StoreLocation(c, 0)
                self.instructionPointer += 4
            elif instruction == 9:
                # Adjust the relative base register.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                self.relativeBase += a
                self.instructionPointer += 2
            elif instruction == 99:
                # Halt: park the pointer past the end so further Runs return None.
                self.instructionPointer = len(self.runningCode) + 1
                return None
            else:
                print ("WTF")
                return None
        return None
def Render(screenMatrix):
    """Print the screen matrix as a block of digits, one row per line."""
    finalString = ""
    for row in range(0, len(screenMatrix)):
        # BUG fix: the column bound previously read ``screenMatrix[i]`` via a
        # leaked global loop variable instead of the current row.
        for column in range(0, len(screenMatrix[row])):
            finalString += str(screenMatrix[row][column])
        finalString += "\n"
    print (finalString, end = "\r")
def GetBallX(screenMatrix):
    """Return the column of the ball (tile id 4), or 0 if absent."""
    for row in range(0, len(screenMatrix)):
        # BUG fix: the column bound previously read ``screenMatrix[i]`` via a
        # leaked global loop variable instead of the current row.
        for column in range(0, len(screenMatrix[row])):
            if screenMatrix[row][column] == 4:
                return column
    return 0
def GetPadX(screenMatrix):
    """Return the column of the paddle (tile id 3), or 0 if absent."""
    for row in range(0, len(screenMatrix)):
        # BUG fix: the column bound previously read ``screenMatrix[i]`` via a
        # leaked global loop variable instead of the current row.
        for column in range(0, len(screenMatrix[row])):
            if screenMatrix[row][column] == 3:
                return column
    return 0
# Part 1: run the program once and paint the initial screen, then count
# block tiles (id 2).
inputFile = open("input.txt", "r")
code = [int(x) for x in inputFile.read().split(",")]
computer = IntCodeComputer(code)
screenMatrix = [0] * 24
for i in range(0, len(screenMatrix)):
    screenMatrix[i] = [0] * 42
cond = True
while cond:
    # Outputs arrive in triples: x, y, tile id; None means the program halted.
    result1 = computer.Run([], False)
    if result1 != None:
        result2 = computer.Run([], False)
        result3 = computer.Run([], False)
        screenMatrix[result2][result1] = result3
    else:
        cond = False
counter = 0;
for i in range(0, len(screenMatrix)):
    for j in range(0, len(screenMatrix[i])):
        if screenMatrix[i][j] == 2:
            counter += 1
print (counter)
# Part 2: insert two quarters (memory[0] = 2) and play, steering the paddle
# toward the ball each step; the final score is reported at x=-1, y=0.
code[0] = 2
computer = IntCodeComputer(code)
screenMatrix = [0] * 24
for i in range(0, len(screenMatrix)):
    screenMatrix[i] = [0] * 42
cond = True
iter = 0
score = 0
while cond:
    cond2 = True
    exec = 0
    if iter >= len(screenMatrix) * len(screenMatrix[0]):
        sleep(0.001)
    # Inner loop processes output triples until the ball moves (or 10 steps).
    while cond2 or iter < len(screenMatrix) * len(screenMatrix[0]):
        cond2 = True
        exec += 1
        inp = 0
        # Joystick: move the paddle toward the ball's column.
        ballX = GetBallX(screenMatrix)
        padX = GetPadX(screenMatrix)
        if padX == ballX:
            inp = 0
        elif padX > ballX:
            inp = -1
        else:
            inp = 1
        result1 = computer.Run([inp], False)
        if result1 != None:
            result2 = computer.Run([inp], False)
            result3 = computer.Run([inp], False)
            if result1 == -1 and result2 == 0:
                score = result3
            else:
                screenMatrix[result2][result1] = result3
            if result3 == 4 or exec >= 10:
                cond2 = False
        else:
            cond = False
            break
    Render(screenMatrix)
    iter += 1
print(score)
inputFile.close()
8,984 | 0150e1db3ef2f6c07280f21971b43ac71fc4cada | """Handles loading and tokenising of datasets"""
import enum
import numpy as np
import os.path
import pickle
from tqdm import tqdm
import nltk
from nltk import WordPunctTokenizer
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from lib.utils import DATASETS_BASE_PATH, SAVED_POS_BASE_PATH
from lib.pos import get_pos_tags
class DatasetType(enum.Enum):
    """
    Represents the type of dataset
    """
    TRAIN = 0  # training split ("train" file prefix)
    VAL = 1    # validation split ("dev" file prefix)
    TEST = 2   # test split ("test" file prefix; ships without a scores file)
class Language(enum.Enum):
    """
    Represents the dataset language
    """
    GERMAN = 0   # target language for the en-de pairs
    ENGLISH = 1  # source language only; rejected as a target by load_data
    CHINESE = 2  # target language for the en-zh pairs
def load_text(path):
    """
    Given a path to csv file, loads the data and
    returns it as a numpy array (one entry per line).
    """
    with open(path) as handle:
        lines = handle.read().splitlines()
    return np.array(lines)
def load_data(data_type=DatasetType.TRAIN, target_language=Language.GERMAN, augmented=False):
    """
    Given the dataset type, target language and whether or not to use augmented data,
    loads and returns numpy array representations of the source text, translation text and scores.
    (Scores are None for the test split, which ships without labels.)
    """
    if target_language == Language.ENGLISH:
        raise ValueError("Target language cannot be english")
    # Resolve dataset folder and language code from the target language.
    if target_language == Language.GERMAN:
        folder = "en-de-aug" if augmented else "en-de"
        language = "ende"
    else:
        folder = "en-zh"
        language = "enzh"
    path = os.path.join(DATASETS_BASE_PATH, folder)
    # File prefix encodes the split.
    if data_type == DatasetType.TRAIN:
        prefix = "train"
    elif data_type == DatasetType.VAL:
        prefix = "dev"
    elif data_type == DatasetType.TEST:
        prefix = "test"
    src_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.src'))
    translation_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.mt'))
    scores = None
    if data_type != DatasetType.TEST:
        score_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.scores'))
        scores = np.loadtxt(score_file)
    return load_text(src_file), load_text(translation_file), scores
def tokenize(text_array, use_pos=False, data_type=None, lang=None):
    """
    Given an array of sentences, returns:
    If use_pos:
        An array of tokenised sentences (where each tokenised sentence is an array of tuples of (token, POS tag))
    else:
        An array of tokenised sentences (where each tokenised sentence is an array of tokens)
    NOTE: If use_pos is False, the rest of the kwargs are ignored
    NOTE: the two docstring branches above were previously swapped relative
    to the actual behavior; they now match the code.
    """
    cache_path = None
    if use_pos:
        # POS tags take long to generate, so use a per-split disk cache.
        if data_type == DatasetType.TRAIN:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'train-{lang}-pos.pickle')
        elif data_type == DatasetType.VAL:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'val-{lang}-pos.pickle')
        elif data_type == DatasetType.TEST:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'test-{lang}-pos.pickle')
        # BUG fix: ``os.path.isfile(None)`` raised TypeError when data_type
        # was not one of the known splits; guard on cache_path first.
        if cache_path is not None and os.path.isfile(cache_path):
            with open(cache_path, 'rb') as handle:
                return pickle.load(handle)
    tokeniser = WordPunctTokenizer()
    sentences = []
    with tqdm(total=len(text_array)) as pbar:
        for sentence in text_array:
            lower_cased_tokens = [tok.lower() for tok in tokeniser.tokenize(sentence)]
            if use_pos:
                # Store tokenised sentence i.e. arrays of (token, POS_TAG) tuples;
                # fall back to per-token tagging if tagging the sentence fails.
                try:
                    sentences.append(get_pos_tags(lower_cased_tokens, lang))
                except Exception:
                    sentences.append([get_pos_tags([tok], lang)[0] for tok in lower_cased_tokens])
            else:
                # Store tokenised sentence
                sentences.append(lower_cased_tokens)
            pbar.update(1)
    if use_pos and cache_path is not None:
        # Store POS tags to allow faster loading on next invocation
        with open(cache_path, 'wb') as handle:
            pickle.dump(sentences, handle)
    return sentences
def pad_to_length(word_embeddings, length, padding):
    """
    Pad each sentence in-place along the 2nd dimension.

    Given data of shape (x, variable, dimensionality), appends `padding` to
    every sentence until it holds exactly `length` entries, yielding shape
    (x, length, dimensionality).
    """
    for seq in word_embeddings:
        deficit = length - len(seq)
        assert deficit >= 0
        seq.extend([padding] * deficit)
|
8,985 | 18ae982c7fac7a31e0d257f500da0be0851388c2 | from secrets import randbelow
print(randbelow(100)) |
8,986 | ad53b100a1774f5429278379302b85f3a675adea | # Generated by Django 2.2 on 2019-05-13 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional image to the `key`
    # model. Kept byte-identical (comments aside) so the recorded migration
    # state stays in sync with the database.
    dependencies = [
        ('base_data_app', '0008_key_keyslider'),
    ]
    operations = [
        migrations.AddField(
            model_name='key',
            name='image',
            # null=True keeps existing rows valid; verbose_name is Russian for "Picture".
            field=models.ImageField(null=True, upload_to='key', verbose_name='Картинка'),
        ),
    ]
|
8,987 | 2cff5fdfc86793592dd97de90ba9c3a11870b356 | from odoo import api, tools, fields, models, _
import base64
from odoo import modules
class InheritUser(models.Model):
    # Extends pos.config with the list of users granted access to this POS.
    _inherit = 'pos.config'
    # Reverse side of pos.session.users.pos_config.
    related_pos_user = fields.One2many('pos.session.users', 'pos_config', string='Related User')
class InheritSession(models.Model):
    # Link table: one row per (user, pos.config) access grant, maintained by
    # the res.users overrides below.
    _name = 'pos.session.users'
    user = fields.Many2one('res.users')
    pos_config = fields.Many2one('pos.config')
class InheritUser(models.Model):
    """Extends res.users with the POS configs the user may access.

    The many2many ``pos_sessions`` is mirrored into ``pos.session.users``
    rows so the POS side can query access per config.
    """
    _inherit = 'res.users'
    pos_sessions = fields.Many2many('pos.config', string='Point of Sale Accessible')

    @api.multi
    def write(self, vals):
        # Rebuild the pos.session.users mirror whenever pos_sessions changes.
        # vals['pos_sessions'] is an Odoo command list; [0][2] holds the ids
        # of a (6, 0, ids) replace command.
        if 'pos_sessions' in vals:
            self.env["pos.session.users"].search(
                [('user', '=', self.id)]).unlink()
            if vals['pos_sessions'][0][2]:
                for pos_session in vals['pos_sessions'][0][2]:
                    self.env['pos.session.users'].create({'pos_config': pos_session, 'user': self.id})
        result = super(InheritUser, self).write(vals)
        return result

    @api.model
    def create(self, vals):
        create_id = super(InheritUser, self).create(vals)
        # Use .get(): previously creating a user without 'pos_sessions' in
        # vals raised KeyError.
        pos_commands = vals.get('pos_sessions')
        if pos_commands and pos_commands[0][2]:
            for pos_session in pos_commands[0][2]:
                self.env['pos.session.users'].create({'pos_config': pos_session, 'user': create_id.id})
        return create_id
|
8,988 | 9fdc7c1eb68a92451d41313861164a915b85fcee | from django.conf.urls import url
from .views.show import show_article, show_articles, export_db
urlpatterns = [
    url(r'^$', show_articles, name='index'),
    url(r'^article/$', show_article, name='article'),
    # NOTE(review): this route reuses name='article', shadowing the one above
    # for reverse()/{% url %} lookups — presumably it should be name='export';
    # confirm against templates before renaming.
    url(r'^export/$', export_db, name='article'),
]
|
8,989 | 458124aa0d6f04268ad052f74d546b12d3f3f5f7 | import os, gc, random
from time import time
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss, f1_score, accuracy_score
from collections import Counter
from IPython.display import clear_output
import torch
from transformers import (
AutoTokenizer, RobertaTokenizerFast,
BertTokenizerFast, ElectraTokenizerFast
)
def seed_everything(seed):
    """Seed python, numpy and torch RNGs so runs are reproducible."""
    print(f'Set seed to {seed}.')
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Trade cudnn speed for deterministic kernels.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def is_blackbone(n):
    """True when parameter name `n` belongs to the backbone (prefix 'model')."""
    return n[:5] == 'model'
def evaluation(ytrue, y_pred, labels=[0,1,2,3]):
    """Return logloss, weighted F1 and accuracy for soft predictions y_pred."""
    hard_pred = y_pred.argmax(1)
    return {
        'Logloss': log_loss(ytrue, y_pred, labels=labels),
        'F1': f1_score(ytrue, hard_pred, average='weighted'),
        'Acc': accuracy_score(ytrue, hard_pred),
    }
def getTokenizer(model_config, tok_name):
    """Load the pretrained tokenizer `tok_name`, bound to the given model config."""
    tokenizer = AutoTokenizer.from_pretrained(
        tok_name, config=model_config, add_prefix_space=False
    )
    return tokenizer
class EarlyStopping:
    """Stop training after `patience` consecutive non-improving scores.

    mode='max' treats larger scores as better, mode='min' smaller.
    """
    def __init__(self, patience=5, mode='max'):
        self.step = 0
        self.stop = False
        # None = no score seen yet. The previous initial value of 0 made the
        # very first (positive) loss in mode='min' look like a regression, so
        # the best score was never recorded and training always stopped after
        # `patience` updates regardless of progress.
        self.score = None
        self.patience = patience
        self.mode = mode
        self.mult = 1 if mode=='max' else -1

    def update(self, score):
        """Record a new score; sets self.stop once patience is exhausted."""
        if self.score is not None and self.mult*(self.score-score) > 0:
            # Previous best is strictly better: one more bad step.
            self.step += 1
        else:
            self.step = 0
            self.score = score
        if self.step == self.patience:
            self.stop = True
class Timer:
    """Simple wall-clock stopwatch, started at construction."""
    def __init__(self):
        self._time = 0
        self.is_stopped = False
        self._start()

    def _start(self):
        self._time = time()

    def _stop(self):
        # First stop freezes the elapsed time; later calls are no-ops.
        if self.is_stopped:
            return
        self.is_stopped = True
        self._time = time() - self._time

    @property
    def time(self):
        """Elapsed seconds; stops the timer on first access."""
        self._stop()
        return self._time

    def to_string(self):
        """Elapsed time formatted as MM:SS."""
        minutes, seconds = self.m_s()
        return "{:02d}:{:02d}".format(minutes, seconds)

    def m_s(self):
        """Elapsed time as a (minutes, seconds) tuple."""
        total = round(self.time)
        return divmod(total, 60)
class Printer:
    """Collects per-epoch log lines for one CV fold and re-renders them."""
    def __init__(self, fold=0):
        self._print = []
        self.fold = fold

    def pprint(self, **kwargs):
        # Inline progress line, overwritten in place via carriage return.
        pieces = ["{}: {} - ".format(key, value) for key, value in kwargs.items()]
        print("\r" + "".join(pieces), end='')

    def update(self, epoch, losses, scores, time = None):
        """Append one formatted line for `epoch` from the loss/score dicts."""
        prefix = f"⏰ {time} | " if time else ""
        line = prefix + "Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}".format(
            epoch, losses['loss'][epoch], losses['val_loss'][epoch])
        for metric_name, value in scores.items():
            line += ' - {}: {:.5f}'.format(metric_name, value)
        self._print.append(line)

    def show(self):
        """Clear the notebook cell output and reprint every stored line."""
        clear_output()
        print("_"*100, "\nFold ", self.fold)
        for entry in self._print:
            print("_" * 100)
            print('| ' + entry)

    def update_and_show(self, epoch, losses, score, time=None):
        self.update(epoch, losses, score, time)
        self.show()
class WorkplaceManager:
    """Prepares the working directory for a training run.

    On construction it seeds all RNGs and, when a finished previous run is
    detected (a full set of fold models), wipes the given directories and
    file extensions before recreating the directories.
    NOTE(review): clearing shells out to `rm -r`; paths are assumed to be
    trusted, run-local names — confirm before reusing elsewhere.
    """
    def __init__(self, seed, dirs, exts, n_fols=10):
        self.seed = seed
        self.dirs = dirs  # directories to (re)create
        self.exts = exts  # file extensions to delete on reset
        self.n_folds = n_fols
        self._set_workplace()
    @staticmethod
    def create_dir(dir):
        # exist_ok: repeated setup calls are harmless
        os.makedirs(dir, exist_ok=True)
    def _create_dirs(self):
        print('Created {}'.format(' '.join(self.dirs)))
        for d in self.dirs:
            self.create_dir(d)
    def _clear_dirs(self):
        print('Deleted {}'.format(' '.join(self.dirs)))
        self.clear([f'{d}*' for d in self.dirs])
    def _clear_files(self):
        print('Deleted {}'.format(' '.join(self.exts)))
        self.clear([f'*{ext}' for ext in self.exts])
    def clear(self, objs_name):
        # best-effort recursive delete of the given glob patterns
        os.system('rm -r {}'.format(' '.join(objs_name)))
    def _set_workplace(self):
        seed_everything(self.seed)
        # a full models/ directory means a completed previous run: start clean
        if os.path.exists('models') and len(os.listdir('models/')) == self.n_folds:
            self._clear_dirs()
            self._clear_files()
        self._create_dirs()
class CrossValLogger:
    """Aggregates per-fold evaluation predictions and reports CV scores.

    Expects `df` to carry 'fold' and 'label' columns, per-fold predictions
    saved as '<path>fold_{i}_best_eval.npy', and the summed out-of-fold
    score pickled at `oof_cv`.
    """
    def __init__(self, df, metric_name, n_folds=10, oof_cv = 'cv_score.pkl', path='evals/roberta-base/'):
        assert df.fold.nunique()==n_folds, "Unconsistency between df.n_folds and n_folds"
        self.df = df.copy()
        self.metric_name = metric_name
        self.path = path  # directory holding fold_{i}_best_eval.npy files
        self.n_folds = n_folds
        self.oof_cv = oof_cv  # pickle holding the summed per-fold score
        self.score1, self.score2 = None, None
    def _retrieve_eval_preds(self):
        # Stitch each fold's saved predictions back into full-dataset order,
        # using the df index positions of each fold's rows.
        ph = self.path+'fold_{}_best_eval.npy'
        shape = ( self.df.shape[0], self.df.label.nunique() )
        preds = np.empty(shape, dtype=np.float32)
        for i in self.df.fold.unique():
            index = self.df[self.df.fold==i].index.values
            fold_pred = np.load(ph.format(i))
            preds[index] = fold_pred[:, :]
        return preds
    def _load_oof_cv_score(self):
        # Summed per-fold score saved during training; divided by n_folds later.
        score = 0
        with open(self.oof_cv, 'rb') as f:
            score = pickle.load(f)
        # redundant: the with-block already closed f
        f.close()
        return score
    def show_results(self, return_score=False):
        """Print (and optionally return) the mean OOF score and overall score."""
        if self.score1 is None:
            eval_preds = self._retrieve_eval_preds()
            self.score1 = self._load_oof_cv_score() / self.n_folds #oof_cv_scores
            self.score2 = evaluation(self.df.label.values, eval_preds, labels=self.df.label.unique())[self.metric_name] #ovr_score
        print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1, self.score2))
        if return_score: return self.score1, self.score2
|
8,990 | 1a569b88c350124968212cb910bef7b09b166152 |
## This file is the celeryconfig for the Task Worker (scanworker).
from scanworker.commonconfig import *
import sys
sys.path.append('.')
# RabbitMQ connection settings; the {{ }} placeholders are filled in by the
# deployment templating before this file is used.
BROKER_CONF = {
    'uid' : '{{ mq_user }}',
    'pass' : '{{ mq_password }}',
    'host' : '{{ mq_host }}',
    'port' : '5672',
    'vhost' : '{{ mq_vhost }}',
}
BROKER_URL = 'amqp://'+BROKER_CONF['uid']+':'+BROKER_CONF['pass']+'@'+BROKER_CONF['host']+':'+BROKER_CONF['port']+'/'+BROKER_CONF['vhost']
BROKER_HEARTBEAT=True
CELERY_IMPORTS = ('scanworker.tasks',)
# VALID_SCANNERS the name is rebound from the imported callable to its result;
# the scanner registry drives the per-scanner queue/route setup below.
from scanworker.tasks import VALID_SCANNERS as vs
VALID_SCANNERS=vs()
CELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()
CELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()
|
8,991 | 743d261052e4532c1304647501719ad897224b4e | #!/usr/bin/env python3
"""
Python class to access Netonix® WISP Switch WebAPI
** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
"""
import requests
from requests.exceptions import Timeout
from copy import deepcopy
import time
import json
try:
from deepdiff import DeepDiff
DIFF = True
except:
DIFF = False
class Netonix():
    """Minimal HTTPS client for the Netonix WISP Switch WebAPI.

    Holds a requests session plus the last-fetched config, MAC table,
    status snapshot and boot id.
    """
    def __init__(self):
        self.ip = None
        self.s = None  # requests.Session, created by open()
        # endpoint map, keyed by short action name
        self.url = {}
        self.url["login"] = "/index.php"
        self.url["backup"] = "/api/v1/backup"
        self.url["config"] = "/api/v1/config"
        self.url["apply"] = "/api/v1/apply"
        self.url["confirm"] = "/api/v1/applystatus"
        self.url["reboot"] = "/api/v1/reboot"
        self.url["restore"] = "/api/v1/restore"
        self.url["mac"] = "/api/v1/mactable"
        self.url["status"] = "/api/v1/status/30sec"
        self.url["id"] = "/api/v1/bootid"
        self.url["update"] = "/api/v1/uploadfirmware"
        self.url["doupdate"] = "/api/v1/upgradefirmware"
        self.config = {}
        self.orig_config = None  # snapshot taken by merge/replace, for getDiff()
        self.mac = {}
        self.status = {}
        self.id = ""

    def _get(self, url, params=None, timeout=15, **kwargs):
        """GET the named endpoint on the current switch."""
        full_url = "https://"+self.ip+self.url[url]
        return self.s.get(full_url, params=params, timeout=timeout, **kwargs)

    def _post(self, url, data=None, json=None, timeout=15, **kwargs):
        """POST to the named endpoint on the current switch."""
        full_url = "https://"+self.ip+self.url[url]
        return self.s.post(
            full_url,
            data=data,
            json=json,
            timeout=timeout,
            **kwargs
        )

    @staticmethod
    def _merge_by_key(old, new, key="Number", append=True):
        """Merge list-of-dicts `new` into `old` in place, matching on `key`.

        Entries of `new` whose `key` value matches an entry in `old` update
        that entry; unmatched entries are appended to `old` when `append` is
        True, otherwise LookupError is raised.
        """
        for item in new:
            found = False
            for old_item in old:
                if(key not in old_item):
                    continue
                if(old_item[key] != item[key]):
                    continue
                old_item.update(item)
                found = True
                break
            if(found is False):
                if(append is True):
                    # Fixed: previously `old_item.append(new)`, which tried to
                    # append the whole `new` list onto a dict (AttributeError)
                    # and raised NameError when `old` was empty.
                    old.append(item)
                else:
                    raise LookupError()

    def open(self, ip, user, password):
        """Log in to the switch at `ip`, establishing the HTTPS session."""
        self.ip = ip
        self.s = requests.session()
        self.s.verify = False  # these switches typically use self-signed certs
        data = {}
        data["username"] = user
        data["password"] = password
        r = self._post("login", data)
        if("Invalid username or password" in r.text):
            raise Exception("Invalid username or password")

    def getConfig(self):
        """Fetch the running config into self.config."""
        r = self._get("config")
        result = r.json()
        if("Config_Version" in result):
            self.config = result

    def putConfig(self):
        """Push self.config, apply it, and poll until the switch confirms."""
        r = self._post("config", json=self.config)
        try:
            r = self._post("apply")
        except Timeout:
            # apply can drop the connection while the switch reconfigures
            pass
        # the switch may come back on a new address after applying
        self.ip = self.config["IPv4_Address"]
        for a in range(5):
            try:
                r = self._post("confirm")
            except Timeout:
                continue
            break
        if(r.status_code != requests.codes.ok):
            raise Exception("Config Confirm Request Failed")
        # return r.json()

    def backup(self, output):
        """Download a backup of the switch to the file at `output`."""
        r = self.s.get("https://"+self.ip+self.url["backup"]+"/"+self.ip)
        if(r.status_code != requests.codes.ok):
            raise Exception("Backup Request Failed")
        newFile = open(output, "wb")
        newFile.write(r.content)
        newFile.close()

    def restore(self, i):
        """Upload backup file `i` and reboot. UNTESTED — guarded by a raise."""
        raise Exception("the restore method is still untested.")
        newFile = open(i, "rb")
        data = ""
        for a in newFile:
            data += a
        newFile.close()
        r = self._post("restore", data)
        print(r.json())
        if(r.status_code != requests.codes.ok):
            raise Exception("Restore Request Failed")
        r = self._get("reboot")
        return r.json()

    def getMAC(self):
        """Fetch the MAC table into self.mac (long timeout: slow endpoint)."""
        r = self._get("mac", timeout=60)
        if(r.status_code != requests.codes.ok):
            raise Exception("Action failed")
        self.mac = r.json()["MACTable"]

    def getID(self):
        """Fetch the boot id (cache-buster param defeats proxy caching)."""
        r = self._get("id", params={"_": time.time()})
        if(r.status_code != requests.codes.ok):
            raise Exception("Action failed")
        self.id = r.json()["BootID"]

    def getStatus(self):
        """Fetch the 30-second status snapshot into self.status."""
        if(self.id == ""):
            self.getID()
        r = self.s.get("https://"+self.ip+self.url["status"]+"?%s&_=%d" % (self.id, time.time()))
        if(r.status_code != requests.codes.ok):
            raise Exception("Action failed")
        self.status = r.json()

    def update(self, i):
        """Upload firmware file `i` and trigger the upgrade."""
        data = ""
        with open(i, mode='rb') as file:  # b is important -> binary
            data = file.read()
        r = self._post("update", data)
        if(r.status_code != requests.codes.ok):
            raise Exception("Firmware Upload Failed")
        r = self._get("doupdate")
        if(r.status_code != requests.codes.ok):
            raise Exception("Update Request Failed")

    def mergeConfig(self, config):
        """Merge `config` into self.config (keyed lists merged element-wise)."""
        self.orig_config = deepcopy(self.config)
        for k, v in config.items():
            if(k == "Ports"):
                self._merge_by_key(self.config[k], v, key="Number")
                continue
            if(k == "LACP"):
                self._merge_by_key(self.config[k], v, key="Port")
                continue
            if(k == "VLANs"):
                self._merge_by_key(self.config[k], v, key="ID")
                continue
            if(type(v) is dict):
                continue
            if(type(v) is list):
                self.config[k] += v
                continue
            self.config[k] = v

    def replaceConfig(self, config):
        """Overwrite top-level keys of self.config with `config`."""
        self.orig_config = deepcopy(self.config)
        if("Config_Version" in config):
            del config["Config_Version"]
        self.config.update(config)

    def getDiff(self):
        """Diff current config against the pre-merge snapshot (needs DeepDiff)."""
        if(self.orig_config is None):
            return {}
        if(DIFF is False):
            raise ImportError("Missing DeepDiff Module")
        return DeepDiff(
            self.orig_config,
            self.config,
            exclude_paths="root['Config_Version']"
        )
if __name__ == '__main__':
    # Minimal smoke test: prompt for credentials and dump the MAC table.
    import getpass
    import urllib3
    # self-signed certificates are the norm on these switches
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    ip = str(input("switch ip:"))
    user = str(input("user:"))
    pw = getpass.getpass("password:")
    n = Netonix()
    n.open(ip, user, pw)
    n.getMAC()
    print(json.dumps(n.mac, indent=4))
    # NOTE(review): the MAC table is fetched and printed twice — presumably to
    # observe changes between calls; confirm the duplication is intended.
    n.getMAC()
    print(json.dumps(n.mac, indent=4))
|
8,992 | e1da3255668999c3b77aa8c9332b197a9203478e | from marshmallow import ValidationError
from werkzeug.exceptions import HTTPException
from flask_jwt_extended.exceptions import JWTExtendedException
from memedata.util import mk_errors
from memedata import config
def jwt_error_handler(error):
    """Map a flask-jwt-extended error to a 401 API error response."""
    messages = list(getattr(error, 'args', []))
    return mk_errors(401, messages)
def http_error_handler(error):
    """Map a werkzeug HTTPException to the API error format.

    Prefers the messages attached to the exception's response; falls back to
    the exception's own code/description when no response is present.
    """
    resp = error.response
    if resp is None:
        return mk_errors(error.code, [error.description])
    code = getattr(resp, 'status_code', 500)
    payload = resp.get_json()
    if 'errors' in payload and payload['errors']:
        messages = [e['message'] for e in payload['errors'] if 'message' in e]
    else:
        messages = [str(resp.status)]
    return mk_errors(code, messages)
def validation_error_handler(error):
    """Map a marshmallow ValidationError to the API error format."""
    status = getattr(error, 'status_code', 500)
    return mk_errors(status, getattr(error, 'messages', []))
def generic_error_handler(error):
    """Catch-all handler; hides error details unless running in debug mode."""
    status = getattr(error, 'status_code', 500)
    messages = [str(error)] if config.debug else ['something went wrong!']
    return mk_errors(status, messages)
def error_handler(error):
    """Dispatch an exception to its specialised handler.

    Falls back to a plain 500 response if a handler itself fails.
    """
    try:
        if isinstance(error, JWTExtendedException):
            return jwt_error_handler(error)
        elif isinstance(error, HTTPException):
            return http_error_handler(error)
        elif isinstance(error, ValidationError):
            return validation_error_handler(error)
        else:
            return generic_error_handler(error)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the right boundary here.
        return mk_errors(500, 'something went wrong!')
def register_handlers(app):
    """Install error_handler as the app-wide exception handler."""
    app.errorhandler(Exception)(error_handler)
    app.errorhandler(HTTPException)(error_handler)
    # Replace Flask's user-exception hook wholesale so HTTPExceptions raised
    # inside views also flow through error_handler.
    app.handle_user_exception = error_handler
|
8,993 | a1ca6c258298feda99b568f236611c1c496e3262 | C = {i:0 for i in range(9)}
# Count contestants per 400-point rating band; band 8 holds ratings >= 3200,
# whose holders may pick any colour.
band_counts = {band: 0 for band in range(9)}
N = int(input())
A = list(map(int, input().split()))
for i in range(N):
    band_counts[min(A[i] // 400, 8)] += 1
# Distinct colours forced by sub-3200 contestants.
fixed_colours = sum(1 for band in range(8) if band_counts[band] > 0)
free = band_counts[8]
if fixed_colours == 0:
    # Everyone is >= 3200: at least one colour, at most one per contestant.
    print(1, free)
else:
    print(fixed_colours, fixed_colours + free)
8,994 | ba34bae7849ad97f939c1a7cb91461269cd58b64 | from numpy import array
import xspec as xs
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
from spectralTools.step import Step
class xspecView(object):
    """Builds a three-panel (Swift BAT / GBM NaI / GBM BGO) light curve from PHA files.

    NOTE(review): each xs.Spectrum() call loads the file into XSPEC's global
    session state, so counts depend on load order and prior session contents.
    """
    def __init__(self):
        #xs.Plot.device="/xs"
        xs.Plot.xAxis='keV'
        self.swift = []
        self.nai=[]
        self.bgo=[]
    def LoadSwiftPHAs(self,phaFiles):
        '''
        Load The Swift PHAs in time order
        '''
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # restrict to the 15-150 keV band before summing counts
            s.ignore("**-15. 150.-**")
            cnts = sum(s.values)
            self.swift.append(cnts)
    def LoadNaiPHAs(self,phaFiles):
        '''
        Load The GBM NaI PHAs in time order
        '''
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # NOTE(review): '1999..' contains a doubled dot — confirm this is
            # the intended 8-1999 keV ignore-range syntax.
            s.ignore("**-8. 1999..-**")
            cnts = sum(s.values)
            self.nai.append(cnts)
    def LoadBGOPHAs(self,phaFiles):
        '''
        Load The GBM BGO PHAs in time order
        '''
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # restrict to the 250 keV - 10 MeV band
            s.ignore("**-250. 10000.-**")
            cnts = sum(s.values)
            self.bgo.append(cnts)
    def SetTimeBins(self,starts,stops):
        # NOTE(review): array(zip(...)) assumes Python 2 list semantics; on
        # Python 3 zip is an iterator and this yields a 0-d object array —
        # wrap with list() there.
        self.tBins = array(zip(starts,stops))
    def PlotLC(self):
        # three stacked panels sharing the time axis, one per instrument
        fig = plt.figure(1)
        grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column')
        Step(grid[0],self.tBins,self.swift,'r',1.)
        Step(grid[1],self.tBins,self.nai,'b',1.)
        Step(grid[2],self.tBins,self.bgo,'g',1.)
|
8,995 | a22aa66bd65033750f23f47481ee84449fa80dbc | # Python 3.6. Written by Alex Clarke
# Breakup a large fits image into smaller ones, with overlap, and save to disk.
# Sourecfinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing
import itertools
import bdsf
import glob
import pickle
from matplotlib.pyplot import cm
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def save_obj(obj, name ):
    """Serialise `obj` to '<name>.pkl' using the highest pickle protocol."""
    out_path = name + '.pkl'
    with open(out_path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
    """Load and return the object pickled at '<name>.pkl'."""
    in_path = name + '.pkl'
    with open(in_path, 'rb') as handle:
        return pickle.load(handle)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
    """Copy a Cutout2D's data and WCS back into a 4-axis (stokes, freq, y, x) HDU."""
    # re-wrap the 2D cutout plane in the degenerate stokes/freq axes
    ny, nx = cutout.data.shape
    fresh = np.zeros((1, 1, ny, nx), dtype=np.float32)
    fresh[0, 0, :, :] = cutout.data
    hdu.data = fresh
    # mirror the cutout's celestial WCS solution into the header cards,
    # keeping the original card insertion order
    wcs_obj = cutout.wcs.wcs
    for card, values in (
        ('CRVAL', wcs_obj.crval),
        ('CRPIX', wcs_obj.crpix),
        ('CDELT', wcs_obj.cdelt),
    ):
        hdu.header.set(card + '1', values[0])
        hdu.header.set(card + '2', values[1])
    hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])
    hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])
    return hdu
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
    """Divide `imagename` by the primary beam in `pbname`.

    Cuts the PB image to the image field of view, regrids it onto the image
    pixel grid with Montage, patches the NaN edges Montage introduces, then
    writes '<image>_PBCOR.fits'.
    NOTE(review): this function references `u`, `SkyCoord`, `montage` and
    `os`, none of which are imported at the top of this file — as written it
    raises NameError at runtime; confirm the missing imports
    (astropy.units, astropy.coordinates.SkyCoord, montage_wrapper, os).
    """
    print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
    hdu = fits.open(imagename)[0]
    pb = fits.open(pbname)[0]
    wcs = WCS(pb.header)
    # cutout pb field of view to match image field of view
    x_size = hdu.header['NAXIS1']
    x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
    size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
    position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
    print(' Cutting out image FOV from primary beam image...')
    cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
    # Update the FITS header with the cutout WCS by hand using my own function
    # don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
    #pb.header.update(cutout.wcs.to_header()) #
    pb = update_header_from_cutout2D(pb, cutout)
    # write updated fits file to disk
    pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
    # regrid PB image cutout to match pixel scale of the image FOV
    print(' Regridding image...')
    # get header of image to match PB to
    montage.mGetHdr(imagename, 'hdu_tmp.hdr')
    # regrid pb image (270 pixels) to size of ref image (32k pixels)
    montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
    os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
    # update montage output to float32
    pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
    newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
    newdata[0,0,:,:] = pb[0].data
    pb[0].data = newdata # naxis will automatically update to 4 in the header
    # fix nans introduced in primary beam by montage at edges and write to new file
    print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
    mask = np.isnan(pb[0].data)
    pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
    pb.flush()
    pb.close()
    # apply primary beam correction
    pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
    hdu.data = hdu.data / pb.data
    hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
    print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_image_chopping(input_image, split_into):
    """Chop a square FITS image into split_into**2 overlapping cutouts.

    Each cutout is written to '<image>_<i>_cutout.fits' and a PNG showing
    the cutout layout is saved alongside.

    :param input_image: path to the input FITS file (square, 4-axis data).
    :param split_into: number of divisions per axis.
    """
    hdu = fits.open(input_image)[0]
    wcs = WCS(hdu.header)
    # currently hard coded to only accept square images
    im_width = hdu.header['NAXIS1']  # get image width
    print(' Input fits image dimensions: {0}'.format(im_width))
    print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
    # centre positions for each new fits image: odd multiples of width/(2*split_into)
    positions = np.array(range(1, (split_into*2), 2))*(im_width/(split_into*2))
    # round to integer pixel coordinates; the overlap buffer below absorbs the error
    positions = positions.astype(int)
    positions_x = positions
    positions_y = positions
    # build all (x, y) centre combinations; length = split_into**2
    for i in range(split_into-1):
        # stack x coords repeating split_into times
        positions_x = np.hstack((positions_x, positions))
        # np.roll shifts y indices to produce the different combinations
        positions_y = np.hstack((positions_y, np.roll(positions, i+1)))
    position_coords_inpixels = np.array([positions_x, positions_y]).T
    # 5% buffer so neighbouring cutouts overlap; duplicates are sifted later
    size = (im_width/split_into) * 1.05
    size_inpixels = np.array([[size, size]]*(split_into**2)).astype(int)
    # keep a copy of the full-resolution plane: the loop below overwrites
    # hdu.data with each cutout, so later cutouts must not be cut from it
    original_data = hdu.data[0, 0, :, :].copy()
    # plot the original image with the cutout boundaries overlaid
    plt.figure()
    data = original_data.copy()
    # clip the display range (fixed: `data` was previously used before assignment)
    data[data < 1e-7] = 1e-7  # min pixel brightness to display
    data[data > 1e-5] = 1e-5  # max pixel brightness to display
    plt.imshow(data, origin='lower')
    colourlist = iter(cm.rainbow(np.linspace(0, 1, split_into**2)))  # one colour per cutout
    for i in range(split_into**2):
        print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
        # fixed: the original call had unbalanced parentheses around tuple(...)
        cutout = Cutout2D(original_data,
                          position=tuple(position_coords_inpixels[i]),
                          size=tuple(size_inpixels[i]),
                          mode='trim', wcs=wcs.celestial, copy=True)
        cutout.plot_on_original(color=next(colourlist))
        # update the FITS header (and data) with the cutout WCS by hand
        hdu = update_header_from_cutout2D(hdu, cutout)
        hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True)
    print(' Saving cutout arrangement as {0}'.format(input_image+'_cutouts.png'))
    plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
    """Stack matching 560/1400 MHz cutouts into 2-channel frequency cubes.

    Expects the cutout files produced by do_image_chopping; both lists are
    sorted so cutout i of each band is paired correctly.
    """
    images_560 = sorted(glob.glob('560*_cutout.fits'))
    images_1400 = sorted(glob.glob('1400*_cutout.fits'))
    for i, (file560, file1400) in enumerate(zip(images_560, images_1400)):
        print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
        hdu560 = fits.open(file560)[0]
        hdu1400 = fits.open(file1400)[0]
        # cutouts are 4-axis (stokes, freq, y, x): size the cube from the
        # spatial plane. Fixed: previously used data.shape[0:2], which are
        # the degenerate (1, 1) stokes/freq axes.
        plane560 = hdu560.data[0, 0, :, :]
        cube = np.zeros((2, plane560.shape[0], plane560.shape[1]))
        cube[0, :, :] = plane560                      # 560 MHz channel
        cube[1, :, :] = hdu1400.data[0, 0, :, :]      # 1400 MHz channel
        hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
        # header ch0 is 560 MHz; set the channel interval explicitly
        hdu_new.header.set('CDELT3', 840000000)  # 1400 MHz - 560 MHz = 840 MHz
        hdu_new.writeto('cube_cutout_'+str(i)+'.fits')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_sourcefinding(imagename, si=True):
    """Run PyBDSF source finding on `imagename`.

    :param imagename: FITS image (or 2-channel cube when si=True).
    :param si: when True, run in spectral-index mode over the two channels.
    """
    # get beam info manually. SKA image seems to cause PyBDSF issues finding this info.
    f = fits.open(imagename)
    beam_maj = f[0].header['BMAJ']
    beam_min = f[0].header['BMIN']
    #beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular
    beam_pa = 0
    f.close()
    # using some sensible and thorough hyper-parameters. PSF_vary and adaptive_rms_box is more computationally intensive, but needed.
    if si==True:
        img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\
            atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
            blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \
            collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
    if si==False:
        img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\
            atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
            blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
    # Apply the primary beam correction to both frequency maps.
    do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
    do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
    # divide x and y axes by split_into, giving split_into**2 output images.
    # a 3x3 grid lets pybdsf run efficiently (it fails on the 4GB 32k x 32k
    # pixel image) while avoiding a cut through the centre of the image.
    split_into = 3
    input_image_560 = '560mhz1000hours.fits'
    input_image_1400 = '1400mhz1000hours.fits'
    # cut up images and save to disk
    do_image_chopping(input_image_560, split_into)
    do_image_chopping(input_image_1400, split_into)
    # build per-cutout frequency cubes so pybdsf can use spectral index mode.
    # fixed: previously called make_image_cubes(), which does not exist.
    make_image_cubes_for_cutouts()
    # sourcefinding on individual frequency bands
    imagenames = glob.glob('*_cutout.fits')
    for image in imagenames:
        do_sourcefinding(image)
    # sourcefinding on cube to get spectral indices (si=True)
    # currently not working: images must share a field of view before cubing.
    # use code from pipeline.py if needed?
    #imagenames = sorted(glob.glob('cube_cutout_*.fits'))
    #for image in imagenames:
    #    do_sourcefinding(image, si=True)
|
8,996 | eb043c4c981b48763164e3d060fd52f5032be0ea | """ Version 3 of IRC (Infinite Recursive classifier). Based on the idea that each output is placed in a certain
location.
Let me try to solve a simpler problem first. Let me forget about the gate and do non stop recursive classification
step by step, one bye one.
Update. 19 May 2015. Let me stept this up. Instead of having a fixed width,
Update. 21 May 2015: Split into files, created School.py
#TODO: extending classifier
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
    """A single classifier plus the working-memory columns it reads and writes.

    Reads input columns [0, end_in_address) of the bank's working memory and
    writes its soft prediction to column out_address.
    """
    def __init__(self, end_in_address, out_address, classifier_name='Default',
                 given_predictor=None):
        self.out_address = out_address
        self.end_in_address = end_in_address  # end column of the input slice
        self.label = classifier_name  # the name of this concept, e.g. 'apple'
        # Either wrap a caller-supplied predictor or build a standard SVM.
        if given_predictor:
            self.given_predictor = given_predictor
            self.classifier_type = 'custom'
        else:
            self.classifier = svm.LinearSVC(dual=False, penalty='l1')
            self.classifier_type = 'standard'

    def fit(self, x_in, y):
        """Train on the input columns this node is wired to."""
        visible = x_in[:, :self.end_in_address]
        self.classifier.fit(visible, y)

    def predict(self, x_in):
        """
        Soft prediction in (0, 1) for every row of the working memory.

        :param x_in: the classifier bank's full working-memory matrix.
        :return: a column of predicted values.
        """
        visible = x_in[:, :self.end_in_address]
        if self.classifier_type == 'standard':
            raw_scores = self.classifier.decision_function(visible)
        else:
            raw_scores = self.given_predictor(visible)
        # squash from (-inf, inf) onto (0, 1) instead of hard 0/1 labels
        return np.array([sigmoid_10(i) for i in raw_scores])
class SimpleClassifierBank:
    """A machine which stores both the input X and the current outputs of a
    bunch of classifiers in one 2-d working memory.  API mirrors scikit-learn.
    """

    def __init__(self, max_width, input_width, height):
        """
        Initialize the working memory and the (empty) classifier list.

        :param max_width: total number of columns in working memory; must be
            greater than input_width so classifier outputs have room.
        :param input_width: number of columns reserved for raw input.
        :param height: maximum number of input samples.
        """
        self.current_working_memory = np.zeros([height, max_width])
        # First column available for classifier outputs.
        self.classifiers_out_address_start = input_width
        self.classifiers_current_count = 0  # output columns already handed out
        self.classifiers_list = []

    def predict(self, x_pred):
        """Run every classifier over the input and threshold the answer.

        :param x_pred: 2-d input, (samples x features).
        :return: int16 array of 0/1 predictions from the rightmost column
            any classifier (or the input itself) left nonzero.
        :raises ValueError: if x_pred is not 2-dimensional.
        """
        x_pred = np.array(x_pred)
        # Validate rank BEFORE unpacking the shape: the original unpacked
        # first, so a non-2d input crashed with a confusing unpack error.
        # (`is not 2` identity test replaced with a real inequality.)
        if len(x_pred.shape) != 2:
            print('Error in predict. Input dimension should be 2')
            raise ValueError
        input_number_samples, input_feature_dimension = x_pred.shape
        self.current_working_memory *= 0  # flush the previous contents
        self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
        for classifier_i in self.classifiers_list:
            predicted_value = classifier_i.predict(self.current_working_memory)
            if len(predicted_value.shape) < 2:
                predicted_value = predicted_value.reshape(-1, 1)
            predicted_shape = predicted_value.shape
            self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
        # The answer lives in the rightmost nonzero column.
        for column_j in reversed(range(self.current_working_memory.shape[1])):
            if np.any(self.current_working_memory[:input_number_samples, column_j]):
                soft_dec = self.current_working_memory[:input_number_samples, column_j]
                return np.array(soft_dec > 0.5, dtype=np.int16)
        print('Cant find any nonzero column')
        return self.current_working_memory[:, 0]

    def fit(self, x_in, y, task_name='Default'):
        """
        Adds a new classifier and trains it, similar to the scikit-learn API.

        :param x_in: 2d input data.
        :param y: labels.
        :param task_name: label given to the new classifier node.
        :raises MemoryError: when no output column is left for a new node.
        :raises ValueError: if x_in is not 2-dimensional.
        """
        # Check for limit reached on the number of classifiers.
        if self.classifiers_current_count + self.classifiers_out_address_start \
                > self.current_working_memory.shape[1]:
            print('No more space for classifier. ERROR')
            raise MemoryError
        x_in = np.array(x_in)
        # Validate rank before unpacking (see predict); message fixed — the
        # original said "Error in predict" here.
        if len(x_in.shape) != 2:
            print('Error in fit. Input dimension should be 2')
            raise ValueError
        input_number_samples, input_feature_dimension = x_in.shape
        self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
        # Procure a new classifier; this might be wasteful — later perhaps
        # reuse classifiers instead of lavishly getting new ones.
        new_classifier = ClassifierNode(
            end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
            out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
            classifier_name=task_name)
        self.classifiers_current_count += 1
        # Only the rows holding real samples are used for training.
        new_classifier.fit(self.current_working_memory[:input_number_samples], y)
        self.classifiers_list.append(new_classifier)

    def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
        """
        Push a hand-written predictor into the classifiers list.

        :param custom_function: callable used to predict; takes a 2D array
            and returns a 2D array of the same height and output_width.
        :param input_width: width of the input visible to the function.
        :param output_width: width of the function's output (1 for a single neuron).
        :param task_name: label for this node.
        """
        new_classifier = ClassifierNode(
            end_in_address=input_width,
            out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
            classifier_name=task_name,
            given_predictor=custom_function)
        self.classifiers_current_count += output_width
        self.classifiers_list.append(new_classifier)

    def status(self):
        """Print a summary and show working memory / coefficients as images."""
        print('Currently there are %d classifiers. They are' % len(self.classifiers_list))
        print([classifier_i.label for classifier_i in self.classifiers_list])
        classifiers_coefficients = np.zeros(self.current_working_memory.shape)
        for count, classifier_i in enumerate(self.classifiers_list):
            # Custom nodes have no coefficients; show a zero row for them.
            coeffs_i = classifier_i.classifier.coef_ \
                if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
            classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
        plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
        plt.title('Current working memory')
        plt.figure()
        plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
        plt.title('Classifier coefficients')
        plt.show()

    def remove_classifier(self, classifier_name):
        """
        Removes the classifier whose label equals classifier_name.

        :param classifier_name: the label of the classifier to be removed.
        :return: the index of the removed classifier; -1 if not found.
        """
        labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
        # The ValueError comes from .index(), so THAT is what the try must
        # wrap (the original guarded the comprehension instead).
        try:
            removing_index = labels_list.index(classifier_name)
        except ValueError:
            print('The specified label does not exist.')
            return -1
        self.classifiers_list.pop(removing_index)
        print('Classifier was removed. Its name was ' + classifier_name)
        return removing_index

    def score(self, x_in, y):
        """
        Accuracy of predict(x_in) against the true labels y.

        :param x_in: 2d matrix, samples x dimension.
        :param y: actual labels.
        :return: float between 0 and 1.
        """
        yp_score = self.predict(x_in)
        return accuracy_score(y, y_pred=yp_score)

    def generic_task(self, x_in, y, task_name):
        """Generic framework to train on a task, then report the score."""
        self.fit(x_in, y, task_name=task_name)
        print('The score for task %s is %s' % (task_name, self.score(x_in, y)))
# Global functions
# Reason for having 10 sigmoid is to get sharper distinction.
def sigmoid_10(x):
    """Logistic squash with slope 10 (sharper 0/1 distinction than slope 1).

    Numerically stable: the naive 1/(1+exp(-10x)) raises OverflowError for
    x below roughly -71, so large-negative inputs use the equivalent
    exp(10x)/(1+exp(10x)) form instead.

    :param x: real number.
    :return: float in (0, 1); 0.5 at x == 0.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-10.0 * x))
    z = math.exp(10.0 * x)  # safe: 10*x < 0 so z is in (0, 1)
    return z / (1.0 + z)
# Following are required for custom functions Task 1,2
def meanie(x):
    """Row-wise mean: one averaged value per sample of a 2-d input."""
    return np.mean(np.asarray(x), axis=1)
def dot_with_11(x):
    """Project each row onto the fixed weight vector [0.5, 0.5]
    (equivalently, the mean of two input columns)."""
    weights = np.array([0.5, 0.5])
    return np.dot(x, weights)
if __name__ == '__main__':
    learning_phase = False
    classifier_file_name = 'ClassifierFile.pkl'
    # Pickle streams are binary: open with 'rb'/'wb' (the original used text
    # modes, which fails on Python 3 and corrupts data on Windows), and use
    # `with` so the handles are always closed.
    if os.path.isfile(classifier_file_name):
        with open(classifier_file_name, 'rb') as pkl_file:
            Main_C1 = pickle.load(pkl_file)
    else:
        Main_C1 = SimpleClassifierBank(max_width=2000, input_width=1500, height=500)
    # Learn or not learn?
    if learning_phase:
        School.class_digital_logic(Main_C1)
        School.simple_custom_fitting_class(Main_C1)
    yp = Main_C1.predict(np.random.randn(8, 22))
    print('Predicted value is ' + str(yp))
    Main_C1.status()
    with open(classifier_file_name, 'wb') as pkl_file:
        pickle.dump(Main_C1, pkl_file)
#!/usr/bin/env python
##############
#### Your name: Alexis Vincent
##############
import numpy as np
import re
from skimage.color import convert_colorspace
from sklearn.model_selection import GridSearchCV
from sklearn import svm, metrics
from skimage import io, feature, filters, exposure, color
from skimage.feature import hog
import matplotlib.pyplot as plt
class ImageClassifier:
    """HOG-feature + linear-SVM image classifier.

    Images are loaded from a folder, converted to HOG feature vectors, and
    classified with an SVC.  Labels are parsed from the file names
    ('<label>_*.jpg').
    """

    def __init__(self):
        # NOTE(review): attribute spelled 'classifer' (sic) in the original;
        # kept unchanged in case external code reads it.
        self.classifer = None

    def imread_convert(self, f):
        """Load one image file as uint8 pixel data."""
        return io.imread(f).astype(np.uint8)

    def load_data_from_folder(self, dir):
        """Read every '*.jpg' under dir.

        :param dir: folder path ending with a separator, e.g. './train/'.
        :return: (data, labels) — stacked image array and the label parsed
            from each file name (text before the first underscore).
        """
        # read all images into an image collection
        ic = io.ImageCollection(dir + "*.jpg", load_func=self.imread_convert)
        # create one large array of image data
        data = io.concatenate_images(ic)
        # extract labels from image names; assumes every name contains '_'
        labels = np.array(ic.files)
        for i, f in enumerate(labels):
            m = re.search("_", f)
            labels[i] = f[len(dir):m.start()]
        return (data, labels)

    def extract_image_features(self, data):
        # Please do not modify the header above
        """Return one HOG feature row per image in data (None if empty)."""
        # Collect per-image vectors in a list and stack once at the end: the
        # original re-concatenated the whole array per image, which is
        # quadratic in the number of images.
        feature_rows = []
        for pic in data:
            rescaled_picture = exposure.rescale_intensity(pic)
            feature_data = hog(rescaled_picture,
                               orientations=11,
                               pixels_per_cell=(20, 20),
                               cells_per_block=(6, 6),
                               feature_vector=True,
                               block_norm='L2-Hys')
            feature_rows.append(feature_data)
        fd = np.vstack(feature_rows) if feature_rows else None
        # Please do not modify the return type below
        return fd

    def train_classifier(self, train_data, train_labels):
        # Please do not modify the header above
        """Train a linear SVC and store it on the instance."""
        clf = svm.SVC(C=1, gamma=0.001, kernel='linear')
        self.classifer = clf.fit(train_data, train_labels)

    def predict_labels(self, data):
        # Please do not modify the header
        """Predict labels for feature rows using the trained model."""
        predicted_labels = self.classifer.predict(data)
        # Please do not modify the return type below
        return predicted_labels

    def print_hog_pics(self, image):
        """Debug helper: show a grayscale image next to its HOG rendering."""
        # NOTE(review): 'visualise' was renamed 'visualize' in newer skimage
        # releases — confirm against the installed version.
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualise=True)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all', sharey='all')
        ax1.axis('off')
        ax1.imshow(image)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')
        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
        ax2.axis('off')
        ax2.imshow(hog_image_rescaled)
        ax2.set_title('Histogram of Oriented Gradients')
        ax1.set_adjustable('box-forced')
        plt.show()
def _print_report(title, true_labels, predicted_labels):
    """Print confusion matrix, accuracy and micro-F1 for one data split."""
    print("\n%s results" % title)
    print("=============================")
    print("Confusion Matrix:\n", metrics.confusion_matrix(true_labels, predicted_labels))
    print("Accuracy: ", metrics.accuracy_score(true_labels, predicted_labels))
    print("F1 score: ", metrics.f1_score(true_labels, predicted_labels, average='micro'))
    print(predicted_labels)


def main():
    """Load train/test images, train the classifier, report both splits."""
    img_clf = ImageClassifier()
    # load images
    (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')
    (test_raw, test_labels) = img_clf.load_data_from_folder('./test/')
    # convert images into features
    train_data = img_clf.extract_image_features(train_raw)
    test_data = img_clf.extract_image_features(test_raw)
    # train model, then evaluate on training data and on held-out test data
    # (the two near-identical report blocks are factored into _print_report)
    img_clf.train_classifier(train_data, train_labels)
    _print_report("Training", train_labels, img_clf.predict_labels(train_data))
    _print_report("Testing", test_labels, img_clf.predict_labels(test_data))


if __name__ == "__main__":
    main()
from sense_hat import SenseHat
import time
import random
#Set game_mode to True for single roll returning value
#False for demonstration purposes
class ElectronicDie:
    """A shake-to-roll die rendered on the Sense HAT 8x8 LED matrix.

    game_mode True: prompt() returns the value of a single roll.
    game_mode False: rolls forever (demonstration / testing).
    """

    def __init__(self, mode):
        self.game_mode = mode

    # NOTE: created once at class-definition time and shared by instances.
    sense = SenseHat()

    # Colours
    O = (0, 0, 0)      # off
    B = (0, 0, 255)    # blue pip

    # Settings
    accel_limit = 1.5   # g-force threshold that counts as a "shake"
    display_time = 3    # seconds to show the rolled face
    game_mode = False   # class default; overridden per instance in __init__

    # Die LED arrays (one 64-entry pixel list per face)
    one = [O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, B, B, O, O, O,
           O, O, O, B, B, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O]
    two = [B, B, O, O, O, O, O, O,
           B, B, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, O, O,
           O, O, O, O, O, O, B, B,
           O, O, O, O, O, O, B, B]
    three = [B, B, O, O, O, O, O, O,
             B, B, O, O, O, O, O, O,
             O, O, O, O, O, O, O, O,
             O, O, O, B, B, O, O, O,
             O, O, O, B, B, O, O, O,
             O, O, O, O, O, O, O, O,
             O, O, O, O, O, O, B, B,
             O, O, O, O, O, O, B, B]
    four = [B, B, O, O, O, O, B, B,
            B, B, O, O, O, O, B, B,
            O, O, O, O, O, O, O, O,
            O, O, O, O, O, O, O, O,
            O, O, O, O, O, O, O, O,
            O, O, O, O, O, O, O, O,
            B, B, O, O, O, O, B, B,
            B, B, O, O, O, O, B, B]
    five = [B, B, O, O, O, O, B, B,
            B, B, O, O, O, O, B, B,
            O, O, O, O, O, O, O, O,
            O, O, O, B, B, O, O, O,
            O, O, O, B, B, O, O, O,
            O, O, O, O, O, O, O, O,
            B, B, O, O, O, O, B, B,
            B, B, O, O, O, O, B, B]
    six = [B, B, O, O, O, O, B, B,
           B, B, O, O, O, O, B, B,
           O, O, O, O, O, O, O, O,
           B, B, O, O, O, O, B, B,
           B, B, O, O, O, O, B, B,
           O, O, O, O, O, O, O, O,
           B, B, O, O, O, O, B, B,
           B, B, O, O, O, O, B, B]

    def roll_die(self):
        """Pick 1-6 uniformly, display the matching face, return the value."""
        r = random.randint(1, 6)
        # Lookup table replaces the original six-branch if/elif ladder.
        faces = (self.one, self.two, self.three, self.four, self.five, self.six)
        self.sense.set_pixels(faces[r - 1])
        return r

    # Accelerometer measuring reference
    # https://projects.raspberrypi.org/en/projects/getting-started-with-the-sense-hat/8
    def prompt(self):
        """Prompt for a shake, then roll.

        :return: the rolled value when game_mode is truthy; otherwise loops
            forever, rolling on every shake.
        """
        try:
            self.sense.clear()
            self.sense.show_message("Shake")
            print("Shake Pi to roll dice")
            while True:
                # Read axes by name: the original unpacked .values(), which
                # silently depends on dictionary ordering.
                acc = self.sense.get_accelerometer_raw()
                if (abs(acc['x']) > self.accel_limit
                        or abs(acc['y']) > self.accel_limit
                        or abs(acc['z']) > self.accel_limit):
                    r = self.roll_die()
                    time.sleep(self.display_time)
                    self.sense.clear()
                    if self.game_mode:
                        return r
        except Exception as e:
            # Best effort: report the failure and blank the display.
            print(str(e))
            self.sense.clear()
#Standalone testing
if __name__ == '__main__':
    # Demonstration mode (game_mode False): rolls forever until interrupted.
    # (Stray dataset-residue '|' after the prompt() call removed — it was a
    # syntax error.)
    die = ElectronicDie(False)
    die.prompt()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TriggerPipelineReference(Model):
    """Pipeline that needs to be triggered with the given parameters.

    :param pipeline_reference: Pipeline reference.
    :type pipeline_reference: ~azure.mgmt.datafactory.models.PipelineReference
    :param parameters: Pipeline parameters.
    :type parameters: dict[str, object]
    """

    _attribute_map = {
        'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
        'parameters': {'key': 'parameters', 'type': '{object}'},
    }

    def __init__(self, pipeline_reference=None, parameters=None):
        # Initialise the msrest Model base; AutoRest-generated models always
        # call it, and skipping it leaves base-class state unset.
        super(TriggerPipelineReference, self).__init__()
        self.pipeline_reference = pipeline_reference
        self.parameters = parameters