text
stringlengths
38
1.54M
# Generated by Django 2.2 on 2020-05-28 16:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Fabricas', '0004_auto_20200527_2245'), ] operations = [ migrations.CreateModel( name='Prueba', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('idfk', models.CharField(max_length=200)), ('fecha_recibido', models.CharField(max_length=200)), ('fecha_entrega', models.CharField(max_length=200)), ('estado', models.CharField(max_length=200)), ('repuestos', models.CharField(max_length=200)), ('clientes', models.CharField(max_length=200)), ('precio_final', models.CharField(max_length=200)), ('cantidad', models.CharField(max_length=200)), ], ), ]
# Test fixtures: mock configuration blobs plus a helper that registers the
# HTTP endpoints (requests-mock style) that the "manager" component talks to
# during tests -- Consul KV and OpenStack Keystone.
from . import utilities

# Mocked Consul entry describing the manager container and its external
# (host-side) port mapping.
components_manager_mock = {
    "port": 8082,
    "bind": "0.0.0.0",
    "hostname": "manager",
    "container": "wukongsun/moon_manager:v4.3.1",
    "external": {
        "port": 30001,
        "hostname": "88.88.88.2"
    }
}

# Mocked Keystone connection settings (test credentials only; token and
# certificate checks disabled).
openstack_keystone_mock = {
    "url": "http://keystone:5000/v3",
    "user": "admin",
    "password": "p4ssw0rd",
    "domain": "default",
    "project": "admin",
    "check_token": False,
    "certificate": False,
    "external": {
        "url": "http://88.88.88.2:30006/v3"
    }
}


def register_consul(m):
    """Register every mock HTTP endpoint the tests need on *m*.

    NOTE(review): assumes *m* follows the requests-mock ``register_uri``
    API -- confirm against the test harness that supplies it.
    """
    # One Consul KV answer per known component, with the component's
    # configuration base64-encoded the way Consul returns values.
    for component in utilities.COMPONENTS:
        m.register_uri(
            'GET', 'http://consul:8500/v1/kv/{}'.format(component),
            json=[{'Key': component, 'Value': utilities.get_b64_conf(component)}]
        )
    # Liveness endpoints: the manager itself and the Keystone service root.
    m.register_uri(
        'GET', 'http://manager:30001',
        json={}
    )
    m.register_uri(
        'GET', 'http://keystone:5000/v3',
        json={}
    )
    # Token issue/revoke -- Keystone returns the token in the
    # X-Subject-Token response header, not in the body.
    m.register_uri(
        'POST', 'http://keystone:5000/v3/auth/tokens',
        headers={'X-Subject-Token': "111111111"}
    )
    m.register_uri(
        'DELETE', 'http://keystone:5000/v3/auth/tokens',
        headers={'X-Subject-Token': "111111111"}
    )
    # User lookup / creation endpoints exercised by the manager tests.
    m.register_uri(
        'POST', 'http://keystone:5000/v3/users?name=testuser&domain_id=default',
        json={"users": {}}
    )
    m.register_uri(
        'GET', 'http://keystone:5000/v3/users?name=testuser&domain_id=default',
        json={"users": {}}
    )
    m.register_uri(
        'POST', 'http://keystone:5000/v3/users/',
        json={"users": [{"id": "1111111111111"}]}
    )
__author__ = 'tang'

from hashlib import sha256


def encode(password):
    """Return the hex SHA-256 digest of *password*.

    :param password: the value to hash, as ``bytes`` or ``str``.
    :return: 64-character lowercase hex digest.

    FIX: the original passed *password* straight to ``update()``, which
    raises ``TypeError`` for ``str`` input; ``str`` is now UTF-8 encoded
    first (``bytes`` input behaves exactly as before).
    """
    if isinstance(password, str):
        password = password.encode('utf-8')
    gen = sha256()
    gen.update(password)
    return gen.hexdigest()
"""travellerProject URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from home import views urlpatterns = [ path('', include('home.urls')), path('user/',include('user.urls')), path('aboutus', views.aboutus, name='aboutus'), path('reference', views.reference, name='reference'), path('contact', views.contact, name='contact'), path('home/', include('home.urls')), path('gezi/',include('gezi.urls')), path('admin/', admin.site.urls), path(r'^ckeditor/', include('ckeditor_uploader.urls')), path('category/<int:id>/<slug:slug>/', views.category_gezi, name='category_gezi'), path('gezi/<int:id>/<slug:slug>/', views.gezi_detay, name='gezi_detay'), path('search/', views.gezi_search, name='gezi_search'), path('logout/', views.logout_view, name='logout_view'), path('login/', views.login_view, name='login_view'), path('signup/', views.signup_view, name='signup_view'), ] if settings.DEBUG: # new urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
valores = [3, 3, 3, 2, 2, 2, 1, 1, 1, 1]


# Se hace con pila y cola
def contarMasRep(lista):
    """Return the most repeated element of *lista* (None for an empty list).

    FIX: the original body was ``contar = contarMasRep(valores)`` -- it
    called itself unconditionally (infinite recursion / RecursionError) and
    never counted anything.  This implementation tallies occurrences and
    returns the element with the highest count; ties go to the element that
    reached that count first (dict insertion order).
    """
    if not lista:
        return None
    conteo = {}
    for valor in lista:
        conteo[valor] = conteo.get(valor, 0) + 1
    return max(conteo, key=conteo.get)


contar = contarMasRep(valores)
import cv2
import dropbox
import time
import random

# Timestamp of the last upload cycle; a new snapshot is taken 30 s after it.
start_time = time.time()


def take_snapshot():
    """Capture one frame from the default camera and save it as img<N>.png.

    :return: the file name of the written image.
    """
    number = random.randint(0, 100)
    videoCamera = cv2.VideoCapture(0)
    ret, frame = videoCamera.read()
    image_name = "img" + str(number) + ".png"
    cv2.imwrite(image_name, frame)
    # FIX: in the original these cleanup lines sat *after* ``return`` and
    # never executed, leaking the capture device on every call.
    videoCamera.release()
    cv2.destroyAllWindows()
    print("snapshot_taken")
    return image_name


def upload_image(img_name):
    """Upload *img_name* to Dropbox under /photos/, overwriting duplicates."""
    # SECURITY: hard-coded API token committed to source -- revoke it and
    # load it from an environment variable instead.
    access_token = "McfFQUW_RWwAAAAAAAAAAd0MW3V1zlnibGo96jPegh4MtIPlodRJBXpi_24uNRk9"
    file_from = img_name
    file_to = "/photos/" + img_name
    dbx = dropbox.Dropbox(access_token)
    with open(file_from, "rb") as f:
        dbx.files_upload(f.read(), file_to, mode=dropbox.files.WriteMode.overwrite)
    print("file uploaded")


def main():
    """Every 30 seconds, take a snapshot and push it to Dropbox."""
    global start_time
    while True:
        if (time.time() - start_time) >= 30:
            name = take_snapshot()
            upload_image(name)
            # FIX: reset the timer -- the original never did, so after the
            # first 30 s it snapped and uploaded on every loop iteration.
            start_time = time.time()


if __name__ == "__main__":  # FIX: allow importing the module without looping
    main()
"""Motif-finding HMM over DNA sequences.

Implements Viterbi decoding, the forward and backward algorithms, and
posterior decoding for a profile-style HMM with states
start / background-before / motif_1..motif_k / background-after / end.
'^' and '$' are start/end sentinel symbols.
"""

import argparse

import numpy as np
# FIX: logsumexp lives in scipy.special; scipy.misc.logsumexp was removed
# in SciPy 1.3, so the original import fails on any modern SciPy.
from scipy.special import logsumexp

# Column index of each symbol in the emission matrix.
LETTER_TO_INDEX = {'A': 0, 'C': 1, 'G': 2, 'T': 3, '^': 4, '$': 5}


def build_transition_matrix(num_states, motif_len, p, q):
    """Return the log transition matrix.

    State layout: 0 = start, 1 = background-before, 2..motif_len+1 = motif,
    -2 = background-after, -1 = end.  *q* is the probability of a motif
    occurring at all; *p* the per-step probability of leaving background.
    """
    transition_matrix = np.zeros((num_states, num_states))
    transition_matrix[0, 1] = q
    transition_matrix[0, -2] = 1 - q
    transition_matrix[1, 1] = 1 - p
    transition_matrix[1, 2] = p
    transition_matrix[-2, -2] = 1 - p
    transition_matrix[-2, -1] = p
    # Motif states chain deterministically into one another.
    for i in range(motif_len):
        transition_matrix[2 + i, 3 + i] = 1
    return np.log(transition_matrix)


def build_emission_matrix(initial_emissions, motif_len):
    """Stack background/motif emission rows, add '^'/'$' columns, return logs."""
    zero_row = np.zeros((1, 4))
    zero_col = np.zeros((motif_len + 4, 1))
    bg_emissions = np.array([0.25, 0.25, 0.25, 0.25])
    emission_matrix = np.vstack((zero_row, bg_emissions, initial_emissions, bg_emissions, zero_row))
    emission_matrix = np.hstack((emission_matrix, zero_col, zero_col))
    emission_matrix[0, -2] = 1   # start state emits '^'
    emission_matrix[-1, -1] = 1  # end state emits '$'
    return np.log(emission_matrix)


def calculate_viterbi(num_states, seq, transition_matrix, emission_matrix):
    """Return the Viterbi DP matrix and its traceback matrix for *seq*."""
    # FIX: np.NINF was removed in NumPy 2.0; -np.inf is the portable spelling
    # (used in the other DP builders below as well).
    viterbi_matrix = np.full((num_states, len(seq)), fill_value=-np.inf, dtype=float)
    trace_matrix = np.zeros((num_states, len(seq)), dtype=int)
    viterbi_matrix[0, 0] = 0
    for j in range(1, len(seq)):
        for i in range(num_states):
            vec = viterbi_matrix[:, j - 1] + transition_matrix[:, i]
            viterbi_matrix[i, j] = max(vec) + emission_matrix[i, LETTER_TO_INDEX[seq[j]]]
            trace_matrix[i, j] = np.argmax(vec)
    return viterbi_matrix, trace_matrix


def calculate_forward(num_states, seq, transition_matrix, emission_matrix):
    """Return the forward log-probability matrix and the total log-likelihood."""
    forward_matrix = np.full((num_states, len(seq)), fill_value=-np.inf, dtype=float)
    forward_matrix[0, 0] = 0
    for j in range(1, len(seq)):
        for i in range(num_states):
            vec = forward_matrix[:, j - 1] + transition_matrix[:, i]
            forward_matrix[i, j] = logsumexp(vec) + emission_matrix[i, LETTER_TO_INDEX[seq[j]]]
    result = logsumexp(forward_matrix[:, -1])
    return forward_matrix, result


def calculate_backward(num_states, seq, transition_matrix, emission_matrix):
    """Return the backward log-probability matrix and the total log-likelihood."""
    backward_matrix = np.full((num_states, len(seq)), fill_value=-np.inf, dtype=float)
    backward_matrix[:, -1] = 0
    for j in range(len(seq) - 1, 0, -1):
        for i in range(num_states):
            vec = backward_matrix[:, j] + transition_matrix[i, :] + emission_matrix[:, LETTER_TO_INDEX[seq[j]]]
            backward_matrix[i, j - 1] = logsumexp(vec)
    result = backward_matrix[0, 0]
    return backward_matrix, result


def print_50(str1, str2):
    """Print *str1* over *str2* in aligned blocks of 50 characters."""
    i, j = 0, 0
    while i < len(str1) - 1:
        j = min(i + 50, len(str1))
        print(str1[i:j])
        print(str2[i:j], '\n')
        i = j


def main():
    np.set_printoptions(precision=2)

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--alg', help='Algorithm (e.g. viterbi)', required=True)
    parser.add_argument('seq', help='A sequence over the alphabet [A,C,G,T] (e.g. ACTGGACTACGTCATGCA)')
    parser.add_argument('initial_emission', help='Path to emission table (e.g. initial_emission.tsv)')
    parser.add_argument('p', help='transition probability p (e.g. 0.01)', type=float)
    parser.add_argument('q', help='transition probability q (e.g. 0.5)', type=float)
    args = parser.parse_args()

    # handle input parameters (first row of the emission table is a header)
    initial_emissions = np.genfromtxt(args.initial_emission, delimiter='\t')[1:, ]
    seq = '^' + args.seq + '$'
    seq_len = len(seq)
    motif_len = len(initial_emissions)
    motif_states = ["s" + str(i + 1) for i in range(motif_len)]
    states = ["s", "b1"] + motif_states + ["b2", "e"]
    num_states = len(states)
    p = args.p
    q = args.q

    transition_matrix = build_transition_matrix(num_states, motif_len, p, q)
    emission_matrix = build_emission_matrix(initial_emissions, motif_len)

    if args.alg == 'viterbi':
        viterbi_matrix, trace_matrix = calculate_viterbi(num_states, seq, transition_matrix, emission_matrix)
        # traceback state sequence
        # NOTE(review): encoding each state as one character of ``trace``
        # only works while num_states <= 10 (motif_len <= 6) -- confirm the
        # expected motif lengths.
        index = np.argmax(viterbi_matrix[:, -1])
        trace = str(int(trace_matrix[index, -1]))
        for j in range(seq_len - 2):
            index = int(trace[-1])
            trace += str(int(trace_matrix[index, -j - 2]))
        trace = trace[::-1]
        # Motif states are the indices strictly between 1 and num_states-2.
        result = ''.join(['M' if 1 < int(trace[j]) < (num_states - 2) else 'B'
                          for j in range(1, len(trace))])
        print_50(result, seq[1:-1])
        return result
    elif args.alg == 'forward':
        forward_matrix, result = calculate_forward(num_states, seq, transition_matrix, emission_matrix)
        print(result)
        return result
    elif args.alg == 'backward':
        backward_matrix, result = calculate_backward(num_states, seq, transition_matrix, emission_matrix)
        print(result)
        return result
    elif args.alg == 'posterior':
        forward_matrix, result1 = calculate_forward(num_states, seq, transition_matrix, emission_matrix)
        backward_matrix, result2 = calculate_backward(num_states, seq, transition_matrix, emission_matrix)
        # Posterior state probabilities (up to normalization) in log space.
        posterior_matrix = forward_matrix + backward_matrix
        # traceback state sequence
        trace = str(int(np.argmax(posterior_matrix[:, -2])))
        for j in range(1, seq_len - 1):
            trace += str(int(np.argmax(posterior_matrix[:, -j - 2])))
        trace = trace[::-1]
        result = ''.join(['M' if 1 < int(trace[j]) < (num_states - 2) else 'B'
                          for j in range(1, len(trace))])
        print_50(result, seq[1:-1])
        return result


if __name__ == '__main__':
    main()
""" GROMACS ANALYSER (for molecular dynamics trajectories) This script is for analysing the trajectories obtained by gromacs software. It will use the MDtraj module for such analysis. There will be some flag options which the user can pick on and the files and plots will be saved in a new created directory Done by: Ruben Canadas Rodriguez """ # Let's import packages import mdtraj as md import glob, os, sys, argparse import numpy as np import matplotlib.pyplot as plt import seaborn as sns import itertools import matplotlib from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler matplotlib.rcParams.update({"font.size": 22}) plt.style.use("seaborn-pastel") __author__="Ruben Canadas" __mail__ = "rubencr14@gmail.com" __maintainer__="Ruben Canadas" __version__=1.0 class OpenFiles: def __init__(self, xtc_file, top_file, trr_file=None, file=None): self.xtc_file = xtc_file self.top_file = top_file self.trr_file = trr_file self.file = file #Any file of format: pdb,xtc,trr,dcd,binpos,Netcdf,hdf5.. def load_xtc(self): """ :param xtc_file: Trajectory in xtc file :param top_file: Topology file of such trajectory :return: trajectory loaded """ return md.load_xtc(self.xtc_file, self.top_file) def load_trr(self): return md.load_trr(self.trr_file, self.top_file) def load_trajectory(self): return md.load(self.file, self.top_file) def information(self, file): with open("trajectory_info.txt", "w") as outfile: outfile.write("Number of frames: {}\nnumber of atoms: {}\nnumber of residues: {}\number of chains: {}\n\n".format(file.n_frames, file.n_atoms, file.n_residues, file.n_chains)) def number_frames(self, file): return file.n_frames class directory_manager: def __init__(self, directory): self.directory = directory def create_directory(self): if not os.path.exists(self.directory): os.mkdir(self.directory) else: pass class TrajectoryProperties: """ This class will have methods for computing properties of a trajectory: rmsd, distances .. 
""" def __init__(self, traj, metric="angstrom"): self.traj = traj self.metric = metric def nanometer_to_angstrom(func): """ This will be a decoration method :param func: Function to decorate :return: """ def new_metric(self, pairs): return func(self, pairs)[0] * 10 return new_metric def traj_rmsd(self, reference_frame): return md.rmsd(self.traj, reference_frame) # @nanometer_to_angstrom def compute_distance(self, atom_pairs): """ :param atom_pairs: Each row gives the indices of two atoms involved in the interaction: np.ndarray, shape=(num_pairs, 2), dtype=int :return: distances : np.ndarray, shape=(n_frames, num_pairs), dtype=float (The distance, in each frame, between each pair of atoms) """ return md.compute_distances(self.traj, atom_pairs) @nanometer_to_angstrom # Decorator for transforming array in nanometer to array in angstroms def compute_displacements(self, atom_pairs): """ :param atom_pairs: Each row gives the indices of two atoms: np.ndarray, shape[num_pairs, 2], dtype=int :return: displacements : np.ndarray, shape=[n_frames, n_pairs, 3], dtype=float32 """ return md.displacements(self.traj, atom_pairs) @nanometer_to_angstrom #Decorator for transforming array in nanometer to array in angstroms def compute_contacts(self, residue_pairs): """ :param residue_pairs: An array containing pairs of indices (0-indexed) of residues to compute the contacts between :return: distances: np.ndarray, shape=(n_frames, n_pairs); residues_pairs: np.ndarray, shape=(n_pairs, 2) """ return md.compute_contacts(self.traj, residue_pairs) def compute_angles(self, angle_indices): """ :param angle_indices: Each row gives the indices of three atoms which together make an angle (np.ndarray, shape=(num_angles, 3), dtype=int) :return: The angles are in radians (np.ndarray, shape=[n_frames, n_angles], dtype=float) """ return md.compute_angles(self.traj, angle_indices) def compute_dihedrals(self, indices): """ :param indices: Each row gives the indices of four atoms which together make a 
dihedral angle (np.ndarray, shape=(n_dihedrals, 4), dtype=int) :return: dihedrals : np.ndarray, shape=(n_frames, n_dihedrals), dtype=float. The output array gives, in each frame from the trajectory, each of the n_dihedrals torsion angles. The angles are measured in radians """ return md.compute_dihedrals(self.traj, indices) def compute_hbonds(self, plot=True, color=itertools.cycle(["r","b","gold", "g"]), hbonds_to_plot=[14,15,16, 17]): """ """ hbonds = md.baker_hubbard(self.traj, periodic=False) label = lambda hbond: "%s -- %s" % (self.traj.topology.atom(hbond[0]), self.traj.topology.atom(hbond[2])) dist = md.compute_distances(self.traj, hbonds[:, [0, 2]], periodic=False) for hbond in hbonds: print(label(hbond)) if plot: for i in hbonds_to_plot: plt.hist(dist[:,i], color=next(color), label=label(hbonds[i]), alpha=0.5) plt.legend() plt.ylabel("Freq", labelpad=20) plt.xlabel("donor-receptor distance [nm]", labelpad=20) plt.show() def pca_analysis(self, scale=True, xlabel="PC1", ylabel="PC2", fontsize=20, labelpad=20): """ """ pca = PCA(n_components=2) self.traj.superpose(self.traj, 0) if scale: reduced = pca.fit_transform(StandardScaler().fit_transform(self.traj.xyz.reshape(self.traj.n_frames, self.traj.n_atoms * 3))) else: reduced = reduced = pca.fit_transform(self.traj.xyz.reshape(self.traj.n_frames, self.traj.n_atoms * 3)) plt.scatter(reduced[:,0], reduced[:,1], marker="o", c=self.traj.time) plt.xlabel(xlabel, fontsize=fontsize, labelpad=labelpad) plt.ylabel(ylabel, fontsize=fontsize, labelpad=labelpad) cbar = plt.colorbar(); cbar.set_label("Time [ps]") plt.show() def compute_sasa(self, radius=0.14, mode="residue"): """ Compute the solvent accessible surface area of each atom or residue in each simulation frame :param radius: The radius of the probe, in nm :param mode: In mode == atom the extracted areas are resolved peratom In mode == residue, this is consolidated down to the per-residue SASA by summing over the atoms in each residue :return: The accessible 
surface area of each atom or residue in every frame. If mode == atom, the second dimension will index the atoms in the trajectory, whereas if mode == residue, the second dimension will index the residues. ( np.array, shape=(n_frames, n_features) """ return md.shrake_rupley(self.traj, probe_radius = radius, mode=mode) def compute_radius_of_gyration(self): """ Compute the radius of gyration for every frame. :return: Rg for every frame (ndarray) """ return md.compute_rg(self.traj) def compute_inertia_tensor(self): """ Compute the inertia tensor of a trajectory. :return: I_ab: np.ndarray, shape=(traj.n_frames, 3, 3), dtype=float64 (Inertia tensors for each frame) """ return md.compute_inertia_tensor(self.traj) class Plotter: def __init__(self, x_axis, y_axis, figure_name, style="seaborn-pastel", z_axis=None, plot=False, save=True): self.x_axis = x_axis self.y_axis = y_axis self.z_axis = z_axis self.plot = plot self.save = save self.path = "." self.dpis = 300 self.title = "Old triple mutant" self.x_label = "Steps" self.y_label ="Contact His195-Ser15" self.z_label=None self.cmap = "plasma" self.figure_name = figure_name plt.style.use(style) def scatter_plot(self): plt.plot(self.x_axis, self.y_axis) plt.title(self.title); plt.xlabel(self.x_label); plt.ylabel(self.y_label) if self.z_axis is not None: bar = plt.colorbar(); bar.set_label(self.z_label) if self.plot: plt.show() if self.save: plt.savefig(os.path.join(self.path, "md_{}_plot.png".format(self.figure_name)),dpi=self.dpis) plt.clf() def box_plot(self): sns.boxplot(self.y_axis, orient="v") plt.title(self.title); plt.xlabel(self.x_label, labelpad=20); plt.ylabel(self.y_label, labelpad=20) if self.plot:plt.show() if self.save: plt.savefig(os.path.join(self.path, "md_{}_boxplot.png".format(self.figure_name)), dpi=self.dpis) plt.clf() def superpose_plots(self, Y_array): """ :param Y_array: array of Y arrays for different trajectories (for instance when comparing the effect between mutants) :return: """ #TODO: we have to 
load another trajectory so as to be compared for array in Y_array: plt.plot(self.x_axis, array) if self.plot: plt.show() if self.save: plt.savefig(self.path, dpi=self.dpis) plt.clf() def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("xtc", type=str, help="Trajectory in .xtc format") parser.add_argument("top", type=str, help="Topology file (normally in .gro format)") parser.add_argument("-info", "--info", help="Save informfation of trajectory in file", action="store_true") parser.add_argument("-distance","--distance", type=int, help="Two atoms (number) to compute it distance along the trajectory", nargs=2) parser.add_argument("-contact", "--contact", type=int, help="Two residues (number of residues) to compute its contacts along the trajectory", nargs=2) parser.add_argument("-displacement", "--displacement", type=int, help="Atom pair for computing its displacements along the tractory", nargs=2) parser.add_argument("-gyration", "--gyration", help="Compute the gyration at each trajectory frame", action="store_true") parser.add_argument("-sasa", "--sasa",help="Compute the solvent accessible surface area of each atom or residue in each simulation frame (shrake rupley method)", action="store_true") parser.add_argument("-plot_style","--plot_style", type=str, help="Style of the plots (default=ggplot)", default="ggplot") parser.add_argument("-plot", "--plot", help="Show plots", action="store_true") parser.add_argument("-save_plot", "--save_plot", help="Save plots in directory", action="store_true") parser.add_argument("-rep", "--replicas", help="Compute mean of the replicas", type=str, nargs="+") parser.add_argument("-p", "--pca", help="PCA analysis of the trajectory", action="store_true") parser.add_argument("-hb", "--hbond", help="Compute hbond distribution", action="store_true") args = parser.parse_args() return args.xtc, args.top, args.distance, args.contact, args.displacement, args.gyration , args.sasa, \ args.plot_style, args.plot, 
args.save_plot, args.replicas, args.pca, args.hbond def main(): xtc, top, distance, contact, displacement, gyration, sasa, plot_style, plot, save_plot, replicas, pca, hbond = parse_args() xtc = OpenFiles(xtc, top) traj = xtc.load_xtc() prop = TrajectoryProperties(traj) number_of_frames = xtc.number_frames(traj) x_axis = np.arange(0,number_of_frames,1) if pca: prop.pca_analysis() if hbond: prop.compute_hbonds() if distance is not None: distances = prop.compute_distance([distance]) pl = Plotter(x_axis, distances, figure_name="distance", plot=plot, save=save_plot) pl.scatter_plot() pl.box_plot() print("distances ", distances) if contact is not None: contacts = prop.compute_contacts([contact]) pl = Plotter(x_axis, contacts, figure_name="contact", plot=plot, save=save_plot) pl.scatter_plot() pl.box_plot() if sasa: sasa =prop.compute_sasa() print("sasa ", sasa) if __name__=="__main__": main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class RefundSubFee(object):
    """Alipay API model carrying the sub-fee details of a refund."""

    # Names of the serializable fields, shared by both converters below.
    _FIELDS = ('refund_charge_fee', 'switch_fee_rate')

    def __init__(self):
        self._refund_charge_fee = None
        self._switch_fee_rate = None

    @property
    def refund_charge_fee(self):
        return self._refund_charge_fee

    @refund_charge_fee.setter
    def refund_charge_fee(self, value):
        self._refund_charge_fee = value

    @property
    def switch_fee_rate(self):
        return self._switch_fee_rate

    @switch_fee_rate.setter
    def switch_fee_rate(self, value):
        self._switch_fee_rate = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict, recursing into
        nested Alipay models via their own ``to_alipay_dict``."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a RefundSubFee from dict *d*; returns None for empty input."""
        if not d:
            return None
        o = RefundSubFee()
        for field in RefundSubFee._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
from django.shortcuts import render, redirect, HttpResponse
from .models import AgileCard, AgileCardForm
from django.contrib.auth.decorators import login_required


@login_required
def cards(request):
    """Kanban board: create a card on POST, render the active board on GET."""
    if request.method == 'POST':
        new_card = AgileCardForm(request.POST)
        # FIX: the original called save() without validating, which raises
        # ValueError (500) on bad input; invalid submissions now just
        # redirect back to the board.
        if new_card.is_valid():
            new_card.save()
        return redirect('/')
    active_cards = AgileCard.objects.filter(active=True).order_by('-modified_at')
    context = {'todo': active_cards.filter(state='TODO').order_by('-modified_at'),
               'in_progress': active_cards.filter(state='IN_PROGRESS').order_by('-modified_at'),
               'done': active_cards.filter(state='DONE'),
               'new_card': AgileCardForm()}
    return render(request, 'cards.html', context)


@login_required
def update(request, id=0, state=''):
    """Move the card with *id* into *state*."""
    # FIX: objects.get() raises DoesNotExist rather than returning a falsy
    # value, so the original "not found" branch was unreachable and a
    # missing id produced a 500.  filter().first() yields None instead.
    card = AgileCard.objects.filter(id=id).first()
    if card and state:
        card.state = state
        card.save()
        return redirect('/')
    return HttpResponse('card not found or state not provided')


@login_required
def delete(request, id=0):
    """Soft-delete: mark the card inactive instead of removing the row."""
    card = AgileCard.objects.filter(id=id).first()  # None when id is unknown
    if card:
        card.active = False
        card.save()
        return redirect('/')
    return HttpResponse('card not found')


@login_required
def edit(request, id):
    """Edit a card's content/state: POST saves, GET renders the form."""
    card = AgileCard.objects.filter(id=id).first()
    if card is None:
        # Consistent with update()/delete() instead of raising DoesNotExist.
        return HttpResponse('card not found')
    if request.method == 'POST':
        card.content = request.POST['content']
        card.state = request.POST['state']
        card.save()
        return redirect('/')
    context = {
        'card': card
    }
    return render(request, 'edit_card.html', context)
# Trains one small dense network per binary label column on bag-of-words
# features of transcribed utterances, reporting loss/accuracy/AUC per class.
from __future__ import print_function
import pandas as pd
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Dropout, LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import roc_auc_score

data = pd.read_csv('utterance-data-with-labels-binary.csv')
utterances = data['TranscribedUtterance']
# Drop the label columns that are not part of this experiment.
data = data.drop(['IsInstructionalStatement','IsDisciplinaryUtterance','IsDisciplinaryStatement','IsEvaluationFollowupIncluded','IsEvaluationValencePositive','CombinedAuthCogUptake','Uptake','IsInstructionalQuestion', 'IsDisciplinaryQuestion','IsStudentResponsePresent','IsSerialQuestion'], axis=1)

# Gather the remaining label columns (from column 4 onward) row by row into
# a 2-D array: one row per utterance, one column per binary class.
# NOTE(review): np.append in a loop is O(n^2); data.iloc[:, 4:].to_numpy()
# would build this in one step -- left as-is.
respList = np.array([list(data.iloc[0,4:])])
for i in range(1,len(data)):
    respList = np.append(respList, [list(data.iloc[i,4:])], axis=0)

docs = list(utterances.values)

# vectorize bag of words
vectorizer = CountVectorizer(min_df=0, lowercase=False)
vectorizer.fit(docs)
docs2 = vectorizer.transform(docs).toarray()

info = []
result=[]
# One independent model per class; assumes exactly 7 label columns remain --
# TODO confirm against the CSV schema.
for i in range(7):
    newList = respList[:,i] #one class at a time
    class_name = data.columns[4+i]
    print('----------------->>> STARTING: ', class_name, ' CLASS <<<------')
    X_train, X_test, y_train, y_test = train_test_split(docs2, newList, test_size=0.2)
    input_dim = X_train.shape[1]
    # Small fully-connected binary classifier with dropout regularization.
    model = Sequential()
    model.add(Dense(64, input_dim=input_dim, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    batchsz = 4
    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    print('Train...')
    model.fit(X_train, y_train, batch_size=batchsz, epochs=20, validation_data=(X_test, y_test), shuffle=True)
    # NOTE(review): the test split doubles as validation data, so reported
    # metrics are not from a held-out set -- confirm this is intended.
    score, acc = model.evaluate(X_test, y_test,batch_size=batchsz)
    y_pred = model.predict(X_test)
    result.append([[class_name,' score:', score],['accuracy:', acc],['AUC:', roc_auc_score(y_test, y_pred, multi_class='ovr')]])
    print("")
    print("")

print(result)
print("")
print("")
exit()
import random

# Four-slot list allocated up front.  (In C you would have to reserve this
# memory explicitly before filling it -- translated from the original
# Korean comment.)
aa = [0, 0, 0, 0]

# Fill every slot with a random integer in [0, 99].
for idx in range(4):
    aa[idx] = random.randint(0, 99)

print(aa)
# -*- coding: utf-8 -*- """ Created on Sat Jul 20 14:25:52 2019 @author: VAIBHAV """ import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset = pd.read_csv('Salary_Data.csv') x = dataset.iloc[:,:-1].values y = dataset.iloc[:,1].values #SPLIT DATASET INTO TRAINING AND TEST SET from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=1/3,random_state=0) #FEATURES SCALING """from sklearn.preprocessing import StandardScaler sc_x = StandardScaler() x_train = sc_x.fit_transform(x_train) x_test = sc_x.transform(x_test)""" #FITTINF THE SIMPLE LINEAR REGRESSION TO TRAINING SET from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(x_train, y_train) #PREDICTING THE TEST SET RESULT y_pred = regressor.predict(x_test) #PLOTTING THE TRAINING SET plt.scatter(x_train, y_train, color='red') plt.plot(x_train, regressor.predict(x_train)) plt.xlabel('Experiance') plt.ylabel('Salary') plt.title('Experiance vs Salary') plt.show() #PLOTTING FOR TEST SET plt.scatter(x_test, y_test, color='red') plt.plot(x_train, regressor.predict(x_train)) plt.xlabel('Experience') plt.ylabel('Salary') plt.title('Experiance vs Salary') plt.show()
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.shortcuts import redirect

# Create your views here.

# In-memory credential store and online-user list (demo only -- a real app
# must use Django's auth framework with hashed passwords).
accountInfo = {'admin': '123', 'Manchester': '921'}
onlineAccount = []

# Salt shared by all the signed login cookies below.
_COOKIE_SALT = '&1`)g@{a#4'


def gontoLogin(request):
    """Render the login page, pre-filling credentials from signed cookies."""
    info = {}
    if 'account' in request.COOKIES:
        info['account'] = request.get_signed_cookie('account', salt=_COOKIE_SALT)
    if 'password' in request.COOKIES:
        info['password'] = request.get_signed_cookie('password', salt=_COOKIE_SALT)
    return render(request, 'login.html', info)


def gontoHome(request):
    """Validate POSTed credentials; render home with cookies/session on
    success, re-render the login page with an error message on failure."""
    if request.POST:
        info = {}
        info['account'] = request.POST.get('account', None)
        info['password'] = request.POST.get('password', None)
        if info['account'] in accountInfo and info['password'] == accountInfo[info['account']]:
            rep = render(request, 'home.html', info)
            # SECURITY NOTE: storing the plaintext password in a cookie
            # (even signed) is unsafe; kept only for behaviour compatibility.
            rep.set_signed_cookie('account', info['account'], salt=_COOKIE_SALT)
            rep.set_signed_cookie('password', info['password'], salt=_COOKIE_SALT)
            request.session['account'] = info['account']
            request.session['password'] = info['password']
            onlineAccount.append(info['account'])
            return rep
        # FIX: the original did ``return redirect('/', {'msg': ...})`` --
        # redirect() ignores that dict -- and left the intended error render
        # unreachable dead code below it.  Show the error message instead.
        return render(request, 'login.html', {'msg': '用户名或密码错误'})
    return redirect('/')


def logout(request):
    """Clear the session credentials and return to the login page."""
    # FIX: ``del request.session[...]`` raised KeyError when the key was
    # already gone (e.g. logout hit twice); pop() is tolerant.
    request.session.pop('account', None)
    request.session.pop('password', None)
    return redirect('/')
"""Build, run, and plot PFLOTRAN decomposition-network simulations.

Two networks are assembled with the project `decomp_network` module:
a fermentation/Fe-reduction/methanogenesis network and a CTC-style
litter/SOM network, each run in several variants (low Fe, high O2).
Results are produced by `PF_network_writer(...).run_simulation(...)`,
converted with `plot_pf_output.convert_units`, and plotted via pylab.
"""
import decomp_network
import plot_pf_output

# Path to the PFLOTRAN executable used by run_simulation.
pflotran_exe='../pflotran-interface/src/pflotran/pflotran'

simlength=365

# Chemical pools: immobile SOM, primary/secondary aqueous species, gases,
# minerals, and one surface complex. Constraint strings follow PFLOTRAN's
# CONSTRAINT syntax (value, then type/mineral or site density).
pools = [
decomp_network.decomp_pool(name='cellulose',CN=50,constraints={'initial':1e2},kind='immobile'),
decomp_network.decomp_pool(name='HRimm',constraints={'initial':1e-20},kind='immobile'),

decomp_network.decomp_pool(name='DOM1',CN=50,constraints={'initial':1e-30},kind='primary'),
decomp_network.decomp_pool(name='H+',kind='primary',constraints={'initial':'4.0 P'}),
decomp_network.decomp_pool(name='O2(aq)',kind='primary',constraints={'initial':1e-12}),
decomp_network.decomp_pool(name='HCO3-',kind='primary',constraints={'initial':'400e-6 G CO2(g)'}),
decomp_network.decomp_pool(name='Fe+++',kind='primary',constraints={'initial':'.37e-3 M Fe(OH)3'}),
decomp_network.decomp_pool(name='Fe++',kind='primary',constraints={'initial':'0.37e-3'}),
decomp_network.decomp_pool(name='NH4+',kind='primary',constraints={'initial':1e-15}), # SOMDecomp sandbox requires this
decomp_network.decomp_pool(name='Tracer',kind='primary',constraints={'initial':1e-15}), # Just to accumulate CO2 loss
decomp_network.decomp_pool(name='CH4(aq)',kind='primary',constraints={'initial':1e-15}),
decomp_network.decomp_pool(name='Acetate-',kind='primary',constraints={'initial':1e-15}),

decomp_network.decomp_pool(name='CO2(g)',kind='gas'),
decomp_network.decomp_pool(name='O2(g)',kind='gas'),

decomp_network.decomp_pool(name='CO2(aq)',kind='secondary'),
decomp_network.decomp_pool(name='OH-',kind='secondary'),
decomp_network.decomp_pool(name='FeCO3+',kind='secondary'),
decomp_network.decomp_pool(name='Fe(OH)4-',kind='secondary'),
decomp_network.decomp_pool(name='Acetic_acid(aq)',kind='secondary'),
decomp_network.decomp_pool(name='FeCH3COO+',kind='secondary'),
# decomp_network.decomp_pool(name='FeCO3(aq)',kind='secondary'),

decomp_network.decomp_pool(name='Fe(OH)3',rate='1.d-5 mol/m^2-sec',constraints={'initial':'1.75d-1 1.d2 m^2/m^3'},kind='mineral'), #
decomp_network.decomp_pool(name='Goethite',rate='1.d-5 mol/m^2-sec',constraints={'initial':'1.75d-2 1.d1 m^2/m^3'},kind='mineral'), #
decomp_network.decomp_pool(name='Fe',rate='1.d-7 mol/m^2-sec',constraints={'initial':'1.0e-6 1. m^2/m^3'},kind='mineral'),
decomp_network.decomp_pool(name='Fe(OH)2',rate='1.d-7 mol/m^2-sec',constraints={'initial':'0.0e-20 1.e-10 m^2/m^3'},kind='mineral'),
decomp_network.decomp_pool(name='Rock(s)',rate='0.0 mol/m^2-sec',constraints={'initial':'0.5 5.0e3 m^2/m^3'},kind='mineral'),

decomp_network.decomp_pool(name='>Carboxylate-',kind='surf_complex',mineral='Rock(s)',site_density=1000.0,complexes=['>Carboxylic_acid']),
]

# Variant pool lists: entries are shallow-copied before mutation so the
# shared `pools` entries are not modified.
pools_lowFe=pools.copy()
for n,p in enumerate(pools_lowFe):
    if p['name']=='Fe(OH)3':
        pools_lowFe[n]=pools_lowFe[n].copy()
        pools_lowFe[n].update(constraints={'initial':'0.0 1.e2 m^2/m^3'})

pools_highO2=pools.copy()
for n,p in enumerate(pools_highO2):
    if p['name']=='O2(aq)':
        pools_highO2[n]=pools_highO2[n].copy()
        pools_highO2[n].update(constraints={'initial':'1.0e1'})
    # if p['name']=='Fe+++':
    #     pools_lowFe[n]=pools_lowFe[n].copy()
    #     pools_lowFe[n].update(constraints={'initial':'1.d-20'})

# Herndon et al 2015, BGC: Fe(III) average concentration 0.37 mmol/L (same as mM). SO4- was 0.07, NO3- was 0.03.
# Fe(III) was 60% of dissolved Fe
# ** What form was it in? Does it just dissolve or was it released/complexed by microbial activity?
# DOC 5-15 mmol/L
# Fe probably present as iron oxide precipitates or Fe-OM complexes, not primary minerals
#
# Herndon et al 2015, JGR-B: Organic acids 12% of WEOC in organic layer.
# formate 48.6 umol/ g SOC; acetate 66, propionate 0.76
# Each mol of acetate produces 1 mol of methane or 8 mol of Fe(II)
# Ferrihydrite specific surface area ~250 m2/g (Kaiser and Guggenberger 2008 https://doi.org/10.1046/j.1365-2389.2003.00544.x)
#
# Major issue so far is balancing pH: Fe(III) release from mineral sucks up a lot of H+, and I'm not sure whether it's being replenished from
# some reaction involving Fe++
# Beth: There should be a lot of solid OM functional groups, probably carboxylate/carboxylic acid that exchange protons and buffer pH in these soils
# Fe(II) is probably sorbing onto OM, or staying dissolved
# Fe(III) should precipitate in this system, so if it's dissolved it would be complexed with something
# Per Persson has papers on Fe partitioning in soils as a function of pH
# Derek Lovely has done a lot of Fe reduction lab work to look at reduction rates
# Kinetics of water-rock interaction (textbook) has lots of rate constants. Look for chapter by Eric Rodin
# Aaron Thompson does work on Fe oxides and redox fluctuations

# --- Reaction definitions ---
hydrolysis = decomp_network.reaction(name='Hydrolysis',reactant_pools={'cellulose':1.0},product_pools={'DOM1':1.0},
                                    rate_constant=1e-1,rate_units='y', # Jianqiu Zheng et al., 2019: One third of fermented C is converted to CO2
                                    inhibition_terms=[decomp_network.inhibition(species='DOM1',type='MONOD',k=1e-5)])

# Calculating these as per unit carbon, so dividing by the 6 carbons in a glucose
# C6H12O6 + 4 H2O -> 2 CH3COO- + 2 HCO3- + 4 H+ + 4 H2
fermentation = decomp_network.reaction(name='fermentation',reactant_pools={'DOM1':6/6},product_pools={'Acetate-':2/6,'HCO3-':2/6,'H+':4/6+4*2/6,'Tracer':2/6}, # balancing pH of FeIII release requires an extra 5.5 H+ to be released here
                                    rate_constant=1e-10,reactiontype='MICROBIAL', # Jianqiu Zheng et al., 2019: One third of fermented C is converted to CO2
                                    inhibition_terms=[decomp_network.inhibition(species='O2(aq)',k=6.25e-8,type='MONOD'),decomp_network.inhibition(species='Acetate-',k=6.25e-5,type='MONOD')],
                                    monod_terms=[decomp_network.monod(species='DOM1',k=1e-5,threshold=1.1e-15)])

# CH2O + H2O -> CO2 + 4H+ + 4 e-
# O2 + 4H+ + 4 e- -> 2H2O
DOM_resp = decomp_network.reaction(name='DOM aerobic respiration',reactant_pools={'DOM1':1.0,'O2(aq)':1.0},product_pools={'HCO3-':1.0,'H+':1.0,'Tracer':1.0},
                                    monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-5,threshold=1.1e-12),decomp_network.monod(species='DOM1',k=1e-8,threshold=1.1e-14)],
                                    rate_constant=1.0e-9,reactiontype='MICROBIAL')

# C2H3O2- + 2 H2O -> 2 CO2 + 7 H+ + 8 e-
# 2 O2 + 8 H+ + 8 e- -> 4 H2O
acetate_resp = decomp_network.reaction(name='Acetate aerobic respiration',reactant_pools={'Acetate-':1.0,'O2(aq)':2.0},product_pools={'HCO3-':2.0,'H+':2.0,'Tracer':2.0},
                                    monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-5,threshold=1.1e-12),decomp_network.monod(species='Acetate-',k=1e-8,threshold=1.1e-14)],
                                    rate_constant=1.0e-9,reactiontype='MICROBIAL')

# C2H3O2- + 2 H2O -> 2 CO2 + 7 H+ + 8 e-
# 8 Fe+++ + 8 e- -> 8 Fe++
Fe_reduction = decomp_network.reaction(name='Fe reduction',reactant_pools={'Acetate-':1.0,'Fe+++':8.0},product_pools={'HCO3-':2.0,'Fe++':8.0,'H+':9.0,'Tracer':2.0},
                                    monod_terms=[decomp_network.monod(species='Acetate-',k=2e-3,threshold=1.1e-15),decomp_network.monod(species='Fe+++',k=1.3e-12,threshold=1.1e-15)],
                                    inhibition_terms=[decomp_network.inhibition(species='O2(aq)',k=6.25e-8,type='MONOD')],
                                    rate_constant=2e-10,reactiontype='MICROBIAL')

# C2H3O2- + H+ -> CH4 + HCO3- + H+
Methanogenesis = decomp_network.reaction(name='Methanogenesis',reactant_pools={'Acetate-':1.0},product_pools={'CH4(aq)':1.0,'HCO3-':1.0, 'Tracer':1.0},
                                    monod_terms=[decomp_network.monod(species='Acetate-',k=2e-3,threshold=1.1e-15), ],
                                    inhibition_terms=[decomp_network.inhibition(species='O2(aq)',k=6.25e-8,type='MONOD'),decomp_network.inhibition(species='Fe+++',k=6.25e-9,type='MONOD')],
                                    rate_constant=1e-10,reactiontype='MICROBIAL')

# --- Assemble network variants and run the PFLOTRAN simulations ---
fermentation_network = decomp_network.decomp_network(pools=pools_lowFe,reactions=[hydrolysis,fermentation,DOM_resp,acetate_resp,Fe_reduction])
fermentation_network_Fe = decomp_network.decomp_network(pools=pools,reactions=[hydrolysis,fermentation,DOM_resp,acetate_resp,Fe_reduction,Methanogenesis])
fermentation_network_Fe_CH4 = decomp_network.decomp_network(pools=pools_lowFe,reactions=[hydrolysis,fermentation,DOM_resp,acetate_resp,Fe_reduction,Methanogenesis])
fermentation_network_highO2 = decomp_network.decomp_network(pools=pools_highO2,reactions=[hydrolysis,fermentation,DOM_resp,acetate_resp,Fe_reduction,Methanogenesis])

result,units=decomp_network.PF_network_writer(fermentation_network).run_simulation('SOMdecomp_template.txt','fermentation',pflotran_exe,print_output=False,length_days=simlength)
result_Fe,units_Fe=decomp_network.PF_network_writer(fermentation_network_Fe).run_simulation('SOMdecomp_template.txt','fermentation',pflotran_exe,print_output=False,length_days=simlength)
result_Fe_CH4,units_Fe_CH4=decomp_network.PF_network_writer(fermentation_network_Fe_CH4).run_simulation('SOMdecomp_template.txt','fermentation',pflotran_exe,print_output=False,length_days=simlength)
result_highO2,units_highO2=decomp_network.PF_network_writer(fermentation_network_highO2).run_simulation('SOMdecomp_template.txt','fermentation',pflotran_exe,print_output=False,length_days=simlength)

# Star import supplies figure/subplot/plot/legend/log10/diff etc. below.
from pylab import *
result,units=plot_pf_output.convert_units(result,units,'M')
result_Fe,units_Fe=plot_pf_output.convert_units(result_Fe,units_Fe,'M')
result_Fe_CH4,units_Fe_CH4=plot_pf_output.convert_units(result_Fe_CH4,units_Fe_CH4,'M')
result_highO2,units_highO2=plot_pf_output.convert_units(result_highO2,units_highO2,'M')

figure('Network diagram',figsize=(11.8,4.8));clf()
ax=subplot(121)
decomp_network.draw_network(fermentation_network_Fe_CH4,omit=['secondary','surf_complex','NH4+','Rock(s)'],arrowstyle='-|>')
title('Decomposition network diagram (without complexes)')
ax=subplot(122)
decomp_network.draw_network(fermentation_network_Fe_CH4,omit=['NH4+','Rock(s)'],arrowstyle='-|>')
title('Decomposition network diagram (with aqueous complexes)')

# Line styles throughout: '-' anaerobic base case, '--' with Fe(III),
# ':' with methanogenesis, '-.' high O2.
figure('Cellulose sim');clf()
subplot(311)
# ax.set_yscale('log')
handles=[]
for pool in ['cellulose','Total CH4(aq)']:
    l=plot(result[pool],label=pool)[0]
    plot(result_Fe[pool],ls='--',c=l.get_color())
    plot(result_Fe_CH4[pool],ls=':',c=l.get_color())
    plot(result_highO2[pool],ls='-.',c=l.get_color())
    handles.append(l)

l=plot(result['Total Tracer']+result['HRimm'],label='CO2 produced')[0]
handles.append(l)
plot(result_Fe['Total Tracer']+result_Fe['HRimm'],c=l.get_color(),ls='--')
plot(result_Fe_CH4['Total Tracer']+result_Fe_CH4['HRimm'],c=l.get_color(),ls=':')
plot(result_highO2['Total Tracer']+result_highO2['HRimm'],c=l.get_color(),ls='-.')

# handles.append(Line2D([0,0],[0,0],color='k',ls='-',label='Aerobic'))
# handles.append(Line2D([0,0],[0,0],color='k',ls='--',label='With Fe(III)'))
# handles.append(Line2D([0,0],[0,0],color='k',ls=':',label='With methanogenesis'))
# handles.append(Line2D([0,0],[0,0],color='k',ls='-.',label='Anaerobic'))

legend(handles=handles,fontsize='small',ncol=2)
title('Concentrations')
ylabel('Concentration (M)')
xlabel('Time (days)')

# figure('Cellulose sim pH and log',figsize=(6,8));clf()
subplot(313)
l=plot(-log10(result['Free H+']),label='Anaerobic')[0]
plot(-log10(result_highO2['Free H+']),c=l.get_color(),ls='-.',label='Aerobic')
plot(-log10(result_Fe['Free H+']),c=l.get_color(),ls='--',label='With Fe(III)')
plot(-log10(result_Fe_CH4['Free H+']),c=l.get_color(),ls=':',label='With methanogenesis')
legend(fontsize='small')
title('pH')
ylabel('pH')
xlabel('Time (days)')

ax=subplot(312)
ax.set_yscale('log')
# for pool in ['Free DOM1','Free Acetate-']:
#     l=plot(result[pool],label=pool)[0]
# #     plot(result_Fe[pool],ls='--',c=l.get_color())
# #     plot(result_Fe_CH4[pool],ls=':',c=l.get_color())
#     plot(result_highO2[pool],ls='-.',c=l.get_color())
#
# title('Concentrations (log scale)')
# ylabel('Concentration (M)')
# xlabel('Time (days)')
# legend(fontsize='small',ncol=1)
# ylim(1e-15,1e-1)

# Production rates: numerical derivative (diff over the time axis).
l=plot(result.index.values[:-1],diff(result['Total CH4(aq)'])/diff(result.index.values),label='CH4')[0]
plot(result_highO2.index.values[:-1],diff(result_highO2['Total CH4(aq)'])/diff(result_highO2.index.values),ls='-.',c=l.get_color())
plot(result_Fe.index.values[:-1],diff(result_Fe['Total CH4(aq)'])/diff(result_Fe.index.values),ls='--',c=l.get_color())
plot(result_Fe_CH4.index.values[:-1],diff(result_Fe_CH4['Total CH4(aq)'])/diff(result_Fe_CH4.index.values),ls=':',c=l.get_color())

# l=plot(result.index.values[:-1],diff(result['Total Fe++'])/diff(result.index.values),label='Fe(II)')[0]
l=plot(result_Fe.index.values[:-1],diff(result_Fe['Total Fe++'])/diff(result_Fe.index.values),ls='--',label='Fe++')[0]
plot(result_highO2.index.values[:-1],diff(result_highO2['Total Fe++'])/diff(result_highO2.index.values),ls='-.',c=l.get_color())
# plot(result_Fe_CH4.index.values[:-1],diff(result_Fe_CH4['Total Fe++'])/diff(result_Fe_CH4.index.values),ls=':',c=l.get_color())

l=plot(result.index.values[:-1],diff(result['Total Tracer']+result['HRimm'])/diff(result.index.values),label='CO2')[0]
# plot(result_highO2.index.values[:-1],diff(result_highO2['Total Tracer']+result_highO2['HRimm'])/diff(result_highO2.index.values),ls='-.',c=l.get_color())
plot(result_Fe.index.values[:-1],diff(result_Fe['Total Tracer']+result_Fe['HRimm'])/diff(result_Fe.index.values),ls='--',c=l.get_color())
plot(result_Fe_CH4.index.values[:-1],diff(result_Fe_CH4['Total Tracer']+result_Fe_CH4['HRimm'])/diff(result_Fe_CH4.index.values),ls=':',c=l.get_color())

title('Production rates')
ylabel('Rate (M/day)')
xlabel('Time (days)')
legend(fontsize='small')

tight_layout()

# CTC network with fermentation
simlength=365*10

# CTC decomposition network
decomp_network_CTC=decomp_network.decomp_network()
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='SOIL1',CN=  12.,constraints={'initial':1e-10},kind='immobile') )
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='SOIL2',CN=  12.,constraints={'initial':1e-10},kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='SOIL3',CN=  10.,constraints={'initial':1e-10},kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='SOIL4',CN=  10.,constraints={'initial':1e-10},kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='LITR1',constraints={'initial':1e2},initCN=20,kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='LITR2',constraints={'initial':1e-10},initCN=20,kind='immobile') )
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='LITR3',constraints={'initial':1e-10},initCN=20,kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='CWD',constraints={'initial':1e-10},initCN=20,kind='immobile'))
decomp_network_CTC.add_pool(decomp_network.decomp_pool(name='HRimm',constraints={'initial':1e-10},kind='immobile'))

# Reuse all aqueous/gas/mineral pools from the fermentation network
# (pools[0] is cellulose, which the CTC network replaces with LITR pools).
for pool in pools[1:]:
    decomp_network_CTC.add_pool(pool)

# CWD decomposition to litter
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'CWD':1.0},product_pools={'LITR2':0.76,'LITR3':0.24},
                rate_constant=0.00010,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)],
                name='CWD fragmentation'))

# Litter decomposition
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'LITR1':1.0},product_pools={'SOIL1':0.61,'HCO3-':1-0.61},
                rate_constant=1.204,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='LITR1 decomposition',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'LITR2':1.0},product_pools={'SOIL2':0.45,'HCO3-':1-0.45},
                rate_constant=0.0726,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='LITR2 decomposition',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'LITR3':1.0},product_pools={'SOIL3':0.71,'HCO3-':1-0.71},
                rate_constant=0.0141,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='LITR3 decomposition',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))

# SOM decomposition
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'SOIL1':1.0},product_pools={'SOIL2':0.72,'HCO3-':1-0.72},
                rate_constant=0.0726,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='SOIL1 decomp',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'SOIL2':1.0},product_pools={'SOIL3':0.54,'HCO3-':1-0.54},
                rate_constant=0.0141,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='SOIL2 decomp',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'SOIL3':1.0},product_pools={'SOIL4':0.45,'HCO3-':1-0.45},
                rate_constant=0.00141,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='SOIL3 decomp',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))
decomp_network_CTC.add_reaction(decomp_network.reaction(reactant_pools={'SOIL4':1.0},product_pools={'HCO3-':1.0},
                rate_constant=0.0001,rate_units='1/d',turnover_name='RATE_DECOMPOSITION',name='SOIL4 decomp',reactiontype='SOMDECOMP',
                monod_terms=[decomp_network.monod(species='O2(aq)',k=1e-8,threshold=1e-12)]))

# Reuse the aqueous reactions defined above for the fermentation network.
decomp_network_CTC.add_reaction(DOM_resp)
decomp_network_CTC.add_reaction(Fe_reduction)
decomp_network_CTC.add_reaction(acetate_resp)
decomp_network_CTC.add_reaction(fermentation)
decomp_network_CTC.add_reaction(Methanogenesis)
decomp_network_CTC.add_reaction( decomp_network.reaction(name='hydrolysis',reactant_pools={'LITR1':1.0},product_pools={'DOM1':1.0},
                                    rate_constant=1e-1,rate_units='y', # Jianqiu Zheng et al., 2019: One third of fermented C is converted to CO2
                                    inhibition_terms=[decomp_network.inhibition(species='DOM1',type='MONOD',k=1e-5),decomp_network.inhibition(species='O2(aq)',k=6.25e-8,type='MONOD')]))

CTC_result,CTC_units=decomp_network.PF_network_writer(decomp_network_CTC).run_simulation('SOMdecomp_template.txt','CTC',pflotran_exe,length_days=simlength)

# Run with low Fe mineral concentration to cut off Fe reduction pathway
decomp_network_CTC_lowFe=decomp_network_CTC.copy()
decomp_network_CTC_lowFe.nodes['Fe(OH)3']['constraints']={'initial':'0.0d-5 1. m^2/m^3'}
CTC_result_lowFe,CTC_units_lowFe=decomp_network.PF_network_writer(decomp_network_CTC_lowFe).run_simulation('SOMdecomp_template.txt','CTC',pflotran_exe,length_days=simlength)

# Run with abundant oxygen so aerobic CTC reactions will proceed
decomp_network_CTC_highO2=decomp_network_CTC.copy()
decomp_network_CTC_highO2.nodes['O2(aq)']['constraints']={'initial':1.0e1}
CTC_result_highO2,CTC_units_highO2=decomp_network.PF_network_writer(decomp_network_CTC_highO2).run_simulation('SOMdecomp_template.txt','CTC',pflotran_exe,length_days=simlength)

figure('CTC network');clf()
decomp_network.draw_network(decomp_network_CTC,omit=['secondary','Rock(s)','NH4+'],arrowstyle='-|>')

# Line styles: '-.' aerobic, '--' anaerobic high Fe(III), ':' anaerobic low Fe(III).
figure('CTC results');clf()
CTC_result,CTC_units=plot_pf_output.convert_units(CTC_result,CTC_units,'M')
CTC_result_highO2,CTC_units_highO2=plot_pf_output.convert_units(CTC_result_highO2,CTC_units_highO2,'M')
CTC_result_lowFe,CTC_units_lowFe=plot_pf_output.convert_units(CTC_result_lowFe,CTC_units_lowFe,'M')
subplot(311)
handles=[]
for pool in ['LITR1C','SOIL1','SOIL2','SOIL3','SOIL4']:
    l=plot(CTC_result_highO2[pool],label=pool,ls='-.')[0]
    plot(CTC_result[pool],ls='--',c=l.get_color())
    plot(CTC_result_lowFe[pool],ls=':',c=l.get_color())
    handles.append(l)

handles.append(Line2D([0],[0],c='k',ls='-.',label='Aerobic'))
handles.append(Line2D([0],[0],c='k',ls='--',label='Anaerobic, high Fe(III)'))
handles.append(Line2D([0],[0],c='k',ls=':',label='Anaerobic, low Fe(III)'))

legend(handles=handles,fontsize='small',ncol=2)
ylabel('Concentration (M)')
xlabel('Time (days)')
title('CTC SOM pools')

subplot(313)
l=plot(-log10(CTC_result['Free H+']),label='With Fe(III)')[0]
plot(-log10(CTC_result_highO2['Free H+']),c=l.get_color(),ls='-.',label='Aerobic')
plot(-log10(CTC_result_lowFe['Free H+']),c=l.get_color(),ls=':',label='Low Fe(III)')
legend(fontsize='small')
title('pH')
ylabel('pH')
xlabel('Time (days)')

subplot(312)
l=plot(CTC_result_highO2['HRimm']+CTC_result_highO2['Total Tracer'],label='CO2',ls='-.')[0]
plot(CTC_result['HRimm']+CTC_result['Total Tracer'],c=l.get_color(),ls='--')
plot(CTC_result_lowFe['HRimm']+CTC_result_lowFe['Total Tracer'],c=l.get_color(),ls=':')
l=plot(CTC_result_highO2['Total CH4(aq)'],label='CH4',ls='-.')[0]
plot(CTC_result['Total CH4(aq)'],ls='--',c=l.get_color())
plot(CTC_result_lowFe['Total CH4(aq)'],ls=':',c=l.get_color())
title('Cumulative production')
ylabel('Production (M)')
xlabel('Time (days)')
legend(fontsize='small')

tight_layout()

figure('Log concentrations');clf()
ax=subplot(111)
ax.set_yscale('log')
for pool in ['Total DOM1','Free Acetate-','Total Fe+++','Total Fe++','Total CH4(aq)']:
    l=plot(CTC_result[pool],label=pool)[0]
    # plot(result_Fe[pool],'--',color=l.get_color())

ylabel('Concentration (M)')
xlabel('Time (days)')
title('Log aqueous concentrations')
ax.set_xlim(left=-10,right=365)
ax.set_ylim(bottom=1e-12)
legend(fontsize='small')

tight_layout()

show()
import re
from os import path
import glob
import chardet

from .base import Base


class Source(Base):
    """Deoplete source that completes BibTeX cite keys in \\cite-like commands.

    Keys are gathered from ``*.bib`` files next to the edited TeX file and
    from ``\\bibitem`` definitions in sibling ``*.tex`` files.
    """

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'latex-bib'
        self.filetypes = ['tex']
        self.input_pattern = r'(\\cite{|\\citep{|\\citet{)[^"#\'()={}%\\]*?'
        self.input_pattern_re = re.compile(
            r'(\\cite{|\\citep{|\\citet{)([^"#\'()={}%\\]*?,)*'
        )
        self.mark = '[bib]'
        self.cite_key_re = re.compile(r'{.+,')
        self.bibitem_re = re.compile(r'\\bibitem(?:\[\d+\])?\{(.+?)\}')
        # Cache of detected encodings keyed by file path; assumes a file's
        # encoding never changes during the editing session.
        self.bib_encs = {}

    def get_complete_position(self, context):
        """Return the column where completion starts, or -1 if not applicable.

        The position is the end of the \\cite{...-style match that lies
        before and nearest to the cursor.
        """
        if not self.input_pattern_re.search(context['input']):
            return -1
        cursor_pos = self.vim.call('getcurpos')[2]
        match_pos = 0
        while True:
            match = self.input_pattern_re.search(context['input'], match_pos)
            if match is None or match.end() >= cursor_pos:
                break
            match_pos = match.end()
        return match_pos

    def _file_encoding(self, detector, filename):
        """Detect (and cache) the text encoding of *filename* via chardet."""
        if filename not in self.bib_encs:
            detector.reset()
            # Fix: the original iterated `open(filename, 'rb')` directly and
            # leaked the file handle; `with` guarantees it is closed.
            with open(filename, 'rb') as f:
                for line in f:
                    detector.feed(line)
            detector.close()
            self.bib_encs[filename] = detector.result['encoding']
        return self.bib_encs[filename]

    def gather_candidates(self, context):
        """Collect cite keys from sibling .bib files and \\bibitem entries."""
        candidates = []
        # Bib/TeX files living in the same directory as the edited file.
        file_dir = self.vim.call('expand', '%:p:h')
        enc_detector = chardet.UniversalDetector()

        # Search cite keys within bib files.
        # TODO: Make candidates `dict`, and add `abbr` key to show bib info.
        for bib in glob.glob(path.join(file_dir, '*.bib')):
            encoding = self._file_encoding(enc_detector, bib)
            with open(bib, 'r', encoding=encoding) as f:
                for line in f:
                    if line.startswith('@'):
                        match = self.cite_key_re.search(line)
                        # Fix: guard against entry lines without a trailing
                        # comma, which previously raised TypeError.
                        if match:
                            # Strip the leading '{' and trailing ','.
                            candidates.append(match[0][1:-1])

        # Search cite keys defined by \bibitem within TeX files.
        for tex in glob.glob(path.join(file_dir, '*.tex')):
            encoding = self._file_encoding(enc_detector, tex)
            with open(tex, 'r', encoding=encoding) as f:
                for line in f:
                    match = self.bibitem_re.search(line)
                    if match:
                        candidates.append(match[1])

        return candidates
import unittest

import pyfribidi


class TestFribidi(unittest.TestCase):
    """Verify pyfribidi's logical-to-visual reordering of a mixed-direction string."""

    # Logical-order input mixing RTL (Arabic/Persian) and LTR (Latin) runs.
    text = "سلام test تست"
    # Expected visual-order output after BiDi reordering and shaping.
    result = 'ﺖﺴﺗ test ﻡﻼﺳ'

    def test_log2vis(self):
        visual = pyfribidi.log2vis(self.text)
        self.assertEqual(self.result, visual)


if __name__ == '__main__':
    unittest.main()
"""data_analysis URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from django.conf.urls import url, include from data.views.views import * from rest_framework import routers from rest_framework.urlpatterns import format_suffix_patterns from rest_framework.authtoken import views router = routers.DefaultRouter() # router.register(r'md', common.ModuleNameList, basename='md') router.register(r'cpu', CpuMessageSet) urlpatterns = [ path('admin/', admin.site.urls), # # path('add/', common.add_latest_info), path('system/', get_system_info), path('fs/', get_first_system_info), # path('index/', common.index), path('home/', home), url(r'^', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls')), url(r'^api-token-auth/', views.obtain_auth_token), url(r'^memory/$', memory_list), path('memory/<int:pk>/', memory_control), url(r'^ssd/$', SolidStateDiskList.as_view()), url(r'^ssd/(?P<pk>[0-9]+)/$', SolidStateControl.as_view()), url(r'^net/$', NetList.as_view()), url(r'^net/(?P<pk>[0-9]+)/$', NetControl.as_view()), url(r'^net2/$', NetToList.as_view()), url(r'^net2/(?P<pk>[0-9]+)/$', NetToControl.as_view()), url(r'^user/$', MyUserList.as_view()), url(r'^user/(?P<pk>[0-9]+)/$', MyUserControl.as_view()), url(r'^sys/$', SystemInfoList.as_view()), ] # urlpatterns = format_suffix_patterns(urlpatterns)
"""Detect a moving ball in video via frame differencing + blob detection.

Reads a video file, subtracts consecutive frames, thresholds/blurs the
difference image, and runs OpenCV's SimpleBlobDetector on the result,
displaying both the annotated frame and the difference image until the
video ends or Esc is pressed.
"""
import cv2
import os
import numpy as np
import time
from time import sleep
import copy

from multiprocessing.pool import ThreadPool
from collections import deque

print(cv2.__version__)
print('Esc - End')

# define framerate and the display period (ms per frame) derived from it
framerate = 236
showTime = int((1 / framerate) * 1000)

# thresholds for circle detection (unused in the blob-detector path below)
highThresh = 75
accumThresh = 30

# min and max radius of circle
minRad = 10
maxRad = 20

# minimum distance between multiple circles allowed
minDist = 1000

# threshold for filtering out noise when subtracting images
differenceThresh = 150

# threshold for filtering out erroneous circles
# circles that appear at an angle too different from the previous pair will be filtered out
angleThresh = 10

# threshold for list length
# if angle list is > threshold, ball has been detected
listThresh = 7

# amount of time to sleep after ball detection
sleepOnSuccess = 4
lastSuccessTime = time.time() - sleepOnSuccess

# To ensure multiples aren't added
circleAdded = False

# red and green colors for circle display (BGR order)
red = (0, 0, 255)
green = (0, 255, 0)

# for easy file access: pick the third file in the test-data directory
#rootDir = '../testData/fastCamCaps/'
rootDir = '../testData/756/'
fileList = os.listdir(rootDir)
path = fileList[2]
print(path)

# video capture, takes file path as arg
# integer value for integrated webcam / usb cameras
vidcap = cv2.VideoCapture(rootDir + path)
#vidcap = cv2.VideoCapture(1)
#vidcap.set(cv2.CAP_PROP_FPS, framerate)

# read the first frame; returns boolean for success/fail and the frame as an ndarray
success, prevFrame = vidcap.read()

# create windows
cv2.namedWindow('Blob Detection')
cv2.namedWindow('Difference')

# ---- blob-detector tuning: area/circularity/convexity/inertia gates ----
params = cv2.SimpleBlobDetector_Params()

params.minThreshold = 70
params.maxThreshold = 90

params.filterByArea = True
params.minArea = 314
params.maxArea = 1256

params.filterByCircularity = True
params.minCircularity = 0.4

params.filterByConvexity = True
params.minConvexity = 0.8

params.filterByInertia = True
params.maxInertiaRatio = .8
params.minInertiaRatio = .2

params.filterByColor = False
# ----

detector = cv2.SimpleBlobDetector_create(params)

# main loop of algorithm
while success:
    # read in frame
    success, frame = vidcap.read()
    # exit on fail
    if success == False:
        break

    #diff = cv2.medianBlur(frame, 3) - cv2.medianBlur(prevFrame, 3)
    # NOTE(review): uint8 subtraction wraps around for negative differences;
    # presumably the threshold below is relied on to discard that noise —
    # confirm, or consider cv2.absdiff.
    diff = frame - prevFrame

    # deep copy of frame
    # 'prevFrame = frame' actually saves frame's address
    prevFrame = copy.copy(frame)

    diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)

    # perform preprocessing: binarize, then smooth to merge fragments
    ret, diff = cv2.threshold(diff, differenceThresh, 255, cv2.THRESH_BINARY)
    diff = cv2.medianBlur(diff, 15)
    diff = cv2.GaussianBlur(diff, (9, 9), 0)

    # perform blob detection on the cleaned difference image
    blobs = detector.detect(diff)
    for blob in blobs:
        print(blob.pt)

    diffWithBlobs = cv2.drawKeypoints(diff, blobs, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    frameWithBlobs = cv2.drawKeypoints(frame, blobs, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # show images in the windows
    cv2.imshow('Blob Detection', frameWithBlobs)
    cv2.imshow('Difference', diffWithBlobs)

    # Esc (27) quits; waitKey also pumps the GUI event loop
    #key = cv2.waitKey(0)
    key = cv2.waitKey(showTime)
    if key == 27 or not success:
        break

# destroy windows and release file/camera handle
# important, don't remove
cv2.destroyAllWindows()
vidcap.release()
print('done')
""" Operadores Lógicos and, or, not in e not in """ """ nome = "Juliana" if 'Ju' not in nome: print('Executei.') else: print("Existe o texto.") """ usuario = input('Nome de usuário: ') senha = input('Senha do usuário: ') usuario_bd = 'Juliana' senha_bd = '123456' if usuario_bd == usuario and senha_bd == senha: print('Você está logado no sistema') else: print('Você não está logado no sistema.')
import pandas as pd
import numpy as np


def preprocessing(data, keep_columns, target_column, target_values):
    """Balance a binary-classification dataframe by downsampling the majority class.

    Keeps only `keep_columns` (NaNs filled with 'other'), then, if the class
    given by `target_values[0]` makes up more than 60% (or less than 40%) of
    the rows, randomly drops rows of the majority class until the classes are
    roughly balanced.

    NOTE: this method needs at least around 1k rows in the minority class.
    https://machinelearningmastery.com/what-is-imbalanced-classification/

    :param data: input DataFrame
    :param keep_columns: columns to retain
    :param target_column: name of the binary target column
    :param target_values: two-element sequence with the target's class labels
    :return: DataFrame containing only `keep_columns` (helper columns removed)
    """
    df = data[keep_columns].fillna('other')
    if df.empty:
        # Robustness: avoid dividing by zero when computing the imbalance.
        return df

    # Fix: assign the random values as an array, not a DataFrame — the old
    # pd.DataFrame assignment aligned on the index and produced NaNs whenever
    # `data` did not have a default RangeIndex.
    df['rand'] = np.random.uniform(0, 1, len(df))
    imbalance = len(df.loc[df[target_column] == target_values[0]]) / len(df)
    if imbalance > .6:
        # target_values[0] is the majority class: keep a fraction x of it.
        x = (1 - imbalance) / imbalance
        df['Flagged'] = 0
        df.loc[(df[target_column] == target_values[0]) & (df['rand'] >= x), 'Flagged'] = 1
        df = df.loc[df['Flagged'] == 0].drop(columns=['Flagged', 'rand'])
    elif imbalance < .4:
        # target_values[1] is the majority class.
        x = imbalance / (1 - imbalance)
        df['Flagged'] = 0
        df.loc[(df[target_column] == target_values[1]) & (df['rand'] >= x), 'Flagged'] = 1
        df = df.loc[df['Flagged'] == 0].drop(columns=['Flagged', 'rand'])
    else:
        # Fix: the helper 'rand' column previously leaked into the returned
        # frame when the data was already balanced.
        df = df.drop(columns=['rand'])
    return df


def predict(model, data):
    """Run model predictions and append result columns to `data` in place.

    Adds 'Prediction' plus the two class-probability columns returned by
    `model.predict_proba`.

    :param model: fitted estimator exposing predict / predict_proba
    :param data: feature DataFrame (also the returned object)
    :return: `data` with the three new columns
    """
    data['Prediction'] = model.predict(data)
    # Fix: align probabilities on data's own index — the bare pd.DataFrame
    # previously produced NaNs for frames without a default RangeIndex.
    proba = pd.DataFrame(model.predict_proba(data), index=data.index)
    data[['Probability4True', 'Probability4False']] = proba
    return data
class ListaPlanograma(object):
    """Static in-memory catalog of planograms, used as mock data."""

    # Each planogram is linked to a PDM whose id matches its own.
    lista_planograma = [
        {"id": n, "nome": "planograma {}".format(n), "pdm": {"id": n}}
        for n in (1, 2, 3)
    ]
"""Scrapers for three online dictionaries: Cambridge, Merriam-Webster and
Online Etymology.  Each class downloads a word page and extracts a plain
dict structure from the HTML via BeautifulSoup."""
import re
import logging
from copy import copy
from bs4 import BeautifulSoup
from .util import download_page, rebuild_string


class CamBridge:
    """Scraper for dictionary.cambridge.org (optionally english-<language>)."""

    base_url = 'https://dictionary.cambridge.org/dictionary/'

    def __init__(self, language):
        # `language` selects the bilingual dictionary (e.g. 'chinese-traditional');
        # empty/None falls back to the monolingual English dictionary.
        self.language = language
        self.prefix_url = self.base_url + ('english-{language}/'.format(language=language.strip()) if language else 'english/')

    def extract_head(self, head):
        """Extract headword text, part of speech and (for verbs) grammar codes
        from a `pos-header dpos-h` block."""
        word = {
            'text': head.find('span', class_='hw dhw').text,
            'pos' : head.find('span', class_='pos dpos').text if head.find('span', class_='pos dpos') else None
        }
        if word['pos'] == 'verb':
            word['grammar'] = ' '.join(g.text for g in head.find_all('span', class_='gc dgc'))
        return word

    def extract_body(self, body):
        """Yield one dict per definition block: definition text, translation,
        examples, grammar codes and whether it belongs to a phrase."""
        # NOTE(review): `recurisive` is a typo for `recursive`.  As written,
        # BeautifulSoup treats it as an attribute filter (match tags WITHOUT a
        # `recurisive` attribute), so the search is effectively recursive over
        # all def-blocks.  Fixing the spelling would restrict the search to
        # direct children and may change results — verify against live HTML
        # before changing.
        for def_block in body.find_all('div', class_='def-block ddef_block', recurisive=False):
            define = rebuild_string(text.strip() for text in def_block.find('div', class_='ddef_h').find('div', class_='def ddef_d db').find_all(text=True) if text.strip())
            grammar = def_block.find_all('span', class_='gc dgc')
            # Translation spans only exist on bilingual pages.
            translate = def_block.find('div', class_='def-body ddef_b').find('span', class_='trans dtrans dtrans-se') if self.language else None
            examples = []
            for example_soup in def_block.find_all('div', class_='examp dexamp'):
                eng_example = rebuild_string(text.strip() for text in example_soup.find('span', class_='eg deg').find_all(text=True) if text.strip())
                trans_example = example_soup.find('span', class_='trans dtrans dtrans-se hdb') if self.language else None
                examples.append({
                    'text': eng_example,
                    'translate': trans_example.text if trans_example else '無翻譯'
                })
            body_data = {
                'define': {
                    'text': define,
                    'translate': translate.text if translate else '無翻譯'
                },
                'examples': examples,
                'phrases': []
            }
            if grammar:
                body_data['grammar'] = ' '.join(g.text for g in grammar)
            # Definition blocks nested under a phrase body carry the phrase title.
            if def_block.find_parent()['class'][0] == 'phrase-body':
                body_data['type'] = 'phrase'
                body_data['phrase'] = def_block.find_parent().previous_sibling.find('span', class_='phrase-title dphrase-title').text.strip()
            else:
                body_data['type'] = 'define'
            yield body_data

    def find_word(self, soup):
        """Yield one word dict per dictionary entry on the page, with phrase
        definitions attached to the preceding plain definition."""
        for word_soup in soup.find_all('div', class_='pr entry-body__el'):
            word_head = word_soup.find('div', class_='pos-header dpos-h')
            word_body = word_soup.find('div', class_='pos-body')
            word = self.extract_head(word_head)
            word['defines'] = []
            for body in self.extract_body(word_body):
                if body.pop('type') == 'define':
                    word['defines'].append(body)
                else:
                    # Phrase with no preceding definition: it becomes its own
                    # define entry (and is also listed in its own phrases).
                    if not len(word['defines']):
                        word['defines'].append(body)
                    word['defines'][-1]['phrases'].append(body)
            yield word

    def find_phrase(self, soup):
        """Yield one dict per phrasal-verb / idiom block on the page."""
        for phrase_soup in soup.find_all('div', class_=re.compile('(pv|idiom)-block')):
            text = phrase_soup.find('h2', class_='headword tw-bw dhw dpos-h_hw').text.strip()
            pos_pkt = phrase_soup.find('div', class_='pos-header dpos-h')
            pos = pos_pkt.find('span', class_='pos dpos').text.strip() if pos_pkt else None
            word = {
                'text': text,
                'pos': pos,
                'defines': []
            }
            for define_soup in phrase_soup.find_all('div', class_=re.compile('^pr dsense')):
                for body in self.extract_body(define_soup):
                    if body.pop('type') == 'define':
                        word['defines'].append(body)
                    else:
                        if not len(word['defines']):
                            word['defines'].append(body)
                        word['defines'][-1]['phrases'].append(body)
            yield word

    def search(self, keyword):
        """Download the page for `keyword` (spaces become hyphens) and return
        all word entries plus all phrase entries found on it."""
        target = '-'.join(word.strip() for word in keyword.split())
        soup = BeautifulSoup(download_page(self.prefix_url + target), 'html.parser')
        logging.info('Search the query "{}" in CamBridge'.format(target))
        return list(self.find_word(soup)) + list(self.find_phrase(soup))


class MerriamWebster:
    """Scraper for merriam-webster.com ('first known use' and etymology)."""

    base_url = 'https://www.merriam-webster.com/dictionary/'

    def __init__(self):
        self.prefix_url = self.base_url

    def extract_body(self, soup, div_id, text_class):
        """Extract {'type', 'text'} entries from the div with id `div_id`.

        When the section has per-part-of-speech "function labels", nested
        labels are detached first so each label's own text can be separated
        from its content text.  Returns an empty list when the div is absent.
        """
        data_list = []
        anchor = soup.find('div', id=div_id)
        if anchor:
            func_labels = (anchor.find_all('p', class_='function-label'))
            func_label_list = []
            if func_labels:
                for func_label in func_labels:
                    # Detach nested labels so outer labels keep only their own text.
                    sub_func_labels = func_label.findChildren('p', class_='function-label')
                    for sub_func_label in sub_func_labels:
                        sub_func_label.extract()
                    func_label_list.append(func_label)
                for func_label in func_label_list:
                    # NOTE(review): `text` shadows itself in the generator below
                    # and `type` shadows the builtin — works, but fragile.
                    text = func_label.find('p', class_=text_class).text
                    type = func_label.text.replace(text, '').replace(' ', '')
                    data_list.append(
                        {
                            'type': type,
                            'text': rebuild_string(text.strip() for text in text.split())
                        }
                    )
            else:
                data_list.append(
                    {
                        'type': None,
                        'text': rebuild_string(text.strip() for text in anchor.find('p', class_=text_class).text.split())
                    }
                )
        return data_list

    def search(self, keyword):
        """Return first-known-use and etymology data for `keyword`."""
        target = '-'.join(word.strip() for word in keyword.split())
        logging.info('Search the query "{}" in Merriam-Webster'.format(target))
        soup = BeautifulSoup(download_page(self.prefix_url + target), 'html.parser')
        return {
            'first_known_use': self.extract_body(soup, div_id='first-known-anchor', text_class='ety-sl'),
            'etymology': self.extract_body(soup, div_id='etymology-anchor', text_class='et')
        }


class OnlineEtymology:
    """Scraper for etymonline.com (definition text and chart image URL)."""

    base_url = 'https://www.etymonline.com/word/'

    def __init__(self):
        self.prefix_url = self.base_url

    def search(self, keyword):
        """Return {'text', 'image_url'} for `keyword`; values are None when
        the corresponding element is missing from the page."""
        target = '-'.join(word.strip() for word in keyword.split())
        logging.info('Search the query "{}" in OnlineEtymology'.format(target))
        soup = BeautifulSoup(download_page(self.prefix_url + target), 'html.parser')
        # The site's own class name is misspelled ('defination') — keep as-is.
        word_label = soup.find('section', class_='word__defination--2q7ZH')
        chart_label = soup.find('div', class_='chart')
        return {
            'text': word_label.text if word_label else None,
            'image_url': chart_label.get('data-origin-path') if chart_label else None
        }
# Python 2 script (see `.iteritems()` below) that renders a choropleth-style
# map of average county-level sentiment using an old bokeh API
# (`bp.hold()` / stateful `bp.figure`), pyshp and a local `anova` module.
import pandas as pd
import numpy as np
import shapefile
import sys
import math
import bokeh.plotting as bp

# Hack: append common/ to sys.path
sys.path.append("../common")
sys.path.append("../queries")

import anova


# Given a shapeObject return a list of list for latitude and longitudes values
# - Handle scenarios where there are multiple parts to a shapeObj
def getParts ( shapeObj ):
    points = []
    num_parts = len( shapeObj.parts )
    end = len( shapeObj.points ) - 1
    # `parts` holds the start index of each ring; append the end index so the
    # slice pairs below cover every part.
    segments = list( shapeObj.parts ) + [ end ]
    for i in range( num_parts ):
        points.append( shapeObj.points[ segments[i]:segments[i+1] ] )
    return points


# Return a dict with two elements
# - list of list representing latitudes
# - list of list representing longitudes
#
# Input: County tuple & ShapeFile Object
def getDict ( county, shapefile ):
    # NOTE(review): the parameter `shapefile` shadows the imported module of
    # the same name inside this function.
    countyDict = {(county[0], county[1]): {} }
    rec = []
    shp = []
    points = []

    # Select only the records representing the
    # "state_name" and discard all other
    for i in shapefile.shapeRecords( ):
        if i.record[0] == county[0] and i.record[1] == county[1]:
            rec.append(i.record)
            shp.append(i.shape)

    # For each selected shape object get
    # list of points while considering the cases where there may be
    # multiple parts in a single record
    for j in shp:
        for i in getParts(j):
            points.append(i)

    # Prepare the dictionary
    # Seperate the points into two separate lists of lists (easier for bokeh to consume)
    # - one representing latitudes
    # - second representing longitudes
    lat = []
    lng = []
    for i in points:
        lat.append( [j[0] for j in i] )
        lng.append( [j[1] for j in i] )

    countyDict[(county[0], county[1])]['lat_list'] = lat
    countyDict[(county[0], county[1])]['lng_list'] = lng
    return countyDict


def get_color(score, min_score, palette):
    """Map a score to a palette colour by its ratio to `min_score`.

    NOTE(review): assumes score/min_score is in [0, 1) so the index stays in
    range; `score == min_score` would yield index == len(palette), hence the
    special-case clamp to the last palette slot (8).  Confirm score sign/range
    against `anova.county_results`.
    """
    index = int(math.floor((score / min_score) * len(palette)))
    if score == min_score:
        index = 8
    return palette[index]


def county_means(county_results):
    # NOTE(review): `.iteritems()` is Python 2 only; `.items()` works on both.
    means = {key: np.mean(val) for key, val in county_results.iteritems()}
    return means


def generate_webpage():
    """Build us_counties.html: one bokeh patch per county, coloured by its
    mean sentiment (fallback colour for counties without a score)."""
    # Read the ShapeFile
    dat = shapefile.Reader("./shapefile/USCounties.shp")

    d = anova.average_user_scores()
    d2 = anova.county_results(d)
    average_scores = county_means(d2)
    a2 = dict()
    for key in average_scores.keys():
        # NOTE(review): str.strip('City') strips the CHARACTERS C,i,t,y from
        # both ends, not the substring — e.g. 'York City' -> 'York'. Verify
        # this matches the shapefile's county names.
        new_key = key[0].strip('City').strip('County').strip(' ')
        a2[(new_key, key[1])] = average_scores[key]
    min_score = min(a2.values())
    # 9-step sequential orange-red palette (light -> dark).
    palette = ["#FFF7EC", "#FEE8C8", "#FDD49E", "#FDBB84", "#FC8D59", "#EF6548", "#D7301F", "#B30000", "#7F0000"]
    colors = dict()
    for county in a2:
        colors[county] = get_color(a2[county], min_score, palette)

    # Create a list of county tuples of (County, State)
    counties = [(i[0], i[1]) for i in dat.iterRecords()]

    # Create the Plot
    bp.output_file("us_counties.html")
    TOOLS="pan,wheel_zoom,box_zoom,reset,previewsave"
    # NOTE(review): stateful bp.figure/bp.hold/bp.patches is the pre-0.7
    # bokeh API; modern bokeh requires figure objects.
    bp.figure(title="Average county-level sentiment, 2009", tools=TOOLS, plot_width=900, plot_height=800)
    bp.hold()
    count = 0
    for county in counties:
        data = getDict(county, dat)
        if county in colors:
            fill = colors[county]
        else:
            fill = "#20B2AA"  # fallback for counties with no sentiment score
        bp.patches(data[(county[0], county[1])]['lat_list'], data[(county[0], county[1])]['lng_list'], \
                   fill_color = fill, line_color = "black")
    bp.show()


def main():
    generate_webpage()

main()
# Generated by Django 3.1.1 on 2020-09-06 23:11 import cloudinary.models from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('account', '0004_auto_20200906_1315'), ] operations = [ migrations.AddField( model_name='profile', name='school', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='avatar', field=cloudinary.models.CloudinaryField(default='avatar.png', max_length=255, verbose_name='avatar'), ), ]
import sys
from random import randint


def generate_numbers(filename, numbers):
    """Write `numbers` random integers in [1, 1000] to `filename`.

    Values are space-separated with a trailing space — preserved exactly so
    existing consumers that split on whitespace keep working.

    Args:
        filename: path of the output file (overwritten).
        numbers: how many integers to generate (coerced with int()).
    """
    # ''.join of generated tokens replaces the original quadratic `s +=` loop;
    # the context manager guarantees the file handle is closed.
    payload = ''.join(str(randint(1, 1000)) + ' ' for _ in range(int(numbers)))
    with open(filename, "w") as f:
        f.write(payload)


def main():
    """CLI entry point: python script.py <filename> <count>."""
    generate_numbers(sys.argv[1], int(sys.argv[2]))


if __name__ == '__main__':
    main()
#!/usr/bin/python import collections as col f = open('input', 'r') c = 0 # count def find_hash(str): res = col.Counter(str) res = sorted(res.items(), key=lambda pair: (-pair[1], pair[0])) res = list(c[0] for c in res) return ''.join(res)[:5] for l in f: str = l.strip().replace('[','-').replace(']','').split('-') if find_hash(''.join(str[:-2])) == str[-1]: c += int(str[-2]) print "Total sum is", c
import numpy as np
from statistics import mean


def calibrate(time, amplitude):
    """Estimate a pulse rate in beats per minute from a waveform.

    Detects rising edges (amplitude crossing above 4 from a "low" state),
    records their timestamps, and converts the mean period between
    consecutive edges into BPM.

    Args:
        time: sequence of sample timestamps in seconds.
        amplitude: sequence of sample values, same length as `time`.

    Returns:
        int: the estimated BPM, rounded via the original round(bpm + 0.5)
        convention (kept for backward compatibility).

    Raises:
        statistics.StatisticsError: if fewer than two rising edges are found.
    """
    rail = 0  # 1 while the signal is in its "high" state, 0 once it drops low
    rising_edges = []

    # Edge detection over all but the last sample (matches the original
    # `while i < len(time) - 1` loop).  Edges in the first 50 ms are treated
    # as start-up transient and not recorded.
    for t, a in zip(time[:-1], amplitude[:-1]):
        if a > 4 and rail == 0:
            if t > 0.05:
                rising_edges.append(t)
            # NOTE(review): the rail latch is set on every low->high crossing,
            # including sub-50 ms ones — confirm against the original intent.
            rail = 1
        if a < 0.05 and rail == 1:
            rail = 0

    # Periods between consecutive rising edges; mean period -> frequency -> BPM.
    periods = [later - earlier for earlier, later in zip(rising_edges, rising_edges[1:])]
    bpm = (1 / mean(periods)) * 60

    return round(bpm + 0.5)
"""Utilities for getting native speaker audio from Forvo.com. `get_mp3_link` queries Forvo.com for a given word in a given language, and returns the download URL of the audio file from the XML response. """ __author__ = 'shor.joel@gmail.com (Joel Shor)' import logging import urllib2 import xml.etree.ElementTree as ET def _get_forvo_url(word, api_key, language='he'): return 'https://apifree.forvo.com/key/%s/format/xml/action/word-pronunciations/word/%s/language/%s' % ( api_key, word, language) def _get_forvo_xml(word, api_key, language='he'): forvo_url = _get_forvo_url(word, api_key, language) try: response = urllib2.urlopen(forvo_url) except: logging.error('Failed to fetch Forvo URL, possibly because daily limit was reached: %s' % forvo_url) raise return response.read() def get_mp3_link(word, api_key, language='he'): """Returns URL of top rated audio, or None.""" xml_string = _get_forvo_xml(word, api_key, language=language) return _extract_mp3link_from_xml(xml_string) def _extract_mp3link_from_xml(xml_string): tree = ET.fromstring(xml_string) def _get_and_check_unique(item, item_name): sub_items = item.findall(item_name) if len(sub_items) > 1: raise ValueError('Too many `pathmp3` items found.') if not sub_items: raise ValueError('No `pathmp3` item found.') return sub_items[0].text paths_and_ratings = [] for item in tree: pathmp3 = _get_and_check_unique(item, 'pathmp3') rate = int(_get_and_check_unique(item, 'rate')) paths_and_ratings.append((rate, pathmp3)) if paths_and_ratings: # Return highest rated audio. paths_and_ratings.sort(reverse=True) return paths_and_ratings[0][1] else: return None
import matplotlib
# Bug fix: the backend must be selected BEFORE pyplot is imported.  The
# original called matplotlib.use('tkagg') after `import matplotlib.pyplot`,
# where it has no effect (or warns/raises, depending on the version).
matplotlib.use('tkagg')

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.manifold import TSNE


def visualize(train_data_before, test_data_before, train_data_after, test_data_after, args):
    """t-SNE scatter plot of support/query embeddings before vs. after transform.

    Concatenates the four embedding blocks, projects them to 2-D with t-SNE,
    and shows one seaborn scatter plot where points are coloured by class
    ('C1'..'C5') and, for the transformed set, by 'C1 t'..'C5 t'.

    Args:
        train_data_before / train_data_after: support embeddings
            (assumes 5-way, meta_val_way * eval_shot rows — TODO confirm).
        test_data_before / test_data_after: query embeddings
            (meta_val_way * meta_val_query rows).
        args: namespace providing meta_val_way, eval_shot, meta_val_query.
    """
    z = np.concatenate((train_data_before, train_data_after,
                        test_data_before, test_data_after), axis=0)
    z_2d = TSNE(n_components=2).fit_transform(z)

    n_query = args.meta_val_way * args.meta_val_query
    shots = int(args.eval_shot)
    queries_per_class = int(n_query / args.meta_val_way)

    def _labels(class_names):
        # One label per support sample, then one per query sample.
        # (The original comprehensions shadowed the builtins `str` and `iter`.)
        support = [name for name in class_names for _ in range(shots)]
        query = [name for name in class_names for _ in range(queries_per_class)]
        return support + query

    # NOTE(review): label order is [before-support, before-query,
    # after-support, after-query] while the data order above is
    # [train_before, train_after, test_before, test_after] — preserved
    # from the original; confirm the intended pairing.
    y_all = _labels(['C1', 'C2', 'C3', 'C4', 'C5']) + \
            _labels(['C1 t', 'C2 t', 'C3 t', 'C4 t', 'C5 t'])

    plt.close('all')
    sns.scatterplot(x=z_2d[:, 0], y=z_2d[:, 1], hue=y_all, legend='full')
    plt.show()
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# Client stub, servicer base class and server registration helper for the
# `proto.User` CRUD service (Create / Delete / Read / Update).
import grpc

from proto import user_pb2 as proto_dot_user__pb2


class UserStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC, with the matching protobuf
        # (de)serializers.
        self.Create = channel.unary_unary(
            '/proto.User/Create',
            request_serializer=proto_dot_user__pb2.CreateUserInfo.
            SerializeToString,
            response_deserializer=proto_dot_user__pb2.CreateUserResult.
            FromString,
        )
        self.Delete = channel.unary_unary(
            '/proto.User/Delete',
            request_serializer=proto_dot_user__pb2.DeleteUserInfo.
            SerializeToString,
            response_deserializer=proto_dot_user__pb2.DeleteUserResult.
            FromString,
        )
        self.Read = channel.unary_unary(
            '/proto.User/Read',
            request_serializer=proto_dot_user__pb2.ReadUserInfo.
            SerializeToString,
            response_deserializer=proto_dot_user__pb2.ReadUserResult.
            FromString,
        )
        self.Update = channel.unary_unary(
            '/proto.User/Update',
            request_serializer=proto_dot_user__pb2.UpdateUserInfo.
            SerializeToString,
            response_deserializer=proto_dot_user__pb2.UpdateUserResult.
            FromString,
        )


class UserServicer(object):
    # missing associated documentation comment in .proto file
    pass

    # Each handler is a placeholder: concrete servicers override these.
    def Create(self, request, context):
        """create a User
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """delete a User
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Read(self, request, context):
        """Read User
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Update User
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_UserServicer_to_server(servicer, server):
    # Register the servicer's four RPC handlers under the service name.
    rpc_method_handlers = {
        'Create':
        grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=proto_dot_user__pb2.CreateUserInfo.FromString,
            response_serializer=proto_dot_user__pb2.CreateUserResult.
            SerializeToString,
        ),
        'Delete':
        grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=proto_dot_user__pb2.DeleteUserInfo.FromString,
            response_serializer=proto_dot_user__pb2.DeleteUserResult.
            SerializeToString,
        ),
        'Read':
        grpc.unary_unary_rpc_method_handler(
            servicer.Read,
            request_deserializer=proto_dot_user__pb2.ReadUserInfo.FromString,
            response_serializer=proto_dot_user__pb2.ReadUserResult.
            SerializeToString,
        ),
        'Update':
        grpc.unary_unary_rpc_method_handler(
            servicer.Update,
            request_deserializer=proto_dot_user__pb2.UpdateUserInfo.FromString,
            response_serializer=proto_dot_user__pb2.UpdateUserResult.
            SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'proto.User', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler, ))
# Writing to a file: open the file in write mode ('w') and write to it
# with the file object's write() method.
#
# Original example without an explicit encoding (kept commented out):
# data = 'hello'
# f = open('file2.txt', 'w')  # file object f
# f.write(data)
# f.close()

data = '안녕하세요'

# Open with an explicit utf-8 encoding so Hangul is not garbled, and use a
# context manager so the file is closed even if write() fails (the original
# closed it manually).
with open('file2.txt', 'w', encoding='utf-8') as f:  # file object f
    f.write(data)
# Without an encoding the Korean text may appear corrupted;
# saving in utf-8 format keeps it intact.
import pygame as pg

import settings
from constants import OUT_FSCR, OUT_NONE, OUT_QUIT


class Controller():
    """Polls pygame input once per frame and exposes button state.

    Keyboard keys are translated to logical buttons through settings.kmap;
    callers query them via btn_ispressed / btn_event after each poll().
    """

    def __init__(self):
        self._bdown_events = set()  # buttons newly pressed this frame
        self._bpressed = set()  # buttons still pressed right now.

    def poll(self):
        """ toggle fullscreen with F11, quit with ESC or alt-F4.
        returns whether the program should stop """
        kmap = settings.kmap
        valid_keys = settings.kmap.keys()
        # keys being down
        kpressed = pg.key.get_pressed()
        alt_held = kpressed[pg.K_LALT] or kpressed[pg.K_RALT]
        # Snapshot of held buttons, mapped through kmap.
        self._bpressed = set([kmap[k] for k in valid_keys if kpressed[k]])
        # keys that were pressed just now. Also included in kpressed.
        self._bdown_events = set()
        for event in pg.event.get():
            if event.type == pg.QUIT:
                return OUT_QUIT
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    return OUT_QUIT
                elif event.key == pg.K_F4 and alt_held:
                    return OUT_QUIT
                elif event.key in valid_keys:
                    self._bdown_events.add(kmap[event.key])
                # NOTE(review): this branch is unreachable if F11 is ever
                # bound in kmap (the `in valid_keys` branch wins); also,
                # returning here drops the rest of this frame's events.
                elif event.key == pg.K_F11:
                    return OUT_FSCR
        return OUT_NONE

    def btn_ispressed(self, btn):
        # True while the button's key is held down (as of the last poll()).
        return btn in self._bpressed

    def btn_event(self, btn):
        # True only on the frame the button was first pressed.
        return btn in self._bdown_events


# Module-level singleton shared by the rest of the game.
controller = Controller()
# Holds the Settings class; its only method, __init__(), initialises the
# attributes controlling the ship's appearance and speeds.
class Settings():
    """Container for every configurable Alien Invasion game setting."""

    def __init__(self):
        """Initialise the static game settings."""
        # Screen settings.
        self.screen_width = 1200           # window width
        self.screen_height = 800           # window height
        self.bg_color = (230, 230, 230)    # window background colour

        # Ship settings.
        self.ship_speed_factor = 1.5       # float speed gives finer movement control
        self.ship_limit = 2                # spare ships; game pauses on the third loss

        # Bullet settings.
        self.bullet_speed_factor = 3       # a bit faster than the ship
        self.bullet_width = 3              # bullet width in pixels
        self.bullet_height = 15            # bullet height in pixels
        self.bullet_color = (60, 60, 60)   # dark grey bullets
        self.bullets_allowed = 3           # max simultaneous live bullets

        # Alien settings.
        self.alien_speed_factor = 1        # horizontal fleet speed
        self.fleet_drop_speed = 10         # vertical drop when the fleet hits an edge
        # fleet_direction: 1 means moving right, -1 means moving left.
        self.fleet_direction = 1
# Trains a small pybrain softmax network to play MsPacman (RAM observations)
# by reinforcing the actions taken in frames that yielded positive reward.
import gym
import random
import numpy as np
import pdb
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer,SigmoidLayer
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer


def getAction(probs):
    """Map the argmax of the 4 network outputs to an env action id.

    NOTE(review): the +2 offset presumably skips NOOP/FIRE in the Atari
    action set — confirm against the env's action meanings.
    """
    index = np.argmax(probs)
    return index + 2

env = gym.make('MsPacman-ram-v0')
# 128 RAM bytes in -> 32 sigmoid hidden units -> 4-way softmax over actions.
net = buildNetwork(128,32,4, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer)
ds = SupervisedDataSet(128, 4)

for i_episode in range(5000):
    observation = env.reset()
    total_reward = 0
    for t in range(5000):
        env.render()
        flatObservation = np.array(observation).flatten()
        probs = net.activate(flatObservation)
        newobservation, reward, done, info = env.step(getAction(probs))
        total_reward += reward
        # Build the supervised target: push the chosen action's probability
        # towards 1 when the step earned reward, leave the rest unchanged.
        output = []
        if reward > 0:
            for i in range(4):
                if np.argmax(probs) == i:
                    output.append(1)
                else:
                    output.append(probs[i])
        #else:
        #    for i in range(4):
        #        if np.argmax(probs) == i:
        #            output.append(0)
        #        else:
        #            output.append(probs[i])
        # NOTE(review): samples use `observation`, not `flatObservation`, and
        # when reward <= 0 `output` is an empty list — confirm both are intended.
        ds.addSample(observation,output)
        observation = newobservation
        if done:
            # Retrain on the accumulated samples every 10 episodes, then
            # start a fresh dataset.
            if (i_episode + 1) % 10 == 0:
                trainer = BackpropTrainer(net,ds)
                trainer.train()
                ds = SupervisedDataSet(128, 4)
            print("Episode {} finished with reward {}".format(i_episode + 1, total_reward))
            break
'''
@Author: your name
@Date: 2020-06-17 13:06:18
@LastEditTime: 2020-06-19 15:13:45
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /final/task2_crnn/main.py
'''
# Entry point for CRNN training/validation/detection: builds train/valid
# data loaders from CLI arguments and hands them to the project Solver.
import argparse
import os
from Trainer import Solver
from dataLoader import get_loader
from torch.backends import cudnn
import random
from torch.utils.data import sampler


def main(config):
    """Create output directories, build the loaders and dispatch on
    config.mode ('train' / 'val' / anything else -> detect)."""
    cudnn.benchmark = True # to improve the efficiency

    # Create directories if not exist
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)
    config.result_path = os.path.join(config.result_path, config.model_type)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)

    # (Disabled) random hyper-parameter search:
    # lr = random.random()*0.0005 + 0.0000005
    # augmentation_prob = random.random()*0.7
    # epoch = random.choice([100, 150, 200, 250])
    # decay_ratio = random.random()*0.8
    # decay_epoch = int(epoch*decay_ratio)
    #
    # config.augmentation_prob = augmentation_prob
    # config.num_epochs = epoch
    # config.lr = lr
    # config.num_epochs_decay = decay_epoch

    print(config)

    # Notice the difference between these loaders
    # NOTE(review): the train/valid split is hard-coded by row index
    # (0-100000 vs 100000-103943) — confirm it matches the dataset size.
    train_loader = get_loader(config = config,
                              image_path=config.train_path,
                              crop_size=config.crop_size,
                              batch_size=config.batch_size,
                              sampler = sampler.SubsetRandomSampler(range(0,100000)),
                              num_workers=config.num_workers,
                              mode='train',
                              augmentation_prob=config.augmentation_prob)
    valid_loader = get_loader(config = config,
                              image_path=config.valid_path,
                              crop_size=config.crop_size,
                              batch_size=config.batch_size,
                              sampler = sampler.SubsetRandomSampler(range(100000,103943)),
                              num_workers=config.num_workers,
                              mode='valid',
                              augmentation_prob=0.)

    solver = Solver(config, train_loader, valid_loader)

    # Train and sample the images
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'val':
        solver.val()
    else:
        solver.detect()
        # todo: change the test method and write the save prediction function


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # model hyper-parameters
    parser.add_argument('--crop_size', type=int, default=120)

    # training hyper-parameters
    parser.add_argument('--img_ch', type=int, default=3)
    parser.add_argument('--img_H', type=int, default=32)
    parser.add_argument('--num_epochs', type=int, default=200)
    parser.add_argument('--num_epochs_decay', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=2*128)
    parser.add_argument('--num_workers', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--beta1', type=float, default=0.5)        # momentum1 in Adam
    parser.add_argument('--beta2', type=float, default=0.999)      # momentum2 in Adam
    parser.add_argument('--augmentation_prob', type=float, default=0.4)

    parser.add_argument('--log_step', type=int, default=2)
    parser.add_argument('--val_step', type=int, default=2)

    # misc
    parser.add_argument('--mode', type=str, default='val')
    parser.add_argument('--model_type', type=str, default='CRNN')
    parser.add_argument('--model_path', type=str, default='./task2_crnn/models')
    parser.add_argument('--train_path', type=str, default='../data/train/')
    parser.add_argument('--valid_path', type=str, default='../data/test/')
    parser.add_argument('--result_path', type=str, default='../result/')
    parser.add_argument('--text_path', type=str, default='../text/')
    parser.add_argument('--image_root', type=str, default='./data/result/image/')
    parser.add_argument('--landmark_root', type=str, default='./data/result/gt/')
    parser.add_argument('--lstm_hidden', type=int, default=256)
    # todo: validate image num in these folders
    parser.add_argument('--cuda_idx', type=int, default=1)

    config = parser.parse_args() # return a namespace, use the parameters by config.image_size
    main(config)
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate a basic arithmetic expression of non-negative integers
        and the operators + - * / (no parentheses; spaces are ignored).

        Division truncates toward zero, per the problem's convention.
        """
        stack = []
        current = 0
        pending_op = '+'  # operator waiting to be applied to `current`

        def apply(op, value):
            # Fold `value` into the stack: +/- defer to the final sum,
            # while * and / combine immediately with the previous operand.
            if op == '+':
                stack.append(value)
            elif op == '-':
                stack.append(-value)
            elif op == '*':
                stack.append(stack.pop() * value)
            elif op == '/':
                quotient, remainder = divmod(stack.pop(), value)
                # divmod floors; bump negative inexact quotients so the
                # division truncates toward zero instead.
                if quotient < 0 and remainder:
                    quotient += 1
                stack.append(quotient)

        for ch in s:
            if ch.isdigit():
                current = current * 10 + int(ch)
            elif ch in '+-*/':
                apply(pending_op, current)
                current = 0
                pending_op = ch
        apply(pending_op, current)  # flush the trailing operand

        return sum(stack)
class Solution:
    def minSubArrayLen(self, s, nums) -> int:
        """Length of the shortest contiguous subarray whose sum is >= s,
        or 0 when no such subarray exists (sliding-window, O(n))."""
        if sum(nums) < s:
            return 0

        best = len(nums)
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # Shrink from the left while the window still meets the target.
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return best


if __name__ == "__main__":
    nums = [1,4,4]
    s = 4
    test = Solution()
    print(test.minSubArrayLen(s,nums))
import threading
import time

sem = threading.Semaphore()

# Shared counter mutated by thing1.  (The original put `global s` at module
# level, where it has no effect — the declaration belongs inside the
# function that assigns.)
s = 2


def thing1():
    """Sleep briefly, add 3 to the shared counter, then signal completion."""
    global s  # without this, `s = s + 3` raised UnboundLocalError
    time.sleep(1)
    s = s + 3
    sem.release()


def thing2():
    """Print the shared counter."""
    print(s)


# Bug fix: Thread.run() just calls the target in the *current* thread and
# never starts a new one — start() is required.  Joining each thread before
# the next preserves the original strictly sequential ordering (thing1
# finishes updating `s` before thing2 reads it).
t = threading.Thread(target=thing1)
t.start()
t.join()

t1 = threading.Thread(target=thing2)
t1.start()
t1.join()
# Builds (anchor, positive, negative) triplets of tokenized documents for
# metric-learning training: anchor/positive come from the same apikey and
# section, the negative from a different section of the same apikey.
import logging
import random

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from pathlib import Path
from random import choice, sample

# Fixed width of each tokenized document row in the memmap.
MAX_TOKENS_PER_DOC = 256


def get_train_test_apikeys(df, split=0.20):
    """Split the unique apikeys of a pickled DataFrame into train/test sets.

    Args:
        df: path to a pickled DataFrame with 'apikey' and 'row_number' columns.
        split: fraction of apikeys held out for test.

    Returns:
        (train_apikeys, test_apikeys) DataFrames with 'apikey', 'num_posts'
        and 'weights' columns.
    """
    df = pd.read_pickle(df)
    # add weights to the apikeys; these are equivalent to the sqrt of the number of posts
    unique_apikeys = pd.DataFrame(df.groupby(['apikey'])['row_number'].count().reset_index(name='num_posts'))
    # unique_apikeys['weights'] = unique_apikeys[['num_posts']].apply(np.sqrt)
    # NOTE(review): despite the comment above, weights are currently the raw
    # post counts, not their sqrt.  Also `np.float` was removed in NumPy 1.20+
    # — use `float` or `np.float64` on modern NumPy.
    unique_apikeys['weights'] = unique_apikeys[['num_posts']].astype(np.float)
    # split into train and test based on apikey
    train_apikeys, test_apikeys = train_test_split(unique_apikeys, test_size=split)
    return train_apikeys, test_apikeys


def training_generator(df, memmap, apikey_weighted_df):
    """Yield (3, MAX_TOKENS_PER_DOC) int arrays: anchor, positive, negative.

    Args:
        df: path to the pickled posts DataFrame ('apikey', 'section',
            'row_number' columns; 'section' must be categorical).
        memmap: path to a uint16 memmap of shape (len(df), MAX_TOKENS_PER_DOC).
        apikey_weighted_df: output of get_train_test_apikeys() restricting
            and weighting which apikeys are sampled.
    """
    df = pd.read_pickle(df)
    data = df[df['apikey'].isin(apikey_weighted_df['apikey'])].copy(deep=True)
    word_indices = np.memmap(str(memmap), dtype=np.uint16, mode='r', shape=(len(df), MAX_TOKENS_PER_DOC))
    del df
    # anchor = data_subset.copy(deep=True)
    # compare = data_subset.copy(deep=True)
    skip_count = 0
    total_count = 0
    apikey_list = list(set(data['apikey']))  # Uniquify

    # Precompute per-apikey dataframes to speed things up later
    grouped_rows = dict()
    for apikey in apikey_list:
        apikey_df = data[data['apikey'] == apikey].copy(deep=True).drop(columns=['apikey'])
        apikey_df['section'] = apikey_df['section'].cat.remove_unused_categories()
        # One row per section, holding the list of post row_numbers and a
        # normalised sampling weight proportional to the section's size.
        per_apikey_sections = apikey_df.groupby(['section'])['row_number'].apply(list).reset_index()
        per_apikey_sections['weights'] = [float(len(row_number)) for row_number in per_apikey_sections['row_number']]
        per_apikey_sections['weights'] = per_apikey_sections['weights'] / np.sum(per_apikey_sections['weights'])
        per_apikey_sections = per_apikey_sections.set_index('section')
        grouped_rows[apikey] = per_apikey_sections

    # We convert the weights to a probability distribution, then sample rapidly from it
    # by converting the distribution to a cumulative sum and using searchsorted
    # This gives us a random sample from O(n) weights in O(log n) time.
    apikey_choices = apikey_weighted_df['apikey'].tolist()
    apikey_weights = np.array(apikey_weighted_df['weights'].tolist())
    apikey_weights /= np.sum(apikey_weights)
    apikey_cumweights = np.cumsum(apikey_weights)

    while True:
        # Sample an apikey with the weights array
        apikey_idx = np.searchsorted(apikey_cumweights, np.random.rand(), side='left')
        apikey = apikey_choices[apikey_idx]
        apikey_subset = grouped_rows[apikey]
        # Choose a row from the sections in this apikey
        anchor_section_row = apikey_subset.sample(1, weights='weights')
        posts_in_section = anchor_section_row.iloc[0].row_number
        anchor_section = anchor_section_row.index[0]
        # Anchor and positive: two distinct posts from the same section.
        # NOTE(review): raises ValueError if the section has fewer than 2 posts.
        anchor_rownum, positive_rownum = sample(posts_in_section, 2)
        anchor_vector = word_indices[anchor_rownum]
        positive_vector = word_indices[positive_rownum]
        # Sample 2 potential negatives in case we get the same section again
        negative_sections = apikey_subset.sample(2, weights='weights').index.tolist()
        if negative_sections[0] == anchor_section:
            negative_section = negative_sections[1]
        else:
            negative_section = negative_sections[0]
        negative_rownum = choice(apikey_subset.loc[negative_section].row_number)
        negative_vector = word_indices[negative_rownum]
        # We store the data as np.uint16 to save space, but we definitely want a more normal
        # data type before it goes to Pytorch
        # NOTE(review): `np.int` was removed in NumPy 1.24 — use plain `int`.
        yield np.stack([anchor_vector, positive_vector, negative_vector]).astype(np.int)


def main():
    # NOTE(review): this smoke-test is broken as written:
    # get_train_test_apikeys has no `memmap_directory` parameter (it takes a
    # pickle path), and training_generator requires three arguments
    # (df, memmap, apikey_weighted_df).  Needs real paths to run.
    MEMMAP_DIRECTORY = Path('/media/data/tokenized_crawl/')
    train, _ = get_train_test_apikeys(memmap_directory=MEMMAP_DIRECTORY, split=0.20)
    batch = training_generator(MEMMAP_DIRECTORY, train)
    print(next(batch)[0].shape)


if __name__ == '__main__':
    main()
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#

# Concrete stream definitions for the Facebook Marketing source: each class
# wires one Marketing API collection into the shared FBMarketing* base
# streams (full-refresh, incremental, or reversed-incremental).

import base64
import logging
from typing import Any, Iterable, List, Mapping, Optional, Set

import pendulum
import requests
from airbyte_cdk.models import SyncMode
from cached_property import cached_property
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.adaccount import AdAccount as FBAdAccount
from facebook_business.adobjects.adimage import AdImage
from facebook_business.adobjects.user import User

from .base_insight_streams import AdsInsights
from .base_streams import FBMarketingIncrementalStream, FBMarketingReversedIncrementalStream, FBMarketingStream

logger = logging.getLogger("airbyte")


def fetch_thumbnail_data_url(url: str) -> Optional[str]:
    """Request thumbnail image and return it embedded into the data-link"""
    # Any request failure (network error or non-200 status) is logged and
    # swallowed: thumbnails are best-effort and must not fail the sync.
    try:
        response = requests.get(url)
        if response.status_code == requests.status_codes.codes.OK:
            _type = response.headers["content-type"]
            data = base64.b64encode(response.content)
            return f"data:{_type};base64,{data.decode('ascii')}"
        else:
            logger.warning(f"Got {repr(response)} while requesting thumbnail image.")
    except requests.exceptions.RequestException as exc:
        logger.warning(f"Got {str(exc)} while requesting thumbnail image.")
    return None


class AdCreatives(FBMarketingStream):
    """AdCreative is append only stream
    doc: https://developers.facebook.com/docs/marketing-api/reference/ad-creative
    """

    entity_prefix = "adcreative"
    enable_deleted = False

    def __init__(self, fetch_thumbnail_images: bool = False, **kwargs):
        super().__init__(**kwargs)
        # When enabled, each record gets an inlined base64 thumbnail.
        self._fetch_thumbnail_images = fetch_thumbnail_images

    @cached_property
    def fields(self) -> List[str]:
        """Remove "thumbnail_data_url" field because it is computed field and it's not a field that we can request from Facebook"""
        return [f for f in super().fields if f != "thumbnail_data_url"]

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read with super method and append thumbnail_data_url if enabled"""
        for record in super().read_records(sync_mode, cursor_field, stream_slice, stream_state):
            if self._fetch_thumbnail_images:
                record["thumbnail_data_url"] = fetch_thumbnail_data_url(record.get("thumbnail_url"))
            yield record

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_ad_creatives(params=params)


class CustomConversions(FBMarketingStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/custom-conversion"""

    entity_prefix = "customconversion"
    enable_deleted = False

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_custom_conversions(params=params)


class Ads(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/adgroup"""

    entity_prefix = "ad"

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_ads(params=params)


class AdSets(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign"""

    entity_prefix = "adset"

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_ad_sets(params=params)


class Campaigns(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign-group"""

    entity_prefix = "campaign"

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_campaigns(params=params)


class Activities(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/ad-activity"""

    entity_prefix = "activity"
    cursor_field = "event_time"
    primary_key = None

    def list_objects(self, fields: List[str], params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_activities(fields=fields, params=params)

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Main read method used by CDK"""
        loaded_records_iter = self.list_objects(fields=self.fields, params=self.request_params(stream_state=stream_state))
        for record in loaded_records_iter:
            if isinstance(record, AbstractObject):
                yield record.export_all_data()  # convert FB object to dict
            else:
                yield record  # execute_in_batch will emmit dicts

    def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Additional filters associated with state if any set"""
        state_value = stream_state.get(self.cursor_field)
        since = self._start_date if not state_value else pendulum.parse(state_value)

        # If include_deleted was just turned on, older (deleted) records may
        # now be visible, so restart from the configured start date.
        potentially_new_records_in_the_past = self._include_deleted and not stream_state.get("include_deleted", False)
        if potentially_new_records_in_the_past:
            self.logger.info(f"Ignoring bookmark for {self.name} because of enabled `include_deleted` option")
            since = self._start_date

        return {"since": since.int_timestamp}


class Videos(FBMarketingReversedIncrementalStream):
    """See: https://developers.facebook.com/docs/marketing-api/reference/video"""

    entity_prefix = "video"

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        # Remove filtering as it is not working for this stream since 2023-01-13
        return self._api.account.get_ad_videos(params=params, fields=self.fields)


class AdAccount(FBMarketingStream):
    """See: https://developers.facebook.com/docs/marketing-api/reference/ad-account"""

    use_batch = False
    enable_deleted = False

    def get_task_permissions(self) -> Set[str]:
        """https://developers.facebook.com/docs/marketing-api/reference/ad-account/assigned_users/"""
        res = set()
        me = User(fbid="me", api=self._api.api)
        for business_user in me.get_business_users():
            assigned_users = self._api.account.get_assigned_users(params={"business": business_user["business"].get_id()})
            for assigned_user in assigned_users:
                if business_user.get_id() == assigned_user.get_id():
                    res.update(set(assigned_user["tasks"]))
        return res

    @cached_property
    def fields(self) -> List[str]:
        properties = super().fields
        # https://developers.facebook.com/docs/marketing-apis/guides/javascript-ads-dialog-for-payments/
        # To access "funding_source_details", the user making the API call must have a MANAGE task permission for
        # that specific ad account.
        # NOTE(review): get_task_permissions() is called (and hits the API)
        # up to twice here; the result could be cached locally.
        if "funding_source_details" in properties and "MANAGE" not in self.get_task_permissions():
            properties.remove("funding_source_details")
        if "is_prepay_account" in properties and "MANAGE" not in self.get_task_permissions():
            properties.remove("is_prepay_account")
        return properties

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        """noop in case of AdAccount"""
        return [FBAdAccount(self._api.account.get_id())]


class Images(FBMarketingReversedIncrementalStream):
    """See: https://developers.facebook.com/docs/marketing-api/reference/ad-image"""

    def list_objects(self, params: Mapping[str, Any]) -> Iterable:
        return self._api.account.get_ad_images(params=params, fields=self.fields)

    def get_record_deleted_status(self, record) -> bool:
        return record[AdImage.Field.status] == AdImage.Status.deleted


class AdsInsightsAgeAndGender(AdsInsights):
    breakdowns = ["age", "gender"]


class AdsInsightsCountry(AdsInsights):
    breakdowns = ["country"]


class AdsInsightsRegion(AdsInsights):
    breakdowns = ["region"]


class AdsInsightsDma(AdsInsights):
    breakdowns = ["dma"]


class AdsInsightsPlatformAndDevice(AdsInsights):
    breakdowns = ["publisher_platform", "platform_position", "impression_device"]
    # FB Async Job fails for unknown reason if we set other breakdowns
    # my guess: it fails because of very large cardinality of result set (Eugene K)
    action_breakdowns = ["action_type"]


class AdsInsightsActionType(AdsInsights):
    breakdowns = []
    action_breakdowns = ["action_type"]
import discord import os import requests import json import random from replit import db import asyncio import logging from discord.ext import commands import random class Fun(commands.Cog): def __init__(self, client): self.client = client #hi @commands.command() async def hi(self, ctx): embed=discord.Embed(title="Hello", color=0x00f900) await ctx.send(embed=embed) def setup(client): client.add_cog(Fun(client))
valores = list() for cont in range(0,10): valores.append(int(input(f"Informe o valor {cont+1}: "))) print(valores) for indice,cont in range(0,10): if valores[itens] % 2 == 0: valores.pop(itens) #valores remove (cont) print(f"Lista sem os valores pares: {valores}\n\n")
from django import forms from rbac import models from django.utils.safestring import mark_safe # 角色的Form class RoleForm(forms.ModelForm): class Meta: model = models.Role fields = ['name'] widgets = { 'name': forms.widgets.Input(attrs={"class": 'form-control'}) } ICON_LIST = [[i[0], mark_safe(i[1])] for i in [ ['fa-address-book', '<i aria-hidden="true" class="fa fa-address-book"></i>'], ['fa-address-book-o', '<i aria-hidden="true" class="fa fa-address-book-o"></i>'], ['fa-address-card', '<i aria-hidden="true" class="fa fa-address-card"></i>'], ['fa-address-card-o', '<i aria-hidden="true" class="fa fa-address-card-o"></i>'], ['fa-adjust', '<i aria-hidden="true" class="fa fa-adjust"></i>'], ['fa-american-sign-language-interpreting', '<i aria-hidden="true" class="fa fa-american-sign-language-interpreting"></i>'], ['fa-anchor', '<i aria-hidden="true" class="fa fa-anchor"></i>'], ['fa-archive', '<i aria-hidden="true" class="fa fa-archive"></i>'], ['fa-area-chart', '<i aria-hidden="true" class="fa fa-area-chart"></i>'], ['fa-arrows', '<i aria-hidden="true" class="fa fa-arrows"></i>'] ]] # 菜单的Form class MenuForm(forms.ModelForm): class Meta: model = models.Menu fields = ['title', 'weight', 'icon', ] widgets = { 'title': forms.widgets.Input(attrs={"class": 'form-control'}), 'weight': forms.widgets.Input(attrs={"class": 'form-control'}), 'icon': forms.widgets.RadioSelect(choices=ICON_LIST), } class PermissionForm(forms.ModelForm): class Meta: model = models.Permission # fields = '__all__' fields = ['title', 'url', 'name', 'parent', 'menu'] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for field in self.fields.values(): field.widget.attrs.update({'class': 'form-control'})
a=[3,1,2,4] c=[] d=[] for i in range(len(a)): if(a[i]%2==0): c.append(a[i]) c.sort() else: d.append(a[i]) d.sort() c=c+d print(c)
from flask import Flask, jsonify, request from flasgger import Swagger app = Flask(__name__) app.config['SWAGGER'] = { 'title': 'API doc', 'uiversion': 3, 'openapi' : '3.0.2' } @app.route('/ping', methods=['GET']) def ping(): return "The service is up and running :)" @app.route('/api', methods=['POST']) def api(): """This endpoint runs an API in the backgraound and return corresponding response in the form of json --- definitions: output: type: object properties: output1: type: string output2: type: string output3: type: string input: type: object properties: input1: type: string input2: type: string input3: type: string requestBody: content: application/json: schema: $ref: '#/definitions/input' responses: 200: description: schema: $ref: '#/definitions/output' """ # read input input = request.json # Call the api <<<here>>> output_sample = { 'output1': input["input1"], 'output2': input["input2"], 'output3': input["input3"], } return jsonify(output_sample) swagger = Swagger(app)
# -*- coding: utf-8 -*- from django.conf.urls import url import events.views urlpatterns = [ url(r'^accounts/lists', events.views.edit_subscription_lists), url(r'^events/_edit', events.views.edit_subscription_list), url('^events/_load_events$', events.views.events_list_items, name='events_list_items'), url('^events/events.rss$', events.views.events_rss), url('^events/embed_legacy$', events.views.events_embed_legacy), url('^events/_save_list_note$', events.views.save_list_note), url('^events/([\w\-]+)$', events.views.events_show_feed), ]
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 10:36:13 2019

@author: LXI-294-VINU
"""

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QObject, pyqtSlot
from MainWindowV13 import Ui_MainWindow
import sys
from Model import Model


class MainWindowUIClass( Ui_MainWindow ):
    """View/controller glue between the generated Qt UI (Ui_MainWindow) and
    the Model, which wraps a CIFAR-10 dataset + CNN test/train pipeline.

    The runXxx methods are thin wrappers: each forwards to the corresponding
    Model method and then pops an information dialog.
    """

    def __init__( self ):
        '''Initialize the super class
        '''
        super().__init__()
        self.model = Model()
        self.debug = True  # enables verbose debugPrint output

    def setupUi( self, MW ):
        ''' Setup the UI of the super class, and add here code
        that relates to the way we want our UI to operate.
        '''
        super().setupUi( MW )

        # close the lower part of the splitter to hide the
        # debug window under normal operations
        ##self.splitter.setSizes([300, 0])

    # --- Model wrappers: each runs one model step and reports success. ---

    def runCifarTest(self):
        self.model.runCifarTest()
        self.displayMessage("CIFAR Test Ran Successfully "+self.model.testOutput)

    def runDisplayStatistics(self):
        # Shows stats for the batch/sample selected in the spin boxes,
        # rendering into the text browser and the sPlot canvas.
        self.model.displayStats(self.textBrowser, self.spinBoxBatch.value(),
                                self.spinBoxSample.value(), self.sPlot)
        self.displayMessage("DisplayStatistics Ran Successfully")

    def runDataTests(self):
        self.model.runDataTests()
        self.displayMessage("DataTests Ran Successfully")

    def runImageInputTests(self):
        self.model.runImageInputTests()
        self.displayMessage("ImageInputTests Ran Successfully")

    def runKeepProbTests(self):
        self.model.runKeepProbTests()
        self.displayMessage("KeepProbTests Ran Successfully")

    def runLabelInputTests(self):
        self.model.runLabelInputTests()
        self.displayMessage("LabelInputTests Ran Successfully")

    def runNormalisationTests(self):
        self.model.runNormalisationTests()
        self.displayMessage("NormalisationTests Ran Successfully")

    def runOneHotEncodeTests(self):
        self.model.runOneHotEncodeTests()
        self.displayMessage("OneHotEncodeTests Ran Successfully")

    def runFullyConvLayerTests(self):
        self.model.runFullyConvLayerTests()
        self.displayMessage("FullyConvLayerTests Ran Successfully")

    def runConvMaxLayerTest(self):
        self.model.runConvMaxLayerTest()
        self.displayMessage("ConvMaxLayerTest Ran Successfully")

    def runOutputLayerTests(self):
        self.model.runOutputLayerTests()
        self.displayMessage("OutputLayerTests Ran Successfully")

    def runFlattenLayerTests(self):
        self.model.runFlattenLayerTests()
        self.displayMessage("FlattenLayerTests Ran Successfully")

    def runCNNNetworkTests(self):
        self.model.runCNNNetworkTests()
        self.displayMessage("CNNNetworkTests Ran Successfully")

    def runTrainingTests(self):
        self.model.runTrainingTests()
        self.displayMessage("TrainingTests Ran Successfully")

    def runPreProcessAndSave(self):
        self.model.runPreProcessAndSave()
        self.displayMessage("PreProcessAndSave Ran Successfully")

    def runShowStats(self):
        self.model.runShowStats()
        self.displayMessage("ShowStats Ran Successfully")

    def runTrainOnSingleBatch(self):
        self.model.runTrainOnSingleBatch()
        self.displayMessage("TrainOnSingleBatch Ran Successfully")

    def runFullyTrainModel(self):
        # Hyperparameters come from free-text line edits; int()/float() will
        # raise ValueError on non-numeric input — no validation here.
        self.model.runFullyTrainModel(int(self.lineEditEpoch.text()),
                                      int(self.lineEditBatchSize.text()),
                                      float( self.lineEditKeepProb.text()))
        self.displayMessage("FullyTrainModel Ran Successfully")

    def runClassificationOnTestData(self):
        self.model.runClassificationOnTestData(self.cPlot)
        self.displayMessage("Classification on Test Data Completed Successfully")

    def refreshAll(self ):
        # Push current model state back into the three line edits.
        self.lineEditFolder.setText( self.model.get_cifar10_dataset_folder_path())
        self.lineEditTar.setText(self.model.get_floyd_cifar10_location())
        self.lineEditURL.setText(self.model.get_url())

    def debugPrint( self, msg ):
        '''Print the message in the text edit at the bottom of the
        horizontal splitter.
        '''
        self.splitter.setSizes([300, 300])
        self.textBrowserInfo.append( msg )

    # slot
    def returnPressedFolderSlot( self ):
        ''' Called when the user enters a string in the line edit and
        presses the ENTER key.
        '''
        self.debugPrint( "RETURN key pressed in LineEditFolder widget" )
        folderName =  self.lineEditFolder.text()
        if self.model.isValidFolder( folderName ):
            self.model.setFolderName( self.lineEditFolder.text() )
            self.debugPrint( "CIFAR Folder Identified" )
            self.refreshAll()
        else:
            m = QtWidgets.QMessageBox()
            m.setText("Invalid folder!\n" + folderName )
            m.setIcon(QtWidgets.QMessageBox.Warning)
            m.setStandardButtons(QtWidgets.QMessageBox.Ok
                                 | QtWidgets.QMessageBox.Cancel)
            m.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            ret = m.exec_()
            self.lineEditFolder.setText( "" )
            self.debugPrint( "Invalid folder specified: " + folderName  )

    def returnPressedTarSlot( self ):
        ''' Called when the user enters a string in the line edit and
        presses the ENTER key.
        '''
        self.debugPrint( "RETURN key pressed in LineEditTar widget" )
        tarName =  self.lineEditTar.text()
        if self.model.isValid( tarName ):
            self.model.setTarName( self.lineEditTar.text() )
            # NOTE(review): called with no args here but as
            # extractTar(self.textBrowserInfo) in browseTarSlot — confirm the
            # Model method's signature accepts both.
            self.model.extractTar()
            self.debugPrint( "Tar Extracted" )
            self.refreshAll()
        else:
            m = QtWidgets.QMessageBox()
            m.setText("Invalid Tar File!\n" + tarName )
            m.setIcon(QtWidgets.QMessageBox.Warning)
            m.setStandardButtons(QtWidgets.QMessageBox.Ok
                                 | QtWidgets.QMessageBox.Cancel)
            m.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            ret = m.exec_()
            self.lineEditTar.setText( "" )
            self.debugPrint( "Invalid Tar file specified: " + tarName  )

    def returnPressedURLSlot( self ):
        ''' Called when the user enters a string in the line edit and
        presses the ENTER key.
        '''
        self.debugPrint( "RETURN key pressed in LineEditURL widget" )
        URLName =  self.lineEditURL.text()
        if self.model.isValidURL( URLName ):
            self.model.URLName = self.lineEditURL.text()
            self.model.downloadTar(self.textBrowserInfo, self);
            self.debugPrint( "Tar Downloaded" )
            self.refreshAll()
        else:
            m = QtWidgets.QMessageBox()
            m.setText("Invalid URL!\n" + URLName )
            m.setIcon(QtWidgets.QMessageBox.Warning)
            m.setStandardButtons(QtWidgets.QMessageBox.Ok
                                 | QtWidgets.QMessageBox.Cancel)
            m.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            ret = m.exec_()
            self.lineEditURL.setText( "" )
            self.debugPrint( "Invalid URL specified: " + URLName  )
        # NOTE(review): indentation of this trailing block is ambiguous in the
        # source; placed at method level it re-assigns the URL even after the
        # invalid branch above — looks like a copy-paste remnant, confirm.
        if URLName:
            self.debugPrint( "setting URL: " + URLName )
            self.model.URLName = URLName
            self.refreshAll()

    # slot
    def writeDocSlot( self ):
        ''' Called when the user presses the Write-Doc button.
        '''
        self.model.writeDoc( self.textEdit.toPlainText() )
        self.debugPrint( "Write-Doc button pressed" )

    # slot
    def browseFolderSlot( self ):
        ''' Called when the user presses the Browse button
        '''
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        folderName = QtWidgets.QFileDialog.getExistingDirectory(
                        None,
                        "QFileDialog.getOpenFileName()",
                        "",
                        options=options)
        if folderName:
            self.debugPrint( "setting CIFAR Data folder name: " + folderName )
            self.model.setFolderName( folderName )
            self.refreshAll()

    def browseTarSlot( self ):
        ''' Called when the user presses the Browse button
        '''
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        tarName, _ = QtWidgets.QFileDialog.getOpenFileName(
                        None,
                        "QFileDialog.getOpenFileName()",
                        "",
                        "Tar Files (*.tar.gz)",
                        options=options)
        if tarName:
            self.debugPrint( "setting tar file name: " + tarName )
            self.model.setTarName( tarName )
            self.model.extractTar(self.textBrowserInfo)
            self.refreshAll()
            # self.model.displayStats( self.debugTextBrowser, 1, 5 )

    def displayMessage(self, text):
        # Generic modal info dialog used by all the runXxx wrappers.
        m = QtWidgets.QMessageBox()
        m.setText(text)
        m.setIcon(QtWidgets.QMessageBox.Information)
        m.setStandardButtons(QtWidgets.QMessageBox.Ok
                             | QtWidgets.QMessageBox.Cancel)
        m.setDefaultButton(QtWidgets.QMessageBox.Cancel)
        ret = m.exec_()


def main():
    """
    This is the MAIN ENTRY POINT of our application.  The code at the end
    of the mainwindow.py script will not be executed, since this script is now
    our main program.   We have simply copied the code from mainwindow.py here
    since it was automatically generated by '''pyuic5'''.

    """
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = MainWindowUIClass()
    ui.setupUi(MainWindow)
    ui.refreshAll()
    MainWindow.show()
    sys.exit(app.exec_())


# NOTE(review): unconditional call — the GUI launches on *import* as well as on
# direct execution; consider an `if __name__ == "__main__":` guard.
main()
# coding: utf-8 import sys sys.path.insert(0, '/srv/http/LINE-bot') from chat import app as application
# !pip -q install nltk requests
#
# TODO please clean me up, OO me.  This is raw notebook sludge.
#
# Pipeline overview (linear script, order-dependent):
#   1. pull recent articles for a city from BigQuery (GDELT-derived table)
#   2. sentence-split + clean each article, embed sentences via a BERT service
#   3. LDA over documents; score sentence "interesting-ness"
#   4. combine scaled LDA vectors with BERT sentence centroids, autoencode to
#      32 dims, k-means into C clusters
#   5. upload a breadth-first cluster walk ("feed") and cluster labels
#      ("themes") back to BigQuery
import requests
import os
import random
import numpy as np
import pandas as pd
import nltk
import json
import re
from tqdm import tqdm
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import sent_tokenize, word_tokenize
from google.cloud import bigquery
from sklearn.decomposition import LatentDirichletAllocation
from collections import Counter
from sklearn import metrics
import keras
from keras.layers import Input, Dense
from keras.models import Model
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')

# City comes from the command line (all args joined); defaults to SF.
if len(sys.argv) > 1:
    a = sys.argv
    a.pop(0)  # skip command name
    city_name = ' '.join(a)
else:
    city_name = 'San Francisco'
print("Creating news cluster for "+city_name)

# hyperparameters
project_id = 'octo-news'
no_topics = 32  # number of sentence topics of LDA
C = 16  # Number of sentence clusters of [LDA,Bert]
no_features = 256

# Bert
BERT_HTTP = 'http://bert.scott.ai/encode'
BERT_TENSOR_SIZE = 768
BERT_TENSOR_DISTANCE = "angular"
BERT_TENSOR_DB = "bert.db"
BERT_API_COUNT = 0

#---------------
print("Downloading data for "+city_name)

# get our sentence tokenizer
nltk.download('punkt')

client = bigquery.Client(project=project_id)

# Dedupe by keyimage (keep newest per image) and require a non-empty image.
query = """
SELECT distinct(url_orig) as url, keyimage as image,
domain_root as domain, page_title as title, score, date, z, page_ftxt as text
FROM (
  SELECT *, ROW_NUMBER() OVER (PARTITION BY keyimage ORDER BY date desc) rn
  FROM `octo-news.gdelt_sa.daily_reputable_refresh`
  where length(keyimage) > 0
  and lower(city) like '%$CITY%'
  limit 50000
) t
WHERE rn = 1
"""
q = client.query(query.replace('$CITY',city_name.lower()))
for row in q:
    print("url={} title={}".format(row['url'], row['title']))
    break
data_df = q.to_dataframe()  # Download all

# setup some constants to configure the Bert service
data = []

# Columnar in-memory stores (later wrapped in DataFrames):
# db: one row per accepted article; vecdb: one row per embedded sentence.
db = {'doc_id': [], 'url': [], 'z': [], 'city': [], 'title': [],
      'image_url': [], 'author': [], 'domain': [], 'date': [], 'vindex': []}
vecdb = {'sid': [], 'doc_id': [], 'text': [], 'tensor': []}


# Simple BERT utilities
def bertify_array(text_array):
    "Turn an array of text, text_array, into an array of tensors.  Sentences are best."
    global BERT_API_COUNT
    # eid is our encoding id, which we really don't use as
    # bert is synchronous over http.
    r = requests.post(BERT_HTTP, json={"id": BERT_API_COUNT,
                                       "texts": text_array,
                                       "is_tokenized": False})
    v = r.json()
    BERT_API_COUNT += 1
    try:
        if (v['status'] == 200):
            return np.array(v['result'])
        else:
            print("Unexpected Bert status: ",v['status'])
    except:
        print("Unexpected Bert error: ",sys.exc_info()[0])
    return None


def bertify(text):
    "Turn text into a tensor, sentences are best."
    ans = bertify_array([text])
    if ans is not None:
        ans = ans[0]
    return ans


# make sure our images are clean
def valid_image(url):
    "Return True if url is a valid image"
    r = requests.get(url)
    if r.status_code == 200:
        kind = r.headers.get('Content-Type','')
        return kind.lower().startswith('image')
    return False


def insert_db_entry(entry):
    # Append one article row to db; returns its new doc_id.
    global city_name
    n = len(db['doc_id'])
    # NOTE(review): 'vindex' stores len(vecdb), which is the number of *keys*
    # in the vecdb dict (always 4) — presumably len(vecdb['sid']) was meant;
    # confirm whether vindex is ever consumed downstream.
    info = {'doc_id': n, 'url': entry['url'], 'title': entry['title'],
            'image_url': entry['image'], 'city': city_name, 'author':'',
            'z': entry['z'], 'domain': entry['domain'], 'date': entry['date'],
            'vindex': len(vecdb)}
    for key in info:
        db[key].append(info[key])
    return n


def insert_vecdb_entries(doc_id, sents, vecs):
    for i in range(0,len(vecs)):
        # get our sentence id
        sid = len(vecdb['sid'])
        # record our sentence and point to the db entry
        # that tells us more about the article from which
        # it came
        vecdb['sid'].append(sid)
        vecdb['doc_id'].append(doc_id)
        vecdb['text'].append(sents[i])
        vecdb['tensor'].append(vecs[i])


def process_entry(entry):
    # Validate, clean, embed, and store one article; returns the sentence
    # list on success, None on rejection (bad image / BERT failure).
    if (not valid_image(entry['image'])):
        return None
    sents = first_clean_sentences(entry['text'])
    clean_title_sent = sentences(entry['title'])
    if (len(clean_title_sent) > 0):
        # we've seen blank titles, and entire docs as titles
        clean_title = clean_title_sent[0]
        if (len(clean_title) > 0):
            sents.insert(0,clean_title)
    vecs = bertify_array(sents)
    if vecs is None:
        return None
    n = insert_db_entry(entry)
    insert_vecdb_entries(n, sents, vecs)
    return sents


def first_clean_sentences(text, k=50):
    # First k sentences that are non-trivial (not paragraph markers, >10 chars).
    sent = sentences(text)
    valid = []
    # k clean sentences
    while len(sent) > 0 and k > 0:
        s = sent.pop(0)
        if s.find('EOP') < 0 and len(s) > 10:
            valid.append(s)
            k = k-1
    return valid


def sentences(text):
    text = clean_text(text)
    return sent_tokenize(text)


def clean_text(text):
    # Normalize whitespace, mark paragraph breaks as ' EOP ', strip HTML,
    # and re-punctuate common extraction artifacts.
    r1 = re.compile(r' (\w+)\.(\w+) ')
    r2 = re.compile(r' - ')
    text = text.replace("\n\n"," EOP ")
    #text = text.replace(".",". ")
    text = text.replace("\t"," ")
    text = text.replace("\n"," ")
    text = remove_html_tags(text)
    text = re.sub(r1,r' \1. \2 ',text,99)
    text = re.sub(r2,'. ',text,99)
    return text


def remove_html_tags(text):
    """Remove html tags from a string"""
    clean = re.compile('<.*?>')
    return re.sub(clean, '', text)


def showv(v):
    # Debug helper: print the article backing sentence-vector index v.
    n = vecdb['doc_id'][v]
    sentence = vecdb['text'][v]
    print("  "+db['domain'][n]+": "+db['title'][n])
    print("  \""+sentence+"\"")
    print("  "+db['url'][n])


#---------------
# load sample data
print("Loading data into memory...\n")
data=[]
from urllib.parse import urlparse
for index, row in data_df.iterrows():
    who = urlparse(row['url']).netloc
    if index < 10:
        print(who,':',row['title'])
    data.append(row)
print("Loaded",len(data),"items.")


# process the sample data of ~1000 articles (5 min)
def bert_do(n=1000):
    # Process up to n articles, scanning at most 2n rows to absorb rejects.
    count = 0
    for i in tqdm(range(0,min(n*2,len(data)))):
        #print(data[i]['page_title'])
        if process_entry(data[i]) is not None:
            count += 1
        if (count == n):
            break


#---------------
print("Pulling the most recent 2000 articles with valid images")
bert_do(2000)


# From github.com/scottspace/contextual_topic_identification
class Autoencoder:
    """
    Simple autoencoder for learning latent space representation
    architecture simplified for only one hidden layer
    """
    def __init__(self, latent_dim=32, activation='relu', epochs=200, batch_size=128):
        self.latent_dim = latent_dim
        self.activation = activation
        self.epochs = epochs
        self.batch_size = batch_size
        self.autoencoder = None
        self.encoder = None
        self.decoder = None
        self.his = None

    def _compile(self, input_dim):
        """
        compile the computational graph
        """
        input_vec = Input(shape=(input_dim,))
        encoded = Dense(self.latent_dim, activation=self.activation)(input_vec)
        decoded = Dense(input_dim, activation=self.activation)(encoded)
        self.autoencoder = Model(input_vec, decoded)
        self.encoder = Model(input_vec, encoded)
        encoded_input = Input(shape=(self.latent_dim,))
        decoder_layer = self.autoencoder.layers[-1]
        self.decoder = Model(encoded_input, self.autoencoder.layers[-1](encoded_input))
        self.autoencoder.compile(optimizer='adam', loss=keras.losses.mean_squared_error)

    def fit(self, X, verbose=0):
        if not self.autoencoder:
            self._compile(X.shape[1])
        X_train, X_test = train_test_split(X)
        # NOTE(review): epochs/batch_size are hard-coded here, ignoring the
        # self.epochs / self.batch_size set in __init__ — confirm intent.
        self.his = self.autoencoder.fit(X_train, X_train,
                                        epochs=200,
                                        batch_size=128,
                                        shuffle=True,
                                        validation_data=(X_test, X_test),
                                        verbose=verbose)


# create dataframes for testing
#---------------
print("Setting up for analysis with Pandas")
doc_df = pd.DataFrame(data=db)
sent_df = pd.DataFrame(data=vecdb)

#---------------
print("Creating document corpus")
nDocs = np.max(doc_df['doc_id'])
DocText = [" "]*nDocs
for i in range(nDocs):
    text_i = "\n ".join(sent_df[sent_df.doc_id == i]['text'].values.flatten())
    DocText[i] = text_i

#---------------
print("Creating sentence corpus")
nSents = np.max(sent_df['sid'])
SentText = [" "]*nSents
for i in range(nSents):
    text_i = "\n ".join(sent_df[sent_df.sid == i]['text'].values.flatten())
    SentText[i] = text_i

## Create word frequency
## We want words that are in at least 3 articles,
## but no more than 25% of the corpus.
##
#---------------
print("Counting word frequencies")
corpus = DocText
#corpus = SentText
vectorizer = CountVectorizer(min_df=3, max_df=0.25)
DocX = vectorizer.fit_transform(corpus)
print("Found",len(vectorizer.get_feature_names()),"interesting document words")

#---------------
print("Calculating TF/IDF values")
tfv = TfidfVectorizer(min_df=3, max_df=0.25, stop_words='english')
doc_tf = tfv.fit_transform(corpus)


def describe_doc(d_i):
    # Print a doc's title/url plus its top-8 TF-IDF terms.
    df = doc_df[doc_df.doc_id == d_i]
    print(df['title'].values[0])
    print(df['url'].values[0])
    top = np.argsort(doc_tf[d_i].toarray()[0])[::-1][0:8]
    names = [[key for key, value in tfv.vocabulary_.items() if value == t_i][0] for t_i in top]
    print('  '+' '.join(names))


##LDA
## Compute our distribution of topics, as well as the distribution
## of words for each of those topics.
##
def lda_v(lda, text, features):
    # Raw term-count vector of `text` over the LDA feature vocabulary.
    return np.array([word_tokenize(text).count(f) for f in features])


def display_topics(model, feature_names, no_top_words):
    print("\nDisplaying LDA Topics")
    for topic_idx, topic in enumerate(model.components_):
        chosen = topic.argsort()[:-no_top_words - 1:-1]
        print("Topic %d:" % (topic_idx), \
              " ".join([feature_names[i] \
                        for i in chosen]))


# LDA can only use raw term counts for LDA because it is a probabilistic graphical model
tf = DocX
tf_feature_names = vectorizer.get_feature_names()

#---------------
print("Calculating Dirichlet distribution of docs (LDA)")
# Run LDA
lda = LatentDirichletAllocation(n_components=no_topics, max_iter=5, \
                                learning_method='online', \
                                learning_offset=50., random_state=42)
tf_lda = lda.fit_transform(tf)

#no_top_words = 10
#display_topics(lda, tf_feature_names, 8)

#---------------
print("Documenting topics as weighted word distributions")
topics = []
for topic_idx, topic in enumerate(lda.components_):
    chosen = topic.argsort()[:-7:-1]
    topics.append(" ".join([tf_feature_names[i] \
                            for i in chosen]))
for idx, t in enumerate(topics):
    print(idx,t)


def lda2vec(lda, text, feature_names):
    # Project a text into LDA topic space.
    # NOTE(review): uses the global tf_feature_names, not the feature_names
    # parameter — confirm which was intended.
    vv = lda_v(lda, text, tf_feature_names)
    vv = vv.reshape(1,-1)
    return lda.transform(vv)


## Create word frequency
#---------------
print("Calculating interesting LDA word freqency in each sentence")
txt = ["This is my sample text"]
vv = CountVectorizer(vocabulary=tf_feature_names)
SentX = vv.fit_transform(SentText)
SentVec = lda.transform(SentX)

#---------------
print("Analyzing sentences for intersting-ness")
nDocs = np.max(doc_df['doc_id'].values)
nSents = SentX.shape[0]
# SentStats columns: 0=position in doc, 1=token count, 2=meaningful-word
# count, 3=relevance ratio, 4=keep flag (set below).
SentStats = np.zeros((nSents,5))
for d_i in tqdm(range(nDocs)):
    df_i = sent_df[sent_df['doc_id'] == d_i]
    for idx, s_i in enumerate(df_i['sid'].values):
        toks = word_tokenize(df_i[df_i['sid'] == s_i]['text'].values[0])
        SentStats[s_i,0] = idx  # sentence id sid
        SentStats[s_i,1] = len(toks)  #all words
        SentStats[s_i,2] = np.sum(SentX[s_i,:])  #meaningful words
        SentStats[s_i,3] = SentStats[s_i,2]/(SentStats[s_i,1]+1)  #relevance

info_m = np.mean(SentStats[:,3])
info_std = np.std(SentStats[:,3])
hi_locs = SentStats[np.argwhere(SentStats[:,3] > info_m+2*info_std),0].flatten()
lo_locs = SentStats[np.argwhere(SentStats[:,3] < info_m-1*info_std),0].flatten()
ok_locs = SentStats[np.argwhere(SentStats[:,3] > info_m+1*info_std),0].flatten()

#---------------
print("Choosing the most interesting sentences in each doc")
# Let's indicate which sentences we want to keep - relevant sentences
SentStats[:,4] = 0
SentStats[np.argwhere(SentStats[:,3] > info_m+0*info_std),4] = 1

# OK, let's compute doc centroids
#---------------
print("Calculating the centroid for a doc's interesting sentences")
tensor_size = sent_df['tensor'].values[0].shape[0]
doc_centroids = np.zeros((tf_lda.shape[0], tensor_size))


def centroid(arr):
    # Column-wise mean of a (length, dim) array.
    length, dim = arr.shape
    return np.array([np.sum(arr[:, i])/length for i in range(dim)])


dead_docs = []
for d_i in tqdm(range(nDocs)):
    vecs = []
    df_i = sent_df[sent_df.doc_id == d_i]
    for s_i in df_i['sid'].values:
        # only extract relevant sentences, longer than 3 words
        if (SentStats[s_i,4] > 0) and (SentStats[s_i,1] > 3):
            # NOTE(review): .values[0] always takes the doc's *first*
            # sentence tensor regardless of s_i — looks like it should index
            # the row matching s_i; confirm.
            vecs.append(df_i['tensor'].values[0])
    if (len(vecs) < 1):
        # use null for dead docs
        dead_docs.append(d_i)
        vecs.append(np.zeros(tensor_size))
    vecs = np.array(vecs).reshape((len(vecs),tensor_size))
    doc_centroids[d_i] = centroid(vecs)

#concatenate both vectors, first the gamma scaled LDA
#encoding, then the BERT centroid for topical sentences
#---------------
print("Combining LDA and Bert centroid for each document")
gamma = 20
doc_both = np.c_[(gamma*tf_lda, doc_centroids)]

#---------------
print("Distilling the combined vector to 32 key dimensions")
# create an autoencoder to distill the document vector information
# to 3 dimensions
ae = Autoencoder(32)
ae.fit(doc_both)

# now predict new dense embeddings for every document
doc_dense = ae.encoder.predict(doc_both)

#---------------
print("Clustering the dense, 32-dim information vectors for every doc")
# cluster our documents using k-means
from sklearn.cluster import KMeans
doc_kmeans = KMeans(n_clusters=C, random_state=42).fit_transform(doc_dense)
doc_clusters = np.argmin(doc_kmeans,axis=1)
doc_centers = np.argmin(doc_kmeans,axis=0)

# well, how well did we do?
#---------------
print("Evaluating our technique")
ss_score = metrics.silhouette_score(doc_dense, doc_clusters, metric='euclidean')
ch_score = metrics.calinski_harabasz_score(doc_dense, doc_clusters)
cb_score = metrics.davies_bouldin_score(doc_dense, doc_clusters)
# [-1,1] higher is better, measure of separation using euc distance
print("Silhouette score:", ss_score)
#higher is better, tighter variance
print("Calinski_harabasz:", ch_score)
#lower is better, for bigger, more separate clusters
print("Davies-Bouldin:",cb_score)

#---------------
print("Creating a breadth-first walk of all clusters, starting at centers")
# create a breadth-first-search feed for topics
doc_bfs = np.zeros(doc_kmeans.shape)
doc_bfs.fill(-1)
topic_counts = []
for c_i in range(doc_bfs.shape[1]):
    ci_docs = np.argwhere(doc_clusters == c_i)
    print("Cluster",c_i,"has",len(ci_docs),'docs')
    topic_counts.append(len(ci_docs))
    ci_doc_distances = doc_kmeans[ci_docs,c_i].flatten()
    for idx,ci_doc in enumerate(np.argsort(ci_doc_distances)):
        doc_bfs[idx,c_i] = ci_docs[ci_doc]

# Flatten row-major: first the closest doc of every cluster, then the
# second-closest of every cluster, etc.; -1 padding is dropped.
# NOTE(review): np.int was removed in NumPy >= 1.24 — this line needs plain
# int there; confirm the pinned NumPy version.
feed = doc_bfs.flatten()
feed = np.array([np.int(f) for f in feed[feed >= 0]])

## Summarize cluster terms by calculating the mean
## of all docs in a cluster.
#---------------
print("Summarizing a cluster as the average of all TF/IDF values")
cluster_terms = []
for c_i in range(doc_bfs.shape[1]):
    ci_docs = np.argwhere(doc_clusters == c_i).flatten()
    avg_tfidf = np.array(np.mean(doc_tf[ci_docs],axis=0))
    best_terms = np.array(np.argsort(avg_tfidf[0,:]))[::-1][0:4]
    names = [[key for key, value in tfv.vocabulary_.items() if value == t_i][0] for t_i in best_terms]
    cluster_terms.append(' '.join(names))
print(cluster_terms)

## Summarize cluster terms by calculating the mean
## of k docs closest to the centroid of each cluster.
#---------------
print("Summarizing a cluster as the average of 10 best documents and their TF/IDF values")
kcluster_terms = []
for c_i in range(doc_bfs.shape[1]):
    ci_docs = np.argwhere(doc_clusters == c_i).flatten()
    ci_doc_distances = doc_kmeans[ci_docs,c_i].flatten()
    closest_k_docs = ci_docs[np.argsort(ci_doc_distances)[0:10]]
    avg_tfidf = np.array(np.mean(doc_tf[closest_k_docs],axis=0))
    best_terms = np.array(np.argsort(avg_tfidf[0,:]))[::-1][0:4]
    names = [[key for key, value in tfv.vocabulary_.items() if value == t_i][0] for t_i in best_terms]
    kcluster_terms.append(' '.join(names))
print(kcluster_terms)

#---------------
print("Uploading our feed for "+city_name+" as a breadth-first walk of clusters")
# Create a nice summary dataframe
tp = {'date': [], 'index': [], 'doc_id': [], 'city': [], 'z': [],
      'topic': [], 'who': [], 'url': [], 'title': [], 'image': [],
      'distance': [], 'snippet': []}


def create_feed(db, feed_ids, cluster_info):
    # we expect a list of doc_ids, one for
    # each topic, from topic 0 to topic C
    for idx in tqdm(range(len(feed_ids))):
        d_i = int(feed_ids[idx])
        #print("Hi",d_i,"there")
        df_i = doc_df[doc_df.doc_id == d_i]
        df_j = sent_df[sent_df.doc_id == d_i]
        topic = cluster_info[d_i]
        sent_nums = df_j['sid'].values
        sent_relevance = SentStats[sent_nums,3]
        best_sentence = np.argmax(sent_relevance)
        snippet = ''
        # NOTE(review): `best_sentence > 0` leaves the snippet empty when the
        # most relevant sentence is the first one (index 0) — confirm intent.
        if best_sentence > 0:
            snippet = df_j[df_j.sid == sent_nums[np.argmax(sent_relevance)]]['text'].values[0]
        db['date'].append(df_i['date'].values[0])
        db['index'].append(idx)
        db['doc_id'].append(d_i)
        db['topic'].append(topic)
        db['who'].append(df_i['domain'].values[0])
        db['z'].append(df_i['z'].values[0])
        db['url'].append(df_i['url'].values[0])
        db['city'].append(df_i['city'].values[0])
        db['title'].append(df_i['title'].values[0])
        db['image'].append(df_i['image_url'].values[0])
        db['distance'].append(doc_kmeans[d_i,topic])  # distance from this topic
        db['snippet'].append(snippet)


create_feed(tp,feed,doc_clusters)

#save our feed
from google.cloud import bigquery
client = bigquery.Client(project=project_id)
client.create_dataset('gdelt_sa',exists_ok=True)
table_id = project_id+'.gdelt_sa.daily_feed'

# clean slate
client.delete_table(table_id, not_found_ok=True)

# Since string columns use the "object" dtype, pass in a (partial) schema
# to ensure the correct BigQuery data type.
job_config = bigquery.LoadJobConfig(schema=[
    bigquery.SchemaField("index", "INT64"),
    bigquery.SchemaField("date", "INT64"),
    bigquery.SchemaField("doc_id", "INT64"),
    bigquery.SchemaField("city","STRING"),
    bigquery.SchemaField("z", "FLOAT64"),
    bigquery.SchemaField("topic", "INT64"),
    bigquery.SchemaField("who", "STRING"),
    bigquery.SchemaField("url", "STRING"),
    bigquery.SchemaField("title", "STRING"),
    bigquery.SchemaField("image", "STRING"),
    bigquery.SchemaField("distance", "FLOAT64"),
    bigquery.SchemaField("snippet", "STRING")
])
feed_df = pd.DataFrame(data=tp)
job = client.load_table_from_dataframe(
    feed_df, table_id, job_config=job_config
)

# Wait for the load job to complete.
job.result()

#---------------
print("Uploading the cluster descriptions to BigQuery for "+city_name)
#save our topics as 'themes' that are used to build high-order topics for users
table_id = project_id+'.gdelt_sa.themes'

# clean slate
client.delete_table(table_id, not_found_ok=True)

# Since string columns use the "object" dtype, pass in a (partial) schema
# to ensure the correct BigQuery data type.
top_dict = {'index': [], 'name': [], 'city': []}
for idx,name in enumerate(kcluster_terms):
    top_dict['index'].append(idx)
    top_dict['name'].append(name)
    top_dict['city'].append(city_name)
topic_df = pd.DataFrame(data=top_dict)
job_config = bigquery.LoadJobConfig(schema=[
    bigquery.SchemaField("index", "INT64"),
    bigquery.SchemaField("name", "STRING"),
    bigquery.SchemaField("city", "STRING")
])
job = client.load_table_from_dataframe(
    topic_df, table_id, job_config=job_config
)

# Wait for the load job to complete.
job.result()

#---------------
print("Finis!")
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from .models import Album

# Create your views here.


def index(request):
    """Render the album list page with every album in the catalogue."""
    context = {'all_albums': Album.objects.all()}
    return render(request, 'music/index.html', context)


def detail(request, album_id):
    """Render one album's detail page; 404 when the pk is unknown."""
    context = {'album': get_object_or_404(Album, pk=album_id)}
    return render(request, 'music/detail.html', context)
# create function for determining prime number
def is_prime(number):
    """Return True when ``number`` is prime, False otherwise.

    Trial division over 2..number-1.  Bug fix: the original accepted 0
    and 1 as prime because the loop body never ran for them.
    """
    if number < 2:
        return False
    for i in range(2, number):
        if (number % i) == 0:
            return False
    return True


print(is_prime(3))
print(is_prime(25))

# list demos
empty_list = []
my_list1 = [1, 2, 3]
print(my_list1)
my_list1.append(4)
print(my_list1)
empty_list.append(10)
print(empty_list)


def primes(limit):
    """Return the list of all primes from 2 up to ``limit`` inclusive."""
    result = []
    for i in range(1, limit + 1):
        if is_prime(i):  # same as `if is_prime(i) is True`
            # Bug fix: was `limit.append(i)` — appending to the int
            # parameter raised AttributeError on the first prime found.
            result.append(i)
    return result


print(primes(10))
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import StoreUser


class StoreUserCreationForm(UserCreationForm):
    """Sign-up form for StoreUser; tags every visible widget with the
    `input` CSS class for the site's stylesheet."""

    password1 = forms.CharField(widget=forms.PasswordInput())
    password2 = forms.CharField(widget=forms.PasswordInput())

    def __init__(self, *args, **kwargs):
        super(StoreUserCreationForm, self).__init__(*args, **kwargs)
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'input'

    # Bug fix: Meta must extend the inner options class
    # UserCreationForm.Meta, not the UserCreationForm form class itself.
    class Meta(UserCreationForm.Meta):
        model = StoreUser
        fields = ('username', 'email', 'password1', 'password2', 'age')


class LoginForm(forms.Form):
    """Plain username/password login form with the `input` CSS class."""

    username = forms.CharField(label='Username')
    password = forms.CharField(label='Password', widget=forms.PasswordInput)

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'input'
#!/usr/bin/python # -*- coding:utf8 -*- # @Author : MrTuo # @Time : 2017/10/4 下午9:04 # @File : Save2JSONbyBeautifulSoup.py # @Software : PyCharm # 使用BeautifulSoup解析网页,存储为json格式,解析盗墓笔记首页'http://seputu.com/'为例 import json import requests import sys from bs4 import BeautifulSoup reload(sys) sys.setdefaultencoding('utf-8') user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; MAC OS)' headers = {'User-Agent': user_agent} r = requests.get('http://seputu.com/', headers=headers) # print r.text soup = BeautifulSoup(r.text,'html.parser',from_encoding='utf-8')# html.parsre content=[] for mulu in soup.find_all(class_="mulu"): h2 = mulu.find('h2') #print h2 if h2!=None: h2_title = h2.string #获取标题 # print h2_title list=[] for a in mulu.find(class_='box').find_all('a'): href = a.get('href') box_title = a.get('title') # print href,box_title # 获取到了标题和链接,转存为json list.append({'href':href,'box_title':box_title}) content.append({'title':h2_title,'content':list}) with open('qiye.json','wb') as fp: json.dump(content,fp=fp,ensure_ascii=False,indent=4)
# -*- coding: utf-8 -*-
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.graphics.barcode import code39, code128, code93
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer,Image,Table,TableStyle, PageBreak
from reportlab.lib.units import mm


class DeliveryList:
    """Builds a delivery-note ("送货清单") PDF with ReportLab Platypus.

    build() emits one PDF containing three copies of the note
    (finance / operations / driver).  Comments translated from Chinese.
    """

    def __init__(self):
        # STSong-Light is a CID font bundled with ReportLab that can
        # render the Chinese text used throughout the document.
        pdfmetrics.registerFont(UnicodeCIDFont('STSong-Light'))
        stylesheet = getSampleStyleSheet()
        self._normalStyle = stylesheet['Normal']
        stylesheet.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY));
        self._justifyStyle = stylesheet['Justify']
        self._story = []  # flowables accumulated across createDoc() calls

    def appendParagraph(self, element):
        # Wrap raw intra-paragraph markup in a Paragraph (Normal style).
        self._story.append(Paragraph(element, self._normalStyle))

    def createDoc(self, lading_data, owner_name, file_name, isFinace=False):
        """Append one copy of the delivery note for `lading_data`.

        NOTE(review): `owner_name` (copy label) and `isFinace` are unused
        in the active code — `isFinace` only appears in the commented-out
        sample rows.  Confirm whether the copy label should be rendered.
        """
        # Title: "<seller>送货清单", centred and bold.
        title = '''
        <para autoLeading="off" fontSize=15 align=center>
        <b>
        <font face="STSong-Light">%s送货清单</font>
        </b>
        <br /><br />
        </para>
        ''' % (lading_data['seller_name'])
        self.appendParagraph(title)

        # Code128 barcode of the delivery-note number (= the file name).
        barcode_val = file_name
        barcode = code128.Code128(value=barcode_val, barHeight=8 * mm)
        self._story.append(barcode)
        barcode_context = '''
        <para autoLeading="off" fontSize=9 face="STSong-Light" align=left>
        送货单号: %s &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; 日期: %s
        <br />
        </para>
        ''' % (barcode_val, lading_data['effective_date'])
        self.appendParagraph(barcode_context)

        # Header rows (consignee/route/vehicle/payment) followed by the
        # item-table column titles; cells merged via the SPAN styles below.
        table_data = [
            ['收货单位', lading_data['buyer_name'], '', '', '', '', '订单号', lading_data['order_id'], ''],
            ['起运地', lading_data['depot_address'], '', '', '', '', '目的地', lading_data['delivery_address'], ''],
            ['车牌号', lading_data['plate_number'], '', '', '', '', '司机信息', '', ''],
            ['提货方式', lading_data['way_of_receiving'], '', '', '', '', '收款方式', lading_data['way_of_paid'], ''],
            ['品牌', '品类', '等级', '厚度', '规格', '包装', '片数/包', '包数 ', '面积(㎡)', '备注'],
            # ['南玻', '白玻', '一等品', '4', '3660 * 1900', '裸包', '45', '6 ', '', '1877.58', '18.4' if isFinace else '***', '34547.47' if isFinace else '***'],
            # ['南玻', '其他', '一等品', '4', '3660 * 1900', '裸包', '45', '6 ', '', '1877.58', '18.4' if isFinace else '***', '34547.47' if isFinace else '***']
        ]
        # One row per lading item; spec column is "<width> * <height>".
        for item in lading_data['lading_item_list']:
            str_spec = str(int(item['width'])) + ' * ' + str(int(item['height']))
            table_data.append([
                item['brand'], item['category'], item['level'],
                str(float(item['thickness'])), str_spec, item['packing'],
                str(item['chip']), str(item['packing_number']),
                str(item['area']), ''
            ])
        table_style = [
            ('FONTNAME',(0,0),(-1,-1),'STSong-Light'),
            ('GRID',(0,0),(-1,-1),0.5,colors.grey),
            ('SPAN',(1,0),(5,0)),
            ('SPAN',(7,0),(9,0)),
            ('SPAN',(1,1),(5,1)),
            ('SPAN',(7,1),(9,1)),
            ('SPAN',(1,2),(5,2)),
            ('SPAN',(7,2),(9,2)),
            ('SPAN',(1,3),(5,3)),
            ('SPAN',(7,3),(9,3)),
            ('SPAN',(0,-2),(9,-2)),
            ('SPAN',(0,-1),(4,-1)),
            ('SPAN',(5,-1),(9,-1)),
        ]
        # Footer: remark row plus sender/receiver signature row.
        table_data += [
            ['备注: 以上回执单用于证明发货、提货以及到货签收事实,便于供需双方对账之用。请收货方签字盖章后返回一份签字件'],
            ['送货人(签字)', '', '', '', '', '收货人(签字)', '', '', '', ''],
        ]
        order_item_table = Table(table_data, colWidths=[50, 45, 45, 35, 65, 45, 50, 35, 45, 145])
        order_item_table.setStyle(TableStyle(table_style))
        self._story.append(order_item_table)
        # Vertical spacing between the copies on the page.
        self.appendParagraph('<para autoLeading="off"><br /><br /><br /></para>')

    def build(self, lading_data, path, lading_id):
        """Write <path>DB<lading_id>.pdf with three copies of the note and
        return a dict describing the produced file."""
        file_name = 'DB' + lading_id
        file_type = 'pdf'
        path_all = path + file_name + '.' + file_type
        doc = SimpleDocTemplate(path_all, topMargin=0, leftMargin=10, rightMargin=10, bottomMargin=0)
        self.createDoc(lading_data, '财务', file_name)
        self.createDoc(lading_data, '运营', file_name)
        # self._story.append(PageBreak())
        self.createDoc(lading_data, '司机', file_name)
        doc.build(self._story)
        return {'file_name': file_name, 'file_path': path,
                'file_type': file_type, 'path_all': path_all}
# python自己实现 # import cv2 as cv # from scipy import signal # import numpy as np # import math # # # # n阶的二项展开式系数,构建一维高斯平滑矩阵 # def getsmooth(n): # smooth = np.zeros([1, n], np.float32) # for i in range(n): # smooth[0][i] = math.factorial(n - 1) / (math.factorial(i) * math.factorial(n - i - 1)) # return smooth # # # def getdiff(n): # diff = np.zeros([1, n], np.float32) # smooth = getsmooth(n - 1) # for i in range(n): # if i == 0: # diff[0][i] = smooth[0][i] # 恒等于1 # elif i == n - 1: # diff[0][i] = -smooth[0][i - 1] # 恒等于-1 # else: # diff[0][i] = smooth[0][i] - smooth[0][i - 1] # return diff # # # def sobel(img, size, boundary="symm", fillvalue=0): # smooth = getsmooth(size) # diff = getdiff(size) # print(smooth, diff) # print(np.dot(smooth.transpose(), diff)) # print(np.dot(diff.transpose(), smooth)) # rows, cols = img.shape[:2] # # 水平方向的sobel算子:先进行垂直方向的高斯平滑,再进行水平方向的差分 # gaussian_y = signal.convolve2d(img, smooth.transpose(), mode="same", boundary=boundary, fillvalue=fillvalue) # sobel_x = signal.convolve2d(gaussian_y, diff, mode="same", boundary=boundary, fillvalue=fillvalue) # # # 垂直方向的sobel算子:先进行水平方向的高斯平滑,再进行垂直方向的差分 # gaussian_x = signal.convolve2d(img, smooth, mode="same", boundary=boundary, fillvalue=fillvalue) # sobel_y = signal.convolve2d(gaussian_x, diff.transpose(), mode="same", boundary=boundary, fillvalue=fillvalue) # # return (sobel_x, sobel_y) # # # if __name__ == "__main__": # img = cv.imread(r"./Pics/test.jpg", 0) # sobel_x, sobel_y = sobel(img, size=5) # sobel_x = np.abs(sobel_x) # sobel_edge_x = sobel_x.copy() # sobel_edge_x = sobel_edge_x / np.max(sobel_edge_x) # sobel_edge_x = sobel_edge_x * 255 # 进行归一化处理 # sobel_edge_x = sobel_edge_x.astype(np.uint8) # # sobel_y = np.abs(sobel_y) # sobel_edge_y = sobel_y.copy() # sobel_edge_y = sobel_edge_y / np.max(sobel_edge_y) # sobel_edge_y = sobel_edge_y * 255 # sobel_edge_y = sobel_edge_y.astype(np.uint8) # # sobel_edge = np.sqrt(np.power(sobel_x, 2.0), np.power(sobel_y, 2.0)) # sobel_edge = sobel_edge / 
# np.max(sobel_edge)
# sobel_edge = sobel_edge * 255
# sobel_edge = sobel_edge.astype(np.uint8)
#
# cv.namedWindow("Original", 0)
# cv.resizeWindow("Original", 360, 480)
#
# cv.namedWindow("sobel_edge_x", 0)
# cv.resizeWindow("sobel_edge_x", 360, 480)
#
# cv.namedWindow("sobel_edge_y ", 0)
# cv.resizeWindow("sobel_edge_y ", 360, 480)
#
# cv.namedWindow("sobel_edge", 0)
# cv.resizeWindow("sobel_edge", 360, 480)
#
# cv.imshow("Original", img)
# cv.imshow("sobel_edge_x", sobel_edge_x)
# cv.imshow("sobel_edge_y ", sobel_edge_y)
# cv.imshow("sobel_edge", sobel_edge)
# cv.waitKey(0)
# cv.destroyAllWindows()


# Sobel edge detection using OpenCV's built-in cv.Sobel
# (comments translated from Chinese)
import cv2 as cv
import numpy as np

img = cv.imread(r"./Pics/test.jpg")

# NOTE: do not pass ddepth=-1 here; use cv.CV_32F or cv.CV_64F, otherwise
# gradients are clipped to the input depth and too much detail is lost.
sobel_edge_x = cv.Sobel(img,ddepth=cv.CV_32F,dx=1,dy=0,ksize=5)
sobel_edge_x = np.abs(sobel_edge_x)
sobel_edge_x = sobel_edge_x/np.max(sobel_edge_x)
sobel_edge_x = sobel_edge_x*255  # normalize to the 0..255 display range
sobel_edge_x = sobel_edge_x.astype(np.uint8)

sobel_edge_y = cv.Sobel(img,ddepth=cv.CV_32F,dx=0,dy=1,ksize=5)
sobel_edge_y = np.abs(sobel_edge_y)
sobel_edge_y = sobel_edge_y/np.max(sobel_edge_y)
sobel_edge_y = sobel_edge_y*255
sobel_edge_y = sobel_edge_y.astype(np.uint8)

# Blend the two single-direction edge maps 50/50.
sobel_edge1 = cv.addWeighted(sobel_edge_x,0.5,sobel_edge_y,0.5,0)

# dx=1,dy=1 differentiates in both directions at once (a cross
# derivative — not the magnitude of the two maps above).
sobel_edge = cv.Sobel(img,ddepth=cv.CV_32F,dx=1,dy=1,ksize=5)
sobel_edge = np.abs(sobel_edge)
sobel_edge = sobel_edge/np.max(sobel_edge)
sobel_edge = sobel_edge*255
sobel_edge = sobel_edge.astype(np.uint8)

# Resizable preview windows (flag 0 == cv.WINDOW_NORMAL).
cv.namedWindow("Original", 0)
cv.resizeWindow("Original", 360, 480)

cv.namedWindow("sobel_edge_x", 0)
cv.resizeWindow("sobel_edge_x", 360, 480)

cv.namedWindow("sobel_edge_y ", 0)
cv.resizeWindow("sobel_edge_y ", 360, 480)

cv.namedWindow("sobel_edge", 0)
cv.resizeWindow("sobel_edge", 360, 480)

cv.namedWindow("sobel_edge1", 0)
cv.resizeWindow("sobel_edge1", 360, 480)

cv.imshow("Original",img)
cv.imshow("sobel_edge_x",sobel_edge_x)
cv.imshow("sobel_edge_y ",sobel_edge_y )
cv.imshow("sobel_edge",sobel_edge)
cv.imshow("sobel_edge1",sobel_edge1)
cv.waitKey(0)
cv.destroyAllWindows()
# coding: utf-8 __author__ = "sunxr" __version__ = "V1.0" class Workbench: """友工程后台外框架元素定位""" SELECT = ("xpath", ".//*[@id='username']/span[3]") # 下拉按钮 USERNAME = ("xpath", ".//*[@id='username']/span[2]") # 当前登录用户名 LOGOUT = ("xpath", ".//*[@id='moreMenu']/li[5]/a") # 注销 APPCENTER = ("xpath", "html/body/div[1]/div/ul/li[1]/a") # 应用中心 PROJECT = ("xpath", "html/body/div[1]/div/ul/li[2]/a") # 项目档案 BASICARCHIVES = ("xpath", "html/body/div[1]/div/ul/li[3]/a") # 基础档案 PRODUCE = ("xpath", "html/body/div[1]/div/ul/li[3]/div/ul/li[1]/a") # 工序 WORKQUALITY = ("xpath", "html/body/div[1]/div/ul/li[3]/div/ul/li[2]/a") # 质量标准 PROBLEMREASON = ("xpath", "html/body/div[1]/div/ul/li[3]/div/ul/li[3]/a") # 问题原因 CONTACTTYPE = ("xpath", "html/body/div[1]/div/ul/li[3]/div/ul/li[4]/a") # 联系类型 IFRAME = ("xpath", "/html/body/div[2]/div[1]/div/div/iframe") # 内嵌网页
from csv import DictWriter
import cPickle as pickle  # NOTE(review): Python 2 only (cPickle)

# Column sets for the three CSV outputs.
pfields = ['id', 'title', 'nickname', 'fname', 'mname', 'lname', 'suffix']
pgmfields = ['person_id', 'program']
divfields = ['person_id', 'division']

# NOTE(review): these files are opened at import time and never closed —
# consider `with` blocks driven from the __main__ section instead.
pfile = open('people.csv', 'w')
pwriter = DictWriter(pfile, fieldnames=pfields, extrasaction='ignore')
pgmfile = open('people-in-programs.csv', 'w')
pgmwriter = DictWriter(pgmfile, fieldnames=pgmfields, extrasaction='ignore')
divfile = open('people-in-divisions.csv', 'w')
divwriter = DictWriter(divfile, fieldnames=divfields, extrasaction='ignore')

writers = [pwriter, pgmwriter, divwriter]
for writer in writers:
    writer.writeheader()


def gen_person(people):
    # Yield each person's attribute dict, first aliasing `id` as
    # `person_id` (note: mutates the person object's __dict__ in place).
    for person in people:
        person.__dict__['person_id'] = person.id
        yield person.__dict__


def people_to_csv(people):
    # Fan each person out to all three writers; extrasaction='ignore'
    # lets each writer keep only its own columns from the same dict.
    for person in gen_person(people):
        pwriter.writerow(person)
        divwriter.writerow(person)
        for pgm in person['programs']:
            pgmwriter.writerow({'person_id': person['id'], 'program': pgm})


if __name__ == "__main__":
    with open('people.pickle', 'r') as f:
        people = pickle.load(f)
    people_to_csv(people)
# !/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'bit4'
__github__ = 'https://github.com/bit4woo'

import httplib  # NOTE(review): imported but unused
import re
import time

import requests

#https://developer.github.com/v3/search/#search-repositories


class search_github:
    """Scrape github.com/search result pages for links matching a query.

    NOTE(review): Python 2 only (httplib, raw_input, print statements).
    """

    def __init__(self, word, limit, useragent, proxy=None):
        self.engine_name = "Github"
        self.word = word.replace(' ', '%20')  # URL-encode spaces in the query
        self.results = ""        # body of the most recent result page
        self.totalresults = ""   # all fetched page bodies concatenated
        self.server = "github.com"
        self.limit = int(limit)/10  # pages to fetch (10 results per page)
        self.counter = 1  # current page number
        self.headers = {"User-Agent":useragent}
        self.proxies = proxy
        self.github_user = ""
        self.github_password =""
        self.search_type = ""
        self.session = requests.session()

    def login(self):
        # Prompt for credentials unless they were preset on the instance.
        if hasattr(self,'github_user') and self.github_user !="":
            pass
        else:
            self.github_user = raw_input("Please Input Github User:")
        if hasattr(self,'github_password') and self.github_password != "":
            pass
        else:
            self.github_password = raw_input("Please Input Github Password:")
        # NOTE(review): bug — session.get() is called with no URL
        # (TypeError), and the credentials gathered above are never
        # actually submitted anywhere.
        self.session.get()

    def search_config(self):
        #keyword = raw_input("search what:")
        # Ask which GitHub search tab to use; the digit maps to a type name.
        search_type =raw_input("Please Chose Search Type:" \
                     "1. Repositories" \
                     "2. Code(Defualt)" \
                     "3. Commits" \
                     "4. Issues" \
                     "5. Wikis" \
                     "6. Users")
        choice_dict = {'1':'Repositories','2':'Code','3':'Commits','4':'Issues','5':'Wikis','6':'Users'}
        if search_type.strip() in choice_dict.keys():
            self.search_type = choice_dict[search_type.strip()]

    def do_search(self):
        # Fetch one result page and append its body to totalresults.
        try:
            #https://github.com/search?p=2&q=api.map.baidu.com%2Fhighacciploc%2Fv1&type=Code&utf8=%E2%9C%93
            # p = result-page index, q = query, type = search tab
            url = "https://{0}/search?p={1}&q={2}&type={3}&utf8=%E2%9C%93".format(self.server,self.counter,self.word,self.search_type)
            print url
        except Exception, e:
            print e
        try:
            r = self.session.get(url, headers = self.headers, proxies = self.proxies,verify=False)
            self.results = r.content
            self.totalresults += self.results
        except Exception,e:
            print e

    def check_next(self):
        # True while the current page still links to a "next" page.
        if "<a class=\"next_page\"" in self.results:
            return True
        else:
            return False

    def findkeys(self):
        # Extract href targets from the accumulated result pages.
        links = re.compile(r"&#8211;"\
                "<a href=\"(.+?)\"")
        return links.findall(self.totalresults) # list

    def process(self):
        # Page through the results, one request per second.
        while (self.counter < self.limit):#and self.check_next()
            self.do_search()
            #self.do_search_vhost()
            time.sleep(1)
            self.counter += 1
            #print "\tSearching " + str(self.counter) + " results..."

    def run(self):
        # define this function,use for threading, define here or define in child-class both should be OK
        self.process()
        self.d = self.findkeys()
        return self.d


if __name__ == "__main__":
    print "[-] Searching in github:"
    useragent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
    proxy = {"https": "https://127.0.0.1:8080"}
    # NOTE(review): bug — `keyword` is never defined in this scope
    # (NameError at runtime); presumably a raw_input() call is missing.
    search = search_github(keyword, 100, useragent,proxy)
    search.login()
    search.process()
    all_links = search.findkeys()
    print all_links
#coding:utf-8 from PyQt4.QtCore import pyqtSignal, QObject from collections import deque from threading import Lock class QTypeSignal(QObject): sendmsg = pyqtSignal(object)#定义一个信号槽,传入一个参数位参数 def __init__(self): QObject.__init__(self)#用super初始化会出错 def run(self): self.sendmsg.emit('send')#发信号 class QTypeSlot(object): def get(self, msg):#槽对象里的槽函数 print 'Qslot get msg', msg class MySignal(object): def __init__(self): self.collection = deque() self.lock = Lock() def connect(self, fun): if fun not in self.collection: self.collection.append(fun) def emit(self, *args, **kwargs): self.lock.acquire() for fun in set(self.collection): fun(*args, **kwargs) self.lock.release() class MyTypeSignal(object): sendmsg = MySignal()#实例化 def run(self): self.sendmsg.emit('send')#发送 class MyTypeSlot(object): def get(self, msg):#槽对象里的槽函数 print 'My slot get msg', msg if __name__ == "__main__": send = MyTypeSignal() slot = MyTypeSlot() send.sendmsg.connect(slot.get)#链接信号槽 send.run() #>>get msg send send = QTypeSignal() slot = QTypeSlot() send.sendmsg.connect(slot.get) # 链接信号槽 send.run()
from keras.models import Model
from keras.layers import Conv2D, MaxPool2D, Input, concatenate, Dense, Flatten, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ReduceLROnPlateau
from keras_applications.resnet import ResNet101
from keras import backend, layers, utils, models  # required for ResNet101 model import


class Models:
    '''
    AlexNet- and ResNet101-based HyperFace models plus a plain R-CNN face
    detector, with helpers for training, plotting and summaries.

    Methods implemented in this class:
    # def R_CNN(self)
    # def hyperFace_AlexNet(self)
    # def get_HyperFace_AlexNet_parametres(self)
    # def get_HyperFace_ResNet101_parametres(self)
    # def hyperFace_ResNet101(self)
    # def plot_models(self, model_name, show_shapes=True, show_layer_names=True, rankdir='TB')
    # def train_RCNN(self, train_data, validation_data)
    # def train_HyperFace_AlexNet(self, train_data, validation_data)
    # def train_HyperFace_ResNet101(self, train_data, validation_data)
    # def get_model_summary(self, model_name)
    '''

    def __init__(self, lr, epochs, batch_size):
        self.lr = lr                  # learning rate
        self.epochs = epochs          # number of training epochs
        self.batch_size = batch_size  # minibatch size

    def R_CNN(self):
        '''
        AlexNet-style face/no-face classifier.
        Input image size : (227, 227, 3)
        Output : out_face(2)
        '''
        inputs = Input(shape=(227, 227, 3), name='input_tensor')
        conv1 = Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid',
                       activation='relu', name='conv1')(inputs)
        pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(conv1)
        pool1 = BatchNormalization(name='batch_norm_1')(pool1)
        conv2 = Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same',
                       activation='relu', name='conv2')(pool1)
        pool2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool2')(conv2)
        pool2 = BatchNormalization(name='batch_norm_2')(pool2)
        conv3 = Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same',
                       activation='relu', name='conv3')(pool2)
        conv3 = BatchNormalization(name='batch_norm_3')(conv3)
        conv4 = Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same',
                       activation='relu', name='conv4')(conv3)
        conv4 = BatchNormalization(name='batch_norm_4')(conv4)
        conv5 = Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same',
                       activation='relu', name='conv5')(conv4)
        pool5 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(conv5)
        pool5 = BatchNormalization(name='batch_norm_5')(pool5)
        flatten = Flatten(name='flatten')(pool5)
        fully_connected = Dense(4096, activation='relu', name='fully_connected')(flatten)
        face_detection = Dense(512, activation='relu', name='detection')(fully_connected)
        # FIX: sparse_categorical_crossentropy (from_logits=False by default)
        # expects probabilities, so the 2-way head needs a softmax.
        out_face = Dense(2, activation='softmax', name='face_detection_output')(face_detection)
        model = Model(inputs=inputs, outputs=out_face)
        model.compile(optimizer=Adam(lr=self.lr),
                      loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model

    def hyperFace_AlexNet(self):
        '''
        HyperFace on an AlexNet backbone with multi-scale feature fusion.
        Input image size : (227, 227, 3)
        Output: out_detection(2), out_landmarks(42), out_visibility(21),
                out_pose(3), out_gender(1)
        '''
        input_layer = Input(shape=(227, 227, 3), name='input_layer')
        conv1 = Conv2D(96, (11, 11), strides=4, activation='relu', padding='valid',
                       name='conv1')(input_layer)
        max1 = MaxPool2D((3, 3), strides=2, padding='valid', name='max1')(conv1)
        max1 = BatchNormalization(name='batch_norm_1')(max1)
        # Side branch from the early features, downsampled to the fusion size.
        conv1a = Conv2D(256, (4, 4), strides=4, activation='relu', padding='valid',
                        name='conv1a')(max1)
        conv1a = BatchNormalization(name='batch_norm_1a')(conv1a)
        conv2 = Conv2D(256, (5, 5), strides=1, activation='relu', padding='same',
                       name='conv2')(max1)
        max2 = MaxPool2D((3, 3), strides=2, padding='valid', name='max2')(conv2)
        max2 = BatchNormalization(name='batch_norm_2')(max2)
        conv3 = Conv2D(384, (3, 3), strides=1, activation='relu', padding='same',
                       name='conv3')(max2)
        conv3 = BatchNormalization(name='batch_norm_3')(conv3)
        # Second side branch from the mid-level features.
        conv3a = Conv2D(256, (2, 2), strides=2, activation='relu', padding='valid',
                        name='conv3a')(conv3)
        conv3a = BatchNormalization(name='batch_norm_3a')(conv3a)
        conv4 = Conv2D(384, (3, 3), strides=1, activation='relu', padding='same',
                       name='conv4')(conv3)
        conv4 = BatchNormalization(name='batch_norm_4')(conv4)
        conv5 = Conv2D(256, (3, 3), strides=1, activation='relu', padding='same',
                       name='conv5')(conv4)
        pool5 = MaxPool2D((3, 3), strides=2, padding='valid', name='pool5')(conv5)
        pool5 = BatchNormalization(name='batch_norm_5')(pool5)
        # Fuse early, mid and late features, then squeeze channels to 192.
        concat = concatenate([conv1a, conv3a, pool5], name='concat')
        concat = BatchNormalization(name='batch_norm_concat')(concat)
        conv_all = Conv2D(192, (1, 1), strides=1, activation='relu', padding='valid',
                          name='conv_all')(concat)
        flatten = Flatten(name='flatten')(conv_all)
        fc_full = Dense(3072, activation='relu', name='fc_full')(flatten)
        # One task-specific branch per HyperFace head.
        fc_detection = Dense(512, activation='relu', name='fc_detection')(fc_full)
        fc_landmarks = Dense(512, activation='relu', name='fc_landmarks')(fc_full)
        fc_visibility = Dense(512, activation='relu', name='fc_visibility')(fc_full)
        fc_pose = Dense(512, activation='relu', name='fc_pose')(fc_full)
        fc_gender = Dense(512, activation='relu', name='fc_gender')(fc_full)
        # FIX: detection is a 2-class softmax (a softmax over a single unit
        # is constant); landmarks and pose are regressions trained with MSE,
        # so they get linear outputs (softmax made no sense there); gender is
        # binary-crossentropy, so one sigmoid unit.
        out_detection = Dense(2, activation='softmax', name='out_detection')(fc_detection)
        out_landmarks = Dense(42, name='out_landmarks')(fc_landmarks)
        out_visibility = Dense(21, activation='sigmoid', name='out_visibility')(fc_visibility)
        out_pose = Dense(3, name='out_pose')(fc_pose)
        out_gender = Dense(1, activation='sigmoid', name='out_gender')(fc_gender)
        model = Model(inputs=input_layer,
                      outputs=[out_detection, out_landmarks, out_visibility, out_pose, out_gender])
        # FIX: the getter's name had a typo (getHyperFace_AlexNet_parametres),
        # and `callbacks` is an argument of fit(), not of compile().
        losses, loss_weights, optimizer, _callbacks = self.get_HyperFace_AlexNet_parametres()
        model.compile(optimizer=optimizer, loss=losses,
                      loss_weights=loss_weights, metrics=['accuracy'])
        return model

    def get_HyperFace_AlexNet_parametres(self):
        '''
        Returns losses, loss_weights, optimizer and callbacks used in
        HyperFace_AlexNet.
        '''
        losses = {
            "out_detection": "sparse_categorical_crossentropy",
            "out_landmarks": "mean_squared_error",
            "out_visibility": "mean_squared_error",
            "out_pose": "mean_squared_error",
            "out_gender": "binary_crossentropy"
        }
        # Task weights from the HyperFace formulation.
        loss_weights = {
            "out_detection": 1.0,
            "out_landmarks": 5.0,
            "out_visibility": 0.5,
            "out_pose": 5.0,
            "out_gender": 2.0
        }
        optimizer = RMSprop(lr=self.lr, rho=0.9, epsilon=None, decay=0.0)
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                      patience=3, min_lr=0.00001)
        callbacks = [reduce_lr]
        return losses, loss_weights, optimizer, callbacks

    def hyperFace_ResNet101(self):
        '''
        Returns a ResNet101 backbone with conv layers pre-trained on ImageNet.
        keras_applications.resnet - change this when ResNet101 lands in a
        future keras version.
        Input image size : (227, 227, 3)
        NOTE: the HyperFace output heads (out_detection, out_landmarks, ...)
        are not attached yet — see train_HyperFace_ResNet101.
        '''
        model = ResNet101(include_top=False,
                          weights='imagenet',
                          input_tensor=None,
                          input_shape=(227, 227, 3),
                          pooling='avg',
                          backend=backend,
                          layers=layers,
                          models=models,
                          utils=utils)
        return model

    def get_HyperFace_ResNet101_parametres(self):
        '''
        Returns losses, loss_weights, optimizer and callbacks used in
        HyperFace_ResNet101.
        '''
        losses = {
            "out_detection": "sparse_categorical_crossentropy",
            "out_landmarks": "mean_squared_error",
            "out_visibility": "mean_squared_error",
            "out_pose": "mean_squared_error",
            "out_gender": "binary_crossentropy"
        }
        loss_weights = {
            "out_detection": 1.0,
            "out_landmarks": 1.0,
            "out_visibility": 1.0,
            "out_pose": 1.0,
            "out_gender": 1.0
        }
        optimizer = RMSprop(lr=self.lr, rho=0.9, epsilon=None, decay=0.0)
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                      patience=3, min_lr=0.00001)
        callbacks = [reduce_lr]
        return losses, loss_weights, optimizer, callbacks

    def plot_models(self, model_name, show_shapes=True, show_layer_names=True, rankdir='TB'):
        '''
        Uses keras.utils.plot_model to plot the selected model.
        '''
        if model_name == 'R_CNN':
            model = self.R_CNN()
        elif model_name == 'HyperFace_AlexNet':
            model = self.hyperFace_AlexNet()
        elif model_name == 'HyperFace_ResNet101':
            model = self.hyperFace_ResNet101()
        else:
            print("Please choose from the following options :")
            print("1) R_CNN")
            print("2) HyperFace_AlexNet")
            print("3) HyperFace_ResNet101")
            return
        # FIX: forward the caller's options (they were ignored before).
        utils.plot_model(model,
                         to_file='model.png',
                         show_shapes=show_shapes,
                         show_layer_names=show_layer_names,
                         rankdir=rankdir)

    def train_RCNN(self, train_data, validation_data):
        '''
        Trains the R-CNN on train_data = (x, y) and validates it on
        validation_data = (x, y).
        '''
        print("Processing train and validation data...")
        x_train, y_train = train_data
        x_val, y_val = validation_data
        print("Training...")
        model = self.R_CNN()
        model.fit(x_train, y_train, validation_data=validation_data,
                  batch_size=self.batch_size, epochs=self.epochs, verbose=1)
        print("Finished training")
        return

    def train_HyperFace_AlexNet(self, train_data, validation_data):
        '''
        Trains HyperFace_AlexNet.  Both tuples are
        (x, y_face, y_landmarks, y_visibility, y_pose, y_gender).
        '''
        print("Processing train and validation data...")
        # FIX: the face labels were missing from the unpacking even though
        # they are fed to fit() below.
        x_train, y_train_face, y_train_landmarks, y_train_visibility, y_train_pose, y_train_gender = train_data
        x_test, y_test_face, y_test_landmarks, y_test_visibility, y_test_pose, y_test_gender = validation_data
        # FIX: use the AlexNet getter and actually call it (the original
        # referenced the ResNet getter without parentheses).
        _losses, _loss_weights, _optimizer, callbacks = self.get_HyperFace_AlexNet_parametres()
        print("Training...")
        model = self.hyperFace_AlexNet()
        # FIX: target keys must match the model's output layer names
        # ("out_detection", not "out_face"), and the validation dict was
        # missing a comma after its first entry (SyntaxError).
        model.fit(x_train,
                  {"out_detection": y_train_face,
                   "out_landmarks": y_train_landmarks,
                   "out_visibility": y_train_visibility,
                   "out_pose": y_train_pose,
                   "out_gender": y_train_gender},
                  validation_data=(x_test,
                                   {"out_detection": y_test_face,
                                    "out_landmarks": y_test_landmarks,
                                    "out_visibility": y_test_visibility,
                                    "out_pose": y_test_pose,
                                    "out_gender": y_test_gender}),
                  epochs=self.epochs, verbose=1,
                  batch_size=self.batch_size, callbacks=callbacks)
        print("Finished training")
        return

    def train_HyperFace_ResNet101(self, train_data, validation_data):
        '''
        Trains HyperFace_ResNet101.  Both tuples are
        (x, y_face, y_landmarks, y_visibility, y_pose, y_gender).
        NOTE: hyperFace_ResNet101() currently returns only the backbone —
        the named output heads must be attached before this fit() can work.
        '''
        print("Processing train and validation data...")
        x_train, y_train_face, y_train_landmarks, y_train_visibility, y_train_pose, y_train_gender = train_data
        x_test, y_test_face, y_test_landmarks, y_test_visibility, y_test_pose, y_test_gender = validation_data
        # FIX: use the ResNet getter (the original called a misspelled
        # AlexNet getter name, a NameError at runtime).
        _losses, _loss_weights, _optimizer, callbacks = self.get_HyperFace_ResNet101_parametres()
        print("Training...")
        # FIX: the call parentheses were missing, so `model` was the bound
        # method object rather than a model.
        model = self.hyperFace_ResNet101()
        model.fit(x_train,
                  {"out_detection": y_train_face,
                   "out_landmarks": y_train_landmarks,
                   "out_visibility": y_train_visibility,
                   "out_pose": y_train_pose,
                   "out_gender": y_train_gender},
                  validation_data=(x_test,
                                   {"out_detection": y_test_face,
                                    "out_landmarks": y_test_landmarks,
                                    "out_visibility": y_test_visibility,
                                    "out_pose": y_test_pose,
                                    "out_gender": y_test_gender}),
                  epochs=self.epochs, verbose=1,
                  batch_size=self.batch_size, callbacks=callbacks)
        print("Finished training")
        return

    def get_model_summary(self, model_name):
        '''
        Prints the selected model's summary.
        '''
        if model_name == 'R_CNN':
            model = self.R_CNN()
        elif model_name == 'HyperFace_AlexNet':
            model = self.hyperFace_AlexNet()
        elif model_name == 'HyperFace_ResNet101':
            model = self.hyperFace_ResNet101()
        else:
            print("Please choose from the following options :")
            print("1) R_CNN")
            print("2) HyperFace_AlexNet")
            print("3) HyperFace_ResNet101")
            return
        model.summary()
from rest_framework import serializers
from .models import Profile, Project, countries, categories, technologies, colors


class ProfileSerializer(serializers.ModelSerializer):
    """Serialize every field of Profile."""

    class Meta:
        model = Profile
        fields = '__all__'


class ProjectSerializer(serializers.ModelSerializer):
    """Serialize every field of Project."""

    class Meta:
        model = Project
        fields = '__all__'


# NOTE(review): the countries/categories/technologies/colors models are
# lower-case; PEP 8 would name them Country, Category, ... — renaming
# would have to start in models.py, out of scope here.
class countriesSerializer(serializers.ModelSerializer):
    """Serialize every field of countries."""

    class Meta:
        model = countries
        fields = '__all__'


class categoriesSerializer(serializers.ModelSerializer):
    """Serialize every field of categories."""

    class Meta:
        model = categories
        fields = '__all__'


class technologiesSerializer(serializers.ModelSerializer):
    """Serialize every field of technologies."""

    class Meta:
        model = technologies
        fields = '__all__'


class colorsSerializer(serializers.ModelSerializer):
    """Serialize every field of colors."""

    class Meta:
        model = colors
        fields = '__all__'
class Fighter:
    """Base combatant: hit points plus a name -> damage attack table."""

    def __init__(self):
        self.health = 100
        self.attacks = {}

    def receiveEffects(self, effects):
        """Apply incoming damage (or healing, when negative)."""
        self.health = self.health - effects

    def getAttackOptions(self):
        """Names of the attacks this fighter can perform."""
        return self.attacks.keys()

    def getAttackEffects(self, attack):
        """Damage value dealt by the named attack."""
        return self.attacks[attack]


class Tree(Fighter):
    """A tree fighter: it can punch (10 damage) or grow (no damage)."""

    def __init__(self):
        super().__init__()
        self.attacks = {'punch': 10, 'grow': 0}

    def __str__(self):
        return 'health: {}'.format(self.health)
# 导入 HttpResponse 模块 from django.http import HttpResponse from .models import PeopleRequest from django.shortcuts import render from django.http import JsonResponse
# Phoneme-weighted (jamo-level) edit distance for Hangul strings.

alpha = 1  # cost per differing/inserted/deleted jamo

CHOSUNGS = [u'ㄱ',u'ㄲ',u'ㄴ',u'ㄷ',u'ㄸ',u'ㄹ',u'ㅁ',u'ㅂ',u'ㅃ',u'ㅅ',u'ㅆ',u'ㅇ',u'ㅈ',u'ㅉ',u'ㅊ',u'ㅋ',u'ㅌ',u'ㅍ',u'ㅎ']
JOONGSUNGS = [u'ㅏ',u'ㅐ',u'ㅑ',u'ㅒ',u'ㅓ',u'ㅔ',u'ㅕ',u'ㅖ',u'ㅗ',u'ㅘ',u'ㅙ',u'ㅚ',u'ㅛ',u'ㅜ',u'ㅝ',u'ㅞ',u'ㅟ',u'ㅠ',u'ㅡ',u'ㅢ',u'ㅣ']
JONGSUNGS = [u'',u'ㄱ',u'ㄲ',u'ㄳ',u'ㄴ',u'ㄵ',u'ㄶ',u'ㄷ',u'ㄹ',u'ㄺ',u'ㄻ',u'ㄼ',u'ㄽ',u'ㄾ',u'ㄿ',u'ㅀ',u'ㅁ',u'ㅂ',u'ㅄ',u'ㅅ',u'ㅆ',u'ㅇ',u'ㅈ',u'ㅊ',u'ㅋ',u'ㅌ',u'ㅍ',u'ㅎ']

NUM_CHOSUNGS = 19
NUM_JOONGSUNGS = 21
NUM_JONGSUNGS = 28

FIRST_HANGUL_UNICODE = 0xAC00  # '가'
LAST_HANGUL_UNICODE = 0xD7A3   # '힣'


def decompose(hangul_letter):
    """Return (chosung, joongsung, jongsung) for one Hangul character.

    A lone consonant or vowel jamo is returned in its own slot with the
    other slots empty; a composed syllable is split by the standard
    Unicode formula.
    """
    dec = ord(hangul_letter)
    if ord('ㄱ') <= dec and dec <= ord('ㅎ'):
        return (hangul_letter, '', '')
    if ord('ㅏ') <= dec and dec <= ord('ㅣ'):
        return ('', hangul_letter, '')

    code = dec - FIRST_HANGUL_UNICODE
    jongsung_index = code % NUM_JONGSUNGS
    # Bug fix: the original used `/=` (true division in Python 3) and then
    # masked the resulting floats with int() casts — use floor division.
    code //= NUM_JONGSUNGS
    joongsung_index = code % NUM_JOONGSUNGS
    code //= NUM_JOONGSUNGS
    chosung_index = code
    return (CHOSUNGS[chosung_index], JOONGSUNGS[joongsung_index], JONGSUNGS[jongsung_index])


def get_char_phoneme_len(c):
    """Number of non-empty jamo in the character ``c`` (1..3)."""
    # (also fixes the original's shadowing of the builtins `len`/`list`)
    return sum(1 for part in decompose(c) if part != '')


def get_str_phoneme_len(s):
    """Total jamo count over all characters of ``s``."""
    return sum(get_char_phoneme_len(c) for c in s)


def SEO(c1, c2):
    """Substitution cost between two characters: alpha per differing jamo slot."""
    p_list1 = decompose(c1)
    p_list2 = decompose(c2)
    diff_count = sum(1 for a, b in zip(p_list1, p_list2) if a != b)
    return alpha * diff_count


def get_SED(s1, s2):
    """Phoneme-weighted edit distance between Hangul strings s1 and s2.

    Classic Levenshtein recursion where insert/delete cost is the jamo
    count of the character and substitution cost comes from SEO(),
    memoized in ``dp``.
    """
    len1 = len(s1)
    len2 = len(s2)
    dp = [[-1] * (len2 + 1) for _ in range(len1 + 1)]  # -1 == not computed

    def SED(i, j):
        if dp[i][j] != -1:
            return dp[i][j]
        if i == 0:  # insert the first j chars of s2
            return get_str_phoneme_len(s2[:j]) * alpha
        if j == 0:  # delete the first i chars of s1
            return get_str_phoneme_len(s1[:i]) * alpha
        v1 = SED(i, j - 1) + get_char_phoneme_len(s2[j - 1]) * alpha
        v2 = SED(i - 1, j) + get_char_phoneme_len(s1[i - 1]) * alpha
        v3 = SED(i - 1, j - 1) + SEO(s1[i - 1], s2[j - 1])
        m = min(v1, v2, v3)
        dp[i][j] = m
        return m

    return SED(len1, len2)
# Wire up the shared renmas core objects used by the renderer.
import renmas.core

scene = renmas.core.Scene()
geometry = renmas.core.ShapeDatabase()   # shape storage
mat_db = renmas.core.MaterialDatabase()  # material storage
light_db = renmas.core.LightDatabase()   # light storage

# presumably imported for its registration side effects — TODO confirm
import renmas.integrators

renderer = renmas.core.Renderer()
ren = renmas.core.RendererUtils(renderer)
log = renmas.core.log
# There are N cards; card i carries the number a_i.  Alice and Bob take
# turns picking cards (Alice first) until none remain; each maximizes their
# own total.  Print how many points Alice ends up ahead of Bob.
#
# Optimal play: both greedily take the largest remaining card, so after a
# descending sort Alice gets the even positions and Bob the odd ones.
n = int(input())  # card count (length of the next line)
cards = sorted(map(int, input().split()), reverse=True)
ans = sum(cards[0::2]) - sum(cards[1::2])
print(ans)
"""WordNet exploration helpers: printing synset definitions/lemmas and
building hyponym trees/forests that can be rendered or exported to Excel."""

__author__ = 'thor'

import numpy as np
import io
import pandas as pd
import itertools
from collections import Counter

from nltk.corpus import wordnet as wn


def print_word_definitions(word):
    # Convenience wrapper: one printed line per WordNet synset of `word`.
    print(word_definitions_string(word))


def word_definitions_string(word):
    # Format "index: definition (synset-name)" for every synset of `word`.
    # Uses the modern nltk API where definition()/name() are methods.
    return '\n'.join(
        [
            '%d: %s (%s)' % (i, x.definition(), x.name())
            for i, x in enumerate(wn.synsets(word))
        ]
    )


def print_word_lemmas(word):
    # Count lemma occurrences over all synsets of `word`, print sorted by
    # decreasing frequency.
    # NOTE(review): `l.name` / `s.lemmas` are attribute accesses here while
    # word_definitions_string() calls them as methods -- this mixes the old
    # (pre-3.0) and new nltk APIs; confirm which nltk version is targeted.
    # NOTE(review): pd.Series.sort() was removed in pandas 0.20+; modern
    # pandas requires sort_values().
    t = Counter([l.name for s in wn.synsets(word) for l in s.lemmas])
    print(
        pd.Series(index=list(t.keys()), data=list(t.values())).sort(
            inplace=False, ascending=False
        )
    )


def _lemma_names_str(syn):
    # Parenthesized comma-joined lemma names of a synset (old attribute API).
    return '(' + ', '.join(syn.lemma_names) + ')'


def print_hypos_with_synset(syn, tab=''):
    # Recursively print the hyponym tree of `syn`; leaves additionally get
    # their lemma names.  NOTE(review): `syn.name` as attribute -- old API.
    print(tab + syn.name)
    h = syn.hyponyms()
    if len(h) > 0:
        for hi in h:
            print_hypos_with_synset(hi, tab + ' ')
    else:
        print(tab + ' ' + _lemma_names_str(syn))


def pprint_hypos(syn, tab=''):
    # Recursively print lemma names for `syn` and all its hyponyms.
    print(tab + _lemma_names_str(syn))
    h = syn.hyponyms()
    if len(h) > 0:
        for hi in h:
            pprint_hypos(hi, tab + ' ')


class iTree(object):
    # Minimal generic tree: a `value` plus a list of child iTree nodes.

    def __init__(self, value=None):
        self.value = value
        self.children = []  # child iTree nodes
        # default rendering of a node, used by tree_info_str()
        self.default_node_2_str = lambda node: str(node.value)

    def __iter__(self):
        # Post-order traversal: yields every descendant node, then self.
        for v in itertools.chain(*map(iter, self.children)):
            yield v
        yield self

    def tree_info_str(
        self,
        node_2_str=None,  # default info is node value
        tab_str=2 * ' ',  # tab string
        depth=0,
    ):
        # Render the subtree as an indented multi-line string, one node per
        # line, indented `depth` tab_str's.
        node_2_str = node_2_str or self.default_node_2_str
        s = depth * tab_str + node_2_str(self) + '\n'
        new_depth = depth + 1
        for child in self.children:
            s += child.tree_info_str(node_2_str, tab_str, new_depth)
        return s


class HyponymTree(iTree):
    # Tree whose nodes are WordNet synsets; children are direct hyponyms,
    # built recursively at construction time.

    def __init__(self, value=None):
        # `value` may be a synset or a synset name string like 'dog.n.01'.
        if isinstance(value, str):
            value = wn.synset(value)
        super(HyponymTree, self).__init__(value=value)
        for hypo in value.hyponyms():
            self.children.append(HyponymTree(hypo))
        self.set_default_node_2_str('name')

    def __str__(self):
        return self.value.name()

    def __repr__(self):
        return self.value.name()

    def print_lemmas(self, tab=''):
        # Print this node's lemma names, then recurse.
        # NOTE(review): children are HyponymTree nodes, but pprint_hypos()
        # expects a synset (it calls .hyponyms()/.lemma_names) -- this looks
        # like a bug; confirm intended argument type.
        print(tab + _lemma_names_str(self.value))
        for c in self.children:
            pprint_hypos(c, tab + ' ')

    def leafs(self):
        # NOTE(review): __iter__ is a full post-order traversal, so despite
        # the name this returns ALL nodes, not only leaves -- confirm intent.
        return [x for x in self]

    @classmethod
    def of_hyponyms(cls, syn):
        # Alternate constructor.
        # NOTE(review): cls(syn) already appends a HyponymTree per hyponym in
        # __init__, so the loop below appends a second, duplicate child set.
        tree = cls(syn)
        for hypo in syn.hyponyms():
            tree.children.append(cls.of_hyponyms(hypo))
        return tree

    @staticmethod
    def get_node_2_str_function(method='name', **kwargs):
        """
        returns a node_2_str function (given it's name)
        method could be
            * 'name': The synset name (example sound.n.01)
            * 'lemma_names': A parenthesized list of lemma names
            * 'name_and_def': The synset name and it's definition
            * 'lemmas_and_def': The lemma names and definition
        """
        # NOTE(review): these lambdas use node.value.name / .definition /
        # .lemma_names as attributes (old nltk API), unlike __str__ above
        # which calls name() -- confirm targeted nltk version.
        if method == 'name':
            return lambda node: node.value.name
        elif method == 'lemma_names' or method == 'lemmas':
            lemma_sep = kwargs.get('lemma_sep', ', ')
            return lambda node: '(' + lemma_sep.join(node.value.lemma_names) + ')'
        elif method == 'name_and_def':
            return lambda node: node.value.name + ': ' + node.value.definition
        elif method == 'lemmas_and_def':
            lemma_sep = kwargs.get('lemma_sep', ', ')
            def_sep = kwargs.get('def_sep', ': ')
            return (
                lambda node: '('
                + lemma_sep.join(node.value.lemma_names)
                + ')'
                + def_sep
                + node.value.definition
            )
        elif method == 'all':
            lemma_sep = kwargs.get('lemma_sep', ', ')
            def_sep = kwargs.get('def_sep', ': ')
            return (
                lambda node: '('
                + lemma_sep.join(node.value.lemma_names)
                + ')'
                + def_sep
                + node.value.name
                + def_sep
                + node.value.definition
            )
        else:
            raise ValueError('Unknown node_2_str_function method')

    def set_default_node_2_str(self, method='name'):
        """
        will set the default string representation of a synset
        (used as a default ny the tree_info_str function for example)
        from the name of the method to use (see get_node_2_str_function(method))
        method could be
            * 'name': The synset name (example sound.n.01)
            * 'lemma_names': A parenthesized list of lemma names
            * 'name_and_def': The synset name and it's definition
            * 'lemmas_and_def': The lemma names and definition
        """
        self.default_node_2_str = HyponymTree.get_node_2_str_function(method)

    def _df_for_excel_export(self, method='all', method_args={}):
        # Build a DataFrame of the rendered tree, one row per node.
        # NOTE(review): mutable default `method_args={}` is mutated in place
        # below -- the dict is shared across calls (classic pitfall).
        # NOTE(review): pd.DataFrame.from_csv was removed in pandas 1.0;
        # modern pandas needs pd.read_csv.
        method_args['def_sep'] = ':'
        method_args['tab_str'] = method_args.get('tab_str', '* ')
        s = ''
        # s = 'lemmas' + method_args['def_sep'] + 'synset' + method_args['def_sep'] + 'definition' + '\n'
        s += self.tree_info_str(
            node_2_str=self.get_node_2_str_function(method=method),
            tab_str=method_args['tab_str'],
        )
        return pd.DataFrame.from_csv(
            io.StringIO(str(s)), sep=method_args['def_sep'], header=None, index_col=None
        )

    def export_info_to_excel(
        self, filepath, sheet_name='hyponyms', method='all', method_args={}
    ):
        # Write the rendered tree to an Excel sheet (no header/index).
        d = self._df_for_excel_export(method=method, method_args=method_args)
        d.to_excel(filepath, sheet_name=sheet_name, header=False, index=False)


class HyponymForest(object):
    # A collection of HyponymTrees treated as one exportable unit.

    def __init__(self, tree_list):
        # Accepts synsets, synset-name strings, or HyponymTree instances;
        # non-trees are wrapped in place.
        assert len(tree_list) == len(
            np.unique(tree_list)
        ), 'synsets in list must be unique'
        for i, ss in enumerate(tree_list):
            if not isinstance(ss, HyponymTree):
                tree_list[i] = HyponymTree(ss)
        self.tree_list = tree_list

    def leafs(self):
        # Unique nodes over all trees (see HyponymTree.leafs caveat above).
        return np.unique([xx for x in self.tree_list for xx in x.leafs()])

    def export_info_to_excel(
        self, filepath, sheet_name='hyponyms', method='all', method_args={}
    ):
        # Concatenate each tree's rendering and write one Excel sheet.
        d = pd.DataFrame()
        for dd in self.tree_list:
            d = pd.concat(
                [d, dd._df_for_excel_export(method=method, method_args=method_args)]
            )
        d.to_excel(filepath, sheet_name=sheet_name, header=False, index=False)
import datetime


def get_interval(day):
    """Return the most recent completed Friday-to-Friday week before `day`,
    formatted as 'YYYY-MM-DD--YYYY-MM-DD'."""
    # Days back to the nearest Friday strictly before (or, for Mon-Fri,
    # at least a week before) `day`.
    offset = day.isoweekday() - 5
    if offset <= 0:
        offset += 7
    end_date = day - datetime.timedelta(days=offset)
    start_date = end_date - datetime.timedelta(days=7)
    return f'{date_to_str(start_date)}--{date_to_str(end_date)}'


def date_to_str(date):
    """Format a date as 'YYYY-MM-DD' (month and day zero-padded)."""
    return '-'.join([str(date.year), f'{date.month:02d}', f'{date.day:02d}'])


def get_3weeks():
    """Return the intervals for the last three Friday-to-Friday weeks,
    most recent first."""
    today = datetime.date.today()
    return [get_interval(today - datetime.timedelta(days=7 * k)) for k in range(3)]
from tqdm import tqdm
from environment.config import *
from environment.which_arff_dataset import which_arff_dataset
from tqdm import tqdm  # NOTE(review): duplicate import (also imported above)
from strategies.pyHard.pyhard_unlabeled_framework import pyhard_unlabeled_framework
from copy import deepcopy
from environment.results_to_file import result_to_file
from threading import Thread, Barrier
from pathlib import Path

# NOTE(review): `os`, `classifiers` and `pyhard_unlabeled_strategies` are not
# imported explicitly here -- presumably supplied by the star-import from
# environment.config; confirm.

# Rendezvous for 5 worker threads (n_splits default) plus the main thread.
# NOTE(review): hard-coded 6 parties -- if run_unlabeled_pyhard is called with
# n_splits != 5 the barrier waits will deadlock; confirm.
barrier = Barrier(6)


def unlabeled_pyhard_thread(ds, X_raw, y_raw, idx_data, dataset_name, classifier, idx_bag, n_splits, ph_strategy,
                            init_size, cost):
    # Run one (dataset, classifier, strategy, bag) experiment and persist the
    # result, skipping work whose output CSV already exists in ./output.
    if ('upyhard' + dataset_name + classifier + ph_strategy + str(idx_bag) + '.csv' not in list(
            os.listdir(Path('.') / 'output'))):
        tqdm.write("Testando: " + str(ds[:-5]) + " " + str(classifier) + " " + str(idx_bag) + "/" + str(
            n_splits) + " " + ph_strategy)
        # deepcopy so concurrent threads cannot mutate the shared arrays
        result = pyhard_unlabeled_framework(deepcopy(X_raw), deepcopy(y_raw), idx_data, idx_bag, classifier,
                                            init_size, cost, ph_strategy)
        result['dataset'] = ds[:-5]  # dataset name without the '.arff' suffix
        result_to_file(result, dataset_name, classifier, ph_strategy, "upyhard", idx_bag)
        # CRIAR FUNCAO PARA SALVAR NO ARQUIVO PATH('.')
        tqdm.write("Passou: " + str(ds[:-5]) + " " + str(classifier) + " " + str(idx_bag) + "/" + str(
            n_splits) + " " + ph_strategy)
    else:
        print('upyhard' + dataset_name + classifier + ph_strategy + str(idx_bag) + '.csv already exists!')
    # Wait for the other workers and the coordinator before finishing.
    barrier.wait()


def run_unlabeled_pyhard(datasets, n_splits = 5, init_size = 50, cost = 18):
    # For each dataset / classifier / strategy, launch one thread per bag and
    # wait for all of them at the shared barrier before joining.
    for ds in tqdm(datasets, desc ="Dataset"):
        X_raw, y_raw, idx_data, dataset_name = which_arff_dataset(ds, n_splits=n_splits)
        for classifier in classifiers:
            # para cada i em idx_bag ("n_splits") (1 a 5)
            for ph_strategy in pyhard_unlabeled_strategies:
                thr_list = []
                for idx_bag in range(n_splits):
                    thr_list.append(Thread(target=unlabeled_pyhard_thread,
                                           args=(ds, X_raw, y_raw, idx_data, dataset_name, classifier, idx_bag,
                                                 n_splits, ph_strategy, init_size, cost,)))
                for thr in thr_list:
                    thr.start()
                barrier.wait()  # main thread is the 6th barrier party
                for thr in thr_list:
                    thr.join()
"""DAG to execute data extract, transform, and load pipeline from Amazon S3 to Amazon Redshift""" from datetime import datetime, timedelta import os from airflow import DAG from airflow.operators.dummy_operator import DummyOperator from airflow.operators import (StageToRedshiftOperator, LoadFactOperator, LoadDimensionOperator, DataQualityOperator) from helpers import SqlQueries # AWS_KEY = os.environ.get('AWS_KEY') # AWS_SECRET = os.environ.get('AWS_SECRET') default_args = { 'owner': 'udacity', 'start_date': datetime(2019, 6, 29), 'depends_on_past': False, 'retries': 3, 'retry_delay': timedelta(minutes=5), 'email_on_retry': False, 'catchup_by_default': False } dag = DAG('etl_dag', default_args=default_args, description='Load and transform data in Redshift with Airflow', schedule_interval='0 * * * *' #schedule_interval="@monthly", #start_date=datetime(2020,5,1) ) start_operator = DummyOperator(task_id='Begin_execution', dag=dag) stage_events_to_redshift = StageToRedshiftOperator( task_id='Stage_events', dag=dag, redshift_conn_id='redshift', aws_credentials_id = 'aws_credentials', table_name='public.staging_events', s3_bucket='s3://udacity-dend/log_data', json_path = 's3://udacity-dend/log_json_path.json', use_paritioning = False, execution_date = '{{ execution_date }}', truncate_table=True ) stage_songs_to_redshift = StageToRedshiftOperator( task_id='Stage_songs', dag=dag, redshift_conn_id='redshift', aws_credentials_id = 'aws_credentials', table_name='public.staging_songs', s3_bucket='s3://udacity-dend/song_data', json_path = 'auto', use_paritioning = False, execution_date = '{{ execution_date }}', truncate_table=True ) load_songplays_table = LoadFactOperator( task_id='Load_songplays_fact_table', dag=dag, redshift_conn_id = 'redshift', fact_insert_sql = SqlQueries.songplay_table_insert, fact_table_name = 'public.songplays', fact_insert_columns = 'playid, start_time, userid, level, songid, artistid, sessionid, location, user_agent', truncate_table = True ) 
load_user_dimension_table = LoadDimensionOperator( task_id='Load_user_dim_table', dag=dag, redshift_conn_id = 'redshift', dimension_insert_sql = SqlQueries.user_table_insert, dimension_table_name = 'public.users', dimension_insert_columns = 'userid, first_name, last_name, gender, level', truncate_table = True ) load_song_dimension_table = LoadDimensionOperator( task_id='Load_song_dim_table', dag=dag, redshift_conn_id = 'redshift', dimension_insert_sql = SqlQueries.song_table_insert, dimension_table_name = 'public.songs', dimension_insert_columns = 'songid, title, artistid, year, duration', truncate_table = True ) load_artist_dimension_table = LoadDimensionOperator( task_id='Load_artist_dim_table', dag=dag, redshift_conn_id = 'redshift', dimension_insert_sql = SqlQueries.artist_table_insert, dimension_table_name = 'public.artists', dimension_insert_columns = 'artistid, name, location, lattitude, longitude', truncate_table = True ) load_time_dimension_table = LoadDimensionOperator( task_id='Load_time_dim_table', dag=dag, redshift_conn_id = 'redshift', dimension_table_name = 'public."time"', dimension_insert_sql = SqlQueries.time_table_insert, dimension_insert_columns = 'start_time, hour, day, week, month, year, weekday', truncate_table = True ) run_quality_checks = DataQualityOperator( task_id='Run_data_quality_checks', dag=dag, redshift_conn_id='redshift', sql_check_queries=["SELECT COUNT(*) FROM songs WHERE songid IS NULL", \ "SELECT COUNT(*) FROM songs", \ "SELECT COUNT(*) FROM songplays", \ "SELECT COUNT(*) FROM artists", \ "SELECT COUNT(*) FROM artists", \ "SELECT COUNT(*) FROM time" \ ], expected_results=[lambda num_records: num_records==0, \ lambda num_records: num_records>0, \ lambda num_records: num_records>0, \ lambda num_records: num_records>0, \ lambda num_records: num_records>0, \ lambda num_records: num_records>0] ) end_operator = DummyOperator(task_id='Stop_execution', dag=dag) start_operator >> stage_events_to_redshift start_operator >> 
stage_songs_to_redshift stage_events_to_redshift >> load_songplays_table stage_songs_to_redshift >> load_songplays_table load_songplays_table >> load_song_dimension_table load_songplays_table >> load_user_dimension_table load_songplays_table >> load_artist_dimension_table load_songplays_table >> load_time_dimension_table load_song_dimension_table >> run_quality_checks load_user_dimension_table >> run_quality_checks load_artist_dimension_table >> run_quality_checks load_time_dimension_table >> run_quality_checks run_quality_checks >> end_operator
# URL configuration exposing the tastypie REST API under /v1/.
from django.conf.urls import url, include  # NOTE(review): `url` is unused here
from django.urls import path
from .models import UnmatchedAutamaResource, AccountsResource, RegistrationResource, MessagingResource, MyMatchesResource
from tastypie.api import Api

# Register every resource on a single versioned API namespace.
v1_api = Api(api_name='v1')
v1_api.register(UnmatchedAutamaResource())
v1_api.register(AccountsResource())
v1_api.register(RegistrationResource())
v1_api.register(MessagingResource())
v1_api.register(MyMatchesResource())

urlpatterns = [
    # The normal jazz here...
    path('', include(v1_api.urls)),
]
import unittest

from threading_tools import SynchronizedNumber

# NOTE(review): unused in this module; possibly shared with concurrency tests
# elsewhere -- kept for backward compatibility.
NUM_TRIALS = 2500


class TestSynchronizedBasicMath(unittest.TestCase):
    """Arithmetic-operator tests for SynchronizedNumber.

    Every operator test checks two things: the numeric result, and that the
    returned object is a NEW object rather than one of the operands (the
    operators must not mutate in place).

    Fix: the assertion messages were copy-pasted from the addition tests and
    said "The sum should be ..." for subtraction/multiplication/division/
    power/modulo; each message now names the correct operation.
    """

    #
    # testing __neg__()
    #
    def test_sync_neg(self):
        """Unary negation returns a new object with the negated value."""
        sync_num1 = SynchronizedNumber(50.0)
        res_sync_num = -sync_num1

        assert res_sync_num == -50, 'The negation should be -50. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the original obj.'

    #
    # testing __add__() and __radd__()
    #
    def test_sync_add(self):
        """SynchronizedNumber + SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(50.0)
        sync_num2 = SynchronizedNumber(50.0)
        res_sync_num = sync_num1 + sync_num2

        assert res_sync_num == 100, 'The sum should be 100. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the addend obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the addend obj.'

    def test_non_sync_add(self):
        """SynchronizedNumber + plain number."""
        sync_num1 = SynchronizedNumber(50.0)
        res_sync_num = sync_num1 + 50

        assert res_sync_num == 100, 'The sum should be 100. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the addend obj.'

    def test_reverse_non_sync_add(self):
        """plain number + SynchronizedNumber (__radd__)."""
        sync_num1 = SynchronizedNumber(50.0)
        res_sync_num = 50 + sync_num1

        assert res_sync_num == 100, 'The sum should be 100. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the addend obj.'

    #
    # testing __sub__() and __rsub__()
    #
    def test_sync_sub(self):
        """SynchronizedNumber - SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(50.0)
        sync_num2 = SynchronizedNumber(50.0)
        res_sync_num = sync_num1 - sync_num2

        assert res_sync_num == 0, 'The difference should be 0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the minuend obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the subtrahend obj.'

    def test_non_sync_sub(self):
        """SynchronizedNumber - plain number."""
        sync_num1 = SynchronizedNumber(50.0)
        res_sync_num = sync_num1 - 50

        assert res_sync_num == 0, 'The difference should be 0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the minuend obj.'

    def test_reverse_non_sync_sub(self):
        """plain number - SynchronizedNumber (__rsub__)."""
        sync_num1 = SynchronizedNumber(50.0)
        res_sync_num = 60 - sync_num1

        assert res_sync_num == 10, 'The difference should be 10. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the subtrahend obj.'

    #
    # testing __mul__() and __rmul__()
    #
    def test_sync_mul(self):
        """SynchronizedNumber * SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(4.0)
        sync_num2 = SynchronizedNumber(2.0)
        res_sync_num = sync_num1 * sync_num2

        assert res_sync_num == 8.0, 'The product should be 8.0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the multiplicand obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the multiplicand obj.'

    def test_non_sync_mul(self):
        """SynchronizedNumber * plain number."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = sync_num1 * 2

        assert res_sync_num == 8.0, 'The product should be 8.0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the multiplicand obj.'

    def test_reverse_non_sync_mul(self):
        """plain number * SynchronizedNumber (__rmul__)."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = 2 * sync_num1

        assert res_sync_num == 8.0, 'The product should be 8.0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the multiplicand obj.'

    #
    # testing __truediv__() and __rtruediv__() (named __div__ in Python 2)
    #
    def test_sync_div(self):
        """SynchronizedNumber / SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(4.0)
        sync_num2 = SynchronizedNumber(2.0)
        res_sync_num = sync_num1 / sync_num2

        assert res_sync_num == 2.0, 'The quotient should be 2.0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the dividend obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the divisor obj.'

    def test_non_sync_div(self):
        """SynchronizedNumber / plain number."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = sync_num1 / 2

        assert res_sync_num == 2.0, 'The quotient should be 2.0. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the dividend obj.'

    def test_reverse_non_sync_div(self):
        """plain number / SynchronizedNumber (__rtruediv__)."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = 2 / sync_num1

        assert res_sync_num == 0.5, 'The quotient should be 0.5. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the divisor obj.'

    #
    # testing __pow__() and __rpow__()
    #
    def test_sync_pow(self):
        """SynchronizedNumber ** SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(4.0)
        sync_num2 = SynchronizedNumber(2.0)
        res_sync_num = sync_num1 ** sync_num2

        assert res_sync_num == 16, 'The power should be 16. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the base obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the exponent obj.'

    def test_non_sync_pow(self):
        """SynchronizedNumber ** plain number."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = sync_num1 ** 2

        assert res_sync_num == 16, 'The power should be 16. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the base obj.'

    def test_reverse_non_sync_pow(self):
        """plain number ** SynchronizedNumber (__rpow__)."""
        sync_num1 = SynchronizedNumber(4.0)
        res_sync_num = 2 ** sync_num1

        assert res_sync_num == 16, 'The power should be 16. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the exponent obj.'

    #
    # testing __mod__() and __rmod__()
    #
    def test_sync_mod(self):
        """SynchronizedNumber % SynchronizedNumber."""
        sync_num1 = SynchronizedNumber(5.0)
        sync_num2 = SynchronizedNumber(2.0)
        res_sync_num = sync_num1 % sync_num2

        assert res_sync_num == 1, 'The remainder should be 1. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the dividend obj.'
        assert res_sync_num is not sync_num2, 'The result obj should not be the divisor obj.'

    def test_non_sync_mod(self):
        """SynchronizedNumber % plain number."""
        sync_num1 = SynchronizedNumber(5.0)
        res_sync_num = sync_num1 % 2

        assert res_sync_num == 1, 'The remainder should be 1. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the dividend obj.'

    def test_reverse_non_sync_mod(self):
        """plain number % SynchronizedNumber (__rmod__)."""
        sync_num1 = SynchronizedNumber(2.0)
        res_sync_num = 5 % sync_num1

        assert res_sync_num == 1, 'The remainder should be 1. Instead it is {0}'.format(res_sync_num)
        assert res_sync_num is not sync_num1, 'The result obj should not be the divisor obj.'
import tensorflow as tf
from tensorflow.keras.layers import Conv2D


def block(num_filter, input_shape):
    """Build a three-stage residual Keras model.

    Stage 1 downsamples spatially by 2 (via its stride-2 3x3 conv), with a
    matching stride-2 1x1 projection on the skip path; stages 2 and 3 are
    shape-preserving 1x1 conv stacks with stride-1 1x1 skip projections.

    Bug fixed: the layers intended for the 2nd and 3rd stages were appended
    to ``block1`` (copy/paste of ``block1.add``), which left ``block2`` and
    ``block3`` as empty (identity) Sequentials and tripled block1's depth.

    Args:
        num_filter: number of filters for every conv layer.
        input_shape: shape of the input tensor, excluding the batch axis.

    Returns:
        A tf.keras.Model mapping the input to the residual-stack output.
    """
    # Stage 1: 1x1 -> stride-2 3x3 -> 1x1.
    block1 = tf.keras.Sequential()
    block1.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))
    block1.add(Conv2D(num_filter, (3, 3), strides=2, padding="same", activation='relu'))
    block1.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))

    # Stage 2: three shape-preserving 1x1 convs.
    block2 = tf.keras.Sequential()
    block2.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))
    block2.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))
    block2.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))

    # Stage 3: three shape-preserving 1x1 convs.
    block3 = tf.keras.Sequential()
    block3.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))
    block3.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))
    block3.add(Conv2D(num_filter, (1, 1), strides=1, activation='relu'))

    input_layer = tf.keras.Input(shape=input_shape)

    # Skip path for stage 1 uses stride 2 to match the downsampled main path.
    residual = Conv2D(num_filter, (1, 1), strides=2, activation='relu')(input_layer)
    x = block1(input_layer)
    x = x + residual

    residual = Conv2D(num_filter, (1, 1), strides=1, activation='relu')(x)
    x = block2(x)
    x = x + residual

    residual = Conv2D(num_filter, (1, 1), strides=1, activation='relu')(x)
    x = block3(x)
    x = x + residual

    resnet_block = tf.keras.Model(inputs=input_layer, outputs=x)
    return resnet_block
'''
Created on Nov 12, 2018

@author: hols

Compare two line-oriented text files given on the command line and report,
for each file: total lines, unique lines, lines unique to that file, and
the lines common to both (printing the lines unique to the first file).
'''
import sys

a, b = sys.argv[1:3]

# Fix: the original opened both files and never closed them; `with` closes
# the handles deterministically.
with open(a, 'r') as fa:
    la = fa.readlines()
with open(b, 'r') as fb:
    lb = fb.readlines()

# De-duplicated lines in first-seen order (dict preserves insertion order).
ka = list(dict.fromkeys(la))
kb = list(dict.fromkeys(lb))

# Set views for O(1) membership -- the original scanned lists inside loops,
# which is O(n^2) on large files.
set_a = set(la)
set_b = set(lb)

inter_ab = [l for l in ka if l in set_b]   # unique lines present in both files
a_w_b = [l for l in ka if l not in set_b]  # unique lines only in file a
b_w_a = [l for l in kb if l not in set_a]  # unique lines only in file b

print(len(la), len(ka), len(a_w_b), len(inter_ab))
print(len(lb), len(kb), len(b_w_a), len(inter_ab))
print(a_w_b)
# coding=utf-8
# Smallest multiple
# Problem 5
# https://projecteuler.net/problem=5
#
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?

from functools import reduce
from math import ceil  # kept for backward compatibility; no longer used here


def main():
    """Print the answers for n = 10 (sanity check: 2520) and n = 20."""
    print(smallestMultiple(10))
    print(smallestMultiple(20))


def smallestMultiple(n):
    """Return lcm(1..n): the smallest positive integer divisible by 1..n.

    Fixes two defects of the original:
    * ``range(n, 3, -1)`` silently skipped 3 (and everything below), so e.g.
      smallestMultiple(4) returned 4 instead of 12; it only happened to work
      for n >= 6 because multiples of the small factors were included.
    * the float division ``a * b / gcf(a, b)`` loses precision once the
      running lcm exceeds 2**53; pure integer arithmetic is exact.
    """
    # Fold lcm pairwise over 2..n: lcm(a, k) = a*k // gcd(a, k).
    return reduce(lambda acc, k: acc * k // gcf(acc, k), range(2, n + 1), 1)


def gcf(a, b):
    """Greatest common factor of two positive integers (Euclidean algorithm).

    Replaces the original repeated-subtraction loop; same results, O(log n).
    """
    while b:
        a, b = b, a % b
    return a


if __name__ == '__main__':
    main()
"""Download a trained model file from S3 and publish it to SAS Open Model
Manager (OMM) via its REST API."""

import boto3
import json
import os
import requests
import sys
import tempfile
import urllib3

# Protocol used for every SAS OMM REST call.
sas_omm_protocol = 'http'


def s3_client(access_key, secret_key, url):
    """Return a memoized boto3 S3 client (created lazily on first call)."""
    if s3_client.s3_client is not None:  # idiom: identity check for None
        return s3_client.s3_client

    s3_client.s3_client = boto3.client('s3',
                                       endpoint_url=url,
                                       aws_access_key_id=access_key,
                                       aws_secret_access_key=secret_key,
                                       verify=False)
    return s3_client.s3_client

s3_client.s3_client = None


def get_s3_object(access_key, secret_key, bucket, url, object_name):
    """Return the bucket-listing entry for object_name, or None if absent.

    Also prints every key seen up to (and including) the match.
    """
    s3 = s3_client(access_key, secret_key, url)
    print("Bucket contents:")
    for key in s3.list_objects(Bucket=bucket)['Contents']:
        print(key['Key'])
        if key['Key'] == object_name:
            return key  # removed an unreachable `break` after this return
    return None


def get_s3_object_file(access_key, secret_key, bucket, url, object_name):
    """Download object_name into a temporary file and return it, rewound."""
    s3 = s3_client(access_key, secret_key, url)
    fp = tempfile.TemporaryFile()
    s3.download_fileobj(Bucket=bucket, Key=object_name, Fileobj=fp)
    fp.seek(0)
    return fp


def sas_omm_login(server, user, password):
    """Authenticate against SAS Logon and return the OAuth access token."""
    # Login to REST services
    auth_uri = '/SASLogon/oauth/token'
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    payload = 'grant_type=password&username=' + user + '&password=' + password
    auth_return = requests.post(sas_omm_protocol + '://' + server + auth_uri,
                                auth=('sas.ec', ''), data=payload, headers=headers)
    print("OMM Login:", auth_return)
    auth_json = json.loads(auth_return.content.decode('utf-8'))
    return auth_json['access_token']


def sas_omm_get_model_repository(server, auth_token):
    """Return (repository_id, folder_id) of the 'Public' model repository."""
    # Get Model Repository
    headers = {'Authorization': 'Bearer ' + auth_token}
    url = sas_omm_protocol + '://' + server + "/modelRepository/repositories?filter=eq(name,'Public')"
    repo_list = requests.get(url, headers=headers)
    repo = repo_list.json()['items'][0]
    repo_id = repo['id']
    repo_folder_id = repo['folderId']
    return repo_id, repo_folder_id


def sas_omm_find_project(server, auth_token, project_name):
    """Look up a project by name.

    Returns (result, project_id, project); `result` is the HTTP response on
    a hit, or False when the request failed or no project matched (in which
    case project_id is -1 and project is None).
    """
    project_id = -1
    project = None
    headers = {
        'content-type': 'application/vnd.sas.models.project+json',
        'Authorization': 'Bearer ' + auth_token
    }
    url = sas_omm_protocol + '://' + server + "/modelRepository/projects?filter=eq(name, '" + project_name + "')"
    project_result = requests.get(url, headers = headers)
    if project_result:
        print("OMM Find Project Result JSON Model", project_result.json())
        if project_result.json()['count'] > 0:
            project = project_result.json()['items'][0]
            project_id = project['id']
        else:
            project_result = False
    return project_result, project_id, project


def sas_omm_create_project(server, auth_token, project_name, repo_id, repo_folder_id):
    """Create a model project in the given repository; return (response, id)."""
    project_id = -1
    headers = {
        'content-type': 'application/vnd.sas.models.project+json',
        'Authorization': 'Bearer ' + auth_token
    }
    new_project = {
        'name': project_name,
        'repositoryId': repo_id,
        'folderId': repo_folder_id
    }
    url = sas_omm_protocol + '://' + server + '/modelRepository/projects'
    project_result = requests.post(url, data=json.dumps(new_project), headers=headers)
    if project_result:
        project = project_result.json()
        project_id = project['id']
    return project_result, project_id


def sas_omm_create_model(server, auth_token, project_id, model_name):
    """Create an empty classification model in the project.

    Returns (response, model_id, model_json) on HTTP 201, otherwise
    (response, None, None).
    """
    # Create the model
    headers = {
        'content-type': 'application/vnd.sas.models.model+json',
        'Authorization': 'Bearer ' + auth_token
    }
    new_model = {
        'name': model_name,
        'projectId': project_id,
        'function': 'classification',
        'scoreCodeType': 'python'
    }
    url = sas_omm_protocol + '://' + server + '/modelRepository/models'
    model_result = requests.post(url, data=json.dumps(new_model), headers=headers)
    if model_result.status_code == requests.codes.created:
        model = model_result.json()
        # NOTE(review): reads the id from items[0]; confirm the create
        # endpoint returns a collection rather than the bare model object.
        model_id = model['items'][0]['id']
        return model_result, model_id, model
    return model_result, None, None


def sas_omm_import_model(server, auth_token, model_id, model_file):
    """Upload the model file as content of the model; return the response.

    BUG FIX: this function previously had no return statement, so the caller
    always saw None and sas_omm_publish_model bailed out with False even
    after a successful upload.
    """
    # Import files into the model
    headers = {
        'Content-Type': 'application/octet-stream',
        'Authorization': 'Bearer ' + auth_token
    }
    url = sas_omm_protocol + '://' + server + '/modelRepository/models/' + model_id + '/contents?name=model_final.pth'
    model_file_result = requests.post(url, data=model_file, headers=headers)
    return model_file_result


def sas_omm_publish_model(server, user, password, project_name, model_name, model_file):
    """End-to-end publish: login, locate/create project, create model, upload.

    Returns True on success, False on any failure along the way.
    """
    auth_token = sas_omm_login(server, user, password)
    if auth_token is None:
        return False

    repo_id, repo_folder_id = sas_omm_get_model_repository(server, auth_token)
    if repo_id is None or repo_folder_id is None:
        return False
    print("OMM Repository ID: " + repo_id)
    print("OMM Repository Folder ID: " + repo_folder_id)
    print("OMM Project Name: " + project_name)

    project_result, project_id, project = sas_omm_find_project(server, auth_token, project_name)
    print("OMM Find Project Result:", project_result)
    if project_result == False:
        project_result, project_id = sas_omm_create_project(server, auth_token, project_name, repo_id, repo_folder_id)
        print("OMM Create Project Result:", project_result)
        print("OMM Project ID", project_id)
    else:
        # Found existing project, so use it.
        # FIXME: Return False as the next create model fails with 401 conflict
        # if model already exists. Need to add update model logic.
        print("OMM Project ID", project_id)
        print("OMM Update Model Result:", project_result)
        return False

    model_result, model_id, model = sas_omm_create_model(server, auth_token, project_id, model_name)
    print("OMM Create Model Result:", model_result)
    if model_result.status_code != requests.codes.created:
        return False
    print("OMM JSON Model", model)
    print("OMM Model ID: " + model_id)

    model_file_result = sas_omm_import_model(server, auth_token, model_id, model_file)
    if model_file_result is None:
        return False
    print("OMM Model File Import Result ", model_file_result)
    return True


def main():
    """Fetch the model file from S3 (config via env vars) and publish it."""
    urllib3.disable_warnings()

    s3_access_key = os.environ['ACCESS_KEY_ID']
    s3_secret_key = os.environ['SECRET_ACCESS_KEY']
    s3_bucket_name = os.environ['S3_BUCKET']
    s3_endpoint_url = os.environ['S3_ENDPOINT_URL']
    model_filename = os.environ['MODEL_FILENAME']
    sas_omm_server = os.environ['SAS_OMM_SERVER']
    sas_omm_user = os.environ['SAS_OMM_USER']
    sas_omm_password = os.environ['SAS_OMM_PASSWORD']
    sas_omm_project_name = os.environ['SAS_OMM_PROJECT_NAME']
    sas_omm_model_name = os.environ['SAS_OMM_MODEL_NAME']

    model_object = get_s3_object(s3_access_key, s3_secret_key, s3_bucket_name, s3_endpoint_url, model_filename)
    if model_object == None:
        print('Error: S3 object name ' + model_filename + ' not found')
        sys.exit(1)

    model_file = get_s3_object_file(s3_access_key, s3_secret_key, s3_bucket_name, s3_endpoint_url, model_filename)
    if model_file == None:
        print('Error: Unable to download S3 object file ' + model_filename)
        sys.exit(1)

    success = sas_omm_publish_model(sas_omm_server, sas_omm_user, sas_omm_password, sas_omm_project_name,
                                    sas_omm_model_name, model_file)
    #if not success:
    #    print('Error: unable to publish AI model ' + model_filename + ' to SAS OMM')
    #    model_file.close()
    #    sys.exit(1)

    print('Successfully published AI model ' + model_filename + ' to SAS OMM')
    model_file.close()


if __name__ == '__main__':
    main()
import logging import socket import numpy as np from asml.autogen.services import StreamService from asml.autogen.services.ttypes import ComponentType from asml.network.stream import StreamClient from asml.network.registry import RegistryClient from asml.network.server import Server from asml.parser.factory import ParserFactory from asml.eval.factory import EvaluatorFactory from asml.util.utils import Utils class LearnerHandler: def __init__(self, client_address, registry, parser, evaluator, dao, clf, classes, warmup_examples, id, checkpoint, prequential, checkpointed, test): self._stream_client = StreamClient(client_address) self._registry = registry self._parser = parser self._evaluator = evaluator self._dao = dao self._clf = clf self._classes = classes self._warmup_examples = warmup_examples self._id = id self._checkpoint = checkpoint self._is_prequential = prequential assert self._is_prequential == (test == None) if test: self._test = test self._batches = 0 self._checkpointed = checkpointed self._is_first = True self._streaming_metric = -1 def notify(self, addresses): pass def emit(self, data): try: X, y, timestamps = self._parser.parse_feature(data) self._batches += 1 if timestamps[-1] < self._warmup_examples: # just train print 'warming up at %s' % timestamps[-1] if self._is_first: self._clf.partial_fit(X, y, classes=self._classes) self._is_first = False else: streaming_predictions = self._clf.predict_proba(X) self._clf.partial_fit(X, y, classes=self._classes) self._streaming_metric = self._evaluator.stream_evaluate(y, streaming_predictions[:,1]) # means that we are still catching up elif self._streaming_metric == -1: print 'still catching up at %s' % timestamps[-1] if self._is_first: self._clf.partial_fit(X, y, classes=self._classes) self._is_first = False else: streaming_predictions = self._clf.predict_proba(X) self._clf.partial_fit(X, y, classes=self._classes) self._streaming_metric = self._evaluator.stream_evaluate(y, streaming_predictions[:,1]) else: # 
predict streaming data streaming_predictions = self._clf.predict_proba(X) # prequential predictions if self._is_prequential: logging.debug('%s:%s', timestamps[-1], self._streaming_metric) self._build_and_send(self._streaming_metric, timestamps, y, streaming_predictions) # holdout predictions else: self._holdout(timestamps, y, streaming_predictions) # now update the model (we assume we receive the label "later", though it comes in the batch) self._clf.partial_fit(X, y, classes=self._classes) if self._is_prequential: # also update the streaming metric to use in the next batch self._streaming_metric = self._evaluator.stream_evaluate(y, streaming_predictions[:,1]) # checkpointing model every x number of batches if self._batches >= self._checkpoint: print 'checkpointing model at %s' % timestamps[-1] if self._checkpointed: self._dao.update_model(self._id, timestamps[-1], self._clf) else: self._dao.save_model(self._id, timestamps[-1], self._clf) self._checkpointed = True self._batches = 0 except Exception, ex: print 'ex %s' % ex.message def _build_and_send(self, metric, timestamps, y, streaming_predictions): header = '%s %s %s' % (self._id, metric, timestamps[-1]) body = self._stack(timestamps, y, streaming_predictions[:,1]) body.insert(0, header) # emit message, deployer will see who is the overall best self._stream_client.emit(body) def _prequential(self, timestamps, y, streaming_predictions): logging.debug('%s:%s', timestamps[-1], self._streaming_metric) self._build_and_send(previous_metric, timestamps, streaming_predictions) def _holdout(self, timestamps, y, streaming_predictions): # predict on offline data offline_predictions = self._clf.predict_proba(self._test[0]) # evaluate on the offline data offline_metric = self._evaluator.evaluate(self._test[1], offline_predictions[:,1]) # debug logging.debug('%s:%s', timestamps[-1], offline_metric) # build message and emit self._build_and_send(offline_metric, timestamps, y, streaming_predictions) def _stack(self, timestamps, 
y, y_hat, log=True): stacked = [] for i, t in enumerate(timestamps): line = '%s %s %s' % (t, y[i], y_hat[i]) stacked.append(line) if log: logging.debug(line) return stacked class Learner: def __init__(self, module_properties, dao, clf): self._dao = dao self._id = module_properties['id'] self._warmup_examples = module_properties['warmup_examples'] self._checkpoint = module_properties['checkpoint'] self._is_prequential = True if module_properties['eval_mode'] == 'prequential' else False self._parser = ParserFactory.new_parser(module_properties) self._evaluator = EvaluatorFactory.new_evaluator(module_properties) self._offline_test = None if self._is_prequential == False: self._offline_test = self._parser.parse(module_properties['offline_test']) self._classes = np.array(map(int, module_properties['classes'].split(','))) self._server_port = module_properties['server_port'] ##### Recovery and adding learners on the fly ######### self._clf, timestamp = self._dao.get_model(self._id) self._checkpointed = None # if there was a checkpoint, then see if there are some historical points beyond that and train if self._clf: self._checkpointed = True examples = self._dao.get_examples_greater_than(timestamp) if examples: print 'catching up checkpointed model with historical points...' 
X, y, timestamps = self._parser.parse_feature(examples) self._clf.partial_fit(X, y) else: print 'no historical points to catch up the checkpointed model' # will use the last metric saved else: self._checkpointed = False self._clf = clf examples = self._dao.get_examples() if examples: print 'catching up new model with historical points' X, y, timestamps = self._parser.parse_feature(examples) self._clf.partial_fit(X, y, self._classes) else: print 'no historical points to catch up the new model' ####################################### self._registry = RegistryClient(module_properties['registry']) hostname = socket.gethostname() address = Utils.get_address(hostname, self._server_port) self._stream_client_address = self._registry.reg(ComponentType.LEARNER, address)[0] self._handler = LearnerHandler(self._stream_client_address, self._registry, self._parser, self._evaluator, self._dao, self._clf, self._classes, self._warmup_examples, self._id, self._checkpoint, self._is_prequential, self._checkpointed, self._offline_test) self._processor = StreamService.Processor(self._handler) self._stream_server = Server(self._processor, self._server_port, module_properties['multi_threading']) def run(self): self._stream_server.start()
'''
namespace src.finance.SelectOrdering
'''

########################################################################
# SelectOrdering.Date
########################################################################
class Date():
    """Chronological ordering; no section breaks."""

    def title(self):
        return 'Date'

    def sortOrder(self):
        return ['Date', 'Category', 'Subcategory', 'Account', 'AccountAlias']

    def sectionChange(self):
        return ['']


########################################################################
# SelectOrdering.CategoryDate
########################################################################
class CategoryDate():
    """Group by category, then order chronologically within each one."""

    def title(self):
        return 'Category'

    def sortOrder(self):
        return ['Category', 'Date', 'Subcategory', 'Account', 'AccountAlias']

    def sectionChange(self):
        return ['Category']


########################################################################
# SelectOrdering.CategorySubcategoryDate
########################################################################
class CategorySubcategoryDate():
    """Group by category and subcategory, then chronologically."""

    def title(self):
        return 'Subcategory'

    def sortOrder(self):
        return ['Category', 'Subcategory', 'Date', 'Account', 'AccountAlias']

    def sectionChange(self):
        return ['Category', 'Subcategory']


########################################################################
# SelectOrdering.AccountDate
########################################################################
class AccountDate():
    """Group by account, then order chronologically within each one."""

    def title(self):
        return 'Account'

    def sortOrder(self):
        return ['Account', 'Date', 'AccountAlias']

    def sectionChange(self):
        return ['Account']


#===============================================================================
# Special Camp
#===============================================================================
class ByCamper():
    """One section per camper, ordered by day, time, then medication."""

    def title(self):
        return 'SectionCamper'

    def sortOrder(self):
        return ['camper', 'ordered_day', 'ordered_time', 'medication']

    def sectionChange(self):
        return ['camper']


class EachDaySeparate():
    """Sections per camper and per day."""

    def title(self):
        return 'SectionEachDaySeparate'

    def sortOrder(self):
        return ['camper', 'ordered_day', 'ordered_time', 'medication']

    def sectionChange(self):
        return ['camper', 'day']


class DaysCombined():
    """One section per camper with all days merged together."""

    def title(self):
        return 'SectionDaysCombined'

    def sortOrder(self):
        return ['camper', 'ordered_time', 'medication']

    def sectionChange(self):
        return ['camper']


class TimeMed():
    """Ordered primarily by time and medication."""

    def title(self):
        return 'SectionTimeMed'

    def sortOrder(self):
        return ['ordered_time', 'medication', 'camper', 'ordered_day']

    def sectionChange(self):
        return ['camper']
# -*- coding: utf-8 -*- # # Copyright (c) 2019, Intel Corporation. All rights reserved. # SPDX-License-Identifier: BSD-2-Clause # import platform COPYRIGHT = "2020" def banner(name, ver_str, extra=""): """Create a simple header with version and host information""" print("\n" + "#" * 75) print("Intel (R) {}. Version: {}".format(name, ver_str)) print("Copyright (c) {}, Intel Corporation. All rights reserved.\n"\ .format(COPYRIGHT)) print("Running on {} with Python {}".format(platform.platform(), platform.python_version())) print("#" * 75 + "\n") print(extra)
listOfStrings = ["Nibbler", "Bender", "Fry", "Leela"] for i in range(0, len(listOfStrings)): print str(i) + " = " + listOfStrings[i] # more magical version print "Part 2" for name in listOfStrings: print name
from refactor.tilde_essentials.evaluation import TestEvaluator from refactor.tilde_essentials.example import Example try: from src.ClauseWrapper import ClauseWrapper, HypothesisWrapper from src.subsumption_checking import check_subsumption except ImportError as err: from refactor.query_testing_back_end.django.django_wrapper.ClauseWrapper import ClauseWrapper, HypothesisWrapper from refactor.query_testing_back_end.django.django_wrapper.subsumption_checking import check_subsumption class DjangoQueryEvaluator(TestEvaluator): def evaluate(self, example: Example, test: HypothesisWrapper) -> bool: example_clause_wrapper = example.data # type: ClauseWrapper does_subsume, run_time_ms = check_subsumption(test, example_clause_wrapper) return does_subsume
from haizea.core.leases import Lease from common import * def get_config(): c = load_configfile("base_config_simulator.conf") c.add_section("pricing") return c def test_pricing1(): c = get_config() c.set("scheduling", "policy-pricing", "free") h = load_tracefile(c, "price1.lwf") h.start() verify_done(h, [1]) def test_pricing2(): c = get_config() c.set("scheduling", "policy-pricing", "constant") c.set("pricing", "rate", "0.10") h = load_tracefile(c, "price1.lwf") h.start() verify_done(h, [1]) def test_pricing3(): c = get_config() c.set("scheduling", "policy-pricing", "constant") c.set("pricing", "rate", "1.00") h = load_tracefile(c, "price2.lwf") h.start() verify_rejected_by_user(h, [1]) def test_pricing_surcharge(): c = get_config() c.set("scheduling", "mapper", "deadline") c.set("scheduling", "policy-preemption", "deadline") c.set("scheduling", "suspension", "all") c.set("scheduling", "policy-pricing", "constant") c.set("pricing", "rate", "0.10") h = load_tracefile(c, "pricedeadline.lwf") h.start() verify_done(h, [1])
import cv2 import pytesseract pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe' #img = cv2.imread(r'C:\Users\muska\Downloads\111.jpg ') img = cv2.imread("Resources/111.jpg") img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) #img = cv2.resize(img,(540,320)) img1 = pytesseract.image_to_string(img) print(img1) ############################### #####Detecting Character####### ############################### # hi,wi,_ = img.shape # boxes = pytesseract.image_to_boxes(img) # for b in boxes.splitlines(): # b = b.split(' ') # #print(b) # x,y,w,h = int(b[1]),int(b[2]),int(b[3]),int(b[4]) # cv2.rectangle(img,(x,hi-y),(w,hi-h),(0,0,255),1) # cv2.putText(img,b[0],(x,hi-y+5),cv2.FONT_HERSHEY_PLAIN,1,(50,50,255),2) ############################### #####Detecting Words####### ############################### hi,wi,_ = img.shape boxes = pytesseract.image_to_data(img) for x,b in enumerate(boxes.splitlines()): if x!=0: b = b.split() #print(b) if len(b)==12: x,y,w,h = int(b[6]),int(b[7]),int(b[8]),int(b[9]) cv2.rectangle(img,(x,y),(x+w,h+y),(0,0,255),1) cv2.putText(img,b[11],(x,y),cv2.FONT_HERSHEY_PLAIN,1,(50,50,255),2) ############################### #####Detecting Digits####### ############################### # hi,wi,_ = img.shape # cong = r'--oem 3 --psm 6 outputbase digits' # boxes = pytesseract.image_to_data(img,config=cong) # for x,b in enumerate(boxes.splitlines()): # if x!=0: # b = b.split() # #print(b) # if len(b)==12: # x,y,w,h = int(b[6]),int(b[7]),int(b[8]),int(b[9]) # cv2.rectangle(img,(x,y),(x+w,h+y),(0,0,255),1) # cv2.putText(img,b[11],(x,y),cv2.FONT_HERSHEY_PLAIN,1,(50,50,255),2) cv2.imshow("Results",img) cv2.waitKey(0)
species( label = '[CH2]C(C[CH]C)CCC(515)', structure = SMILES('[CH2]C(C[CH]C)CCC'), E0 = (157.756,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0942141,0.0854547,-5.26203e-05,1.77098e-08,-2.60227e-12,19114,36.8279], Tmin=(100,'K'), Tmax=(1467.65,'K')), NASAPolynomial(coeffs=[10.919,0.0559519,-2.24667e-05,4.01261e-09,-2.69057e-13,15936.7,-19.5483], Tmin=(1467.65,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.756,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + 
longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(Isobutyl)"""), ) species( label = 'C=CCCC(134)', structure = SMILES('C=CCCC'), E0 = (-40.302,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,434.463,434.474],'cm^-1')), HinderedRotor(inertia=(0.0736945,'amu*angstrom^2'), symmetry=1, barrier=(9.87096,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.073698,'amu*angstrom^2'), symmetry=1, barrier=(9.87114,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0736934,'amu*angstrom^2'), symmetry=1, barrier=(9.87114,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (70.1329,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3052.11,'J/mol'), sigma=(5.53315,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=476.73 K, Pc=40.88 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.9015,0.0362536,1.29132e-05,-3.55767e-08,1.43068e-11,-4763.1,19.8563], Tmin=(100,'K'), Tmax=(1027.61,'K')), NASAPolynomial(coeffs=[9.28067,0.0304042,-1.19376e-05,2.20664e-09,-1.55067e-13,-7487.42,-21.8213], Tmin=(1027.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-40.302,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""), ) species( label = 'C3H6(72)', structure = SMILES('C=CC'), E0 = (5.9763,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')), HinderedRotor(inertia=(0.497558,'amu*angstrom^2'), symmetry=1, barrier=(11.4398,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (42.0797,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(2218.31,'J/mol'), sigma=(4.982,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.31912,0.00817959,3.34736e-05,-4.36194e-08,1.58213e-11,749.325,9.54025], Tmin=(100,'K'), Tmax=(983.754,'K')), NASAPolynomial(coeffs=[5.36755,0.0170743,-6.35108e-06,1.1662e-09,-8.2762e-14,-487.138,-4.54468], Tmin=(983.754,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(5.9763,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), label="""C3H6""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = 'H(3)', structure = SMILES('[H]'), E0 = (211.792,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (1.00794,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo 
library: BurkeH2O2"""), ) species( label = 'C=C(C[CH]C)CCC(637)', structure = SMILES('C=C(C[CH]C)CCC'), E0 = (68.6635,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2950,3100,1380,975,1025,1650,3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,350,440,435,1725,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.205,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0745385,0.0817005,-4.77667e-05,1.40468e-08,-1.70763e-12,8402.66,35.0658], Tmin=(100,'K'), Tmax=(1806.57,'K')), NASAPolynomial(coeffs=[15.9908,0.04646,-1.85067e-05,3.24929e-09,-2.13434e-13,2651.85,-51.1344], Tmin=(1806.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(68.6635,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(532.126,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(RCCJC)"""), ) species( label = '[CH2]C(C=CC)CCC(638)', structure 
= SMILES('[CH2]C(C=CC)CCC'), E0 = (73.2223,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.205,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.337214,0.0840209,-4.86246e-05,1.18129e-08,-3.52572e-13,8972.12,35.7336], Tmin=(100,'K'), Tmax=(1232.02,'K')), NASAPolynomial(coeffs=[14.8729,0.04672,-1.792e-05,3.15779e-09,-2.11424e-13,4307.33,-44.5416], Tmin=(1232.02,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(73.2223,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(532.126,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl)"""), ) species( label = '[CH2]C(CC=C)CCC(639)', structure = SMILES('[CH2]C(CC=C)CCC'), E0 = (87.7453,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.205,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.313772,0.0833494,-4.34095e-05,4.48695e-09,2.56068e-12,10718.2,36.0467], Tmin=(100,'K'), Tmax=(1123.18,'K')), NASAPolynomial(coeffs=[14.7289,0.0471109,-1.81612e-05,3.24023e-09,-2.1998e-13,6245.81,-43.1398], Tmin=(1123.18,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(87.7453,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(532.126,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl)"""), ) species( label = 'C=CC[CH]C(380)', structure = SMILES('C=CC[CH]C'), E0 = (154.144,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,283.439,1383.06],'cm^-1')), HinderedRotor(inertia=(0.082013,'amu*angstrom^2'), symmetry=1, barrier=(4.7229,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0832769,'amu*angstrom^2'), symmetry=1, barrier=(4.73611,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0814303,'amu*angstrom^2'), symmetry=1, barrier=(4.72911,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (69.125,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.09235,0.03644,-5.91514e-06,-8.60019e-09,3.57549e-12,18612.4,21.9142], Tmin=(100,'K'), Tmax=(1213.84,'K')), NASAPolynomial(coeffs=[6.96893,0.0310549,-1.24641e-05,2.24839e-09,-1.52423e-13,16641.4,-5.79986], Tmin=(1213.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(154.144,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJC)"""), ) species( label = 'npropyl(83)', structure = SMILES('[CH2]CC'), E0 = (87.0621,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000],'cm^-1')), HinderedRotor(inertia=(0.0928812,'amu*angstrom^2'), symmetry=1, barrier=(2.13552,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.092914,'amu*angstrom^2'), symmetry=1, barrier=(2.13628,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (43.0877,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(2218.31,'J/mol'), sigma=(4.982,'angstroms'), dipoleMoment=(0,'C*m'), 
polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.02815,0.0147023,2.4051e-05,-3.66738e-08,1.38611e-11,10512.1,12.4699], Tmin=(100,'K'), Tmax=(984.464,'K')), NASAPolynomial(coeffs=[6.16543,0.0184495,-6.79029e-06,1.23049e-09,-8.63866e-14,9095.06,-6.67607], Tmin=(984.464,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(87.0621,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), label="""npropyl""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = 'C3H6(T)(143)', structure = SMILES('[CH2][CH]C'), E0 = (284.865,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000],'cm^-1')), HinderedRotor(inertia=(0.238389,'amu*angstrom^2'), symmetry=1, barrier=(5.48103,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00909639,'amu*angstrom^2'), symmetry=1, barrier=(22.1005,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (42.0797,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.93778,0.0190991,4.26842e-06,-1.44873e-08,5.74941e-12,34303.2,12.9695], Tmin=(100,'K'), Tmax=(1046.81,'K')), NASAPolynomial(coeffs=[5.93909,0.0171892,-6.69152e-06,1.21546e-09,-8.39795e-14,33151.2,-4.14888], Tmin=(1046.81,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(284.865,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), label="""C3H6(T)""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = '[CH2][CH]CCC(137)', structure = SMILES('[CH2][CH]CCC'), E0 = (231.608,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,2520.07,2520.09],'cm^-1')), HinderedRotor(inertia=(0.115877,'amu*angstrom^2'), symmetry=1, barrier=(4.7946,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.115947,'amu*angstrom^2'), symmetry=1, barrier=(4.79475,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(1.32425,'amu*angstrom^2'), symmetry=1, barrier=(54.7442,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00289323,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (70.1329,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.95927,0.043164,-1.89531e-05,3.2896e-09,-1.25701e-13,27931,23.7177], Tmin=(100,'K'), Tmax=(1936.29,'K')), NASAPolynomial(coeffs=[12.6506,0.0264639,-1.01885e-05,1.70855e-09,-1.07054e-13,22781,-37.5335], Tmin=(1936.29,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(231.608,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(RCCJC)"""), ) species( label = 'C[CH]C[C](C)CCC(640)', structure = SMILES('C[CH]C[C](C)CCC'), E0 = (138.097,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.181989,0.0908122,-8.5598e-05,6.65633e-08,-2.48414e-11,16740.2,35.8823], Tmin=(100,'K'), Tmax=(753.264,'K')), NASAPolynomial(coeffs=[2.45853,0.0713078,-3.19915e-05,6.05051e-09,-4.20425e-13,16607.6,26.9408], Tmin=(753.264,'K'), 
Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(138.097,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(Tertalkyl)"""), ) species( label = '[CH2]C([CH]CC)CCC(557)', structure = SMILES('[CH2]C([CH]CC)CCC'), E0 = (157.852,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.142882,0.0865654,-5.26266e-05,1.65984e-08,-2.19711e-12,19137.4,37.5097], 
Tmin=(100,'K'), Tmax=(1663.82,'K')), NASAPolynomial(coeffs=[14.9004,0.0503993,-2.0021e-05,3.5337e-09,-2.3403e-13,14131.6,-42.7241], Tmin=(1663.82,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_S) + radical(Isobutyl)"""), ) species( label = '[CH2]CCC([CH2])CCC(505)', structure = SMILES('[CH2]CCC([CH2])CCC'), E0 = (168.557,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = 
SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.509091,0.0909303,-5.98095e-05,2.09364e-08,-3.06143e-12,20441.5,38.0622], Tmin=(100,'K'), Tmax=(1553.67,'K')), NASAPolynomial(coeffs=[16.0829,0.0482135,-1.85685e-05,3.24027e-09,-2.13968e-13,15285.8,-49.2952], Tmin=(1553.67,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(168.557,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(Isobutyl)"""), ) species( label = 'C[CH]CC(C)[CH]CC(641)', structure = SMILES('C[CH]CC(C)[CH]CC'), E0 = (147.216,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.52536,0.0796429,-4.15263e-05,1.0164e-08,-9.96152e-13,17826.3,36.0388], Tmin=(100,'K'), Tmax=(2190.74,'K')), NASAPolynomial(coeffs=[18.5806,0.0466761,-1.89537e-05,3.29485e-09,-2.12259e-13,9915.51,-65.2269], Tmin=(2190.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.216,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_S) + radical(RCCJC)"""), ) species( label = 'C[CH][CH]C(C)CCC(642)', structure = SMILES('C[CH][CH]C(C)CCC'), E0 = (147.216,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = 
(112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.52536,0.0796429,-4.15263e-05,1.0164e-08,-9.96152e-13,17826.3,36.0388], Tmin=(100,'K'), Tmax=(2190.74,'K')), NASAPolynomial(coeffs=[18.5806,0.0466761,-1.89537e-05,3.29485e-09,-2.12259e-13,9915.51,-65.2269], Tmin=(2190.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.216,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_S) + radical(RCCJC)"""), ) species( label = '[CH2][C](CCC)CCC(643)', structure = SMILES('[CH2][C](CCC)CCC'), E0 = (148.733,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.152329,0.0902483,-7.00314e-05,3.65332e-08,-9.14836e-12,18022.1,34.3573], Tmin=(100,'K'), Tmax=(888.974,'K')), NASAPolynomial(coeffs=[5.63449,0.0655796,-2.8405e-05,5.31477e-09,-3.68582e-13,17047.5,8.5545], Tmin=(888.974,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(148.733,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Isobutyl) + radical(Tertalkyl)"""), ) species( label = 'C[CH]CC(C)C[CH]C(644)', structure = SMILES('C[CH]CC(C)C[CH]C'), E0 = (147.12,'kJ/mol'), spinMultiplicity = 1, 
opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.867032,0.0776075,-3.93727e-05,9.53104e-09,-9.47463e-13,17797.1,34.2627], Tmin=(100,'K'), Tmax=(2018.53,'K')), NASAPolynomial(coeffs=[12.239,0.0550724,-2.26266e-05,4.00024e-09,-2.62462e-13,13206.1,-28.5879], Tmin=(2018.53,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.12,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJC)"""), ) species( label = '[CH2]CCC(C)C[CH]C(645)', structure = SMILES('[CH2]CCC(C)C[CH]C'), E0 = (157.921,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), 
semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.167664,0.0838765,-4.82379e-05,1.39915e-08,-1.69964e-12,19130.4,36.5652], Tmin=(100,'K'), Tmax=(1760.97,'K')), NASAPolynomial(coeffs=[14.0928,0.0522463,-2.12954e-05,3.79172e-09,-2.51626e-13,14226.1,-38.4953], Tmin=(1760.97,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(RCCJC)"""), ) species( label = '[CH2][CH]CC(C)CCC(646)', structure = SMILES('[CH2][CH]CC(C)CCC'), E0 = (157.921,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), 
symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.167669,0.0838764,-4.82378e-05,1.39914e-08,-1.69963e-12,19130.4,36.5652], Tmin=(100,'K'), Tmax=(1761,'K')), NASAPolynomial(coeffs=[14.0929,0.052246,-2.12953e-05,3.79169e-09,-2.51624e-13,14226,-38.4962], Tmin=(1761,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJ)"""), ) species( label = 'C=C(CCC)CCC(647)', structure = SMILES('C=C(CCC)CCC'), E0 = (-125.783,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.457484,0.0853299,-4.14912e-05,2.43659e-09,2.8206e-12,-14957.2,33.5554], Tmin=(100,'K'), Tmax=(1183.71,'K')), NASAPolynomial(coeffs=[15.6631,0.0495615,-1.98701e-05,3.6101e-09,-2.46884e-13,-20084.2,-52.4715], Tmin=(1183.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-125.783,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) 
+ group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH)"""), ) species( label = 'CC=CC(C)CCC(648)', structure = SMILES('CC=CC(C)CCC'), E0 = (-131.86,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.279862,0.0814576,-3.30945e-05,-4.56557e-09,4.89987e-12,-15694.6,33.7454], Tmin=(100,'K'), Tmax=(1157.89,'K')), NASAPolynomial(coeffs=[14.6537,0.050769,-2.04143e-05,3.72274e-09,-2.55492e-13,-20553.9,-46.5399], Tmin=(1157.89,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-131.86,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH)"""), ) species( label = 'C=CCC(C)CCC(649)', structure = SMILES('C=CCC(C)CCC'), E0 = (-117.337,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.308524,0.0813491,-2.96096e-05,-9.9804e-09,7.13492e-12,-13946.1,34.2489], Tmin=(100,'K'), Tmax=(1116.01,'K')), NASAPolynomial(coeffs=[15.074,0.0502709,-2.01707e-05,3.69539e-09,-2.55227e-13,-18877.5,-48.3629], Tmin=(1116.01,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-117.337,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""), ) species( label = 'CH2(S)(23)', structure = SMILES('[CH2]'), E0 = 
(419.862,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""), ) species( label = '[CH2]C(CC)C[CH]C(448)', structure = SMILES('[CH2]C(CC)C[CH]C'), E0 = (181.537,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3025,407.5,1350,352.5,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,208.447,816.894,1667.58],'cm^-1')), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.146575,'amu*angstrom^2'), 
symmetry=1, barrier=(3.56342,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (98.1861,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.660991,0.0715562,-4.16095e-05,1.2975e-08,-1.74726e-12,21954.9,32.5515], Tmin=(100,'K'), Tmax=(1590.35,'K')), NASAPolynomial(coeffs=[10.4815,0.046856,-1.83125e-05,3.20901e-09,-2.12057e-13,18831.3,-19.3828], Tmin=(1590.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(181.537,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(482.239,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(Isobutyl)"""), ) species( label = 'C[CH]C[CH]CCCC(493)', structure = SMILES('C[CH]C[CH]CCCC'), E0 = (149.48,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51281,0.0740924,-3.46704e-05,7.09733e-09,-5.52996e-13,18045.9,32.6775], Tmin=(100,'K'), Tmax=(2946.12,'K')), NASAPolynomial(coeffs=[34.208,0.0297017,-1.20691e-05,1.98299e-09,-1.19008e-13,-1218.93,-160.385], Tmin=(2946.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(149.48,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJCC) + radical(RCCJC)"""), ) species( label = 'C[CH]CC[CH]CCC(501)', structure = SMILES('C[CH]CC[CH]CCC'), E0 = (149.48,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3050,390,425,1340,1360,335,370,180,180,780.333,1552.78,3200],'cm^-1')), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.107337,'amu*angstrom^2'), symmetry=1, barrier=(2.4679,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3496.45,'J/mol'), sigma=(6.67923,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=546.14 K, Pc=26.63 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51281,0.0740924,-3.46704e-05,7.09733e-09,-5.52996e-13,18045.9,32.6775], Tmin=(100,'K'), Tmax=(2946.07,'K')), NASAPolynomial(coeffs=[34.2074,0.0297024,-1.20694e-05,1.98305e-09,-1.19012e-13,-1218.51,-160.381], Tmin=(2946.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(149.48,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), 
CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJCC)"""), ) species( label = '[CH2]C(C)C([CH2])CCC(516)', structure = SMILES('[CH2]C(C)C([CH2])CCC'), E0 = (162.698,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.213,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3539.78,'J/mol'), sigma=(6.74357,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=552.91 K, Pc=26.19 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[-0.568573,0.0900911,-5.4943e-05,1.39233e-08,-2.68243e-14,19741.3,37.9849], Tmin=(100,'K'), Tmax=(1097.42,'K')), NASAPolynomial(coeffs=[14.4816,0.0491891,-1.81105e-05,3.13525e-09,-2.08879e-13,15597.7,-39.8514], Tmin=(1097.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(162.698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(552.912,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + longDistanceInteraction_noncyclic(CsCs-TT) + group(Cs-CsCsCsH) + longDistanceInteraction_noncyclic(CsCs-TT) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Isobutyl) + radical(Isobutyl)"""), ) species( label = 'CCCC1CC(C)C1(517)', structure = SMILES('CCCC1CC(C)C1'), E0 = (-99.331,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.213,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.456317,0.0548005,5.64783e-05,-1.04093e-07,4.13171e-11,-11798.7,29.6168], Tmin=(100,'K'), Tmax=(991.124,'K')), NASAPolynomial(coeffs=[16.1214,0.0478487,-1.81608e-05,3.39392e-09,-2.44081e-13,-17667.6,-59.7609], Tmin=(991.124,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-99.331,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(565.384,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclobutane)"""), ) species( label = 'C[CH]C[CH]CCC(182)', structure = SMILES('C[CH]C[CH]CCC'), E0 = (173.26,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3050,390,425,1340,1360,335,370,209.055,308.939,1269.19,3466.82],'cm^-1')), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0559878,'amu*angstrom^2'), symmetry=1, barrier=(1.65535,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (98.1861,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.89222,0.0617409,-2.69496e-05,4.8154e-09,-2.88352e-13,20898.4,29.1332], Tmin=(100,'K'), Tmax=(2576.31,'K')), NASAPolynomial(coeffs=[33.2951,0.0215418,-8.52681e-06,1.33743e-09,-7.5963e-14,1877.83,-157.597], Tmin=(2576.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(173.26,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(482.239,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJCC)"""), ) species( label = 'CH2(19)', structure = SMILES('[CH2]'), E0 = (381.563,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')), ], spinMultiplicity = 3, opticalIsomers = 1, 
molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""), ) species( label = '[CH2]C([CH2])CCC(453)', structure = SMILES('[CH2]C([CH2])CCC'), E0 = (212.606,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,282.908,282.921],'cm^-1')), HinderedRotor(inertia=(1.50122,'amu*angstrom^2'), symmetry=1, barrier=(85.2679,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00828994,'amu*angstrom^2'), symmetry=1, barrier=(85.267,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.000839525,'amu*angstrom^2'), symmetry=1, barrier=(8.63533,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00210608,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(1.50198,'amu*angstrom^2'), symmetry=1, barrier=(85.2677,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (84.1595,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.946288,0.0603019,-2.78545e-05,-1.90127e-09,4.43179e-12,25686.5,27.643], Tmin=(100,'K'), Tmax=(992.809,'K')), NASAPolynomial(coeffs=[10.2432,0.036692,-1.31042e-05,2.24241e-09,-1.49184e-13,23158.1,-20.5791], Tmin=(992.809,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(212.606,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Isobutyl) + radical(Isobutyl)"""), ) species( label = 'CHCH3(T)(95)', structure = SMILES('[CH]C'), E0 = (343.893,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')), HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (28.0532,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = 'N2', structure = SMILES('N#N'), E0 = (-8.69489,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (28.0135,'amu'), collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""), 
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""), ) species( label = 'Ne', structure = SMILES('[Ne]'), E0 = (-6.19738,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (20.1797,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""), ) transitionState( label = 'TS1', E0 = (157.756,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS2', E0 = (280.456,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS3', E0 = (290.747,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS4', E0 = (302.048,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS5', E0 = (268.57,'kJ/mol'), spinMultiplicity = 1, 
opticalIsomers = 1, ) transitionState( label = 'TS6', E0 = (264.224,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS7', E0 = (250.051,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS8', E0 = (260.436,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS9', E0 = (316.844,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS10', E0 = (316.252,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS11', E0 = (299.176,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS12', E0 = (275.696,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS13', E0 = (301.797,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS14', E0 = (244.365,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS15', E0 = (210.639,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS16', E0 = (212.103,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS17', E0 = (516.473,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS18', E0 = (221.157,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS19', E0 = (221.157,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS20', E0 = (182.73,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS21', E0 = (601.399,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS22', E0 = (317.692,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS23', E0 = (317.692,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS24', E0 = (322.633,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS25', E0 = 
(166.041,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS26', E0 = (554.823,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS27', E0 = (556.498,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) reaction( label = 'reaction1', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C=CCCC(134)', 'C3H6(72)'], transitionState = 'TS1', kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ] Euclidian distance = 0 family: 1,4_Linear_birad_scission"""), ) reaction( label = 'reaction2', reactants = ['H(3)', 'C=C(C[CH]C)CCC(637)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS2', kinetics = Arrhenius(A=(0.0051739,'m^3/(mol*s)'), n=2.82163, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 102 used for Cds-CsCs_Cds-HH;HJ Exact match found for rate rule [Cds-CsCs_Cds-HH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond Ea raised from -4.8 to 0 kJ/mol."""), ) reaction( label = 'reaction3', reactants = ['H(3)', '[CH2]C(C=CC)CCC(638)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS3', kinetics = Arrhenius(A=(1.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(5.73208,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2555 used for Cds-CsH_Cds-CsH;HJ Exact match found for rate rule [Cds-CsH_Cds-CsH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction4', reactants = ['H(3)', '[CH2]C(CC=C)CCC(639)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS4', kinetics = Arrhenius(A=(3.36e+08,'cm^3/(mol*s)'), n=1.56, Ea=(2.5104,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 9 used for Cds-HH_Cds-CsH;HJ Exact match found for rate rule [Cds-HH_Cds-CsH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction5', 
reactants = ['C=CC[CH]C(380)', 'npropyl(83)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS5', kinetics = Arrhenius(A=(1020,'cm^3/(mol*s)'), n=2.41, Ea=(27.3634,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 418 used for Cds-CsH_Cds-HH;CsJ-CsHH Exact match found for rate rule [Cds-CsH_Cds-HH;CsJ-CsHH] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction6', reactants = ['C=CCCC(134)', 'C3H6(T)(143)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS6', kinetics = Arrhenius(A=(0.00168615,'m^3/(mol*s)'), n=2.52599, Ea=(19.6608,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-CsH_Cds-HH;CJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction7', reactants = ['[CH2][CH]CCC(137)', 'C3H6(72)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS7', kinetics = Arrhenius(A=(0.00620445,'m^3/(mol*s)'), n=2.46568, Ea=(12.4666,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-HH_Cds-Cs\H3/H;CJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction8', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C[CH]C[C](C)CCC(640)'], transitionState = 'TS8', kinetics = Arrhenius(A=(5.265e-07,'s^-1'), n=5.639, Ea=(102.68,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 38 used for R2H_S;C_rad_out_2H;Cs_H_out_Cs2 Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_Cs2] Euclidian distance = 0 family: intra_H_migration"""), ) reaction( label = 'reaction9', reactants = ['[CH2]C([CH]CC)CCC(557)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS9', kinetics = Arrhenius(A=(6.76e+09,'s^-1'), n=0.88, Ea=(158.992,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 357 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC Exact match found for rate rule 
[R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction10', reactants = ['[CH2]CCC([CH2])CCC(505)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS10', kinetics = Arrhenius(A=(6.48e+07,'s^-1'), n=1.57, Ea=(147.695,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 106 used for R2H_S;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs) Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction11', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C[CH]CC(C)[CH]CC(641)'], transitionState = 'TS11', kinetics = Arrhenius(A=(1.064e+06,'s^-1'), n=1.93, Ea=(141.419,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 108 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs) Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction12', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C[CH][CH]C(C)CCC(642)'], transitionState = 'TS12', kinetics = Arrhenius(A=(166690,'s^-1'), n=2.17519, Ea=(117.939,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;C_rad_out_2H;XH_out] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction13', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['[CH2][C](CCC)CCC(643)'], transitionState = 'TS13', kinetics = Arrhenius(A=(588307,'s^-1'), n=1.79367, Ea=(144.041,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;C_rad_out_H/NonDeC;XH_out] Euclidian distance = 0 family: intra_H_migration"""), ) reaction( label = 'reaction14', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = 
['C[CH]CC(C)C[CH]C(644)'], transitionState = 'TS14', kinetics = Arrhenius(A=(6.44e+09,'s^-1'), n=0.13, Ea=(86.6088,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 131 used for R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC Exact match found for rate rule [R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction15', reactants = ['[CH2]CCC(C)C[CH]C(645)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS15', kinetics = Arrhenius(A=(68850,'s^-1'), n=1.68, Ea=(52.7184,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 111 used for R5H_CCC;C_rad_out_2H;Cs_H_out_2H Exact match found for rate rule [R5H_CCC;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction16', reactants = ['[CH2][CH]CC(C)CCC(646)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS16', kinetics = Arrhenius(A=(91273.5,'s^-1'), n=1.79, Ea=(54.1828,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_2H] for rate rule [R5HJ_1;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction17', reactants = ['[CH2][CH]CCC(137)', 'C3H6(T)(143)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS17', kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad] Euclidian distance = 0 family: R_Recombination Ea raised from -14.4 to 0 kJ/mol."""), ) reaction( label = 'reaction18', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C=C(CCC)CCC(647)'], transitionState = 'TS18', kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule 
[R3radExo;Y_rad_NDe;XH_Rrad] Euclidian distance = 0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction19', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['CC=CC(C)CCC(648)'], transitionState = 'TS19', kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad_NDe] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction20', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C=CCC(C)CCC(649)'], transitionState = 'TS20', kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction21', reactants = ['CH2(S)(23)', '[CH2]C(CC)C[CH]C(448)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS21', kinetics = Arrhenius(A=(1.31021e+06,'m^3/(mol*s)'), n=0.189, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;C_pri] for rate rule [carbene;C_pri/NonDeC] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: 1,2_Insertion_carbene Ea raised from -1.5 to 0 kJ/mol."""), ) reaction( label = 'reaction22', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C[CH]C[CH]CCCC(493)'], transitionState = 'TS22', kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C] Euclidian distance = 1.0 family: 1,2_shiftC"""), ) reaction( label = 'reaction28', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['C[CH]CC[CH]CCC(501)'], transitionState = 'TS23', kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated 
using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C] Euclidian distance = 1.0 family: 1,2_shiftC"""), ) reaction( label = 'reaction26', reactants = ['[CH2]C(C)C([CH2])CCC(516)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS24', kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C] Euclidian distance = 1.0 family: 1,2_shiftC"""), ) reaction( label = 'reaction25', reactants = ['[CH2]C(C[CH]C)CCC(515)'], products = ['CCCC1CC(C)C1(517)'], transitionState = 'TS25', kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC] Euclidian distance = 2.0 family: Birad_recombination"""), ) reaction( label = 'reaction26', reactants = ['C[CH]C[CH]CCC(182)', 'CH2(19)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS26', kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/NonDeC;Birad] Euclidian distance = 3.0 family: Birad_R_Recombination Ea raised from -3.5 to 0 kJ/mol."""), ) reaction( label = 'reaction27', reactants = ['[CH2]C([CH2])CCC(453)', 'CHCH3(T)(95)'], products = ['[CH2]C(C[CH]C)CCC(515)'], transitionState = 'TS27', kinetics = Arrhenius(A=(2.13464e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cs;Birad] Euclidian distance = 3.0 Multiplied by reaction path degeneracy 2.0 family: Birad_R_Recombination Ea raised from -3.5 to 0 kJ/mol."""), ) network( label = '147', isomers = [ '[CH2]C(C[CH]C)CCC(515)', ], reactants = [ ('C=CCCC(134)', 'C3H6(72)'), ], bathGas = { 'N2': 0.5, 'Ne': 0.5, }, ) pressureDependence( 
label = '147', Tmin = (300,'K'), Tmax = (2000,'K'), Tcount = 8, Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'), Pmin = (0.01,'bar'), Pmax = (100,'bar'), Pcount = 5, Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'), maximumGrainSize = (0.5,'kcal/mol'), minimumGrainCount = 250, method = 'modified strong collision', interpolationModel = ('Chebyshev', 6, 4), activeKRotor = True, activeJRotor = True, rmgmode = True, )
# -*- coding: utf-8 -*-
"""Bridge bot: polls VK for unread messages and forwards them to Telegram.

Three tornado periodic callbacks drive the app: one polls Telegram updates,
one drains the internal request queue, one polls VK for unread dialogs.
"""
import local_config as config
import tornado
import vk
from tornado.httpserver import HTTPServer
from tornado.ioloop import PeriodicCallback, IOLoop
from tornado.queues import Queue, QueueEmpty
from telebot import TeleBot, types
import pdb


def _vk_api():
    """Authenticate with the configured VK credentials and return an API client.

    Extracted helper: the same auth boilerplate was previously repeated in
    four places.
    """
    session = vk.AuthSession(app_id=config.APPID, user_login=config.LOGIN,
                             user_password=config.PASSWORD, scope='messages')
    return vk.API(session, v='5.38')


class CustomPeriodicCallback(PeriodicCallback):
    """Periodic task that processes queued bot requests one at a time.

    Single-threaded queue consumer: takes a task from ``request_queue``,
    processes it and puts the reply on ``response_queue``.
    """

    def __init__(self, request_queue, response_queue, callback_time, io_loop=None):
        # NOTE: deliberately does not call super().__init__(); the attributes
        # tornado's scheduler needs (_running, _timeout, callback_time, io_loop)
        # are set directly, matching the original design.
        if callback_time <= 0:
            raise ValueError("Periodic callback time must be positive")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None
        self.request_queue = request_queue
        self.response_queue = response_queue

    def queue_callback(self):
        """Take one task from the queue, process it, enqueue the response."""
        try:
            message = self.request_queue.get_nowait()
        except QueueEmpty:
            pass
        else:
            if message['text'] == 'telegram_cmd':
                # BUGFIX: the original referenced undefined names `question`
                # and `markup` (NameError as soon as this branch ran); echo the
                # request text with no markup until real content is wired in.
                # TODO confirm what this command is meant to reply with.
                self.response_queue.put({
                    'chat_id': message['chat_id'],
                    'wait_message_id': message['wait_message_id'],
                    'message_text': message['text'],
                    'markup': None,
                })
            self.request_queue.task_done()

    def _run(self):
        if not self._running:
            return
        try:
            return self.queue_callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.queue_callback)
        finally:
            self._schedule_next()


class BotPeriodicCallback(PeriodicCallback):
    """Periodic task that receives Telegram updates and sends queued replies."""

    def __init__(self, bot, callback_time, io_loop=None):
        # See note in CustomPeriodicCallback.__init__ about skipping super().
        if callback_time <= 0:
            raise ValueError("Periodic callback time must be positive")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None
        self.bot = bot

    def bot_callback(self, timeout=1):
        """One long-poll round-trip: fetch updates, dispatch, flush replies."""
        if self.bot.skip_pending:
            # Only skip backlog once, on the first poll.
            self.bot.skip_pending = False
        updates = self.bot.get_updates(offset=(self.bot.last_update_id + 1),
                                       timeout=timeout)
        self.bot.process_new_updates(updates)
        self.bot.send_response_messages()

    def _run(self):
        if not self._running:
            return
        try:
            return self.bot_callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.bot_callback)
        finally:
            self._schedule_next()


class VkPeriodicCallback(PeriodicCallback):
    """Periodic task that checks VK for unread messages and forwards them."""

    def __init__(self, bot, user_dict, callback_time, io_loop=None):
        # See note in CustomPeriodicCallback.__init__ about skipping super().
        if callback_time <= 0:
            raise ValueError("Periodic callback time must be positive")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None
        self.user_dict = user_dict
        self.bot = bot

    def update_user_dict(self, dialog_id, last_message_id, user_id):
        """Remember the newest seen message id per dialog.

        Prevents re-downloading messages that were already forwarded.
        """
        self.dialog_id = str(dialog_id)
        self.last_message_id = int(last_message_id)
        self.user_id = user_id
        self.user_dict[self.user_id]['dialog_dict'][self.dialog_id] = {
            'last_message_id': self.last_message_id,
            'dialog_id': int(dialog_id),
        }

    # NOTE: defined without `self` on purpose; it is invoked through the class
    # as a plain function: VkPeriodicCallback.get_contacts(bot, chat_id).
    def get_contacts(bot, tele_id):
        """Send the authorized user their VK contact list as inline buttons."""
        if tele_id == config.USERID:
            vk_api = _vk_api()
            response = vk_api.messages.getDialogs()
            for item in response['items']:
                if item['message'].get('chat_id'):
                    chat_name = item['message']['title']
                    # VK group-chat peer ids are offset by 2e9.
                    dialog_id = str(2000000000 + item['message']['chat_id'])
                else:
                    user_id = item['message'].get('user_id')
                    user = vk_api.users.get(user_id=user_id)
                    chat_name = user[0].get('last_name') + ' ' + user[0].get('first_name')
                    dialog_id = str(user_id)
                keyboard = types.InlineKeyboardMarkup()
                keyboard.add(types.InlineKeyboardButton(text=chat_name,
                                                        callback_data=dialog_id))
                bot.send_message(tele_id, 'Contact:', reply_markup=keyboard)
        else:
            bot.send_message(tele_id, 'You are not authorized')

    # Plain function, invoked through the class (no `self`).
    def set_response_addressat(user_dict, user_id, addressat_id):
        """Record the VK peer id that /pm replies should be sent to."""
        # The original also had no-op self-assignments here; removed.
        user_id = str(user_id)
        user_dict[user_id]['addressat_id'] = addressat_id

    # Plain function, invoked through the class (no `self`).
    def mark_messages_read(peer_id):
        """Mark the chosen VK dialog as read (chat peer_id = 2000000000 + chat_id)."""
        _vk_api().messages.markAsRead(peer_id=peer_id)

    # Plain function, invoked through the class (no `self`).
    def send_messages(peer_id, message):
        """Send a message to a VK peer."""
        _vk_api().messages.send(peer_id=peer_id, message=message)

    def get(self):
        """Authenticate, collect unread VK messages and push them to Telegram."""
        # TODO: iterate over users from a DB instead of the single configured one.
        user_id = str(config.USERID)
        vk_api = _vk_api()
        if int(user_id) == config.USERID:
            result_list = self.get_messages(vk_api, user_id)
            text = ''
            for item in result_list:
                one_text = ''
                user = item[0]
                for line in item[1]:
                    one_text = one_text + line + '\n'
                text = text + user + '\n' + one_text + '\n'
                if text == '':
                    continue
                keyboard = types.InlineKeyboardMarkup()
                keyboard.add(types.InlineKeyboardButton(text=user,
                                                        callback_data=item[2]))
                self.bot.send_message(user_id, text, reply_markup=keyboard)
                text = ''
        else:
            self.bot.send_message(user_id, 'You are not authorized')

    def get_messages(self, vk_api, user_id):
        """Return [(sender, messages, dialog_id)] for dialogs with new messages."""
        response = vk_api.messages.getDialogs(unread=1)
        result_list = []
        for item in response.get('items'):
            message_list = []
            count = item.get('unread')
            last_message = item.get('message')
            last_message_id = last_message.get('id')
            if last_message.get('chat_id'):
                chat_name = last_message.get('title')
                dialog_id = str(2000000000 + last_message.get('chat_id'))
            else:
                chat_name = ''
                dialog_id = str(last_message.get('user_id'))
            # Skip dialogs whose newest message was already forwarded.
            known = self.user_dict[user_id]['dialog_dict'].get(dialog_id)
            if known is not None and int(last_message_id) <= known['last_message_id']:
                continue
            self.update_user_dict(dialog_id, last_message_id, user_id)
            result_list.append(self.get_unread_history(
                vk_api, dialog_id, chat_name, last_message, count, message_list))
        return result_list

    def get_unread_history(self, vk_api, dialog_id, chat_name, last_message, count, message_list):
        """Fetch up to `count` unread messages of one dialog by its id."""
        user_id = last_message.get('user_id')
        user = vk_api.users.get(user_id=user_id)
        user = user[0].get('last_name') + ' ' + user[0].get('first_name') + ' ' + chat_name
        # NOTE(review): the original issued the same user_id-based request for
        # both private and group dialogs; group chats presumably need peer_id —
        # confirm against the VK messages.getHistory API.
        history = vk_api.messages.getHistory(user_id=dialog_id, start_message=-1,
                                             count=count)
        for message in history.get('items'):
            message_list.append(message.get('body'))
        return (user, message_list, str(dialog_id))

    def _run(self):
        if not self._running:
            return
        try:
            return self.get()
        except Exception:
            self.io_loop.handle_callback_exception(self.get)
        finally:
            self._schedule_next()


class AppTeleBot(TeleBot, object):
    """TeleBot subclass carrying the request/response queues."""

    def __init__(self, token, request_queue, response_queue, threaded=True,
                 skip_pending=False):
        # BUGFIX: forward threaded/skip_pending to the base class; the original
        # hard-coded threaded=True, skip_pending=False, silently ignoring the
        # caller's arguments.
        super(AppTeleBot, self).__init__(token, threaded=threaded,
                                         skip_pending=skip_pending)
        self.request_queue = request_queue
        self.response_queue = response_queue

    def send_response_messages(self):
        """Deliver one processed item from the response queue, if any."""
        try:
            message = self.response_queue.get_nowait()
        except QueueEmpty:
            pass
        else:
            self.send_chat_action(message['chat_id'], 'typing')
            if message['message_text'] == 'contact':
                # BUGFIX: the original referenced undefined module globals
                # PHONE_NUMBER/LAST_NAME/FIRST_NAME; read them from config.
                # TODO confirm these keys exist in local_config.
                self.send_contact(message['chat_id'],
                                  phone_number=getattr(config, 'PHONE_NUMBER', ''),
                                  last_name=getattr(config, 'LAST_NAME', ''),
                                  first_name=getattr(config, 'FIRST_NAME', ''),
                                  reply_markup=message['markup'])
            else:
                # BUGFIX: the original called send_chat_action here, which only
                # shows a typing indicator; send_message actually delivers text.
                self.send_message(message['chat_id'], message['message_text'],
                                  reply_markup=message['markup'])
            self.response_queue.task_done()


def main():
    """Wire up the bot, its handlers and the three periodic callbacks."""
    TOKEN = config.TOKEN
    request_queue = Queue(maxsize=0)
    response_queue = Queue(maxsize=0)
    bot = AppTeleBot(TOKEN, request_queue, response_queue)
    user_id = config.USERID
    user_dict = {str(user_id): {'dialog_dict': {}}}

    @bot.message_handler(commands=['start', 'help'])
    def send_welcome(message):
        bot.send_message(message.chat.id, 'Hello from bot')

    @bot.message_handler(commands=['pm'])
    def send_pm(message):
        # NOTE(review): assumes the Telegram chat id equals the configured
        # user id used as user_dict key — confirm for group chats.
        peer_id = user_dict[str(message.chat.id)]['addressat_id']
        VkPeriodicCallback.send_messages(peer_id, message.text[3:])

    @bot.message_handler(commands=['cont'])
    def get_contacts(message):
        VkPeriodicCallback.get_contacts(bot, message.chat.id)

    @bot.callback_query_handler(func=lambda call: True)
    def callback_inline(call):
        VkPeriodicCallback.mark_messages_read(call.data)
        VkPeriodicCallback.set_response_addressat(user_dict, user_id, call.data)

    # Put incoming requests on the queue for the single-threaded worker.
    @bot.message_handler(func=lambda message: True, content_types=['text'])
    def echo_all(message):
        # BUGFIX: the original used the undefined names ReplyKeyboardRemove
        # and `false` (NameError on every text message).
        markup = types.ReplyKeyboardRemove(selective=False)
        response = bot.send_message(message.chat.id, u'Please wait...',
                                    reply_markup=markup)
        bot.request_queue.put({
            'text': message.text,
            'chat_id': message.chat.id,
            'username': message.chat.username,
            'last_name': message.chat.last_name,
            'message_id': message.message_id,
            'wait_message_id': response.message_id,
        })

    ioloop = tornado.ioloop.IOLoop.instance()
    BotPeriodicCallback(bot, 5000, ioloop).start()
    CustomPeriodicCallback(request_queue, response_queue, 5000, ioloop).start()
    VkPeriodicCallback(bot, user_dict, 5000, ioloop).start()
    ioloop.start()


if __name__ == "__main__":
    main()
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _

# (stored value, human-readable label) pairs for MyModel.color.
COLOR_CHOICES = (
    ('green', 'GREEN'),
    ('blue', 'BLUE'),
    ('red', 'RED'),
    ('orange', 'ORANGE'),
    ('black', 'BLACK'),
)


class MyModel(models.Model):
    """Model holding a single colour selection."""

    # max_length=6 is exactly the longest stored value ('orange').
    color = models.CharField(max_length=6, choices=COLOR_CHOICES, default='green')

    def __str__(self):
        """String for representing the Model object."""
        return f"{self.color}"


class Log_midlware(models.Model):
    """Request log entry (path, HTTP method, time) written by middleware."""

    path = models.CharField(_("path"), max_length=200)
    method = models.CharField(_("method"), max_length=100)
    # Set automatically once, when the row is created.
    timestamp = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Newest entries first, then by method and path.
        ordering = ['-timestamp', 'method', 'path']

    def __str__(self):
        """String for representing the Model object."""
        return f"{self.path}, {self.method}, {self.timestamp}"


class Person(models.Model):
    """Model representing a person."""

    first_name = models.CharField(_("first_name"), max_length=200)
    last_name = models.CharField(_("last_name"), max_length=100)
    # NOTE(review): plain CharField with no email validation — consider
    # EmailField in a future migration; left unchanged here.
    email = models.CharField(_("email"), max_length=100)

    class Meta:
        ordering = ['first_name', 'last_name']

    def __str__(self):
        """String for representing the Model object."""
        return f"{self.first_name}, {self.last_name}, {self.email}"

    def get_absolute_url(self):
        """Returns the url to access a particular author instance."""
        # NOTE(review): resolves to the create view, not a per-object detail
        # URL — confirm this is intended.
        return reverse('person-create')
# https://leetcode.com/problems/minimize-deviation-in-array/description/
"""
You are given an array nums of n positive integers.

You can perform two types of operations on any element of the array any number
of times:
  * If the element is even, divide it by 2.
  * If the element is odd, multiply it by 2.

The deviation of the array is the maximum difference between any two elements
in the array.  Return the minimum deviation the array can have after performing
some number of operations.

Example 1: nums = [1,2,3,4] -> 1   ([1,2,3,2] -> [2,2,3,2], 3 - 2 = 1)
Example 2: nums = [4,1,5,20,3] -> 3
Example 3: nums = [2,10,8] -> 3

Constraints: 2 <= n <= 5 * 10^4, 1 <= nums[i] <= 10^9
"""
import heapq
from math import inf


def minimum_deviation(nums: list[int]) -> int:
    """Max-heap solution, O(n log n log M) where M = max(nums).

    Key observation: an odd value can only ever be doubled once (it becomes
    even), so every element's reachable set is {v, v/2, v/4, ...} starting
    from its even form.  Repeatedly halve the current maximum until it turns
    odd, tracking the smallest deviation seen.
    """
    # Dedup and normalize to even values; store negatives to emulate a
    # max-heap with Python's min-heap.
    vals = [-(x * 2 if x % 2 != 0 else x) for x in set(nums)]
    min_v = -max(vals)
    heapq.heapify(vals)
    ans = inf
    while True:
        cur_max = -heapq.heappop(vals)
        ans = min(ans, cur_max - min_v)
        if ans == 0:
            break
        if cur_max % 2 == 1:
            # The maximum is odd: it cannot shrink further, so we are done.
            break
        cur_max //= 2
        heapq.heappush(vals, -cur_max)
        min_v = min(min_v, cur_max)
    return ans


def minimum_deviation_sortedset(nums: list[int]) -> int:
    """Same algorithm on a SortedSet (third-party ``sortedcontainers``).

    BUGFIX: this was originally also named ``minimum_deviation``, silently
    shadowing the heap solution above and making it dead code; renamed so both
    implementations stay callable.
    """
    # Imported lazily so the module (and the heap solution) still works when
    # the third-party package is not installed.
    from sortedcontainers import SortedSet

    vals = SortedSet([x * 2 if x % 2 != 0 else x for x in nums])
    ans = inf
    min_v = vals[0]
    while True:
        cur_max = vals.pop(-1)
        ans = min(ans, cur_max - min_v)
        if ans == 0:
            break
        if cur_max % 2 == 1:
            break
        cur_max //= 2
        vals.add(cur_max)
        min_v = min(min_v, cur_max)
    return ans
# Generated by Django 2.2 on 2019-05-01 06:51 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('konchiwa', '0002_auto_20190501_1545'), ] operations = [ migrations.AlterField( model_name='article', name='journal_ID', field=models.CharField(max_length=128), ), migrations.AlterField( model_name='journal', name='journal_ID', field=models.CharField(max_length=128), ), ]
# Build the release archive.
import os
import re
import shutil
import subprocess

from log import logger
from qq_login import QQLogin
from version import now_version


def package(dir_src, dir_all_release, release_dir_name, release_7z_name, dir_github_action_artifact):
    """Assemble a clean release directory from ``dir_src`` and pack it as 7z.

    Args:
        dir_src: source checkout to copy release files from.
        dir_all_release: root directory holding all release builds.
        release_dir_name: name of this release's directory under dir_all_release.
        release_7z_name: file name of the 7z archive to produce.
        dir_github_action_artifact: directory receiving a stable-named copy
            of the archive for the GitHub Action artifact upload.
    """
    old_cwd = os.getcwd()

    # Files and directories that must ship with the release: every doc/image
    # file matching the pattern below, plus an explicit allow-list.
    files_to_copy = []
    reg_wantted_file = r'.*\.(toml|md|txt|png|jpg|docx|url)$'
    for file in os.listdir('.'):
        if not re.search(reg_wantted_file, file, flags=re.IGNORECASE):
            continue
        files_to_copy.append(file)

    files_to_copy.extend([
        "config.toml.example",
        "DNF蚊子腿小助手.exe",
        "DNF蚊子腿小助手配置工具.exe",
        "双击打开配置文件.bat",
        "清除登录信息_误登录其他账号后请点击这个.bat",
        "bandizip_portable",
        "reference_data",
        f"chromedriver_{QQLogin.chrome_major_version}.exe",
        "public_key.der",
        "使用教程",
        "npp_portable",
        "utils",
        "icons",
    ])
    files_to_copy = sorted(files_to_copy)

    # Make sure the root release directory exists.
    if not os.path.isdir(dir_all_release):
        os.mkdir(dir_all_release)

    # Recreate this release's directory from scratch.
    dir_current_release = os.path.realpath(os.path.join(dir_all_release, release_dir_name))
    shutil.rmtree(dir_current_release, ignore_errors=True)
    os.mkdir(dir_current_release)

    # Copy the selected files and directories over.
    logger.info(f"将以下内容从{dir_src}复制到{dir_current_release}")
    for filename in files_to_copy:
        source = os.path.join(dir_src, filename)
        destination = os.path.join(dir_current_release, filename)
        # BUGFIX: test the resolved source path, not the bare name relative
        # to the current working directory.
        if os.path.isdir(source):
            # BUGFIX: restored the file name in the log message (the format
            # placeholder had been replaced by a literal "(unknown)").
            logger.info(f"拷贝目录 {filename}")
            shutil.copytree(source, destination)
        else:
            logger.info(f"拷贝文件 {filename}")
            shutil.copyfile(source, destination)

    # Strip development-only content from the release tree.
    logger.info("清除一些无需发布的内容")
    dir_to_filenames_need_remove = {
        ".": [
            "requirements.txt",
        ],
        "utils": [
            "auto_updater.exe",
            "logs",
            ".db",
            ".cached",
            ".first_run",
            ".log.filename",
            "buy_auto_updater_users.txt",
            "user_monthly_pay_info.txt",
            "notices.txt",
        ],
    }
    for dir_path, filenames in dir_to_filenames_need_remove.items():
        for filename in filenames:
            # BUGFIX: the path previously ended in a literal "(unknown)"
            # placeholder, so os.path.exists was always False and nothing was
            # ever removed; join the actual file name instead.
            filepath = os.path.join(dir_current_release, dir_path, filename)
            if not os.path.exists(filepath):
                continue
            if os.path.isdir(filepath):
                logger.info(f"移除目录 {filepath}")
                shutil.rmtree(filepath, ignore_errors=True)
            else:
                logger.info(f"移除文件 {filepath}")
                os.remove(filepath)

    # Pack the release directory with the bundled bandizip CLI.
    os.chdir(dir_all_release)
    logger.info("开始压缩打包")
    path_bz = os.path.join(dir_src, "bandizip_portable", "bz.exe")
    subprocess.call([path_bz, 'c', '-y', '-r', '-aoa', '-fmt:7z', '-l:9',
                     release_7z_name, release_dir_name])

    # Keep an extra copy under a stable name for the GitHub Action artifact.
    shutil.copyfile(release_7z_name,
                    os.path.join(dir_github_action_artifact, 'djc_helper.7z'))

    os.chdir(old_cwd)


def main():
    """Build the versioned release archive from the current checkout."""
    dir_src = os.path.realpath('.')
    dir_all_release = os.path.realpath("releases")
    release_dir_name = f"DNF蚊子腿小助手_v{now_version}_by风之凌殇"
    release_7z_name = f'{release_dir_name}.7z'
    dir_github_action_artifact = "_github_action_artifact"

    package(dir_src, dir_all_release, release_dir_name, release_7z_name,
            dir_github_action_artifact)


if __name__ == '__main__':
    main()
from django.contrib import admin
from django_markdown.admin import MarkdownModelAdmin

from qaforum.models import (QaAnswer, QaAnswerComment, QaQuestion,
                            QaQuestionComment)

# Answers get the markdown-enabled admin; everything else uses the default
# ModelAdmin.
admin.site.register(QaAnswer, MarkdownModelAdmin)
for qa_model in (QaQuestion, QaAnswerComment, QaQuestionComment):
    admin.site.register(qa_model)
import shutil
import pandas as pd
from tkinter import *
from tkinter import filedialog, ttk
from os import walk

# Extensions offered by the combobox; "" means "file names carry no suffix".
_KNOWN_EXTENSIONS = (".csv", ".mat", ".emp4", "")


def _list_files(directory):
    """Return the file names found at the top level of *directory*.

    Replaces four copy-pasted, per-extension walk() blocks from the original —
    every branch did exactly the same thing.
    """
    for _dirpath, _dirnames, filenames in walk(directory):
        return list(filenames)
    return []


def get_data_from_excel():
    """Ask for an Excel file and load the 'data' column as the file-name list."""
    global ce_que_on_veut_copier
    global path_data
    path_data = filedialog.askopenfilename(
        initialdir="C://Users//p094836//Desktop//",
        title="choose your Excell file",
        filetypes=(("Excel files", "*.xlsx"), ("all files", "*.*")))
    df = pd.read_excel(path_data, sheet_name=0)  # can also index sheet by name or fetch all sheets
    ce_que_on_veut_copier = df["data"].tolist()
    print("Nombre de fichier chargés : {}".format(len(ce_que_on_veut_copier)))
    pathlabel_1.config(text="Nombre de fichiers chargés : {}".format(len(ce_que_on_veut_copier)),
                       fg="black", width=45)
    pathlabel_1.place(relx=.10, rely=.27)
    return ce_que_on_veut_copier


def choose_extension(*args):
    """Trace callback: record the extension currently selected in the combobox."""
    global choosed_extension
    choosed_extension = variable_extension.get()
    print("variable changed!, Choosed extension : {}".format(choosed_extension))
    return choosed_extension


def get_chemin_source():
    """Ask for the source directory and cache its top-level file listing."""
    global path_source, les_fichiers_source
    path_source = filedialog.askdirectory()
    print("Source path : {}".format(path_source))
    pathlabel_3.config(text="Chemin Source : {}".format(path_source), fg="red")
    pathlabel_3.place(relx=.05, rely=.62)
    if variable_extension.get() in _KNOWN_EXTENSIONS:
        les_fichiers_source = _list_files(path_source)
    else:
        print("No extention given")


def get_chemin_destination():
    """Ask for the destination directory and cache its top-level file listing."""
    global path_destination, les_fichiers_destination
    path_destination = filedialog.askdirectory()
    print("Destination path : {}".format(path_destination))
    pathlabel_4.config(text="Chemin Destination : {}".format(path_destination), fg="green")
    pathlabel_4.place(relx=.05, rely=.66)
    if variable_extension.get() in _KNOWN_EXTENSIONS:
        # BUGFIX: the original walked path_source here and, in the ".emp4"
        # branch, stored the result into les_fichiers_source — list the
        # destination directory into les_fichiers_destination instead.
        les_fichiers_destination = _list_files(path_destination)
    else:
        print("No extention given")
    return path_destination


def get_text():
    """Record the suffix typed in the entry box and the selected extension."""
    global texte_a_rajouter, choosed_extension
    texte_a_rajouter = ent.get()
    choosed_extension = variable_extension.get()
    print("texte_a_rajouter : {}".format(texte_a_rajouter))
    print("variable changed!, Choosed extension : {}".format(choosed_extension))


def couper_fichiers():
    """Move (cut) or copy every loaded file from the source to the destination.

    Reads the module-level state set by the other callbacks: the file-name
    list, the source/destination paths, the suffix and the extension.
    Missing files are skipped with a message. Returns texte_a_rajouter.
    """
    # The case of Cutting
    if variable_couper.get():
        extension = variable_extension.get()
        print(extension)
        print("Le texte à rajouter : {}".format(texte_a_rajouter))
        counto = 0
        for z in list(ce_que_on_veut_copier):
            try:
                shutil.move(path_source + "//" + z + texte_a_rajouter + extension,
                            path_destination + "//" + z + texte_a_rajouter + extension)
                print("{}//{}{}{}".format(path_source, z, texte_a_rajouter, extension))
                # BUGFIX: the original also printed "Fichier non trouvé, on le
                # saute et on continue..." right here, on the SUCCESS path;
                # that message belongs in the except clause only.
                counto += 1
                pathlabel_2.config(text="Avancement du transfert des données : {}".format(str(counto)),
                                   fg="black", width=45)
                pathlabel_2.place(relx=.1, rely=.80)
                pathlabel_2.update()  # push the change to the screen
            except FileNotFoundError:
                print("{}//{}{}".format(path_source, z, extension))
                print("Fichier non trouvé, on le saute et on continue...")
                pathlabel_5.config(text="Fichier(s) non trouvé(s) !", fg="red", width=45)
                pathlabel_5.place(relx=.250, rely=.785)
                pathlabel_5.update()  # push the change to the screen
                continue
            except TypeError:
                print("Type ERROR, on le saute et on continue...")
                continue
    # The case of Copy
    elif variable_copier.get():
        extension = variable_extension.get()
        print(extension)
        print("Le texte à rajouter : {}".format(texte_a_rajouter))
        counto = 0
        for z in list(ce_que_on_veut_copier):
            try:
                shutil.copy(path_source + "//" + z + texte_a_rajouter + extension,
                            path_destination + "//" + z + texte_a_rajouter + extension)
                counto += 1
                pathlabel_2.config(text="Avancement du renommage : " + str(counto),
                                   fg="black", width=45)
                pathlabel_2.place(relx=.12, rely=.80)
                pathlabel_2.update()  # push the change to the screen
            except FileNotFoundError:
                print(path_source + "//" + z + texte_a_rajouter + extension)
                print("Fichier non trouvé, on le saute et on continue...")
                continue
            except TypeError:
                print("Type ERROR, on le saute et on continue...")
                continue
    # No Option has been chosen
    else:
        print("Aucune option (copie ou couper) choisie !")
    return texte_a_rajouter


def compare():
    """Write Ce_qui_reste.xlsx listing the loaded names absent from the source dir."""
    import numpy as np
    from openpyxl import Workbook

    ce_que_on_veut_copier_ext = [i + str(texte_a_rajouter) + str(choosed_extension)
                                 for i in ce_que_on_veut_copier]
    ce_qui = np.setdiff1d(ce_que_on_veut_copier_ext, les_fichiers_source)
    # NOTE(review): assumes a 4-character extension (".csv"/".mat") when
    # stripping the suffix back off — ".emp4" and "" break this; confirm.
    ce_qui_safia = [i[:-4] for i in ce_qui]
    print("Ce qui reste : {}".format(ce_qui_safia))

    wb = Workbook()
    ws1 = wb.active
    ws1['A1'] = 'data'
    for r in range(0, len(ce_qui_safia)):
        ws1.cell(row=r + 2, column=1).value = ce_qui_safia[r]
    wb.save("{}//Ce_qui_reste.xlsx".format(path_source))


# ---------------------------------------------------------------- GUI layout
root = Tk()
root.title('Outil pour Copier/Couper des fichiers - AKKA Technologies V5')
root.geometry("600x435")
root.resizable(width=False, height=False)

# Status labels, placed on demand by the callbacks above.
pathlabel_1 = Label(root)
pathlabel_2 = Label(root)
pathlabel_3 = Label(root)
pathlabel_4 = Label(root)
pathlabel_5 = Label(root)

d = Button(root, text='Charger les données', borderwidth=1, relief=SOLID,
           command=lambda: get_data_from_excel())
d.place(x=130, y=90)
d.config(width=17)

variable_extension = StringVar()
numberChosen = ttk.Combobox(root, width=8, textvariable=variable_extension)
numberChosen['values'] = (".mat", ".csv", ".emp4", "")
numberChosen.place(x=275, y=91.30)
numberChosen.current(0)

ent = Entry(root)
ent.place(x=355, y=92)

fok = Button(root, text='OK', borderwidth=1, relief=SOLID, command=lambda: get_text())
fok.place(x=490, y=82)
fok.config(width=5, height=2)

rt = Label(root, text="Chargez les données, choisissez l'extention, puis cliquez sur OK")
rt.place(x=150, y=45)

rt = Label(root, text="→", font=("Helvetica", 40))
rt.place(x=265, y=195)

e = Button(root, text='Chemin source', bg="lightsalmon", borderwidth=1, relief=SOLID,
           command=lambda: get_chemin_source())
e.place(x=45, y=162)
e.config(width=17, height=3)

f = Button(root, text='Chemin destination', bg="mediumspringgreen", borderwidth=1, relief=SOLID,
           command=lambda: get_chemin_destination())
f.place(x=425, y=162)
f.config(width=17, height=3)

f = Button(root, text='Comparaison', bg="snow", borderwidth=1, relief=SOLID,
           command=lambda: compare())
f.place(x=242, y=170)
f.config(width=15, height=2)

variable_copier = IntVar()
a = Checkbutton(root, text="copier", variable=variable_copier)
a.place(x=220, y=320)

variable_couper = IntVar()
b = Checkbutton(root, text="couper", variable=variable_couper)
b.place(x=310, y=320)

c = Button(root, text="Transférer les données", bg='lightsteelblue', borderwidth=1, relief=SOLID,
           command=lambda: couper_fichiers())
c.place(x=160, y=370)
c.config(height=2, width=37)

root.mainloop()
""" Solution for problem 20 of Project Euler. Find the sum of the digits in the number 100! """ def solve(): """ Serves as the driver for problem 20. """ factorial_value = 1 for i in range(1, 101)[::-1]: factorial_value *= i total = 0 while factorial_value > 1: total += factorial_value % 10 factorial_value //= 10 return total def display_problem(): """ Returns a string representing the problem being solved. """ return "\nFind the sum of the digits in the number 100!\n"
import codecs import copy as cp from collections import Counter import numpy as np import pickle sents2vec, sentences, labels = [], [], [] unique_words = {} idx = 0 words_freqs = {} max_label = 0 max_sentence_length = 0 classes_freqs = {} with codecs.open('train.txt', 'r', encoding='utf-8') as reader: for line in reader: line_tokens = line.strip().split() if len(line_tokens) == 1: continue max_label = max(max_label, int(line_tokens[0])) labels.append(int(line_tokens[0])) sentences.append(line_tokens[1:]) max_sentence_length = max(max_sentence_length, len(line_tokens[1:])) for word in line_tokens[1:]: if word not in unique_words: unique_words[word] = idx idx += 1 if word not in words_freqs: words_freqs[word] = 1 else: words_freqs[word] += 1 if line_tokens[0] not in classes_freqs: classes_freqs[line_tokens[0]] = 1 else: classes_freqs[line_tokens[0]] += 1 print("Number of Classes: " + str(max_label)) print("Classes Frequencies: " + classes_freqs.items().__repr__()) print("Number of Sentences: " + str(len(sentences))) print("Max Sentence Length: " + str(max_sentence_length)) print("Number of Unique Words: " + str(len(unique_words))) print("Most 5 Frequent Words: " + Counter(words_freqs).most_common(20).__repr__()) ## Paper: https://arxiv.org/pdf/1505.01504.pdf classes_one_hot = [0.0] * (max_label + 1) words_one_hot = [0.0] * len(unique_words) for i, label in enumerate(labels): vec = cp.copy(classes_one_hot) vec[label] = 1.0 labels[i] = vec[1:] for item in unique_words.items(): vec = cp.copy(words_one_hot) vec[item[1]] = 1.0 unique_words[item[0]] = vec alpha = 0.1 for sentence in sentences: z = np.asarray(cp.copy(words_one_hot)) for word in sentence: z = alpha * z + unique_words[word] sents2vec.append(z) sents2vec, labels = np.asarray(sents2vec), np.asarray(labels) with open('fofe_sent2vec.pickle', 'wb') as handle: pickle.dump(sents2vec, handle, protocol=pickle.HIGHEST_PROTOCOL)
# -*- coding: utf-8 -*- # @Time: 2020/3/13 15:24 # @Author: Rollbear # @Filename: converter.py from entity.topic import Topic def scanner(p_lines: list): """ Markdown格式解析器 分析标题、子标题、正文之间的关系,构造一个树形结构 :param p_lines: 以行为元素的列表 :return: 解析产生的Topic对象 """ lines = p_lines.copy() root_topic = Topic(lines[0]) # 文件的第一行应当是主标题 lines.remove(lines[0]) cur_topic = root_topic for line in lines: if line.startswith('#') and line.find(' ') != -1: temp = Topic(line) if temp.level > cur_topic.level: # 扫描到子标题,将它添加到子标题表 cur_topic.sub_topic.append(temp) temp.parent_topic = cur_topic cur_topic = temp # 话题指针移动到子标题 elif temp.level == cur_topic.level: # 扫描到同级标题,更新父级标题的子标题表 cur_topic.parent_topic.sub_topic.append(temp) temp.parent_topic = cur_topic.parent_topic cur_topic = temp # 话题指针移动到新标题 else: topic_level = temp.level # 回溯到该标题的同级标题 while cur_topic.level > topic_level: cur_topic = cur_topic.parent_topic # 链入沟通父级标题的子标题表 cur_topic.parent_topic.sub_topic.append(temp) temp.parent_topic = cur_topic.parent_topic cur_topic = temp else: # 这一行的内容加入当前主题的正文部分 cur_topic.text.append(line) # 解析完成,返回一个Topic对象 return root_topic
def validBraces(string): result = [] for i in string: if i in "({[": result.append(i) elif len(result) != 0: if (i == ')') & (result[-1] == '('): result.pop() elif (i == '}') & (result[-1] == '{'): result.pop() elif result[-1] == '[': result.pop() elif i not in "(){}[]": continue else: return False if len(result) == 0: return True else: return False print(validBraces("ttt(test()"))
from pickle import load, dump a = load(open('dickens_texts.pickle')) new_file = open('great_expectations.txt', 'w') print type(a[0]) new_file.write(a[0])
from django.apps import AppConfig class PhotoContentConfig(AppConfig): name = 'photo_content'
import gym from gym import wrappers from gym import spaces import constants as C class OpenAIGym: """ Wrapper class to interact with OpenAIGym Game attributes: env [gym.Environment] - environment of game game_name [string] - name of game being played render [bool] - set to true if game should be visible during play player_name [string] - OpenAI Gym account name. Either "hong" or "natasha". upload_name [int] - name of uploaded file (/tmp/game_name-version_num) """ def __init__(self, game_name, render, player_name, version_num): self.env = gym.make(game_name) self.game_name = game_name self.upload_name = '/tmp/' + game_name + '-' + str(version_num) self.env = wrappers.Monitor(self.env, self.upload_name, force=True) self.render = render # True if we want to see the game during playing self.player = player_name def take_action(self, action): """ Params: action [int] - A number in the range [0, action_space) Returns: observation [object] - environment-specific object. In most cases, screen pixels. i.e. (210, 160, 3) for breakout. reward [float] - scale varies between environments done [bool] - termination of episode info [dict] - diagnostics for debugging. Can't use in official agents. Seems useless in most cases. """ return self.env.step(action) def render_screen(self): if self.render: self.env.render() def reset(self): return self.env.reset() def close(self): self.env.close() def total_moves(self): return self.env.action_space.n def screen_space(self): return self.env.observation_space.n def upload_game(self): self.env.close() gym.upload(self.upload_name, api_key=C.API_KEY[self.player])
import logging from djangocities.cities.models import City from djangocities.iam.jwt import load_user from djangocities.pages.models import Page from djangocities.sites.models import Site def create_default_page(site): page = Page.objects.create(site=site, file_name="index.html") return page def resolve_create_site(_, info, data): user = load_user(info) if not user.is_authenticated: raise Exception("You can't do that!") city = data.get("city", None) description = data.get("description", None) if not city: logging.debug("City is missing") raise Exception("City is missing") if not description: logging.debug("Description is missing") raise Exception("Description is missing") try: city_obj = City.objects.get(id=city) except City.DoesNotExist: logging.debug("City not found") raise Exception("City not found") site = Site.objects.create(city=city_obj, description=description, user=user) create_default_page(site) return site def resolve_update_site(_, info, site_id, data): user = load_user(info) if not user.is_authenticated: raise Exception("You can't do that!") try: site = Site.objects.get(id=site_id, user=user) except Site.DoesNotExist: logging.debug("Site not found") raise Exception("Site not found") city = data.get("city", None) description = data.get("description", None) if city: try: city_obj = City.objects.get(id=city) except City.DoesNotExist: logging.debug("City not found") raise Exception("City not found") site.city = city_obj if description: site.description = description site.save() return site