text stringlengths 38 1.54M |
|---|
import re

# Escape sequences work inside normal string literals: prints "a<TAB>b".
print('a\tb')

# re.match: anchors the pattern at the START of the string only.
p = r'\d+'
text = '35 my lucky number is 35'
match = re.match(p, text)
print(match.group(0), match.start())

# --------------------------------------------------------------------
# re.search: finds the FIRST occurrence anywhere in the string.
p = r'\d+'
text = ' my lucky number is 35 and 35'
match = re.search(p, text)
print(match.group(0), match.start())

# --------------------------------------------------------------------
# re.findall: returns ALL non-overlapping matches as a list of strings.
p = r'\d+'
text = ' my lucky number is 35 and 35'
match = re.findall(p, text)
print(match)

# --------------------------------------------------------------------
# re.finditer: lazily yields a match object for every occurrence.
p = r'\d+'
text = 'my lucky number is 35 and 35'
match = re.finditer(p, text)
for i in match:
    print(i.group(0))

# --------------------------------------------------------------------
# Groups: parentheses capture sub-matches, accessed positionally.
# Bug fix: patterns containing \d must be raw strings; in a plain string
# '\d' is an invalid escape (DeprecationWarning today, an error in
# future Python versions).
p = r'(\d{4})(\d{2})(\d{2})'
text = 'year is 20210331'
ma = re.search(p, text)
print(ma.group(0))
print(ma.groups())

# --------------------------------------------------------------------
# Named groups: (?P<name>...) allows access by name via groupdict().
p = r'(?P<year>\d{4})(?P<month>\d{2})(?P<date>\d{2})'
text = 'year is 20210331'
ma = re.search(p, text)
print(ma.group(0))
print(ma.groupdict())
# pattern to check whether a string contains digits only
def is_integer(text):
    r"""Return True if *text* consists solely of digits.

    Bug fix: the original pattern r'^\d+$' also accepted a trailing
    newline (e.g. '123\n'), because '$' matches just before a final
    newline; r'^\d+\Z' anchors at the true end of the string.
    """
    pat = r'^\d+\Z'
    return bool(re.match(pat, text))
print(is_integer("1234"))
# --------------------------------------------------------------------
# Same check implemented with re.search instead of re.match.
def is_integer(text):
    r"""Return True if *text* consists solely of digits.

    \A and \Z anchor the match to the whole string; unlike '$', \Z does
    not accept a trailing newline (bug fix relative to the original
    r'^\d+$' pattern).
    """
    pat = r'\A\d+\Z'
    return bool(re.search(pat, text))
print(is_integer("1234"))
# --------------------------------------------------------------------
# Find-and-replace: re.sub with named-group backreferences.
# Bug fix: pattern made a raw string (plain-string '\d' is an invalid escape).
p = r'(?P<year>\d{4})(?P<month>\d{2})(?P<date>\d{2})'
text = 'start is 20210331 end in 20210401'
replacep = r'\g<month>-\g<date>-\g<year>'
print(text)
new_t = re.sub(p, replacep, text)
print(new_t)

# --------------------------------------------------------------------
# Split on a delimiter pattern.
p = r','
text = '1,2,3,4,5'
print(re.split(p, text))

# --------------------------------------------------------------------
# Single-char patterns.
print(re.findall('(?i)A', 'this is a great test'))
# (?i) makes the pattern case insensitive
# [^...] negates a character class
print(re.findall('[^aeiou]', 'this is a great test'))
print(re.findall('a|e|i|o|u', 'this is a great test'))
print(re.findall('[s-v]', 'this is a great test'))

# --------------------------------------------------------------------
# Anchors: \b matches a word boundary, so only the whole word 'is' matches.
print(re.findall(r'\bis\b', ' this is a sample'))
# Bug fix: the original line had an unbalanced ')' and a stray bare
# 'multiline' token after the call, which was a SyntaxError.
# (?m) makes ^ match at the start of every line, not just the string start.
print(re.findall(r'(?m)^apple', '''apple is a sample
apple another '''))

# --------------------------------------------------------------------
# Character classes: \w \d \D
# Quantifiers: + {2}
# [a-z][0-9]  - one letter followed by one digit
# [a-z][0-9]* - one letter followed by zero or more digits
# +           - one or more
|
def add_tip(total, tip_percent):
    """Return the total amount including tip.

    tip_percent is a fraction (0.15 means a 15% tip).
    """
    tip_amount = total * tip_percent
    return total + tip_amount
def mean(a, b, c):
    """Return the arithmetic mean of three numbers as a float."""
    total = a + b + c
    return total / 3.
def hyp(leg1, leg2):
    """Return the hypotenuse of a right triangle with the given legs."""
    sum_of_squares = leg1 ** 2 + leg2 ** 2
    return sum_of_squares ** 0.5
def perimeter(base, height):
    """Return the perimeter of a rectangle with the given sides."""
    doubled_base = 2 * base
    doubled_height = 2 * height
    return doubled_base + doubled_height
# 1.3.2 Function Test
# Bug fix: the Python-2 `print x` statements were converted to print()
# calls; with a single argument these behave identically under both
# Python 2 and Python 3.
print(add_tip(20, 0.15))
print(add_tip(30, 0.15))
print(hyp(3, 4))
print(mean(3, 4, 7))
print(perimeter(3, 4))
|
import FWCore.ParameterSet.Config as cms
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import Early10TeVX322Y100VtxSmearingParameters,VtxSmearedCommon

# Event-vertex smearing producer: a BetafuncEvtVtxGenerator configured with
# the imported Early-10TeV parameter set plus the common smearing settings.
VtxSmeared = cms.EDProducer("BetafuncEvtVtxGenerator",
    Early10TeVX322Y100VtxSmearingParameters,
    VtxSmearedCommon
)
|
__author__ = 'ArkJzzz (arkjzzz@gmail.com)'
import json
import logging
import textwrap
from telegram import InlineKeyboardButton
from telegram_bot_pagination import InlineKeyboardPaginator
logger = logging.getLogger('keyboards')
# Entry-point keyboard: a single button that opens the shop menu.
start_keyboard = [
    [
        InlineKeyboardButton(
            text='Меню магазина',
            callback_data='HANDLE_MENU',
        )
    ],
]
def get_menu_keyboard(products, current_page, items_per_page=3):
    """Build a paginated inline keyboard for the shop menu.

    products: iterable of dicts with at least 'name' and 'id' keys.
    current_page: 1-based page number to render.
    items_per_page: maximum number of products per page.
    Returns the paginator's markup.
    """
    pages = []
    page = []
    for product in products:
        # Bug fix: the original dropped the product that triggered the page
        # break (it was neither added to the full page nor to the new one);
        # now the full page is flushed first and the product is always kept.
        if len(page) == items_per_page:
            pages.append(page)
            page = []
        page.append(product)
    pages.append(page)
    paginator = InlineKeyboardPaginator(
        len(pages),
        current_page=current_page,
        data_pattern='HANDLE_MENU|PAGE|{page}'
    )
    paginator.add_after(
        InlineKeyboardButton(
            text='🛒 корзина',
            callback_data='HANDLE_CART'
        )
    )
    # One button per product on the requested (1-based) page.
    for product in pages[current_page - 1]:
        paginator.add_before(
            InlineKeyboardButton(
                text=product['name'],
                callback_data=f'HANDLE_DESCRIPTION|{product["id"]}',
            )
        )
    return paginator.markup
def get_product_details_keyboard(product_id):
    """Keyboard for the product page: add-to-cart buttons plus a menu link."""
    quantity_row = [
        InlineKeyboardButton(
            text=f'+{quantity} кг',
            callback_data=f'ADD_TO_CART|{product_id}|{quantity}'
        )
        for quantity in (1, 2, 5)
    ]
    menu_row = [
        InlineKeyboardButton(
            text='В меню',
            callback_data='HANDLE_MENU',
        )
    ]
    return [quantity_row, menu_row]
def get_cart_show_keyboard(cart_items):
    """Keyboard for the cart view.

    One remove-button row per cart item, followed by a row with
    continue-shopping and checkout buttons.

    cart_items: cart payload whose 'data' list holds items with at least
    'name' and 'id' keys.
    """
    cart_show_keyboard = []
    for item in cart_items['data']:
        # (removed the unused 'product_id' local present in the original)
        cart_show_keyboard.append(
            [
                InlineKeyboardButton(
                    text=f'Удалить из корзины {item["name"]}',
                    callback_data=f'HANDLE_REMOVE_ITEM|{item["id"]}',
                )
            ],
        )
    cart_show_keyboard.append(
        [
            InlineKeyboardButton(
                text='Продолжить покупки',
                callback_data='HANDLE_MENU',
            ),
            InlineKeyboardButton(
                text='Оформить заказ',
                callback_data='HANDLE_CHECKOUT',
            ),
        ],
    )
    return cart_show_keyboard
def get_confirmation_keyboard(email):
    """Two-row keyboard asking the user to confirm the entered email."""
    confirm_button = InlineKeyboardButton(
        text='Все верно',
        callback_data=f'HANDLE_CREATE_CUSTOMER|{email}',
    )
    retry_button = InlineKeyboardButton(
        text='Ввести заново',
        callback_data='HANDLE_CHECKOUT',
    )
    return [[confirm_button], [retry_button]]
def format_product_info(product_data):
    """Render a product card: name, description and per-kg price."""
    details = product_data['data']
    price = details['meta']['display_price']['with_tax']['formatted']
    return (
        f"{details['name']}\n"
        f"{details['description']}\n"
        f"{price} за килограмм\n"
    )
def format_cart(cart_items):
    """Render the cart contents plus the order total as one message."""
    total = cart_items['meta']['display_price']['with_tax']['formatted']
    rendered_items = ''
    for item in cart_items['data']:
        price = item['meta']['display_price']['with_tax']
        rendered_items += (
            f"{item['name']}\n"
            f"{item['description']}\n"
            f"{price['unit']['formatted']} за килограмм\n"
            f"в корзине {item['quantity']}кг\n"
            f"на сумму {price['value']['formatted']}\n"
        )
    return f'{rendered_items}\nСумма заказа: {total}'
if __name__ == '__main__':
    # Library module: warn when someone tries to execute it directly.
    logger.error('Этот скрипт не предназначен для запуска напрямую')
import django_tables2 as tables
from .models import Roadmap
from issuelog.models import Issue
"""
create helper classes for django_tables2 to defined product Roadmap
tables. the Roadmap table shows product release schedule and TopIssuesTable
shows top features or bug fixes requested.
"""
class RoadmapTable(tables.Table):
    """django-tables2 table showing the product release schedule."""

    class Meta:
        model = Roadmap
        # NOTE(review): 'releases_date' looks like a typo for 'release_date';
        # confirm against the Roadmap model's field names.
        fields = ('release_num', 'title', 'releases_date', 'content')
        template_name = 'django_tables2/bootstrap.html'
class TopIssuesTable(tables.Table):
    """django-tables2 table listing top-requested features/bug fixes."""

    class Meta:
        model = Issue
        fields = ('title', 'ht_product', 'rating', 'votes')
        template_name = 'django_tables2/bootstrap.html'
|
from rest_framework.views import APIView
from rest_framework.response import Response
class HelloApiView(APIView):
    """Test API View demonstrating DRF class-based API views."""

    def get(self, request, format=None):
        """Return a greeting plus a list of APIView features."""
        an_apiview = [
            'Uses HTTP methods as get post put delete',
            # Typo fix in the user-visible string: 'similiar' -> 'similar'.
            'Is similar to traditional Django views',
            'gives you more control over logic',
            'is mapped manually to URLs',
        ]
        return Response({
            'message': 'hello',
            'an_apiview': an_apiview,
        })
|
import requests
import json
import sqlite3 as sql
import time
# NOTE(review): API credentials are hard-coded; load them from the
# environment or a config file instead of committing them to source.
app_id = '741da96a'
app_key = '5f83749620ab5b89b6a70e315e6616a9'
language = 'en'
word_id = 'destrier'

url = ('https://od-api.oxforddictionaries.com:443/api/v1/entries/'
       + language + '/' + word_id.lower() + '/definitions')
r = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})

if format(r.status_code) == "200":
    # Renamed from 'dict' in the original, which shadowed the builtin.
    payload = r.json()
    lexical_entries = payload['results'][0]['lexicalEntries']
    for entry in lexical_entries:
        speech_type = entry['lexicalCategory']
        # First sense of the first entry holds the primary definition.
        first_sense = entry['entries'][0]['senses'][0]
        definition = str(first_sense['definitions'])
        # str() of a one-element list leaves bracket/quote artifacts; strip them.
        for artifact in ("['", "']", '["', '"]'):
            definition = definition.replace(artifact, "")
        print(word_id, speech_type, definition)
        con = None
        try:
            with sql.connect("vocab.db") as con:
                cur = con.cursor()
                word_id = word_id.upper()
                # Bug fix: the original built the SQL with %-formatting, which
                # breaks on quotes inside the definition and allows SQL
                # injection; use parameterized placeholders instead.
                cur.execute(
                    "insert into defined values (?, ?, ?, 0, 0)",
                    (speech_type, definition, word_id),
                )
                con.commit()
                msg = word_id + ": Definition successfully updated"
        except Exception as e:
            # Guard: con is None when connect() itself failed (the original
            # would have raised NameError on con.rollback() here).
            if con is not None:
                con.rollback()
            msg = word_id + ": " + str(e)
        print(msg)
else:
    print(word_id + " not found in Online OED (to do:) Moving to Undefined Words List")
    time.sleep(1.5)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 20:56:00 2015
@author: Timber
"""
import json
from supervisedRWfunc import *
from loadData import *
import sys
from heapq import *
from matplotlib import pyplot as plt
from added_edge import *
from dumpGraphJSON import *
print "Reading data..."
# load the sanpshots of 6-30 and 12-31,
# 6-30 is a graph used as a basis of the learning process
# 12-31 provides both training data and test data
# fp = open('repos/1000_repos/snapshot-0630copy.txt', 'r')
# fp_end = open('repos/1000_repos/snapshot-1231copy.txt', 'r')
graphFile_prefix = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/cora/cora_process'
destination_dir = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/cora/'
datasetname = 'cora_process_srw'
# graphFile_prefix = sys.argv[1]
G, feats, id_map, class_map = loadData_cora(graphFile_prefix)
nnodes = G.number_of_nodes()
edges = G.edges()
edge_feature = []
i = 0
# print(feats[edges[i][0]].shape)
# features are formed with intercept term
# luckily we have all the nodes reindex from 0 and consecutive
for i in range(len(edges)):
# edge_feature.append([features[0][i], features[1][i]])
temp_list = []
temp_list = feats[edges[i][0]] + feats[edges[i][1]]
edge_feature.append(temp_list)
#######################################
#### Training set formation ###########
#######################################
print "Forming training set..."
# compute the candidate set for future links according to source node
# train model with a set of source nodes
elig_source = []
# for i in range(len(degrees)):
for i in list(G):
if G.degree(i) > 0 and G.node[i]['train']==True:
# if degrees[i]>0:
elig_source.append(i)
# pick nodes with number of future links larger than theshold
# these nodes then served as either training node or test node
'''
# visualize the graph
options = {
'arrows': True,
'node_color': 'blue',
'node_size': .05,
'line_color': 'black',
'linewidths': 1,
'width': 0.1,
'with_labels': False,
'node_shape': '.',
'node_list': range(G.number_of_nodes())
}
nx.draw_networkx(G, **options)
plt.savefig('/Users/april/Downloads/link_prediction-master/graph/' + datasetname + 'vis.png', dpi=1024)
# print the diameter(maximum distance) of G
k = nx.connected_component_subgraphs(G)
diameter_list = []
for i in k:
# print("Nodes in compoent.",i.nodes())
diameter_list.append(nx.diameter(i))
'''
# print(max(diameter_list))
# Store the case to avoid the tempDset or tempLset contains two many values
# Cap: a source node is usable only if it yields at least this many
# positive (D) and negative (L) candidates.
Dsize_cut = 17
# Dsize_cut = int(round(sum(G.degree().values())/float(len(G))*3.76))
D_source = []
Dset_all = []
Lset_all = []
for i in range(len(elig_source)):
    src = elig_source[i]
    # Nodes within 9 hops of the source (the dict includes the source itself).
    eight_hop_neighbors_dict = nx.single_source_shortest_path_length(G, src, cutoff=9)
    eight_hop_neighbors = set(eight_hop_neighbors_dict.keys())
    eight_hop_neighbors.discard(src)
    # Also drop direct neighbors: existing edges are not link candidates.
    # Robustness fix: discard() instead of remove() so a neighbor missing
    # from the cutoff set cannot raise KeyError.
    for neighbor in G.neighbors(src):
        eight_hop_neighbors.discard(neighbor)
    candidates = eight_hop_neighbors
    tempDset = []
    tempLset = []
    # Bug fix: the original read class_map[str(i)] — the LOOP INDEX — but
    # the label needed is that of the source node itself (elig_source[i]);
    # the commented-out test-section code below confirms this intent.
    class_label = class_map[str(src)]
    for j in candidates:
        # Candidates sharing the source's label are positives (D set),
        # differently-labeled ones negatives (L set); only 'train' nodes
        # have usable labels.
        if G.node[j]['train']==True and class_map[str(j)] == class_label:
            # A further semantically close logic needs to be implemented
            tempDset.append(j)
        elif G.node[j]['train']==True and class_map[str(j)] != class_label:
            tempLset.append(j)
        elif G.node[j]['train']!=True:
            print("further implement semantic closeness determination")
    # Keep only sources with enough positives AND negatives.
    if len(tempDset) >= Dsize_cut and len(tempLset) >= Dsize_cut:
        Dset_all.append(tempDset)
        Lset_all.append(tempLset)
        D_source.append(src)
'''
for i in range(len(elig_source)):
sNeighbor = []
for e in edges:
if e[0] == elig_source[i]:
sNeighbor.append(e[1])
elif e[1] == elig_source[i]:
sNeighbor.append(e[0])
candidates = list(set(list(range(nnodes))) - set([elig_source[i]]) - set(sNeighbor))
sNeighbor_end = []
for e in edges_end:
if e[0] == elig_source[i]:
sNeighbor_end.append(e[1])
elif e[1] == elig_source[i]:
sNeighbor_end.append(e[0])
tempDset = list(set(sNeighbor_end) - set(sNeighbor))
if len(tempDset) >= Dsize_cut:
tempLset = list(set(candidates) - set(tempDset))
Dset_all.append(tempDset)
Lset_all.append(tempLset)
D_source.append(elig_source[i])
'''
# currently out source node should have all labels equal to [1,0]
# randomly pick nodes with current degree > 0 and number of future
# links >= Dsize_cut as the training set
# trainSize = 200
# testSize = 100
# trainSize = len(D_source)
print("length of D_source is %d" % (len(D_source)))
trainSize = min(len(D_source), 1)
testSize = len(elig_source)
# this index is the index of source nodes in D_source list
# index level selection !!!!!!!!
source_index = np.random.choice(list(range(len(D_source))), \
size=trainSize, replace=False)
source = []
Dset = []
Lset = []
for i in source_index:
source.append(D_source[i])
Dset.append(Dset_all[i])
Lset.append(Lset_all[i])
'''
# randomly pick nodes with current degree > 0, number of future links
# >= Dsize_cut and haven't been picked as training nodes to be test nodes
test_index = np.random.choice(list(range(len(D_source))), size=testSize, replace=False)
testSet = []
Dset_test = []
Lset_test = []
candidates_test = []
# original code
D_source_test=[]
Dset_all_test =[]
Lset_all_test=[]
for i in D_source:
eight_hop_neighbors_dict = nx.single_source_shortest_path_length(G, i, cutoff=9)
eight_hop_neighbors= set(eight_hop_neighbors_dict.keys())
eight_hop_neighbors.remove(i)
candidates = eight_hop_neighbors
#check the label of node i
tempDset =[]
tempLset =[]
class_label = class_map[str(i)]
for j in candidates:
#When candidates share the same label with the source node
if class_map[str(j)]==class_label:
#A further semantically close logic needs to be implemented
tempDset.append(j)
elif class_map[str(j)]!=class_label:
tempLset.append(j)
elif class_map[str(j)]==None:
print("further implement semantic closeness determination")
#test the tempDset and tempLset at lease are all non-empty
if len(tempDset)>=Dsize_cut and len(tempLset)>0:
Dset_all_test.append(tempDset)
Lset_all_test.append(tempLset)
D_source_test.append(i)
for i in test_index:
testSet.append(D_source[i])
Dset_test.append(Dset_all_test[i])
Lset_test.append(Lset_all_test[i])
candidates_test.append(Dset_all_test[i] + Lset_all_test[i])
'''
#######################################
#### Model training phase #############
#######################################
print "Training model..."
# set up parameters
lam = 50
offset = 0.01
alpha = 0.3
beta_init = np.ones(len(edge_feature[0])) * 2
# ff = genFeatures(nnodes, edges, edge_feature)
# trans_p = genTrans_plain(nnodes, edges, 0, 0)
# qqp = diffQ(ff, [0, 0.5, 0.5], trans_p, alpha)
# print qqp
beta_Opt = trainModel(Dset, Lset, offset, lam, nnodes, edges, edge_feature,
source, alpha, beta_init)
# train model direclty wtth test set, compare performance with UWRW
# beta_Opt = trainModel(Dset_test, Lset_test, offset, lam, nnodes, edges, edge_feature,
# testSet, alpha, beta_init)
print "Training source set:\n", source
print "\nTrained model parameters:\n", beta_Opt
#######################################
#### Test model performance ###########
#######################################
print "Evaluating model performance..."
# need to get a new set of nnodes, edges, edge_feature
# link prediction with transition matrices computed with trained parameters
ff = genFeatures(nnodes, edges, edge_feature)
# trans_srw = genTrans(nnodes, edges, ff, testSet, alpha, beta_Opt[0])
# trans_srw = genTrans(nnodes, edges, ff, testSet, alpha, [10, 10])
nodes = G.nodes()
trans_srw = genTrans(nnodes, edges, ff, elig_source, alpha, beta_Opt[0])
'''
#Compute all the edges strength and get a threshold that corresponds to the smallest one
edge_prob_queue = []
for edge in edges:
print("the edge is (%d, %d)"%(edge[0],edge[1]))
value =trans_srw[0][edge[0]][edge[1]]
edge_prop_entry = (value, [edge[0],edge[1]])
heappush(edge_prob_queue, edge_prop_entry)
#Get the minimum transition value for the existing edge and use it as the threshold
minimum_edge_prop_entry = heappop(edge_prob_queue)
prop_threshold = minimum_edge_prop_entry[0]
#Scan all the labeled nodes and update their corresponding n-hop neighbors if necessary
#Scan all possible edge pairs and update the edge if above threshold
#Get the all edges set
new_edges =[]
for n in G.node:
for i in range(len(G.node)):
print(i)
if i>n:
#contruct a potential new edge (n,i)
if G.has_edge(n,i)==False:
print("new edge is considered")
'''
# compute personalized PageRank for test nodes to recommend links
pgrank_srw = []       # personalized PageRank vector per source node
cand_pairs_srw = []   # sorted (candidate, score) pairs per source node
link_hits_srw = []
candidates_set = []
added_edges = []
# for i in range(len(testSet)):
num_added_edges = 0
### Here we must target all nodes
# Pass 1: rank candidates for every eligible source, and find the lowest
# PageRank score that any EXISTING neighbor receives; that value is later
# used as the threshold for adding new edges.
lowest_probability_value = 1.0
for i in range(len(elig_source)):
    # Uniform restart distribution over all nodes.
    pp = np.repeat(1.0 / nnodes, nnodes)
    curpgrank = iterPageRank(pp, trans_srw[i])
    # record the pgrank score
    pgrank_srw.append(curpgrank)
    # find the top ranking nodes in candidates set
    cand_pairs = []
    # build the candidate set for each node in elig_source
    source_set = set()
    source_set.add(elig_source[i])
    candidates_set.append([])
    candidates_set[i] = list(set(list(range(nnodes))) - source_set)
    # Score every candidate by its personalized PageRank value.
    for j in candidates_set[i]:
        cand_pairs.append((j, curpgrank[j]))
    cand_pairs = sorted(cand_pairs, key=lambda x: x[1], reverse=True)
    # record candidate-pagerank pairs
    cand_pairs_srw.append(cand_pairs)
    # calculate the lowest probability and its corresponding neighbors
    # NOTE(review): this nested scan is O(deg * ncand); a dict lookup of
    # candidate scores would avoid the inner loop.
    source_neighbors = G.neighbors(elig_source[i])
    for j in range(len(source_neighbors)):
        for k in range(len(cand_pairs)):
            if source_neighbors[j] == cand_pairs[k][0]:
                if lowest_probability_value > cand_pairs[k][1]:
                    lowest_probability_value = cand_pairs[k][1]
print("lowest probability is %.15f" % (lowest_probability_value))

# Pass 2: for each source, propose candidate edges whose score clears the
# threshold and which are not already neighbors.
for i in range(len(elig_source)):
    # According to the probability value, add the potential edges for each node in elig_source
    # Backup plan, add the new edges = # of degrees in the original graph
    added_edge = Added_edge(elig_source[i])
    # Neighbors are always the model learned with relative large weight
    # for k in range(len(G.neighbors(i))):
    # for k in range(min(1,len(G.neighbors(elig_source[i])))):
    '''
    for k in range(len(G.neighbors(elig_source[i]))):
        if cand_pairs[k][0] in G.neighbors(elig_source[i]):
            print("There is no new edges added")
        else:
            added_edge.add_edge([elig_source[i],cand_pairs[k][0]])
            added_edge.add_edge_value(cand_pairs[k][1])
            num_added_edges +=1
    '''
    cand_pairs = cand_pairs_srw[i]
    for cand_pair in cand_pairs:
        # cand_pairs is sorted descending, so once a score drops below the
        # threshold no later candidate can qualify — stop scanning.
        if cand_pair[1] < lowest_probability_value:
            print("No edge can be added for this node")
            break
        elif cand_pair[0] in G.neighbors(elig_source[i]):
            print("There is no new edges added")
        else:
            added_edge.add_edge([elig_source[i], cand_pair[0]])
            added_edge.add_edge_value(cand_pair[1])
            num_added_edges += 1
    added_edges.append(added_edge)
# print("The number of new added edges %d" % num_added_edges)
'''
for k in range(len(G.neighbors(i)),len(cand_pairs)):
if cand_pairs[k][0] in G.neighbors(i):
print("Something unexpected happens, check the sorting order for this node %d"%k)
break
elif class_map[str(cand_pairs[k][0])] == class_map[str(i)] or class_map[str(cand_pairs[k][0])] ==None:
added_edge.add_edge([i, cand_pairs[k][0]])
added_edge.add_value(cand_pairs[k][1])
num_added_edges += 1
'''
'''
# calculate precision of the top-Dsize_cut predicted links
link_hits = 0
for j in range(Dsize_cut):
if cand_pairs[j][0] in Dset_test[i]:
link_hits += 1
link_hits_srw.append(link_hits)
'''
# print("The number of new added edges %d" %num_added_edges)
'''
print "\nSRW performance: ", np.mean(link_hits_srw)
for j in cand_pairs_srw[271]:
if j[0]==0:
print j[1]
'''
G_data = json.load(open(graphFile_prefix + "-G.json"))
origin_G = jg.node_link_graph(G_data)
'''
num_added_edges =0
#count the num of edges
for added_edge in added_edges:
if added_edge.edge_list!=[]:
print("The source node id is")
print(added_edge.source_node_id)
print("The added edge is")
for edge in added_edge.edge_list:
print(edge)
num_added_edges+=1
print(num_added_edges)
'''
# Analyze the real added edges
added_edge_threshold = len(edges)
added_edge_num = 0
for added_edge in added_edges:
if added_edge.edge_list != []:
if added_edge_num >= added_edge_threshold:
break
print("The source node id is")
print(added_edge.source_node_id)
print("The added edge is")
for edge in added_edge.edge_list:
print(edge)
origin_G.add_edge(edge[0], edge[1])
added_edge_num += 1
if added_edge_num >= added_edge_threshold:
break
else:
print("No edge added for this node")
if added_edge_num >= added_edge_threshold:
break
print("The new edge number is")
num_edges = origin_G.number_of_edges()
print(num_edges)
'''
added_edge_dict={}
for added_edge in added_edges:
if added_edge.edge_list!=[]:
print("The source node id is")
print(added_edge.source_node_id)
print("The added edge is")
for i in range(len(added_edge.edge_list)):
edge= added_edge.edge_list[i]
print(edge)
value =added_edge.value_list[i]
added_edge_dict[[edge[0],edge[1]]]=value
'''
print("The added edge number is %d" % (num_edges - len(edges)))
dumpGraphJSON(destination_dir, datasetname, origin_G)
'''
# evaluate and compared the performance of unweighted random walk
print "Evaluating alternative models..."
# generate unweighted transition matrices for testSet nodes
trans_uw = genTrans_plain(nnodes, edges, testSet, alpha)
# compute personalized PageRank for test nodes to recommend links
pgrank_uw = []
cand_pairs_uw = []
link_hits_uw = []
for i in range(len(testSet)):
pp = np.repeat(1.0/nnodes, nnodes)
curpgrank = iterPageRank(pp, trans_uw[i])
# record the pgrank score
pgrank_uw.append(curpgrank)
# find the top ranking nodes in candidates set
cand_pairs = []
for j in candidates_test[i]:
cand_pairs.append((j, curpgrank[j]))
cand_pairs = sorted(cand_pairs, key = lambda x: x[1], reverse=True)
# record candidate-pagerank pairs
cand_pairs_uw.append(cand_pairs)
# calculate precision of the top-Dsize_cut predicted links
link_hits = 0
for j in range(Dsize_cut):
if cand_pairs[j][0] in Dset_test[i]:
link_hits += 1
link_hits_uw.append(link_hits)
print "\nUW performance: ", np.mean(link_hits_uw)
'''
'''
fjson = open('repo_test_logs/git_repo_1000test.json', 'w')
beta_json = []
beta_json.append([beta_Opt[0][0], beta_Opt[0][1]])
beta_json.append(beta_Opt[1])
tempHT = beta_Opt[2]
tempHT['grad'] = [tempHT['grad'][0], tempHT['grad'][1]]
beta_json.append(tempHT)
test_log = json.dumps({'train set': source, 'test set': testSet,
'beta': beta_json, 'SRW hit': np.mean(link_hits_srw), 'UW hit': np.mean(link_hits_uw)})
fjson.write(test_log + '\n')
fjson.close()
'''
|
from congress.etl.members import LegislatorsToDB
import os
from argparse import ArgumentParser
def main(chamber, congress, get_all):
    """Scrape congress-member data into the DB.

    chamber: 'house' or 'senate'; congress: congress number (str or int);
    get_all: when True, scrape every supported congress for both chambers.
    """
    valid_congresses = {
        'house': range(102, 116),
        'senate': range(80, 116),
    }
    legis = LegislatorsToDB()
    if get_all:
        legis.run_pipeline({
            name: [str(num) for num in nums]
            for name, nums in valid_congresses.items()
        })
        return
    assert chamber in ('house', 'senate')
    assert int(congress) in valid_congresses[chamber]
    legis.run_pipeline({chamber: [str(congress)]})
if __name__ == '__main__':
    # CLI entry point: either --all, or a specific --chamber/--congress pair.
    parser = ArgumentParser(
        description='Scrape congress member data from propublica'
    )
    parser.add_argument('--chamber', help='\'house\' or \'senate\'')
    parser.add_argument('--congress', help='Congress number (102-115 for house, 80-115 for senate)')
    parser.add_argument('--all', help='scrape everything', default=False, action='store_true')
    args = parser.parse_args()
    main(args.chamber, args.congress, args.all)
|
import asyncio
import json
import os
import pwd
import shutil
from traitlets import Float, Unicode, Instance, default
from traitlets.config import SingletonConfigurable
from ..base import ClusterManager
from ...utils import TaskPool
__all__ = ("JobQueueClusterManager", "JobQueueStatusTracker")
class JobQueueStatusTracker(SingletonConfigurable):
    """A base class for tracking job status in a jobqueue cluster.

    Subclasses supply ``get_status_cmd_env`` and ``parse_job_states`` for a
    concrete batch system; this base class runs the polling loop and caches
    the observed job states.
    """

    query_period = Float(
        30,
        help="""
        Time (in seconds) between job status checks.
        This should be <= ``min(cluster_status_period, worker_status_period)``.
        """,
        config=True,
    )

    status_command = Unicode(help="The path to the job status command", config=True)

    # forwarded by parent class
    task_pool = Instance(TaskPool, args=())

    def get_status_cmd_env(self, job_ids):
        """Subclass hook: return (cmd, env) used to query the given job ids."""
        raise NotImplementedError

    def parse_job_states(self, stdout):
        """Subclass hook: parse status output into {job_id: finished_state}."""
        raise NotImplementedError

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Initialize the background status task
        # jobs_to_track: ids still being polled; job_states: id -> state
        # (None while running, a finished state once observed).
        self.jobs_to_track = set()
        self.job_states = {}
        self.job_tracker = self.task_pool.create_background_task(
            self.job_state_tracker()
        )

    async def job_state_tracker(self):
        """Background loop: poll the batch system every ``query_period`` s."""
        while True:
            if self.jobs_to_track:
                self.log.debug("Polling status of %d jobs", len(self.jobs_to_track))
                cmd, env = self.get_status_cmd_env(self.jobs_to_track)
                proc = await asyncio.create_subprocess_exec(
                    *cmd,
                    env=env,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                stdout, stderr = await proc.communicate()
                stdout = stdout.decode("utf8", "replace")
                if proc.returncode != 0:
                    stderr = stderr.decode("utf8", "replace")
                    self.log.warning(
                        "Job status check failed with returncode %d, stderr: %s",
                        proc.returncode,
                        stderr,
                    )
                # On failure stdout is still parsed (typically empty), so no
                # state changes are recorded until the next successful poll.
                finished_states = self.parse_job_states(stdout)
                self.job_states.update(finished_states)
                # Finished jobs no longer need polling.
                self.jobs_to_track.difference_update(finished_states)
            await asyncio.sleep(self.query_period)

    def track(self, job_id):
        self.jobs_to_track.add(job_id)
        # Indicate present but not finished. Stopped ids are deleted once their
        # state is retrieved - missing records are always considered stopped.
        self.job_states[job_id] = None

    def untrack(self, job_id):
        # discard/pop tolerate ids that were never tracked.
        self.jobs_to_track.discard(job_id)
        self.job_states.pop(job_id, None)

    def status(self, job_id):
        """Return (running, message) for *job_id*."""
        if job_id is None:
            return False, None
        if job_id in self.job_states:
            state = self.job_states[job_id]
            if state is not None:
                return False, "Job %s completed with state %s" % (job_id, state)
            return True, None
        # Job already deleted from tracker
        return False, None
class JobQueueClusterManager(ClusterManager):
"""A base cluster manager for deploying Dask on a jobqueue cluster."""
worker_setup = Unicode(
"", help="Script to run before dask worker starts.", config=True
)
scheduler_setup = Unicode(
"", help="Script to run before dask scheduler starts.", config=True
)
staging_directory = Unicode(
"{home}/.dask-gateway/",
help="""
The staging directory for storing files before the job starts.
A subdirectory will be created for each new cluster which will store
temporary files for that cluster. On cluster shutdown the subdirectory
will be removed.
This field can be a template, which receives the following fields:
- home (the user's home directory)
- username (the user's name)
""",
config=True,
)
# The following fields are configurable only for just-in-case reasons. The
# defaults should be sufficient for most users.
dask_gateway_jobqueue_launcher = Unicode(
help="The path to the dask-gateway-jobqueue-launcher executable", config=True
)
@default("dask_gateway_jobqueue_launcher")
def _default_launcher_path(self):
return (
shutil.which("dask-gateway-jobqueue-launcher")
or "dask-gateway-jobqueue-launcher"
)
submit_command = Unicode(help="The path to the job submit command", config=True)
cancel_command = Unicode(help="The path to the job cancel command", config=True)
def get_submit_cmd_env_stdin(self, worker_name=None):
raise NotImplementedError
def get_stop_cmd_env(self, job_id):
raise NotImplementedError
def parse_job_id(self, stdout):
raise NotImplementedError
def get_status_tracker(self):
raise NotImplementedError
def track_job(self, job_id):
self.get_status_tracker().track(job_id)
def untrack_job(self, job_id):
self.get_status_tracker().untrack(job_id)
def job_status(self, job_id):
return self.get_status_tracker().status(job_id)
def get_staging_directory(self):
staging_dir = self.staging_directory.format(
home=pwd.getpwnam(self.username).pw_dir, username=self.username
)
return os.path.join(staging_dir, self.cluster_name)
def get_tls_paths(self):
"""Get the absolute paths to the tls cert and key files."""
staging_dir = self.get_staging_directory()
cert_path = os.path.join(staging_dir, "dask.crt")
key_path = os.path.join(staging_dir, "dask.pem")
return cert_path, key_path
async def do_as_user(self, user, action, **kwargs):
cmd = ["sudo", "-nHu", user, self.dask_gateway_jobqueue_launcher]
kwargs["action"] = action
proc = await asyncio.create_subprocess_exec(
*cmd,
env={},
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await proc.communicate(json.dumps(kwargs).encode("utf8"))
stdout = stdout.decode("utf8", "replace")
stderr = stderr.decode("utf8", "replace")
if proc.returncode != 0:
raise Exception(
"Error running `dask-gateway-jobqueue-launcher`\n"
" returncode: %d\n"
" stdout: %s\n"
" stderr: %s" % (proc.returncode, stdout, stderr)
)
result = json.loads(stdout)
if not result["ok"]:
raise Exception(result["error"])
return result["returncode"], result["stdout"], result["stderr"]
async def start_job(self, worker_name=None):
cmd, env, stdin = self.get_submit_cmd_env_stdin(worker_name=worker_name)
if not worker_name:
staging_dir = self.get_staging_directory()
files = {
"dask.pem": self.tls_key.decode("utf8"),
"dask.crt": self.tls_cert.decode("utf8"),
}
else:
staging_dir = files = None
code, stdout, stderr = await self.do_as_user(
user=self.username,
action="start",
cmd=cmd,
env=env,
stdin=stdin,
staging_dir=staging_dir,
files=files,
)
if code != 0:
raise Exception(
(
"Failed to submit job to batch system\n"
" exit_code: %d\n"
" stdout: %s\n"
" stderr: %s"
)
% (code, stdout, stderr)
)
return self.parse_job_id(stdout)
async def stop_job(self, job_id, worker_name=None):
cmd, env = self.get_stop_cmd_env(job_id)
if not worker_name:
staging_dir = self.get_staging_directory()
else:
staging_dir = None
code, stdout, stderr = await self.do_as_user(
user=self.username, action="stop", cmd=cmd, env=env, staging_dir=staging_dir
)
if code != 0 and "Job has finished" not in stderr:
raise Exception("Failed to stop job_id %s" % (job_id, self.cluster_name))
async def cluster_status(self, cluster_state):
return self.job_status(cluster_state.get("job_id"))
async def worker_status(self, worker_name, worker_state, cluster_state):
    """Return the batch status of a single worker's job."""
    job_id = worker_state.get("job_id")
    return self.job_status(job_id)
def on_worker_running(self, worker_name, worker_state, cluster_state):
    """Untrack the worker's job once the worker reports itself running."""
    job_id = worker_state.get("job_id")
    if job_id is not None:
        self.untrack_job(job_id)
async def start_cluster(self):
    """Submit the scheduler job for a new cluster.

    Yields the cluster state (``{"job_id": ...}``) immediately after
    submission and only then begins tracking the job — presumably so the
    id is recorded by the caller before tracking starts (TODO confirm
    against the backend contract).
    """
    job_id = await self.start_job()
    yield {"job_id": job_id}
    self.track_job(job_id)
async def stop_cluster(self, cluster_state):
    """Stop the cluster's scheduler job, if one was ever submitted."""
    job_id = cluster_state.get("job_id")
    if job_id is not None:
        self.untrack_job(job_id)
        await self.stop_job(job_id)
async def start_worker(self, worker_name, cluster_state):
    """Submit a job for a single worker.

    Yields the worker state (``{"job_id": ...}``) right after submission,
    then begins tracking the job (same ordering as ``start_cluster``).
    """
    job_id = await self.start_job(worker_name=worker_name)
    yield {"job_id": job_id}
    self.track_job(job_id)
async def stop_worker(self, worker_name, worker_state, cluster_state):
    """Stop a single worker's job, if one was ever submitted."""
    job_id = worker_state.get("job_id")
    if job_id is not None:
        self.untrack_job(job_id)
        await self.stop_job(job_id, worker_name=worker_name)
|
import streamlit as st
#import openpyxl as pxl
def textFm():
    """Render the login sidebar and the role-specific registration forms."""
    st.subheader("Login Section")
    userName = st.sidebar.text_input("User Name")
    password = st.sidebar.text_input("password", type='password')
    # NOTE(review): a disabled spreadsheet-based credential check
    # (st.form_submit_button + openpyxl sheet lookup) was dead code here
    # and has been removed.  The checkbox below performs NO real
    # authentication — confirm whether credential validation is required.
    if st.sidebar.checkbox('Login'):
        st.success("Logged In as {}".format(userName))
        task = st.selectbox("Role", ["Role", "Waste Giver", "Waste Taker"])
        if task == "Role":
            st.subheader("Add your Role")
        elif task == "Waste Giver":
            st.subheader("Waste Giver")
            Name_ = st.text_input("Enter your Name")
            email_id = st.text_input("Enter your email_id")
            contact_no = st.text_input("Enter your contact no")
            address = st.text_input("Enter your address")
            # Renamed from ``type`` to stop shadowing the builtin.
            waste_type = st.selectbox("Type", ["type", "Municipal solid waste", "Hazardous waste", "Biodegreadable waste", "E-waste", "Household hazardous waste"])
        elif task == "Waste Taker":
            st.subheader("Waste Taker")
            # NOTE(review): "Compony" and "Biodegreadable" are typos in
            # user-facing strings; left unchanged pending UX confirmation.
            compony_Name = st.text_input("Enter your Compony/Industry Name")
            email_id = st.text_input("Enter your email_id")
            contact_no = st.text_input("Enter your contact no")
            address = st.text_input("Enter your address")
            waste_type = st.selectbox("Type of waste you want for recycle", ["type", "Municipal solid waste", "Hazardous waste", "Biodegreadable waste", "E-waste", "Household hazardous waste"])
|
#!/usr/bin/env python
import unittest
import os
from eulcore.binfile import eudora
from testcore import main
TEST_ROOT = os.path.dirname(__file__)
def fixture(fname):
    """Return the path of *fname* inside this test module's fixtures dir."""
    fixtures_dir = os.path.join(TEST_ROOT, 'fixtures')
    return os.path.join(fixtures_dir, fname)
class TestEudora(unittest.TestCase):
    def test_members(self):
        """Parse the 'In.toc' fixture and spot-check the decoded fields."""
        toc = eudora.Toc(fixture('In.toc'))
        self.assertEqual(toc.version, 1)
        self.assertEqual(toc.name, 'In')
        messages = list(toc.messages)
        self.assertEqual(len(messages), 2)
        # note: we don't actually test all of the fields here. it's not
        # clear what a few of them actually are, so we only test the ones we
        # know how to interpret.
        first, second = messages
        self.assertIsInstance(first, eudora.Message)
        self.assertEqual(first.offset, 0)
        self.assertEqual(first.size, 1732)
        self.assertEqual(first.body_offset, 955)
        self.assertEqual(first.to, 'Somebody ')
        self.assertEqual(first.subject, 'Welcome')
        # second message isn't *necessarily* immediately after first, but
        # in this case it is.
        self.assertEqual(second.offset, 1732)
# Allow running this test module directly via the shared testcore runner.
if __name__ == '__main__':
    main()
|
import math
from itertools import combinations
# m   -- number of equations
# arr -- coefficient matrix stored column-major: arr[j] holds the
#        coefficients of variable j across all equations (list of lists)
# e   -- convergence tolerance for Gauss-Seidel
# b   -- right-hand-side value vector
# Example system encoded below:
# 4x+y+2z=2
# 3x-y+5z=0.5
arr=[[4,3],[1,-1],[2,5]]
b=[2,0.5]
m=2
e=0.001
def Solutions(arr, b, e):
    """Enumerate basic solutions: for every choice of m variables out of n,
    run Gauss-Seidel on the corresponding square subsystem."""
    n = len(arr)     # number of variables (columns of the system)
    m = len(arr[0])  # number of equations
    variables = ["x" + str(i) for i in range(n)]
    for A, vs in zip(combinations(arr, m), combinations(variables, m)):
        print("basic variables and their values are", end=" ")
        print(vs, end=" ")
        GaussSiedel(A, b, e)
def GaussSiedel(A, b, e):
    """Solve a square system by Gauss-Seidel iteration and print the result.

    ``A`` is indexed column-major — ``A[j][i]`` is the coefficient of
    variable j in equation i — matching the layout built in Solutions().
    Iterates at most 1000 sweeps, stopping once no component changes by
    more than ``e``; finally reports whether the solution is non-negative.
    """
    # variables with zero coefficients are removed
    n = len(A)
    x = [0 for i in range(n)]   # current iterate
    x0 = [0 for i in range(n)]  # previous sweep's iterate
    for k in range(1000):
        key = 0  # becomes 1 if any component moved more than e this sweep
        for i in range(n):
            val = 0
            for j in range(n):
                if j < i:
                    # components already updated during this sweep
                    val = val + A[j][i] * x[j]
                elif j > i:
                    # components still holding the previous sweep's values
                    val = val + A[j][i] * x0[j]
            if A[i][i] != 0:
                x[i] = (b[i] - val) / A[i][i]
            # convergence check
            if abs(x[i] - x0[i]) > e:
                key = 1
        for i in range(n):
            x0[i] = x[i]
        if key != 1:
            break
    # Non-negativity check (basic *feasible* solutions require x >= 0).
    flag = True
    for num in x:
        if num < 0:
            flag = False
    print(x, end=" ")
    if flag:
        print("which satisfies non-negativity constraint")
    else:
        print("which doesn't satisfy non-negativity constraint")
# Solve the example system defined above.
Solutions(arr, b, e)
# Expected basic solutions for the example:
# [0.357,0.572,0],[0.643,0,-0.286],[0,1.286,0.3571]
print("_____________________________________________________")
# Menu program: read systems interactively and solve each one.
print("Enter the number of test cases")
t = int(input())
for k in range(t):
    print("Enter 2 spaced integers m and n, where m is number of equations, n is number of variables")
    m, n = map(int, input().split())
    # Column-major storage: arr[j][i] = coefficient of variable j in
    # equation i (the layout Solutions/GaussSiedel expect).
    arr = [[0 for i in range(m)] for j in range(n)]
    b = [0 for i in range(m)]
    for i in range(m):
        print("Enter the coefficients of equations " + str(i) + ": a1 a2 ... an b" + str(i))
        l = list(map(float, input().split()))
        for j in range(n):
            arr[j][i] = l[j]
        b[i] = l[n]
    Solutions(arr, b, e)
    print("_____________________________________________________")
|
{
"id": "mgm4440434.3",
"metadata": {
"mgm4440434.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 744131,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 316,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 37736,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/100.preprocess.removed.fna.stats"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 720273,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 315,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 46413,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/150.dereplication.removed.fna.gz"
},
"150.dereplication.removed.fna.stats": {
"compression": null,
"description": null,
"size": 312,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/150.dereplication.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 481,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/205.screen.h_sapiens_asm.info"
},
"205.screen.h_sapiens_asm.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 123,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/205.screen.h_sapiens_asm.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4707,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 720205,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 399,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 315,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 503265,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 119,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 685487,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 316,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 174794,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 313,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 172394,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 313,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 6180,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 47,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 6114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 1333,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 951,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 13104,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.sims.info"
},
"450.rna.source.stats": {
"compression": null,
"description": null,
"size": 106,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/450.rna.source.stats"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 472833,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 119,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 27827,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/550.cluster.aa90.mapping.stats"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 950312,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 715664,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 1140895,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 408580,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 1904029,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 767046,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 18716,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 285410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 784052,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 423333,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2375785,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 723,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 118,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 154,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 110,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 789,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 2157,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 60,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 7559,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 11336,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 3970,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 867,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22667,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 84,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 33556,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4440434.3/file/999.done.species.stats"
}
},
"id": "mgm4440434.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4440434.3"
}
},
"raw": {
"mgm4440434.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4440434.3"
}
}
} |
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from documents.models import Document
class Approval(models.Model):
    """Approval exit criteria for a node.

    The approver is a generic relation, so any model instance
    (user, role, committee, ...) can be the approving party.
    """
    comment = models.TextField()
    # BUG FIX: ``default`` must be one of the *stored* choice values; the
    # original default 'Pending' was the display label and matched none of
    # the lowercase stored values.
    status = models.CharField(max_length=10, default='pending',
                              choices=(('pending', 'Pending'),
                                       ('approved', 'Approved'),
                                       ('rejected', 'Rejected')
                                       ))
    last_modified = models.DateTimeField(blank=True, null=True)
    # Generic foreign key to whatever entity performs the approval.
    content_type = models.ForeignKey(
        ContentType, on_delete=models.CASCADE, related_name='approvals')
    object_id = models.PositiveIntegerField()
    approver = GenericForeignKey('content_type', 'object_id')
class DocumentRequirement(models.Model):
    """Document Required exit criteria for a node."""
    # Short human-readable name of the required document.
    name = models.CharField(max_length=50)
    comment = models.TextField()
    # The uploaded document satisfying this requirement; nullable until the
    # requirement is fulfilled.  DO_NOTHING: deleting the Document does not
    # cascade here — integrity must be handled elsewhere.
    document = models.ForeignKey(
        Document, on_delete=models.DO_NOTHING, blank=True, null=True)
class Process(models.Model):
    """A process is an instance of Flow, and represents a single real-life process.

    It is composed of multiple nodes.
    Flow could be stored in a file, separately, to allow reuse.
    """
    name = models.CharField(max_length=30)
    description = models.TextField(blank=True, null=True)
    created_on = models.DateField(auto_now_add=True)
    # Position within the flow; defaults to the first node.
    # NOTE(review): a DecimalField with decimal_places=0 behaves as a small
    # integer here — an IntegerField may have been intended; confirm before
    # changing (would require a migration).
    current_node = models.DecimalField(
        default=1, max_digits=3, decimal_places=0)
class Node(models.Model):
    """A node is the most basic unit of a process.

    Each node has either (and only one among) an approval, or a document
    upload, as its exit criteria.  The document upload or process approval
    is atomic, and involves only one file, or one committee/user/role
    approval.
    """
    # Node position within its process, stored as a short string.
    name = models.CharField(default='1', max_length=3)
    # Exit criterion, variant 1: a document that must be uploaded.
    document = models.ForeignKey(DocumentRequirement, related_name='document_to_upload', on_delete=models.CASCADE,
                                 blank=True, null=True)
    # TODO: Polymorphic relation for approval/document.
    # Exit criterion, variant 2: an approval that must be granted.
    approval = models.ForeignKey(Approval, related_name='approvals_required', on_delete=models.CASCADE,
                                 blank=True, null=True)
    process = models.ForeignKey(
        Process, related_name='node', on_delete=models.CASCADE)

    class Meta:
        # Nodes sort by their (string) position; a position is unique
        # within a process.
        ordering = ('name',)
        unique_together = ('name', 'process')
|
#! /usr/bin/env python2.5
# Profile count1/count2 with the hotshot profiler and print the 20 hottest
# entries sorted by time, then call count.
# NOTE(review): hotshot is Python 2 only and was removed in Python 3;
# cProfile is the modern replacement if this script is ever ported.
from count import *

print ''
print '*** HOTSHOT output *** '
print ''

import hotshot, hotshot.stats

prof = hotshot.Profile('hotshot.prof')  # profile data is written to this file
prof.runcall(count1)
prof.runcall(count2)
prof.close()

stats = hotshot.stats.load('hotshot.prof')
stats.sort_stats('time', 'calls')
stats.print_stats(20)
|
# An implementation of Speck32/64
# params for Speck32/64: 16-bit words, 22 rounds, rotation amounts (7, 2)
k = 16          # word size in bits
alpha = 7       # right-rotation amount used by the round function
beta = 2        # left-rotation amount used by the round function
MOD = 2**k      # modulus for word-sized modular addition
MASK = MOD - 1  # word-sized bitmask
ROUNDS = 22


def rol(x, y):
    """Rotate the k-bit word x left by y bits (requires 0 < y < k)."""
    # BUG FIX: the original assert message was garbled
    # ("negative negative shifts") and misdescribed the check.
    assert 0 < y < k, "Rotation amount must be between 1 and k-1"
    return ((x << y) & MASK) | (x >> (k - y))


def ror(x, y):
    """Rotate the k-bit word x right by y bits (requires 0 < y < k)."""
    assert 0 < y < k, "Rotation amount must be between 1 and k-1"
    return rol(x, k - y)


def split(x):
    """Split a 2k-bit block into its (high, low) k-bit words."""
    return (x >> k, x & MASK)


def merge(x, y):
    """Join two k-bit words into one 2k-bit block (x high, y low)."""
    return (x << k) | y


def round_function(x, y, key):
    """Apply one Speck encryption round to the word pair (x, y)."""
    ret_x = ((ror(x, alpha) + y) % MOD) ^ key
    ret_y = rol(y, beta) ^ ret_x
    return ret_x, ret_y


def round_function_inverse(x, y, key):
    """Undo one Speck encryption round (inverse of round_function)."""
    ret_y = ror(x ^ y, beta)
    ret_x = rol(((x ^ key) - ret_y) % MOD, alpha)
    return ret_x, ret_y


def encrypt(m, key):
    """Encrypt the 2k-bit block m under the given key."""
    keys = expand_key(key)
    # assert len(keys) == ROUNDS, "Invalid keys specified"
    x, y = split(m)
    for i in range(ROUNDS):
        x, y = round_function(x, y, keys[i])
    return merge(x, y)


def decrypt(c, key):
    """Decrypt the 2k-bit block c under the given key."""
    keys = expand_key(key)
    x, y = split(c)
    # Apply the inverse rounds with the subkeys in reverse order.
    for i in range(ROUNDS - 1, -1, -1):
        x, y = round_function_inverse(x, y, keys[i])
    return merge(x, y)


def expand_key(key):
    """Expand ``key`` into the ROUNDS per-round subkeys (Speck key schedule)."""
    k_words = []
    # Break the key into k-bit words, least-significant word first.
    while key != 0:
        k_words += [key & MASK]
        key >>= k
    m = len(k_words)
    ret = [k_words[0]]
    ell = k_words[1:]
    for i in range(ROUNDS - 1):
        # The schedule reuses the round structure with the counter i as "key".
        ell += [((ret[i] + ror(ell[i], alpha)) % MOD) ^ i]
        ret += [rol(ret[i], beta) ^ ell[i + m - 1]]
    return ret
def main():
    """Check the cipher against the published Speck32/64 test vector."""
    plaintext = 0x6574694c
    ciphertext = 0xa86842f2
    keytext = 0x1918111009080100
    assert encrypt(plaintext, keytext) == ciphertext
    assert decrypt(ciphertext, keytext) == plaintext


# Run the self-test when executed as a script.
if __name__ == "__main__":
    main()
|
# -*- coding: UTF-8 -*-
"""
@author: WanZhiWen
@file: adaboost.py
@time: 2018-04-25 20:59
"""
import numpy as np
import matplotlib.pyplot as plt
def loadDataSet1():
    """Return a tiny hard-coded 2-D dataset and its +1/-1 class labels."""
    samples = [
        [1.0, 2.1],
        [2.0, 1.1],
        [1.3, 1.0],
        [1.0, 1.0],
        [2.0, 1.0],
    ]
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return np.matrix(samples), labels
# Build a single-level decision tree (stump) from the given parameters.
# dimen      -- which dimension (feature) to split on
# threshaVal -- the threshold value for the split
# threshIneq -- direction of the inequality: use <= ('lt') or > ('gt')
def strumpClassify(dataMatrix, dimen, threshaVal, threshIneq):
    """Classify samples with a one-level decision stump.

    Samples whose feature ``dimen`` falls on the side of ``threshaVal``
    selected by ``threshIneq`` ('lt' means <=, anything else means >)
    are labelled -1; every other sample is labelled +1.
    """
    labels = np.ones((dataMatrix.shape[0], 1))
    column = dataMatrix[:, dimen]
    # Boolean-mask assignment flips the selected side to -1 in one pass.
    if threshIneq == 'lt':
        labels[column <= threshaVal] = -1
    else:
        labels[column > threshaVal] = -1
    return labels
# Build the single-level decision tree with the lowest weighted error
# under the sample-weight vector D.
def buildStump(dataArr, classLabel, D):
    """Find the decision stump with minimum weighted error under weights D.

    Args:
        dataArr: (m, n) sample matrix, one row per sample.
        classLabel: sequence of m labels in {+1.0, -1.0}.
        D: (m, 1) column vector of per-sample weights.

    Returns:
        (bestStump, minError, bestClassEst): the chosen stump parameters
        ('dim', 'thresh', 'ineq'), its weighted error, and its (m, 1)
        prediction vector.
    """
    dataMatrix = np.mat(dataArr)
    labelMat = np.mat(classLabel).T
    m, n = dataMatrix.shape
    numSteps = 10  # number of threshold steps scanned per feature
    bestStump = {}
    bestClassEst = np.zeros((m, 1))
    # float('inf') is positive infinity, so any real error beats it.
    minError = float('inf')
    # Scan every feature...
    for i in range(n):
        minValue = min(dataMatrix[:, i])
        maxValue = max(dataMatrix[:, i])
        stepSize = (maxValue - minValue) / numSteps
        # ...every candidate threshold (one step below min to one above max)...
        for j in range(-1, numSteps + 1):
            threshVal = float(minValue + stepSize * j)
            # ...and both inequality directions ('lt' => <=, 'gt' => >).
            for inequal in ['lt', 'gt']:
                predictVals = strumpClassify(dataMatrix, i, threshVal, inequal)
                errorArr = np.ones((m, 1))
                errorArr[predictVals == labelMat] = 0  # 0 where correct
                # Weighted error: misclassified samples weighted by D.
                weightedError = float(np.dot(D.T, errorArr))
                print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.2f"
                      % (i, threshVal, inequal, weightedError))
                if weightedError < minError:
                    minError = weightedError
                    bestClassEst = predictVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClassEst
# Train with AdaBoost, using decision stumps as the weak classifiers.
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    """Train an AdaBoost ensemble of decision stumps.

    Args:
        dataArr: (m, n) sample matrix.
        classLabels: sequence of m labels in {+1.0, -1.0}.
        numIt: maximum number of boosting iterations.

    Returns:
        (weakClassArr, aggClassEst): list of trained stumps (each carrying
        its 'alpha' weight) and the final aggregated margin per sample.
    """
    weakClassArr = []
    m = dataArr.shape[0]
    D = np.ones((m, 1)) / m  # start with uniform sample weights
    aggClassEst = np.zeros((m, 1))
    # BUG FIX: the original body referenced the module-level global
    # ``classLabel`` instead of the ``classLabels`` parameter, so the
    # function only worked by accident when a matching global existed.
    labelColumn = np.mat(classLabels).T
    for i in range(numIt):
        bestStump, minError, bestClassEst = buildStump(dataArr, classLabels, D)
        print('D.T = ', D.T)
        # Classifier weight; max() guards against division by zero.
        alpha = float(0.5 * np.log((1.0 - minError) / max(minError, 1e-16)))
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)
        print('bestClassEst:')
        print(bestClassEst)
        # Reweight samples: increase the weight of misclassified ones.
        expon = -1 * alpha * np.multiply(labelColumn, bestClassEst)
        D = np.multiply(D, np.exp(expon))
        D = D / np.sum(D)
        # Accumulate the weighted ensemble vote.
        aggClassEst += alpha * bestClassEst
        print('aggClassEst:')
        print(aggClassEst)
        # Error rate of the current strong classifier; stop early at zero.
        aggError = np.multiply((np.sign(aggClassEst) != labelColumn), np.ones((m, 1)))
        errorRate = aggError.sum() / m
        print('total error =', errorRate)
        if errorRate == 0.0:
            break
    return weakClassArr, aggClassEst
# Apply the trained ensemble to a test set.
# datToClass    -- the samples to classify
# classifierArr -- the trained (weighted) weak classifiers
def adaClassify(datToClass, classifierArr):
    dataMatrix = np.mat(datToClass)
    numSamples = dataMatrix.shape[0]
    aggClassEst = np.zeros((numSamples, 1))
    for idx, stump in enumerate(classifierArr):
        classEst = strumpClassify(dataMatrix, stump['dim'],
                                  stump['thresh'], stump['ineq'])
        aggClassEst += stump['alpha'] * classEst
        print('第{}次迭代得到的集成结果:'.format(idx + 1), aggClassEst)
    return np.sign(aggClassEst)
# Load a dataset from a tab-separated file; the last column is the label.
def loadDataSet(fileName):
    """Read a TSV file into a feature matrix and a label list.

    Each line holds ``numFeat - 1`` float features followed by one float
    label, separated by tabs.

    Returns:
        (np.matrix of shape (m, numFeat - 1), list of m float labels)
    """
    dataMat = []
    labelMat = []
    # BUG FIX: the original opened the file twice (once just to count
    # columns) and never closed either handle; read it once via a
    # context manager instead.
    with open(fileName) as fh:
        lines = fh.readlines()
    if not lines:
        return np.matrix(dataMat), labelMat
    numFeat = len(lines[0].split('\t'))
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(v) for v in curLine[:numFeat - 1]])
        labelMat.append(float(curLine[-1]))
    return np.matrix(dataMat), labelMat
def plotROC(predStrengths, classLabels):
    """Plot the ROC curve for the given prediction strengths and print AUC.

    Args:
        predStrengths: (1, m) row matrix of classifier margins/scores.
        classLabels: sequence of m true labels in {+1.0, -1.0}.
    """
    # BUG FIX: the original referenced the module-level global
    # ``classLabel`` instead of the ``classLabels`` parameter throughout,
    # so it always plotted against the training labels.
    cur = (1.0, 1.0)  # cursor starts at the top-right corner of ROC space
    ySum = 0.0        # accumulated curve height, used for the AUC
    numPosClas = sum(np.array(classLabels) == 1.0)
    yStep = 1 / numPosClas                       # step per true positive
    xStep = 1 / (len(classLabels) - numPosClas)  # step per false positive
    # Walk samples from the weakest score to the strongest.
    sortedIndicies = predStrengths.argsort()
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sortedIndicies.tolist()[0]:
        # A positive sample moves the true-positive rate...
        if classLabels[index] == 1.0:
            delx = 0
            dely = yStep
        # ...a negative sample moves the false-positive rate.
        else:
            delx = xStep
            dely = 0
            ySum += cur[1]
        ax.plot([cur[0], cur[0] - delx], [cur[1], cur[1] - dely], c='b')
        cur = (cur[0] - delx, cur[1] - dely)
    ax.plot([0, 1], [0, 1], 'b--')  # diagonal chance-level reference line
    ax.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curve for AdaBoost Horse Colic Detection System')
    plt.show()
    print('The Area Under the Curve is:', ySum * xStep)
# Load the horse-colic training data used by the demo below.
dataMat, classLabel = loadDataSet('horseColicTraining2.txt')
# m = dataMat.shape[0]
# D = np.ones((m, 1)) * (1 / m)
# print(buildStump(dataMat, classLabel, D))
# classifierArr = adaBoostTrainDS(dataMat, classLabel, numIt=50)
# print('-------------------------------------------------')
# testArr, testLabelArr = loadDataSet('horseColicTest2.txt')
# prediction = adaClassify(testArr, classifierArr)
# errArr = np.ones((len(testArr), 1))
# # Count misclassified samples via numpy boolean-mask filtering.
# errorCount = np.sum(errArr[prediction != np.mat(testLabelArr).T])
# print('测试集的错误率为:%.2f' % (errorCount / prediction.shape[0]))
print('----------------------------------------------------')
# Train a 10-round ensemble and plot its ROC curve on the training data.
classifierArr, aggClassEst = adaBoostTrainDS(dataMat, classLabel, numIt=10)
plotROC(aggClassEst.T, classLabel)
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST with labels in one-hot format,
# e.g. 0 = 1000000000, 2 = 0010000000, 5 = 0000010000.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Input images: 28x28 px flattened to 784 floats; None = any batch size.
x = tf.placeholder(tf.float32, shape=[None, 784])
# True labels, one-hot over the 10 digit classes.
y_ = tf.placeholder(tf.float32, [None, 10])

# Model parameters (Variables: updated as the model learns).
W = tf.Variable(tf.zeros([784, 10]))  # weights
b = tf.Variable(tf.zeros([10]))       # bias

# BUG FIX: softmax_cross_entropy_with_logits_v2 expects *unnormalized*
# logits and applies softmax internally; the original fed it an already
# softmax-ed tensor, applying softmax twice and degrading training.
logits = tf.matmul(x, W) + b
# Class probabilities, e.g. [0.14, 0.8, 0, ..., 0.06] => most likely a 1.
y = tf.nn.softmax(logits)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
# Adam optimizer to minimise the loss.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Initialise variables and train.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(50000):
    # 100 random (image, one-hot label) pairs per training step.
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Accuracy: fraction of test samples whose argmax prediction matches truth.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
# Fixed "Test Accuract" typo in the printed message.
print("Test Accuracy: {0}%".format(test_accuracy * 100.0))
sess.close()
import socket
import signal
import sys

ClientSocket = socket.socket()
host = '192.168.1.10'
port = 8828  # CHANGE

print('Waiting for connection')
try:
    ClientSocket.connect((host, port))
except socket.error as e:
    # BUG FIX: the original printed the error and carried on, then crashed
    # on recv() against an unconnected socket; bail out cleanly instead.
    print(str(e))
    sys.exit(1)

# The server sends a greeting first; show it.
Response = ClientSocket.recv(1024)
print(Response.decode("utf-8"))

# Simple request/response loop until the user types 'exit'.
while True:
    Input = input('Enter the operation and number: ')
    if Input == 'exit':
        break
    ClientSocket.send(str.encode(Input))
    Response = ClientSocket.recv(1024)
    print(Response.decode("utf-8"))

ClientSocket.close()
|
import numpy as np
from ray.rllib.env import MultiAgentEnv
from social_dilemmas.envs.matrix_agent import MatrixAgent
class CoopMatrixEnv(MultiAgentEnv):
def __init__(self, game, cooperation_level=0.5):
    """Three-agent wrapper around a matrix game.

    Args:
        game: matrix-game specification passed through to each MatrixAgent.
        cooperation_level: threshold compared in step() against the
            weighted fraction of recent rewards above agent-1's reference
            value, selecting which opponent responds.
    """
    self.num_agents = 3
    self.agents = {}
    self.game = game
    self.setup_agents()
    # NOTE(review): last_cooperation is never read in the visible code —
    # confirm whether it is used elsewhere or is dead state.
    self.last_cooperation = cooperation_level
    self.cooperation_level = cooperation_level
    self.timestep = 0
    # Rolling window of the opponent's 10 most recent rewards
    # (newest first); updated at the end of each step().
    self.last_reward = np.ones(10)
@property
def observation_space(self):
    """Observation space shared by all agents (taken from the first one)."""
    first_agent = next(iter(self.agents.values()))
    return first_agent.observation_space
@property
def action_space(self):
agents = list(self.agents.values())
return agents[0].action_space
def setup_agents(self):
"""Construct all the agents for the environment"""
for i in range(self.num_agents):
agent_id = 'agent-' + str(i)
agent = MatrixAgent(agent_id, self.game)
self.agents[agent_id] = agent
def step(self, actions):
self.timestep += 1
played_actions = {}
p = float(actions["agent-0"])
played_actions["agent-0"] = np.random.choice([0,1], p=[1.0-p, p])
above_value = np.where(self.last_reward>self.agents["agent-1"].value(),
1, 0)
weighted_num_above_value = np.average(above_value,
weights=[10,9,8,7,6,5,4,3,2,1])
if weighted_num_above_value < self.cooperation_level:
## be selfish if they are selfish
p = float(actions["agent-1"])
played_actions["agent-1"] = np.random.choice([0,1], p=[1.0-p, p])
observations = {"agent-0": 0, "agent-1": 0, "agent-2": 0}
rewards = {}; dones = {}; info = {}
agent_0, agent_1, agent_2 = self.agents.values()
r_0 = agent_0.compute_reward(played_actions)
r_1 = agent_1.compute_reward(played_actions)
rewards["agent-0"] = r_0
rewards["agent-1"] = r_1 ## agent-1 is selfish (D) opponent
rewards["agent-2"] = 0
else:
## be cooperative if they are cooperative
p = float(actions["agent-2"])
played_actions["agent-1"] = np.random.choice([0,1], p=[1.0-p, p])
observations = {"agent-0": 0, "agent-1": 0, "agent-2": 0}
rewards = {}; dones = {}; info = {}
agent_0, agent_1, agent_2 = self.agents.values()
r_0 = agent_0.compute_reward(played_actions)
r_1 = agent_1.compute_reward(played_actions)
rewards["agent-0"] = r_0
rewards["agent-1"] = 0
rewards["agent-2"] = r_0 + r_1 ## agent-2 is (C) opponent
self.last_reward = np.concatenate(([r_1], self.last_reward[:-1]))
dones["agent-0"] = agent_0.get_done()
dones["agent-1"] = agent_1.get_done()
dones["agent-2"] = agent_2.get_done()
dones["__all__"] = np.any(list(dones.values()))
return observations, rewards, dones, info
def reset(self):
"""Reset the environment.
This method is performed in between rollouts.
Returns
-------
observation: dict of numpy ndarray
the initial observation of the space. The initial reward is assumed
to be zero.
"""
self.agents = {}
self.setup_agents()
self.last_reward = np.ones(10)
observations = {}
for agent in self.agents.values():
observations[agent.agent_id] = 0
return observations
|
from .builder import ReportFileBuilder, BuildResult
from .loader import ReportFileLoader, ReportFileLoaderError
from .reportfile import ReportFile
from .job import ReportJob, JobResult
from .const import *
|
#!/usr/bin/env python
# coding: utf-8
# In[62]:
#!pip install psycopg2
import psycopg2
import pandas as pd
import numpy as np
import random
import string
from datetime import date
def db_conn():
    """Open a new PostgreSQL connection to the local group18 database.

    NOTE(review): credentials are hard-coded and every call opens a fresh
    connection that callers never close -- consider reusing a single
    connection and loading credentials from configuration.
    """
    return psycopg2.connect("host=127.0.0.1 dbname=group18 user=postgres password=group18.ca")
# Parameters
num_of_records = 1500
# FIX: reuse one connection for all lookup queries instead of opening a new
# (never-closed) connection per query.
conn = db_conn()
user_ids = pd.read_sql_query('SELECT user_id FROM users WHERE user_type=\'a\'', conn)['user_id'].values
statuses = pd.read_sql_query('SELECT * FROM listing_status', conn)['value'].values
min_price = 50000
max_price = 3000000
headlines = pd.read_csv('headlines.txt', sep="\n", header=None)[0].values
descriptions = pd.read_csv('randomtext_descriptions.txt', sep="\n\n", header=None)[0].values
# Canadian postal codes: the first letter never uses W or Z.
postal_code_first = ['A', 'B', 'C', 'E', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'V', 'X', 'Y']
postal_code = ['A', 'B', 'C', 'E', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'V', 'X', 'Y', 'W', 'Z']
image = 0
cities = pd.read_sql_query('SELECT * FROM city', conn)['value'].values
property_options = pd.read_sql_query('SELECT * FROM property_options', conn)['value'].values
bedrooms = pd.read_sql_query('SELECT * FROM bedrooms', conn)['value'].values
bathrooms = pd.read_sql_query('SELECT * FROM bathrooms', conn)['value'].values
building_types = pd.read_sql_query('SELECT * FROM building_type', conn)['value'].values
transaction_types = pd.read_sql_query('SELECT * FROM transaction_type', conn)['value'].values
basement_types = pd.read_sql_query('SELECT * FROM basement_type', conn)['value'].values
parking = pd.read_sql_query('SELECT * FROM parking', conn)['value'].values
housing_style = pd.read_sql_query('SELECT * FROM housing_style', conn)['value'].values
flooring = pd.read_sql_query('SELECT * FROM flooring', conn)['value'].values
conn.close()
# Listings Dataframe: one row of random lookup values per synthetic listing.
df_listings = pd.DataFrame(columns = ['UserId', 'Status', 'Price', 'Headline', 'Description', 'PostalCode', 'Image',
                                      'City', 'PropertyOption', 'Bedroom', 'Bathroom', 'BuildingType', 'TransactionType',
                                      'BasementType', 'Parking', 'HousingStyle', 'Flooring'])
for i in range(num_of_records):
    df_listings.at[i, 'UserId'] = random.choice(user_ids)
    df_listings.at[i, 'Status'] = random.choice(statuses)
    df_listings.at[i, 'Price'] = random.randint(min_price, max_price)
    df_listings.at[i, 'Headline'] = random.choice(headlines)
    df_listings.at[i, 'Description'] = random.choice(descriptions)
    df_listings.at[i, 'PostalCode'] = random.choice(postal_code_first) + str(random.randint(0, 9)) + random.choice(postal_code) + str(random.randint(0, 9)) + random.choice(postal_code) + str(random.randint(0, 9))
    df_listings.at[i, 'Image'] = image
    df_listings.at[i, 'City'] = random.choice(cities)
    df_listings.at[i, 'PropertyOption'] = random.choice(property_options)
    df_listings.at[i, 'Bedroom'] = random.choice(bedrooms)
    df_listings.at[i, 'Bathroom'] = random.choice(bathrooms)
    df_listings.at[i, 'BuildingType'] = random.choice(building_types)
    df_listings.at[i, 'TransactionType'] = random.choice(transaction_types)
    df_listings.at[i, 'BasementType'] = random.choice(basement_types)
    df_listings.at[i, 'Parking'] = random.choice(parking)
    df_listings.at[i, 'HousingStyle'] = random.choice(housing_style)
    df_listings.at[i, 'Flooring'] = random.choice(flooring)
# Emit INSERT statements.
# BUG FIX: headline/description values may contain single quotes, which broke
# the generated SQL; escape them by doubling ('') per the SQL standard. The
# redundant file.close() inside the `with` block is also removed.
with open('listings.sql', 'a') as file:
    for i in range(num_of_records):
        values = [str(x).replace("'", "''") for x in df_listings.loc[i]]
        file.write("INSERT INTO listings VALUES(DEFAULT, "
                   + ", ".join("'" + v + "'" for v in values)
                   + ");\n")
# In[ ]:
|
#!/bin/python
from functools import reduce
from collections import defaultdict
class TrieNode(object):
    """A single trie node: a character `value`, a child map, and a flag
    marking whether a word terminates at this node."""

    def __init__(self, value=None):
        self.children = {}  # maps a character to its child TrieNode
        self.value = value  # the character stored at this node (None for root)
        self.isTerminalNode = False  # True once a word ends here

    def findChild(self, value):
        """Return the child node holding `value`, or None if absent."""
        # Idiom fix: direct dict.get() instead of `value in self.children.keys()`.
        return self.children.get(value)

    def addChild(self, value):
        """Return the child for `value`, creating it if missing."""
        childNode = self.findChild(value)
        if childNode is None:  # idiom fix: `is None`, not `== None`
            childNode = TrieNode(value)
            self.children[value] = childNode
        return childNode
class TrieLexicon(object):
    """Trie-backed lexicon loaded from a newline-separated vocabulary file."""

    def __init__(self, vocabulary):
        # `vocabulary` is a path to a UTF-8 word list, one word per line.
        self.root = TrieNode()
        self.nwords = 0
        self.setup(vocabulary)

    def setup(self, vocabulary):
        """Load every word from the vocabulary file into the trie."""
        with open(vocabulary, 'r', encoding='utf-8') as f:
            # Idiom fix: iterate the file lazily instead of readlines().
            for word in f:
                self.addWord(word.strip())
                self.nwords += 1

    def addWord(self, word):
        """Insert `word`, marking its final node as terminal."""
        p = self.root
        for letter in word:
            p = p.addChild(letter)
        p.isTerminalNode = True

    def isWord(self, word):
        """Return True iff `word` was inserted exactly."""
        p = self.root
        for letter in word:
            p = p.findChild(letter)
            if p is None:  # idiom fix: `is None`, not `== None`
                return False
        return p.isTerminalNode

    def find_words(self, chars):
        """Return all stored words spellable from `chars` ('_' is a wildcard)."""
        words = []
        self.find_words_recur(self.root, '', chars, words)
        return words

    def find_words_recur(self, node, string, remaining_chars, words):
        # Depth-first walk, consuming one character (or one wildcard) per edge.
        if node.isTerminalNode:
            words.append(string)
        if not node.children or not remaining_chars:
            return
        for char in node.children:
            if char in remaining_chars or '_' in remaining_chars:
                # Consume the literal char when available, else a wildcard.
                index = remaining_chars.index(char) if char in remaining_chars else remaining_chars.index('_')
                self.find_words_recur(node.children[char],
                                      string + char,
                                      remaining_chars[:index] + remaining_chars[index + 1:],
                                      words)
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, NamedTuple, Sequence
import boto3
if TYPE_CHECKING:
from mypy_boto3_ec2.type_defs import DescribeImagesResultTypeDef, FilterTypeDef
from typing_extensions import NotRequired, TypedDict
import aec.util.tags as util_tags
from aec.util.config import Config
class Image(TypedDict):
    """Normalized AMI description returned by `describe`.

    NOTE(review): `TypedDict` / `NotRequired` are imported above only under
    `TYPE_CHECKING`, yet `TypedDict` is used here as a runtime base class --
    confirm a runtime import exists, otherwise this raises NameError on import.
    """

    Name: str | None  # AMI name; may be absent on some images
    ImageId: str
    CreationDate: str  # timestamp string as returned by EC2
    RootDeviceName: str | None
    Size: int | None  # volume size of the first block device mapping, if any
    SnapshotId: NotRequired[str]  # only set when show_snapshot_id=True
class AmiMatcher(NamedTuple):
    """Lookup key for a well-known AMI family: owning account + name glob."""

    owner: str  # AWS account id that publishes the AMI
    match_string: str  # name fragment/glob matched against AMI names
# Well-known publisher account ids.
amazon_base_account_id = "137112412989"
canonical_account_id = "099720109477"
# Keyword -> (owner, name pattern) shortcuts accepted by `fetch`.
ami_keywords = {
    "amazon2": AmiMatcher(amazon_base_account_id, "amzn2-ami-hvm*x86_64-gp2"),
    "ubuntu1604": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64"),
    "ubuntu1804": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64"),
    "ubuntu2004": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64"),
}
def fetch(config: Config, ami: str) -> Image:
    """Resolve *ami* -- either a known keyword (see `ami_keywords`) or a
    literal AMI id -- to its image details.

    Raises RuntimeError when no matching image exists.
    """
    matcher = ami_keywords.get(ami, None)
    if matcher is None:
        # Not a keyword: treat it as an AMI id.
        try:
            return describe(config, ident=ami)[0]
        except IndexError:
            raise RuntimeError(f"Could not find {ami}") from None
    # Keyword: look up the latest image matching the keyword's name pattern.
    try:
        return describe(config, owner=matcher.owner, name_match=matcher.match_string)[0]
    except IndexError:
        raise RuntimeError(
            f"Could not find ami with name matching {matcher.match_string} owned by account {matcher.owner}"
        ) from None
def _describe_images(
    config: Config,
    ident: str | None = None,
    owner: str | None = None,
    name_match: str | None = None,
) -> DescribeImagesResultTypeDef:
    """Call EC2 DescribeImages for an id, an owner, and/or a name fragment."""
    ec2 = boto3.client("ec2", region_name=config.get("region", None))

    # A concrete AMI id short-circuits every other filter.
    if ident and ident.startswith("ami-"):
        return ec2.describe_images(ImageIds=[ident])

    # Resolve the owners filter: explicit argument, then config, then "self".
    if owner:
        owners_filter: list[str] = [owner]
    else:
        configured = config.get("describe_images_owners", None)
        if not configured:
            owners_filter = ["self"]
        elif isinstance(configured, str):
            owners_filter = [configured]
        else:
            owners_filter = configured

    # Fall back to the configured default name match, if any.
    if name_match is None:
        name_match = config.get("describe_images_name_match", None)

    if name_match is None:
        # Without a fragment, match the exact name (when an ident was given).
        filters: list[FilterTypeDef] = [{"Name": "name", "Values": [f"{ident}"]}] if ident else []
        match_desc = f" named {ident}" if ident else ""
    else:
        filters = [{"Name": "name", "Values": [f"*{name_match}*"]}]
        match_desc = f" with name containing {name_match}"

    print(f"Describing images owned by {owners_filter}{match_desc}")
    return ec2.describe_images(Owners=owners_filter, Filters=filters)
def describe(
    config: Config,
    ident: str | None = None,
    owner: str | None = None,
    name_match: str | None = None,
    show_snapshot_id: bool = False,
) -> list[Image]:
    """List AMIs, newest first.

    Args:
        config: aec config (region and describe_images_* defaults).
        ident: AMI id or exact name.
        owner: restrict results to images owned by this account.
        name_match: substring matched against image names.
        show_snapshot_id: also include the first block device's snapshot id.
    """
    response = _describe_images(config, ident, owner, name_match)
    images = []
    for i in response["Images"]:
        image: Image = {
            # Idiom fix: dict.get() replaces conditional indexing.
            "Name": i.get("Name"),
            "ImageId": i["ImageId"],
            "CreationDate": i["CreationDate"],
            "RootDeviceName": i.get("RootDeviceName"),
            "Size": i["BlockDeviceMappings"][0]["Ebs"]["VolumeSize"] if i["BlockDeviceMappings"] else None,
        }
        if show_snapshot_id:
            image["SnapshotId"] = i["BlockDeviceMappings"][0]["Ebs"]["SnapshotId"]
        images.append(image)
    return sorted(images, key=lambda i: i["CreationDate"], reverse=True)
def describe_tags(
    config: Config,
    ident: str | None = None,
    owner: str | None = None,
    name_match: str | None = None,
    keys: Sequence[str] = (),
) -> list[dict[str, Any]]:
    """List AMI images with their tags.

    When `keys` is empty all tags are joined into one "k=v, k=v" string;
    otherwise one "Tag: <key>" column is emitted per requested key.

    FIX: the default for `keys` was a mutable list (`[]`); an immutable
    empty tuple avoids the shared-mutable-default pitfall and is
    backward-compatible for all callers.
    """
    response = _describe_images(config, ident, owner, name_match)
    images = []
    for i in response["Images"]:
        image = {"ImageId": i["ImageId"], "Name": util_tags.get_value(i, "Name")}
        if not keys:
            image["Tags"] = ", ".join(f"{tag['Key']}={tag['Value']}" for tag in i.get("Tags", []))
        else:
            for key in keys:
                image[f"Tag: {key}"] = util_tags.get_value(i, key)
        images.append(image)
    return sorted(images, key=lambda i: str(i["Name"]))
def delete(config: Config, ami: str) -> None:
    """Deregister an AMI and delete its snapshot."""
    client = boto3.client("ec2", region_name=config.get("region", None))
    # Look the snapshot id up first: deregistering makes the AMI unresolvable.
    images = describe(config, ami, show_snapshot_id=True)
    client.deregister_image(ImageId=ami)
    client.delete_snapshot(SnapshotId=images[0]["SnapshotId"])
def share(config: Config, ami: str, account: str) -> None:
    """Share an AMI with another account."""
    ec2_client = boto3.client("ec2", region_name=config.get("region", None))
    # Grant the target account launch permission on the image.
    ec2_client.modify_image_attribute(
        ImageId=ami,
        LaunchPermission={"Add": [{"UserId": account}]},
        OperationType="add",
        UserIds=[account],
        Value="string",
        DryRun=False,
    )
|
# -*- coding: utf-8 -*-
import re
import os
from middleware.text_format import replace_special_chars_in_text
import urllib
import urllib2
import json
__author__ = "fuiste"
# Directory containing this middleware module, and the remote POS-parse
# service endpoint used by pos_tag_text_documents below.
middleware_dir = os.path.dirname(__file__)
parse_url = "http://distill-server.herokuapp.com/parse"
# Old noun phrase extraction method.
def extract_noun_phrases(pos_str):
    """Pull multi-word noun phrases out of a POS parse-tree string.

    Matches `(NP ...)` groups whose constituents are 4+ character words,
    strips the POS markup, and keeps only phrases of more than one word.
    """
    new_noun_phrase_regex = r"\(NP\s(?P<noun_phrase>(\([A-Z\$]+\s\w{4,}\)(\s)?)+)\)"
    pos_regex = r"(((\s)?\([A-Z\$]+)|(\)(\s)?))"
    # Strip the tag markup from each NP match, then keep multi-word phrases.
    candidates = (re.sub(pos_regex, "", m[0]).strip()
                  for m in re.findall(new_noun_phrase_regex, pos_str))
    return [phrase for phrase in candidates if len(phrase.split(" ")) > 1]
def pos_tag_text_documents(text_documents):
    """POS-tag documents via the remote parse service and map each noun
    phrase to the ids of the documents it occurs in.

    Python 2 code (urllib2, print statements, iteritems).
    text_documents: iterable of {"text": ..., "id": ...} dicts.
    Returns a list of {"noun_phrase": str, "ids": [...]} sorted by how many
    documents contain the phrase, most frequent first.
    """
    formatted_text = []#replace_special_chars_in_text(text_documents=text_documents, lowercase=False)
    for doc in text_documents:
        # Crude sentence split on ., ? or ! plus trailing quotes/brackets.
        sentences = re.split(r' *[\.\?!][\'"\)\]]* *', doc["text"])
        formatted_text.append({"sentences": sentences, "id": doc["id"]})
    doc_copy = []
    # maps noun phrases to their documents
    noun_phrase_map = {}
    formatted_cuts = []
    # Batch documents into "cuts" for the remote service.
    # NOTE(review): `len(c) < 1` is never true for a non-empty cut, so in
    # practice every document lands in its own single-element cut -- confirm
    # whether larger batches were intended.
    for t in formatted_text:
        if len(formatted_cuts):
            found = False
            for c in formatted_cuts:
                if len(c) < 1:
                    c.append(t)
                    found = True
            if not found:
                formatted_cuts.append([t])
        else:
            formatted_cuts.append([t])
    noun_phrases = []
    total_c = len(formatted_text)
    cur_c = 0
    # POST each cut to the parser service and accumulate tagged phrases.
    for cut in formatted_cuts:
        request_object = urllib2.Request(parse_url, json.dumps(cut), {"Content-Type": "application/json"})
        response = urllib2.urlopen(request_object)
        html_arr = json.loads(response.read())
        cur_c = cur_c + len(cut)
        print "{0} / {1} reviews tagged".format(cur_c, total_c)
        noun_phrases.extend(html_arr)
    print "Tagging done, mapping phrases to topics"
    if noun_phrases:
        # Group document ids under each extracted multi-word noun phrase.
        for p in noun_phrases:
            phrases = extract_noun_phrases(p["phrase"])
            for ph in phrases:
                if ph not in noun_phrase_map:
                    noun_phrase_map[ph] = set()
                noun_phrase_map[ph].add(p["id"])
    noun_phrase_list =[]
    for noun_phrase, id_set in noun_phrase_map.iteritems():
        noun_phrase_list.append({"noun_phrase": noun_phrase, "ids": list(id_set)})
    # Most-referenced phrases first.
    noun_phrase_list = sorted(noun_phrase_list, key=lambda n: len(n["ids"]), reverse=True)
    print "\nTop 10 noun phrases for this group:"
    for n in noun_phrase_list[:20]:
        print "\t" + n["noun_phrase"] + " - {0}".format(n["ids"])
    return noun_phrase_list
|
#!/usr/bin/env python
# coding: utf-8
# In[26]:
"""
Support vector Machine
2. For a given dataset predict number of bikes getting shared based on different parameters
"""
# In[43]:
import numpy as np
# import matplotlib.plotly as plt
import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
import pandas as pd
#imputer to handle missing data
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# handle categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
#regression librarry
from sklearn.svm import SVR
#o check accuracy
from sklearn.metrics import accuracy_score
# to check accuracy
from sklearn.metrics import *
import pickle
#visualization in 3D
from mpl_toolkits.mplot3d import Axes3D
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
import os, sys
import csv
# In[2]:
# load dataset
# Notebook-style data prep: load the bike-sharing data and reduce it to a
# single feature (temp) and the label (cnt).
dataset_original = pd.read_csv ("bike_sharing.csv")
dataset = dataset_original
dataset.head()
# In[20]:
# IN simple LR we want only 1 feature and 1 lable
dataset = dataset.loc[:,['temp','cnt']]
# dataset.drop(['dteday'], axis = 1, inplace = True)
# In[21]:
print("Dataset has {} rows and {} Columns".format(dataset.shape[0],dataset.shape[1]))
# In[22]:
# check dataset information
dataset.info()
# In[23]:
dataset.describe().T
# In[24]:
# handling missing data if nessesary
"""
if missing values are present
imputer = Imputer(missing_values=0, axis=0)
imputer = imputer.fit(x_data[:, 3:16])
"""
dataset.isnull().sum()
# In[25]:
# check for minimum dataset
dataset.min()
# In[26]:
# # Handle Missing data
# def handle_min_values(dataset):
# # replace min values by mean
# dataset.replace(0, dataset.mean(), inplace=True)
# return dataset
# dataset = handle_min_values(dataset)
# In[27]:
#check dataset replace with mean or not
dataset.min()
# In[40]:
#Check duplicate value
dataset.duplicated().sum()
# In[41]:
# Divide data into features and label
x_data_set = np.array(dataset["temp"])
y_data_set = np.array(pd.DataFrame(dataset.cnt))
# In[44]:
#feature scalling (here data will be converted into float)
def feature_scalling(x_data_set, y_data_set):
    """Standardize features and labels to zero mean / unit variance.

    Returns the scaled x and y (as float column vectors) together with the
    fitted scalers so predictions can be inverse-transformed later.
    """
    scaler_x = StandardScaler()
    scaler_y = StandardScaler()
    scaled_x = scaler_x.fit_transform(x_data_set.reshape(-1, 1))
    scaled_y = scaler_y.fit_transform(y_data_set.reshape(-1, 1))
    return scaled_x, scaled_y, scaler_x, scaler_y
# Fit the scalers on the full dataset; sc_x / sc_y are reused globally later
# to inverse-transform predictions.
x, y, sc_x, sc_y = feature_scalling(x_data_set,y_data_set)
# In[45]:
print("shape of x data",x.shape)
print("shape of y data",y.shape)
# In[46]:
x
# In[28]:
# # seperate fetures and label
# x_data = dataset.iloc[:, :-1].values
# y_data = dataset.iloc[:, 1].values
# In[29]:
# # handle categorical data
# def handle_categorical_data(x_data):
# #encode categorical data
# label_encod = LabelEncoder()
# x_data[:, 1] = label_encod.fit_transform(x_data[:, 1])
# # one hot encoding
# onehotencode = OneHotEncoder(categorical_features= [1])
# x_data = onehotencode.fit_transform(x_data).toarray()
# return x_data
# x_data = handle_categorical_data(x_data)
# In[30]:
# #convert numpy.ndarray to DataFrame
# x_data = pd.DataFrame(x_data)
# x_data.shape
# In[32]:
# create directory to store csv files
# BUG FIX: os.mkdir raised FileExistsError on notebook re-runs;
# makedirs with exist_ok=True is idempotent.
os.makedirs("CSV_files", exist_ok=True)
# In[47]:
def csv_file(x_train_data, y_train_data, file_name):
    """Write features + labels to CSV_files/<file_name> with columns x,y.

    FIX: the file is opened with newline='' (per the csv module docs, this
    avoids spurious blank rows on Windows) and via a plain `with open(...)`
    instead of the original open() followed by `with myFile:`.
    """
    # First pass: dump the raw feature rows.
    with open('CSV_files/' + file_name, 'w', newline='') as myFile:
        writer = csv.writer(myFile)
        writer.writerows(x_train_data)
    # Second pass: re-read the x column, attach the label column, overwrite.
    colnames = ['x']
    df = pd.read_csv('CSV_files/' + file_name, names=colnames, header=None)
    df.insert(1, "y", y_train_data)
    df.to_csv('CSV_files/' + file_name, index=False)
# In[48]:
# split dataset
def splitdata(x, y):
    """Split into train / test / cross-validation sets and persist each as CSV."""
    # Hold out a third of the data as the final test set.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 1/3, random_state=0)
    print("train : ", x_train.shape, y_train.shape, " test : ", x_test.shape, y_test.shape)
    csv_file(x_test, y_test, 'test_data.csv')
    # Carve a 40% cross-validation slice out of the remaining training data.
    x_tr, x_cv, y_tr, y_cv = train_test_split(x_train, y_train, test_size = 0.40, random_state=0)
    print("train : ", x_tr.shape, y_tr.shape, " test : ", x_cv.shape, y_cv.shape)
    csv_file(x_tr, y_tr, 'train_data.csv')
    csv_file(x_cv, y_cv, 'cv_data.csv')
# Run the split: writes test_data.csv, train_data.csv and cv_data.csv.
splitdata(x, y)
# In[49]:
# load dataset
train_dataset = pd.read_csv ("CSV_files/train_data.csv")
print("Dataset has {} rows and {} Columns".format(train_dataset.shape[0],train_dataset.shape[1]))
# In[50]:
train_dataset.head()
# In[55]:
class SupportVectorReg():
    """Train, persist, evaluate and visualise an RBF-kernel SVR model."""

    def create_module(self, x_train, y_train):
        """Fit an SVR (rbf is the default kernel, suited to non-linear data)."""
        regressor = SVR(kernel= 'rbf')
        regressor.fit(x_train, y_train)
        return regressor

    def create_piklefile(self, regression):
        """Dump the trained model to SupportVectorReg.pkl."""
        # FIX: use a context manager and avoid shadowing the builtin `file`.
        with open('SupportVectorReg.pkl', 'wb') as fh:
            pickle.dump(regression, fh)

    def y_prediction(self, x_train, regressor):
        """Predict on the training set; also print the prediction for 6.5."""
        # FIX: the original first computed an inverse-transformed prediction
        # and immediately overwrote it (dead code); that line is removed.
        y_pred_train = regressor.predict(x_train)
        print("y_predict value: ",sc_y.inverse_transform(regressor.predict(sc_x.transform(np.array([[6.5]])))))
        return y_pred_train

    def accuracy(self, y_predict_train, y_train):
        """Return (r2-based acc, mean-abs-based acc, median absolute error).

        NOTE(review): acc_r2 = (1 - r2_score) * 100 inverts the usual
        reading of R^2 -- kept as-is to preserve the notebook's output.
        """
        error = r2_score(y_train, y_predict_train)
        acc_r2 = (1-error)*100
        # Accuracy derived from mean absolute error.
        total_error = mean_absolute_error(y_train, y_predict_train)
        mean_ab = ( 1 - total_error/ len(y_train)) *100
        median_ab_error = median_absolute_error(y_train, y_predict_train)
        return acc_r2,mean_ab,median_ab_error

    def visualization(self, x, y, regressor):
        """Scatter the data and overlay the SVR prediction curve."""
        print("\n visualising using SVR \n ")
        plt.scatter(x, y , color = 'pink')
        plt.plot(x, regressor.predict(x), color = 'red')
        plt.title("Truth or Bulff(SVR)")
        plt.xlabel("Position Level")
        plt.ylabel("Salary")
        plt.show()
def main():
    """Train an SVR on the train split, persist it, and report accuracy."""
    svr = SupportVectorReg()
    # Features = all but the last column; label = second column.
    x_train = train_dataset.iloc[:,:-1].values
    y_train = train_dataset.iloc[:,1].values
    model = svr.create_module(x_train, y_train)
    svr.create_piklefile(model)
    predictions = svr.y_prediction(x_train, model)
    acc_r2, mean_ab, median_ab_error = svr.accuracy(predictions, y_train)
    print("\n Accuracy train by acc_r2", acc_r2)
    print("\n Accuracy train by mean_ab", mean_ab)
    # Visualise the fit on the training data.
    svr.visualization(x_train, y_train, model)
if __name__ == '__main__':
    main()
# In[53]:
# Cross Validation
# load dataset (the held-out cross-validation split written by splitdata)
CV_dataset = pd.read_csv ("CSV_files/cv_data.csv")
print("Dataset has {} rows and {} Columns".format(CV_dataset.shape[0],CV_dataset.shape[1]))
# In[56]:
class Cross_validation():
    """Evaluate the pickled SVR model on the cross-validation split."""

    def y_prediction(self,regression, x_train):
        """Predict for every CV sample; also print the prediction at 6.5."""
        # predicting the test set result
        y_predict = regression.predict(x_train.reshape(-1,1))
        print("y_predict value for 6.5 is ", regression.predict(np.array(6.5).reshape(-1,1)))
        return y_predict
    # # predicting the test set result
    # return regression.predict(x_train)
    def accuracy(self,y_predict_train,y_train):
        """Return (r2-based acc, mean-abs-based acc, median absolute error)."""
        # acc using r2
        error = r2_score(y_train, y_predict_train)
        acc_r2 = (1-error)*100
        # using median_ab_error
        median_ab_error = median_absolute_error(y_train, y_predict_train)
        total_error = mean_absolute_error(y_train, y_predict_train)
        mean_ab = ( 1 - total_error/ len(y_train)) *100
        return acc_r2,mean_ab,median_ab_error
    def visualization(self,x_test,y_test, regressor):
        """Plot the CV data with a smooth SVR prediction curve."""
        # Visualization the Decision Tree result (for higher resolution & smoother curve)
        x_grid = np.arange(min(x_test), max(x_test), 0.01)
        x_grid = x_grid.reshape((len(x_grid),1))
        plt.scatter(x_test,y_test, color = 'pink')
        plt.plot(x_grid, regressor.predict(x_grid), color = 'red')
        plt.title("Truth or Bulff(SVR)")
        plt.xlabel("Position Level")
        plt.ylabel("Salary")
        plt.show()
def main():
    """Load the pickled SVR and report cross-validation accuracy."""
    #class obj created
    obj = Cross_validation()
    # seperate fetures and label
    x_cv = CV_dataset.iloc[:,:-1].values
    y_cv = CV_dataset.iloc[:,1].values
    # print(x_cv.shape,y_cv.shape)
    #cross validation
    # NOTE(review): the pickle handle is never closed; acceptable in a
    # notebook, but `with open(...)` would be safer.
    file1 = open('SupportVectorReg.pkl', 'rb')
    reg1 = pickle.load(file1)
    # y_prediction ( cross validation)
    y_cv_pre = obj.y_prediction(reg1, x_cv)
    print("\n\n y_prediction:",y_cv_pre)
    acc_r2,mean_ab,median_ab_error= obj.accuracy(y_cv_pre,y_cv)
    print("\n Accuracy train by acc_r2", acc_r2)
    print("\n Accuracy train by mean_ab", mean_ab)
    # print("\n Accuracy train by median_ab_error", median_ab_error)
    obj.visualization(x_cv, y_cv, reg1)
if __name__ == '__main__':
    main()
# In[ ]:
# In[ ]:
|
import ipywidgets as widgets
from traitlets import Unicode
class VaultPassword(widgets.Password):
    """Password widget whose value feeds the kernel's vault password."""

    # Synced trait naming the ansible-kernel property this widget supplies.
    ansible_kernel_property = Unicode('vault_password').tag(sync=True)
class SSHPassword(widgets.Password):
    """Password widget whose value feeds the kernel's SSH password."""

    # Synced trait naming the ansible-kernel property this widget supplies.
    ansible_kernel_property = Unicode('ssh_password').tag(sync=True)
class SSHPrivateKey(widgets.Password):
    """Password widget whose value feeds the kernel's SSH private key."""

    # Synced trait naming the ansible-kernel property this widget supplies.
    ansible_kernel_property = Unicode('ssh_private_key').tag(sync=True)
|
from zipfile import BadZipFile
import pandas as pd
import plotly.express as px
from django.conf import settings
from django.core.cache import cache
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework import exceptions, mixins, permissions, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError
from rest_framework.parsers import FileUploadParser
from rest_framework.response import Response
from ..assessment.api import (
METHODS_NO_PUT,
AssessmentLevelPermissions,
AssessmentRootedTagTreeViewSet,
)
from ..assessment.constants import AssessmentViewSetPermissions
from ..assessment.models import Assessment
from ..common.api import OncePerMinuteThrottle, PaginationWithCount
from ..common.helper import FlatExport, re_digits
from ..common.renderers import PandasRenderers
from ..common.serializers import UnusedSerializer
from ..common.views import create_object_log
from . import constants, exports, filterset, models, serializers
class LiteratureAssessmentViewSet(viewsets.GenericViewSet):
    """Assessment-scoped literature endpoints (tags, references, exports)."""

    model = Assessment
    permission_classes = (AssessmentLevelPermissions,)
    action_perms = {}  # per-action permission overrides (set via @action kwargs)
    filterset_class = None
    serializer_class = UnusedSerializer
    lookup_value_regex = re_digits  # pk must be numeric

    def get_queryset(self):
        # All assessments; object-level access is enforced by permissions.
        return self.model.objects.all()
@action(
detail=True,
action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
renderer_classes=PandasRenderers,
)
def tags(self, request, pk):
"""
Show literature tags for entire assessment.
"""
instance = self.get_object()
df = models.ReferenceFilterTag.as_dataframe(instance.id)
export = FlatExport(df=df, filename=f"reference-tags-{self.assessment.id}")
return Response(export)
@action(detail=True, methods=("get", "post"), permission_classes=(permissions.AllowAny,))
def tagtree(self, request, pk, *args, **kwargs):
"""
Get/Update literature tags for an assessment in tree-based structure
"""
assessment = self.get_object()
context = context = {"assessment": assessment}
if self.request.method == "GET":
if not assessment.user_can_view_object(request.user):
raise exceptions.PermissionDenied()
serializer = serializers.ReferenceTreeSerializer(instance={}, context=context)
elif self.request.method == "POST":
if not assessment.user_can_edit_object(request.user):
raise exceptions.PermissionDenied()
serializer = serializers.ReferenceTreeSerializer(data=request.data, context=context)
serializer.is_valid(raise_exception=True)
serializer.update()
create_object_log(
"Updated (tagtree replace)", assessment, assessment.id, self.request.user.id
)
else:
raise ValueError()
return Response(serializer.data)
@action(
detail=True,
action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
pagination_class=PaginationWithCount,
)
def references(self, request, pk):
"""
Get references for an assessment
Args (via GET parameters):
- search_id: gets references within a given search
- tag_id: gets references with a given tagTag object id; if provided, gets references with tag
- all: fetch all references without pagination (default False)
- untagged: include untagged references (default False)
- required_tags: requires references to have at least one of the given tags
- pruned_tags: prunes references with any of the given tags if they no longer belong in the subtree without said tag
"""
assessment = self.get_object()
ref_filters = serializers.FilterReferences.from_drf(
request.query_params, assessment_id=assessment.pk
)
qs = ref_filters.get_queryset()
if "all" in request.query_params:
serializer = serializers.ReferenceSerializer(qs, many=True)
return Response(serializer.data)
else:
page = self.paginate_queryset(qs)
serializer = serializers.ReferenceSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
@action(
detail=True,
action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
renderer_classes=PandasRenderers,
url_path="reference-ids",
)
def reference_ids(self, request, pk):
"""
Get literature reference ids for all assessment references
"""
instance = self.get_object()
qs = instance.references.all()
df = models.Reference.objects.identifiers_dataframe(qs)
export = FlatExport(df=df, filename=f"reference-ids-{self.assessment.id}")
return Response(export)
    @action(
        detail=True,
        methods=("get", "post"),
        url_path="reference-tags",
        permission_classes=(permissions.AllowAny,),
        renderer_classes=PandasRenderers,
    )
    def reference_tags(self, request, pk):
        """
        Apply reference tags for all references in an assessment.

        GET returns the current tag table; POST bulk-applies the submitted
        tags and then returns the updated table.
        """
        assessment = self.get_object()
        # Permissions are checked manually because AllowAny is set above.
        if self.request.method == "GET":
            if not assessment.user_can_view_object(request.user):
                raise exceptions.PermissionDenied()
        if self.request.method == "POST":
            if not assessment.user_can_edit_object(request.user):
                raise exceptions.PermissionDenied()
            serializer = serializers.BulkReferenceTagSerializer(
                data=request.data, context={"assessment": assessment}
            )
            serializer.is_valid(raise_exception=True)
            serializer.bulk_create_tags()
        # Both verbs respond with the full assessment tag table.
        df = models.ReferenceTags.objects.as_dataframe(assessment.id)
        export = FlatExport(df=df, filename=f"reference-tags-{assessment.id}")
        return Response(export)
    @action(
        detail=True,
        action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
        url_path="reference-year-histogram",
    )
    def reference_year_histogram(self, request, pk):
        """Return a plotly histogram (as a dict) of reference publication years."""
        instance = self.get_object()
        # get all the years for a given assessment
        years = list(
            models.Reference.objects.filter(assessment_id=instance.id, year__gt=0).values_list(
                "year", flat=True
            )
        )
        payload = {}
        if len(years) > 0:
            df = pd.DataFrame(years, columns=["Year"])
            # Roughly one bin per year, clamped to the range [4, 30].
            nbins = min(max(df.Year.max() - df.Year.min() + 1, 4), 30)
            try:
                fig = px.histogram(df, x="Year", nbins=nbins)
            except ValueError:
                # in some cases a bad nbins can be provided; just use default bins instead
                # Invalid value of type 'numpy.int64' received for the 'nbinsx' property of histogram
                # [2005, 2013, 1995, 2001, 2017, 1991, 1991, 2009, 2006, 2005]; nbins=27
                fig = px.histogram(df, x="Year")
            fig.update_yaxes(title_text="# References")
            fig.update_xaxes(title_text="Year")
            fig.update_traces(marker=dict(color="#003d7b"))
            fig.update_layout(
                bargap=0.1,
                plot_bgcolor="white",
                autosize=True,
                margin=dict(l=0, r=0, t=30, b=0),
            )
            payload = fig.to_dict()
        return Response(payload)
@action(
detail=True,
url_path="reference-export",
action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
renderer_classes=PandasRenderers,
)
def reference_export(self, request, pk):
"""
Get all references in an assessment.
"""
assessment = self.get_object()
queryset = (
models.Reference.objects.get_qs(assessment)
.prefetch_related("identifiers", "tags")
.order_by("id")
)
fs = filterset.ReferenceExportFilterSet(
data=request.query_params,
queryset=queryset,
request=request,
)
if not fs.is_valid():
raise ValidationError(fs.errors)
tags = models.ReferenceFilterTag.get_all_tags(assessment.id)
Exporter = (
exports.TableBuilderFormat
if request.query_params.get("export_format") == "table-builder"
else exports.ReferenceFlatComplete
)
export = Exporter(
queryset=fs.qs,
filename=f"references-{assessment.name}",
assessment=assessment,
tags=tags,
)
return Response(export.build_export())
    @action(
        detail=True,
        url_path="user-tag-export",
        renderer_classes=PandasRenderers,
        action_perms=AssessmentViewSetPermissions.TEAM_MEMBER_OR_HIGHER,
    )
    def user_tag_export(self, request, pk):
        """
        Get all references in an assessment, including all user tag data.
        """
        assessment = self.get_object()
        tags = models.ReferenceFilterTag.get_all_tags(assessment.id)
        # Per-user tags joined with their reference and the tagging user.
        qs = (
            models.UserReferenceTag.objects.filter(reference__assessment=assessment.id)
            .select_related("reference", "user")
            .prefetch_related("tags", "reference__identifiers")
            .order_by("reference_id", "id")
        )
        exporter = exports.ReferenceFlatComplete(
            qs,
            filename=f"references-user-tags-{assessment.name}",
            assessment=assessment,
            tags=tags,
            user_tags=True,
        )
        return Response(exporter.build_export())
@action(
    detail=True,
    action_perms=AssessmentViewSetPermissions.CAN_VIEW_OBJECT,
    renderer_classes=PandasRenderers,
    url_path="tag-heatmap",
)
def tag_heatmap(self, request, pk):
    """
    Get tags formatted in a long format desirable for heatmaps.

    The computed dataframe is cached per-assessment for one hour, since
    building it requires walking every reference/tag pair.
    """
    instance = self.get_object()
    key = f"assessment-{instance.id}-lit-tag-heatmap"
    df = cache.get(key)
    if df is None:
        df = models.Reference.objects.heatmap_dataframe(instance.id)
        cache.set(key, df, settings.CACHE_1_HR)
    export = FlatExport(df=df, filename=f"df-{instance.id}")
    return Response(export)
@transaction.atomic
@action(
    detail=True,
    throttle_classes=(OncePerMinuteThrottle,),
    methods=("post",),
    url_path="replace-hero",
    action_perms=AssessmentViewSetPermissions.CAN_EDIT_OBJECT,
)
def replace_hero(self, request, pk):
    """Replace old HERO ID with new HERO ID for selected references

    Expects an input of `{replace: [[1,10],[2,20],[3,30]]}`, a list of lists with two items in each
    inner list. Each inner list contains the reference ID and the new HERO ID, respectively.

    Runs inside a transaction and is throttled to once per minute; an
    audit log entry is written on success.
    """
    assessment = self.get_object()
    serializer = serializers.ReferenceReplaceHeroIdSerializer(
        data=request.data, context={"assessment": assessment}
    )
    serializer.is_valid(raise_exception=True)
    serializer.execute()
    create_object_log(
        "Updated (HERO replacements)", assessment, assessment.id, self.request.user.id
    )
    return Response(status=status.HTTP_204_NO_CONTENT)
@transaction.atomic
@action(
    detail=True,
    throttle_classes=(OncePerMinuteThrottle,),
    methods=("post",),
    url_path="update-reference-metadata-from-hero",
    action_perms=AssessmentViewSetPermissions.CAN_EDIT_OBJECT,
)
def update_reference_metadata_from_hero(self, request, pk):
    """
    Query HERO for all references in an assessment that are mapped to HERO, fetch the latest
    metadata from HERO, and then update the reference metadata in HAWC with the data from HERO.

    Throttled to once per minute; writes an audit log entry on success.
    """
    assessment = self.get_object()
    models.Reference.update_hero_metadata(assessment.id)
    create_object_log(
        "Updated (HERO metadata)", assessment, assessment.id, self.request.user.id
    )
    return Response(status=status.HTTP_204_NO_CONTENT)
@action(
    detail=True,
    methods=("post",),
    action_perms=AssessmentViewSetPermissions.CAN_EDIT_OBJECT,
    parser_classes=(FileUploadParser,),
    renderer_classes=PandasRenderers,
    url_path="excel-to-json",
)
def excel_to_json(self, request, pk):
    """Convert an uploaded .xlsx workbook into a tabular export.

    Raises:
        ValidationError: when no file is supplied or it is not `.xlsx`.
        ParseError: when the workbook cannot be parsed.
    """
    self.get_object()  # permissions check
    file_ = request.data.get("file")
    if file_ is None:
        raise ValidationError({"file": "A file is required"})
    elif not file_.name.endswith(".xlsx"):
        raise ValidationError({"file": "File extension must be .xlsx"})
    try:
        # engine required since this is a BytesIO stream
        df = pd.read_excel(file_, engine="openpyxl")
    except (BadZipFile, ValueError) as err:
        # chain the original failure so the root cause appears in tracebacks
        raise ParseError({"file": "Unable to parse excel file"}) from err
    export = FlatExport(df=df, filename=file_.name)
    return Response(export)
class SearchViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Create/retrieve API for literature searches; permissions enforced at the assessment level."""

    model = models.Search
    serializer_class = serializers.SearchSerializer
    permission_classes = (AssessmentLevelPermissions,)
    action_perms = {}
    lookup_value_regex = re_digits

    def get_queryset(self):
        # no per-user filtering here; object permissions are checked downstream
        return self.model.objects.all()
class ReferenceFilterTagViewSet(AssessmentRootedTagTreeViewSet):
    """CRUD for the assessment-rooted reference tag tree."""

    model = models.ReferenceFilterTag
    serializer_class = serializers.ReferenceFilterTagSerializer
class ReferenceViewSet(
    mixins.RetrieveModelMixin,
    mixins.DestroyModelMixin,
    mixins.UpdateModelMixin,
    viewsets.GenericViewSet,
):
    """Retrieve/update/destroy API for individual references, plus tagging actions."""

    http_method_names = METHODS_NO_PUT
    serializer_class = serializers.ReferenceSerializer
    permission_classes = (AssessmentLevelPermissions,)
    action_perms = {}
    queryset = models.Reference.objects.all()

    def get_queryset(self):
        qs = super().get_queryset()
        # tagging actions read the tag trees heavily; prefetch to avoid N+1 queries
        if self.action in ("tag", "resolve_conflict"):
            qs = qs.select_related("assessment__literature_settings").prefetch_related(
                "user_tags__tags", "tags"
            )
        return qs

    @action(
        detail=True, methods=("post",), action_perms=AssessmentViewSetPermissions.CAN_EDIT_OBJECT
    )
    def tag(self, request, pk):
        """Apply the posted tag ID list to this reference for the requesting user.

        Returns {"status": "fail"} (HTTP 200) when the user cannot edit the
        assessment; on success also returns whether tags were auto-resolved.
        """
        response = {"status": "fail"}
        instance = self.get_object()
        assessment = instance.assessment
        if assessment.user_can_edit_object(self.request.user):
            try:
                tags = [int(tag) for tag in self.request.data.get("tags", [])]
                resolved = instance.update_tags(request.user, tags)
            except ValueError:
                return Response({"tags": "Array of tags must be valid primary keys"}, status=400)
            response["status"] = "success"
            response["resolved"] = resolved
        return Response(response)

    @action(
        detail=True, methods=("post",), action_perms=AssessmentViewSetPermissions.CAN_EDIT_OBJECT
    )
    def resolve_conflict(self, request, pk):
        """Resolve a tag conflict by accepting one user's tags for this reference.

        Expects `user_tag_id` in the POST body; 404s when the tag does not
        belong to this reference.
        """
        instance = self.get_object()
        assessment = instance.assessment
        if not assessment.user_can_edit_object(self.request.user):
            raise PermissionDenied()
        user_reference_tag = get_object_or_404(
            models.UserReferenceTag,
            reference_id=instance.id,
            # -1 default guarantees a 404 rather than a crash on a missing param
            id=int(request.POST.get("user_tag_id", -1)),
        )
        instance.resolve_user_tag_conflicts(self.request.user.id, user_reference_tag)
        return Response({"status": "ok"})

    @action(
        detail=False,
        url_path=r"search/type/(?P<db_id>[\d])/id/(?P<id>.*)",
        renderer_classes=PandasRenderers,
        permission_classes=(permissions.IsAdminUser,),
    )
    def id_search(self, request, id: str, db_id: int):
        """Admin-only: export all references matching an external database ID."""
        db_id = int(db_id)
        if db_id not in constants.ReferenceDatabase:
            raise ValidationError({"type": f"Must be in {constants.ReferenceDatabase.choices}"})
        qs = self.get_queryset().filter(identifiers__unique_id=id, identifiers__database=db_id)
        return FlatExport.api_response(
            df=qs.global_df(),
            filename=f"global-reference-data-{id}",
        )
|
# In python 2.7
from __future__ import print_function
import sys
# import the Flask class from the flask module
# from flask.ext.login import LoginManager
from flask import (Flask, render_template, redirect, flash,
url_for, request, session, make_response)
# from flask_login import (login_required, login_user)
import dbTest
from datetime import datetime
from functools import wraps, update_wrapper
from user import User
# create the application object
app = Flask(__name__)
# cap static-file caching at 60s and disable the Flask-Cache backend
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config["CACHE_TYPE"] = "null"
# single module-level database connection shared by all request handlers
db = dbTest.dbConnect()
def nocache(view):
    """Decorator: wrap *view* so its responses always carry no-cache headers.

    Fix: the original both applied ``@wraps(view)`` and returned
    ``update_wrapper(no_cache, view)``, copying the wrapper metadata twice;
    one application is sufficient.
    """
    @wraps(view)
    def no_cache(*args, **kwargs):
        response = make_response(view(*args, **kwargs))
        # werkzeug serializes the datetime into an HTTP date header
        response.headers['Last-Modified'] = datetime.now()
        response.headers['Cache-Control'] = (
            'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
        )
        response.headers['Pragma'] = 'no-cache'
        response.headers['Expires'] = '-1'
        return response
    return no_cache
'''
@app.teardown_request
def teardown_request(exception):
if exception:
print(exception)
g.db.close()
'''
'''
@app.before_request
def before_request():
print(session.keys(), session.values())
print("before request")
print('username' in session, "in session?")
g.db = dbTest.dbConnect()
g.user = None
if "username" in session:
g.user = get_user(session['username'])
'''
'''
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, \
post-check=0, pre-check=0"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "-1"
r.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
r.headers['Cache-Control'] = 'public, max-age=0'
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
return r
'''
# use decorators to link the function to a url
@app.route('/')
def home():
    """Render the landing page."""
    # return "Hello, World!" # return a string
    return render_template('home.html')
@app.route('/index')
# @login_required
@nocache
def index():
    """Render the index page for a logged-in user, otherwise a login link."""
    if 'username' in session:
        username = session['username']
        return render_template('index.html', username=username)
    # Fix: the opening bold tag was mistakenly written as '</b>'
    return "You are not logged in <br><a href = '/login'><b>" + \
        "click here to log in</b></a>"
@app.route('/welcome')
def welcome():
    """Render the welcome page."""
    return render_template('welcome.html')  # render welcome template
# route for handling the login page logic
@app.route('/login', methods=['GET', 'POST'])
def login():
    """GET: show the login form. POST: verify credentials and start a session.

    On successful verification the username is stored in the session and the
    user is redirected to the index page; all failure modes re-render the
    form with an error message.
    """
    error = None
    passHash = None  # result of password verification; None until checked
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        result = dbTest.dbFetchOne(db, username)
        # Check whether the query returned w/ any record
        if ((result is not None) and (result != "")):
            passHash = User.verify_password_hash(result["password"], password)
            print("RESULT IS {}".format(passHash), file=sys.stderr)
            ''' Alternate way of doing the user authentication w/ check_password()
            print("RETURNED RESULT IS {}".format(result), file=sys.stderr)
            customUser = User(result["FirstName"],
                              result["LastName"], username, password)
            print(customUser, file=sys.stderr)
            # True or False
            print("RESULT IS {}".
                  format(customUser.check_password(password)),
                  file=sys.stderr)
            '''
        # If the query returned with empty(0) rows
        if (result is None or result == ""):
            error = "User not found! Please try again."
        # Password hash returned empty or null
        elif ((passHash is None) or (passHash == "")):
            # print("PASSHASH IS ==> {}".format(passHash), file=sys.stderr)
            error = 'Oops... Something went wrong! Please try again.'
        # Password verification unsuccessful
        elif (passHash is False):
            error = 'Invalid Credentials. Please try again.'
        # Password verified successfully
        else:
            session['loggedIn'] = True
            session['username'] = request.form['username']
            return redirect(url_for('index'))
            # return redirect(request.args.get('next') or url_for('index'))
    return render_template('login.html', error=error)
# route for handling the logout page logic
@app.route('/logout')
def logout():
    """Drop the login state from the session and return to the login page."""
    print("SESSION GETTING POPPED", file=sys.stderr)
    # remove the login markers from the session if present
    for session_key in ('username', 'loggedIn'):
        session.pop(session_key, None)
    return redirect(url_for('login'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """GET: show the registration form. POST: persist the new user, then go to login."""
    if request.method == 'GET':
        return render_template('registration.html')
    new_user = User(request.form['firstName'], request.form['lastName'],
                    request.form['email'], request.form['password'])
    dbTest.dbInsert(db, new_user)
    # login_user(new_user)
    flash('User successfully registered')
    return redirect(url_for('login'))
# start the server with the 'run()' method
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key is acceptable only for local dev;
    # load it from the environment before deploying.
    app.secret_key = 'super secret key'
    # login_manager = LoginManager()
    # login_manager.init_app(app)
    # login_manager.login_view = 'login'
    app.run(debug=True)
|
from random import seed
from random import randrange
from csv import reader
from math import sqrt
#load a csv file
# load a csv file
def load_csv(filename):
    """Read *filename* as CSV, skipping blank rows; returns rows of strings."""
    with open(filename, 'r') as handle:
        return [record for record in reader(handle) if record]
# convert string column to float
def str_column_to_float(dataset, column):
    """In place: convert the given *column* of every row to a float."""
    for record in dataset:
        record[column] = float(record[column].strip())
# split a dataset into a train and test set
def train_test_split(dataset, split):
    """Randomly partition *dataset*: roughly `split` fraction goes to train."""
    remaining = list(dataset)
    target = split * len(dataset)
    selected = []
    while len(selected) < target:
        pick = randrange(len(remaining))
        selected.append(remaining.pop(pick))
    return selected, remaining
# calculate root mean squared error (RMSE)
def rmse_metric(actual, predicted):
    """Root mean squared error between two equal-length numeric sequences."""
    total = 0.0
    for truth, guess in zip(actual, predicted):
        total += (guess - truth) ** 2
    return sqrt(total / float(len(actual)))
# evaluate regression algorithm on train/test split
def evaluate_algorithm(dataset, algorithm, split, *args):
    """Split the data, run *algorithm*, and return the RMSE of its predictions."""
    train, test = train_test_split(dataset, split)
    # hide the target column from the model under evaluation
    masked = [row[:-1] + [None] for row in test]
    predicted = algorithm(train, masked, *args)
    actual = [row[-1] for row in test]
    return rmse_metric(actual, predicted)
# calculate the mean value from a list of numbers
def mean(values):
    """Arithmetic mean of *values*."""
    total = sum(values)
    return total / float(len(values))
# calculate variance from a list of values
def variance(values, mean):
    """Sum of squared deviations of *values* from *mean* (not divided by N)."""
    return sum((v - mean) ** 2 for v in values)
# calculate covariance between x and y
def covariance(x, mean_x, y, mean_y):
    """Sum of products of paired deviations (unnormalized covariance)."""
    return sum((a - mean_x) * (b - mean_y) for a, b in zip(x, y))
# calculate coefficients a and b for y = ax+b
def coefficients(dataset):
    """Least-squares slope and intercept for y = a*x + b; returns [b, a]."""
    xs = [row[0] for row in dataset]
    ys = [row[1] for row in dataset]
    mx, my = mean(xs), mean(ys)
    slope = covariance(xs, mx, ys, my) / variance(xs, mx)
    intercept = my - slope * mx
    return [intercept, slope]
# implements the prediction equation to make predictions on a test dataset
def simple_linear_regression(train, test):
    """Fit y = a*x + b on *train* and predict for each row of *test*."""
    b, a = coefficients(train)
    return [b + a * row[0] for row in test]
# SLR on insurance dataset
seed(1)
# load and prepare data
filename = 'insurance.csv'
dataset = load_csv(filename)
# every column arrives as strings from the CSV reader; coerce to floats
for i in range(len(dataset[0])):
    str_column_to_float(dataset, i)
# evaluate algorithm with a 60/40 train/test split
split = 0.6
rmse = evaluate_algorithm(dataset, simple_linear_regression, split)
print('RMSE : %.3f' % (rmse))
|
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
def get_color_map(label_num, cmap_name='terrain', normalzation=False):
    """Return an (label_num, 3) array of RGB colors sampled from a matplotlib colormap.

    NOTE(review): despite its name, `normalzation=True` scales colors from
    [0, 1] UP to [0, 255] — the flag effectively means "8-bit output";
    confirm before renaming.
    """
    cmap = cm.get_cmap(cmap_name, label_num)
    # keep RGB only; drop the alpha channel from each RGBA tuple
    cmap_array = np.array([cmap(i)[:3] for i in range(label_num)])
    return cmap_array * 255.0 if normalzation else cmap_array
def get_color_legend(file_name, label_num, cmap_name='terrain', square_size=50):
    """Write an image of colored squares (one per label, spaced by blanks) to *file_name*."""
    import cv2
    # white canvas: one colored square + one blank square per label
    canvas = (np.ones(shape=(label_num * 2 * square_size, square_size, 3))
              * 255).astype(np.uint8)
    colors = get_color_map(label_num, cmap_name=cmap_name, normalzation=True).astype(np.uint8)
    for i in range(colors.shape[0]):
        start_i = 2 * i * int(square_size)
        end_i = (2 * i + 1) * int(square_size)
        canvas[start_i:end_i, :, :] = np.tile(colors[i], reps=(square_size, square_size, 1))
    # reverse channel order: canvas is RGB, cv2.imwrite expects BGR
    cv2.imwrite(file_name, canvas[:, :, ::-1])
def vis_matplot(points, colors, size=0.01):
    """Show an interactive 3-D scatter plot of *points* (N x 3) colored by *colors*."""
    ax = plt.axes(projection='3d')
    ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=colors, s=size)
    plt.show()
if __name__ == "__main__":
    # demo: write a 6-label legend using the 'jet' colormap
    get_color_legend('label3.jpg', 6, 'jet')
class Solution(object):
    """Maximum subarray sum via divide and conquer (O(n log n))."""

    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        Returns None for an empty/None input (original contract preserved).
        """
        if nums is None or len(nums) == 0:
            return None
        return self.helper(nums, 0, len(nums) - 1)

    def helper(self, nums, left, right):
        """Max subarray sum within nums[left..right]."""
        # base case: a single element (or a crossed range from an empty half)
        if left >= right:
            return nums[left]
        # Bug fix: use floor division; '/' yields a float in Python 3 and
        # crashes when used as a list index.
        mid = left + (right - left) // 2
        lmax = self.helper(nums, left, mid - 1)
        rmax = self.helper(nums, mid + 1, right)
        cmax = max(lmax, rmax)
        # best (possibly empty) suffix ending immediately left of mid
        lsum, clmax = 0, 0
        for i in range(mid - 1, left - 1, -1):
            lsum += nums[i]
            if lsum > clmax:
                clmax = lsum
        # best (possibly empty) prefix starting immediately right of mid
        rsum, crmax = 0, 0
        for i in range(mid + 1, right + 1):
            rsum += nums[i]
            if rsum > crmax:
                crmax = rsum
        return max(cmax, clmax + nums[mid] + crmax)
|
class TrieNode:
    """One trie node: stored character, child map, and end-of-word flag."""

    def __init__(self, val):
        self.val = val         # character at this node (None for the root)
        self.children = {}     # maps char -> TrieNode
        self.end = False       # True when a stored word terminates here


class WordDictionary:
    """Trie-backed dictionary supporting '.' wildcard lookups."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.head = TrieNode(None)

    def addWord(self, word):
        """Insert *word* into the trie, creating nodes as needed."""
        cursor = self.head
        for ch in word:
            if ch not in cursor.children:
                cursor.children[ch] = TrieNode(ch)
            cursor = cursor.children[ch]
        cursor.end = True

    def search(self, word):
        """Return True if *word* is stored; '.' matches any single character."""
        def dfs(suffix, node):
            for i, ch in enumerate(suffix):
                if ch == '.':
                    # wildcard: try every child against the remainder
                    return any(dfs(suffix[i + 1:], child)
                               for child in node.children.values())
                if ch not in node.children:
                    return False
                node = node.children[ch]
            return node.end
        return dfs(word, self.head)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word) |
from data.firestore import DatabaseController
class User(DatabaseController):
    """Application user backed by the 'users' collection of the database controller."""

    def __init__(self, name: str, authenticated: bool = False):
        super().__init__(data="users")
        record = self.get_data(name)
        self.profile = record
        self.username = name
        self.exists = record["exists"]
        self.authenticated = authenticated

    def __getitem__(self, item: str):
        """Convenience access to a field of the stored profile document."""
        return self.profile["data"][item]

    def update(self, **new_data):
        """Persist *new_data* for this user."""
        self.set_data(self.username, new_data)

    def authenticate(self):
        """Mark the user authenticated, but only when the record exists."""
        if self.exists:
            self.authenticated = True

    # --- flask-login protocol ------------------------------------------
    def get_id(self):
        return self.username

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def is_authenticated(self):
        return self.authenticated
|
import geoip2.database
# Demo: look up one IPv4 address in a local GeoLite2 City database
# and print the resolved location fields.
reader = geoip2.database.Reader('./GeoLite2-City_20190423/GeoLite2-City.mmdb')
response = reader.city('47.145.125.28')
print(response.country.iso_code)  # e.g. US
print(response.subdivisions.most_specific.name)  # e.g. Hawaii
print(response.subdivisions.most_specific.iso_code)  # e.g. HI
print(response.city.name)  # e.g. Kailua
print(response.postal.code)  # e.g. 96734
print(response.location.longitude)
print(response.location.latitude)
# the reader holds an open mmap of the database; close it when done
reader.close()
|
import os
from .base_config import BaseConfig
class DevConfig(BaseConfig):
    """Development configuration: debug on, local SQLite database."""

    # Env settings
    FLASK_ENV = 'development'
    FLASK_DEBUG = True
    # Database settings: SQLite file and migration repo live next to the app
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BaseConfig.BASE_DIR, 'devel.db')
    SQLALCHEMY_MIGRATE_REPO = os.path.join(BaseConfig.BASE_DIR, 'devel_migrations')
|
from django.db import models
from django.contrib.auth import get_user_model
# resolve the active user model (honors a custom AUTH_USER_MODEL)
User = get_user_model()
class Message(models.Model):
    """A single chat message."""

    content = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)

    def get_last_10_msg(self):
        '''
        Fetch the 10 most recent messages, newest first.

        Bug fix: ordering ascending by timestamp returned the 10 OLDEST
        messages; order descending to actually get the latest ones.
        '''
        return Message.objects.order_by('-timestamp').all()[:10]

    def __str__(self) -> str:
        return self.content + '&' + str(self.id)
class Chat(models.Model):
    """A chat room identified by name, holding its messages."""

    room_name = models.CharField(max_length=255)
    # many-to-many: a message object may be shared across rooms
    message = models.ManyToManyField(Message)

    def __str__(self) -> str:
        return self.room_name
|
import random
def rollDice():
    """Return one fair six-sided die roll in [1, 6]."""
    return random.randint(1, 6)
def main(n):
    # Simulate two tokens on a circular track of positions 0..n, each moved
    # by its own die; returns the number of rounds until they collide.
    d1=1
    d2=n/2+1
    count=0
    while d1!=d2:
        r1=rollDice()
        r2=rollDice()
        if r1==6:
            # a 6 advances token 1, wrapping n -> 0
            if d1==n:
                d1=0
            else:
                d1+=1
        elif r1==6:
            # NOTE(review): unreachable — this duplicates the condition above;
            # presumably a different roll (e.g. r1==1) was meant to move the
            # token backwards. Confirm intent before fixing.
            if d1==0:
                d1=n
            else:
                d1-=1
        if r2==6:
            # a 6 advances token 2, wrapping n -> 0
            if d2==n:
                d2=0
            else:
                d2+=1
        elif r2==6:
            # NOTE(review): unreachable for the same reason as above.
            if d2==0:
                d2=n
            else:
                d2-=1
        count+=1
    return count
# Run 101 simulations on a 100-position track and report the average
# number of rounds. Python 2 syntax (print statement, integer division).
a=101
total=0
for x in range(a):
    total+=main(100)
print total*1.0/a
|
from typing import Tuple
def get_direction(ball_angle: float) -> int:
    """0 when the ball is within 15 degrees of straight ahead; otherwise -1
    for the first half-circle and 1 for the second."""
    if ball_angle >= 345 or ball_angle <= 15:
        return 0
    return 1 if ball_angle >= 180 else -1
def getDirectionBy4(angle: float) -> int:
    """Quantize *angle* into four sectors: 0 (ahead), 1, 2 (behind), 3.

    The guards below partition the whole real line, so a value is always
    returned.
    """
    if angle >= 345 or angle < 15:
        return 0
    if angle < 165:
        return 1
    if angle < 195:
        return 2
    return 3
def getDistance(position1: Tuple[float, float], position2: Tuple[float, float]) -> float:
    """Euclidean distance between two 2-D points."""
    dx = position1[0] - position2[0]
    dy = position1[1] - position2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def correctAngle(angle: float) -> float:
    """Normalize *angle* to the canonical range [0, 360).

    Bug fix: the previous branchy version (`angle % -360 + 360` for
    negatives) returned 360 instead of 0 for negative multiples of 360.
    Python's `%` with a positive modulus already yields a result in
    [0, 360) for any sign of the operand.
    """
    return angle % 360
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import seaborn as sns
import os
sns.set()
# NOTE(review): hard-coded absolute path ties this script to one machine;
# consider an environment variable or CLI argument.
os.chdir("C:/githubrepo/CapstoneA/")
from analysis.zg_Load_Data import Load_Data
import warnings
warnings.filterwarnings("ignore")

# def square(x):
#	return(float(x) ** 2)

# def norm(input, 13):
#    x, y, z = (input[i] for i in 13)
#    return([square(x[i]) + square(y[i]) + square(z[i]) for i in range(0, len(x))])

# Dataset configuration: leave-one-subject-out split, no windowing.
data_params = {'dataset' : 'firebusters',
               'train_p' : 0.8,
               'w_size' : 0,
               'o_percent' : 0.25,
               'LOSO' : True,
               'clstm_params' : {}
               }

dataset = Load_Data(**data_params)
# NOTE(review): k is set to the number of one-hot classes — presumably
# intentional, but an unusual choice for k; confirm.
num_neighbors = dataset.y_train.shape[1]

# Undoes the one-hot encoding (argmax over columns -> class index)
y_test = pd.DataFrame(pd.DataFrame(dataset.y_test).idxmax(1))
y_train = pd.DataFrame(pd.DataFrame(dataset.y_train).idxmax(1))

knn = KNeighborsClassifier(n_neighbors=num_neighbors, metric='euclidean')
knn.fit(dataset.x_train, y_train)
y_pred = knn.predict(dataset.x_test)

# Report accuracy, confusion matrix, and per-class metrics.
print(accuracy_score(y_test,y_pred))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

#Seeing something
# plt.figure(figsize=(11,7))
# colors = ['#D62728','#2C9F2C','#FD7F23','#1F77B4','#9467BD',
#           '#8C564A','#7F7F7F','#1FBECF','#E377C2','#BCBD27']
# for i, r in enumerate([1,2,3,7,8,9]):
#     plt.subplot(3,2,i+1)
#     plt.plot(x_train[r][:100], label=[y_train[r]], color=colors[i], linewidth=2)
#     plt.xlabel('Samples @100Hz')
#     plt.legend(loc='upper left')
# plt.tight_layout()
# coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/insert-node-in-a-binary-search-tree
@Language: Python
@Datetime: 16-06-11 08:39
'''
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: The root of the binary search tree.
    @param node: insert this node into the binary search tree.
    @return: The root of the new binary search tree.
    """
    def insertNode(self, root, node):
        """Attach *node* as a leaf in BST order; duplicates go to the right."""
        if root is None:
            return node
        parent, cursor = root, root
        # walk down to the leaf position that should adopt the new node
        while cursor is not None:
            parent = cursor
            cursor = cursor.left if cursor.val > node.val else cursor.right
        if parent.val > node.val:
            parent.left = node
        else:
            parent.right = node
        return root
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
# Read all whitespace-separated integers from stdin, then print the
# square root of each in reverse input order.
numbers = []
for line in open(0):
    numbers.extend(int(token) for token in line.split())
for value in reversed(numbers):
    print(value ** .5)
#!/usr/bin/env python
# encoding: utf-8
name = "Disproportionation/rules"
shortDesc = ""
longDesc = """
"""
# Each entry records one fitted rate rule: a tree-node label, Blowers-Masel
# Arrhenius parameters with uncertainty statistics, and a rank.
entry(
    index = 1,
    label = "Root",
    kinetics = ArrheniusBM(A=(211.065,'m^3/(mol*s)'), n=1.40533, w0=(576945,'J/mol'), E0=(16272,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.01126358049348453, var=2.2286817499296356, Tref=1000.0, N=137, data_mean=0.0, correlation='Root',), comment="""BM rule fitted to 137 training reactions at node Root
Total Standard Deviation in ln(k): 3.021123350230915"""),
    rank = 11,
    shortDesc = """BM rule fitted to 137 training reactions at node Root
Total Standard Deviation in ln(k): 3.021123350230915""",
    longDesc =
"""
BM rule fitted to 137 training reactions at node Root
Total Standard Deviation in ln(k): 3.021123350230915
""",
)
entry(
index = 2,
label = "Root_Ext-1R!H-R",
kinetics = ArrheniusBM(A=(9445.08,'m^3/(mol*s)'), n=0.508694, w0=(543500,'J/mol'), E0=(-990.596,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.8021051701158732, var=30.01095873466917, Tref=1000.0, N=45, data_mean=0.0, correlation='Root_Ext-1R!H-R',), comment="""BM rule fitted to 45 training reactions at node Root_Ext-1R!H-R
Total Standard Deviation in ln(k): 15.510294010456205"""),
rank = 11,
shortDesc = """BM rule fitted to 45 training reactions at node Root_Ext-1R!H-R
Total Standard Deviation in ln(k): 15.510294010456205""",
longDesc =
"""
BM rule fitted to 45 training reactions at node Root_Ext-1R!H-R
Total Standard Deviation in ln(k): 15.510294010456205
""",
)
entry(
index = 3,
label = "Root_Ext-2R!H-R",
kinetics = ArrheniusBM(A=(219.804,'m^3/(mol*s)'), n=1.42205, w0=(552000,'J/mol'), E0=(50333.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.07130535463745602, var=3.640669304482096, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_Ext-2R!H-R',), comment="""BM rule fitted to 6 training reactions at node Root_Ext-2R!H-R
Total Standard Deviation in ln(k): 4.004301565335125"""),
rank = 11,
shortDesc = """BM rule fitted to 6 training reactions at node Root_Ext-2R!H-R
Total Standard Deviation in ln(k): 4.004301565335125""",
longDesc =
"""
BM rule fitted to 6 training reactions at node Root_Ext-2R!H-R
Total Standard Deviation in ln(k): 4.004301565335125
""",
)
entry(
index = 4,
label = "Root_4R->H",
kinetics = ArrheniusBM(A=(17085.5,'m^3/(mol*s)'), n=1.00205, w0=(591750,'J/mol'), E0=(67061.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.007188943348771603, var=0.2709148414024463, Tref=1000.0, N=12, data_mean=0.0, correlation='Root_4R->H',), comment="""BM rule fitted to 12 training reactions at node Root_4R->H
Total Standard Deviation in ln(k): 1.0615168637031318"""),
rank = 11,
shortDesc = """BM rule fitted to 12 training reactions at node Root_4R->H
Total Standard Deviation in ln(k): 1.0615168637031318""",
longDesc =
"""
BM rule fitted to 12 training reactions at node Root_4R->H
Total Standard Deviation in ln(k): 1.0615168637031318
""",
)
entry(
index = 5,
label = "Root_N-4R->H",
kinetics = ArrheniusBM(A=(93.1776,'m^3/(mol*s)'), n=1.4801, w0=(596905,'J/mol'), E0=(23.239,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.006180387938361382, var=2.098032515364293, Tref=1000.0, N=74, data_mean=0.0, correlation='Root_N-4R->H',), comment="""BM rule fitted to 74 training reactions at node Root_N-4R->H
Total Standard Deviation in ln(k): 2.919304514507989"""),
rank = 11,
shortDesc = """BM rule fitted to 74 training reactions at node Root_N-4R->H
Total Standard Deviation in ln(k): 2.919304514507989""",
longDesc =
"""
BM rule fitted to 74 training reactions at node Root_N-4R->H
Total Standard Deviation in ln(k): 2.919304514507989
""",
)
entry(
index = 6,
label = "Root_Ext-1R!H-R_4R->O",
kinetics = ArrheniusBM(A=(2.75017e+18,'m^3/(mol*s)'), n=-3.9301, w0=(563000,'J/mol'), E0=(80386.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.09276657605815747, var=3.011300276946607, Tref=1000.0, N=12, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O',), comment="""BM rule fitted to 12 training reactions at node Root_Ext-1R!H-R_4R->O
Total Standard Deviation in ln(k): 3.711918376666456"""),
rank = 11,
shortDesc = """BM rule fitted to 12 training reactions at node Root_Ext-1R!H-R_4R->O
Total Standard Deviation in ln(k): 3.711918376666456""",
longDesc =
"""
BM rule fitted to 12 training reactions at node Root_Ext-1R!H-R_4R->O
Total Standard Deviation in ln(k): 3.711918376666456
""",
)
entry(
index = 7,
label = "Root_Ext-1R!H-R_N-4R->O",
kinetics = ArrheniusBM(A=(13564.2,'m^3/(mol*s)'), n=0.470009, w0=(536409,'J/mol'), E0=(-16264.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-2.1185860084328203, var=34.56885654617875, Tref=1000.0, N=33, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O',), comment="""BM rule fitted to 33 training reactions at node Root_Ext-1R!H-R_N-4R->O
Total Standard Deviation in ln(k): 17.10997764410757"""),
rank = 11,
shortDesc = """BM rule fitted to 33 training reactions at node Root_Ext-1R!H-R_N-4R->O
Total Standard Deviation in ln(k): 17.10997764410757""",
longDesc =
"""
BM rule fitted to 33 training reactions at node Root_Ext-1R!H-R_N-4R->O
Total Standard Deviation in ln(k): 17.10997764410757
""",
)
entry(
index = 8,
label = "Root_Ext-2R!H-R_2R!H->C",
kinetics = ArrheniusBM(A=(4.08618e+20,'m^3/(mol*s)'), n=-4.37582, w0=(551000,'J/mol'), E0=(103613,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=1.2202160600853793, var=10.39330756070133, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-2R!H-R_2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_2R!H->C
Total Standard Deviation in ln(k): 9.528865376815661"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_2R!H->C
Total Standard Deviation in ln(k): 9.528865376815661""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_2R!H->C
Total Standard Deviation in ln(k): 9.528865376815661
""",
)
entry(
index = 9,
label = "Root_Ext-2R!H-R_N-2R!H->C",
kinetics = ArrheniusBM(A=(1273.64,'m^3/(mol*s)'), n=1.17018, w0=(552500,'J/mol'), E0=(39987.4,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.05673151786327586, var=3.6789860426599743, Tref=1000.0, N=4, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C',), comment="""BM rule fitted to 4 training reactions at node Root_Ext-2R!H-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.9877603244998157"""),
rank = 11,
shortDesc = """BM rule fitted to 4 training reactions at node Root_Ext-2R!H-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.9877603244998157""",
longDesc =
"""
BM rule fitted to 4 training reactions at node Root_Ext-2R!H-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.9877603244998157
""",
)
entry(
index = 10,
label = "Root_4R->H_Sp-2R!H-1R!H",
kinetics = ArrheniusBM(A=(134417,'m^3/(mol*s)'), n=0.760068, w0=(591944,'J/mol'), E0=(73328,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.017301751919963533, var=0.2727982590527378, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H',), comment="""BM rule fitted to 9 training reactions at node Root_4R->H_Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 1.090546729162876"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_4R->H_Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 1.090546729162876""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_4R->H_Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 1.090546729162876
""",
)
entry(
index = 11,
label = "Root_4R->H_N-Sp-2R!H-1R!H",
kinetics = ArrheniusBM(A=(16039.5,'m^3/(mol*s)'), n=0.960818, w0=(591167,'J/mol'), E0=(59116.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.02977097333687675, var=0.0, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_4R->H_N-Sp-2R!H-1R!H',), comment="""BM rule fitted to 3 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 0.07480144054491646"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 0.07480144054491646""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H
Total Standard Deviation in ln(k): 0.07480144054491646
""",
)
entry(
index = 12,
label = "Root_N-4R->H_4CNOS-u1",
kinetics = ArrheniusBM(A=(33.2765,'m^3/(mol*s)'), n=1.57455, w0=(595677,'J/mol'), E0=(2495.51,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.005896437090739888, var=1.8220385479226, Tref=1000.0, N=62, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1',), comment="""BM rule fitted to 62 training reactions at node Root_N-4R->H_4CNOS-u1
Total Standard Deviation in ln(k): 2.720864875743989"""),
rank = 11,
shortDesc = """BM rule fitted to 62 training reactions at node Root_N-4R->H_4CNOS-u1
Total Standard Deviation in ln(k): 2.720864875743989""",
longDesc =
"""
BM rule fitted to 62 training reactions at node Root_N-4R->H_4CNOS-u1
Total Standard Deviation in ln(k): 2.720864875743989
""",
)
entry(
index = 13,
label = "Root_N-4R->H_N-4CNOS-u1",
kinetics = ArrheniusBM(A=(9958.36,'m^3/(mol*s)'), n=1.02956, w0=(603250,'J/mol'), E0=(54279,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.03669059532945314, var=0.23702390055722886, Tref=1000.0, N=12, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1',), comment="""BM rule fitted to 12 training reactions at node Root_N-4R->H_N-4CNOS-u1
Total Standard Deviation in ln(k): 1.068194711584249"""),
rank = 11,
shortDesc = """BM rule fitted to 12 training reactions at node Root_N-4R->H_N-4CNOS-u1
Total Standard Deviation in ln(k): 1.068194711584249""",
longDesc =
"""
BM rule fitted to 12 training reactions at node Root_N-4R->H_N-4CNOS-u1
Total Standard Deviation in ln(k): 1.068194711584249
""",
)
entry(
index = 14,
label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R",
kinetics = ArrheniusBM(A=(0.631851,'m^3/(mol*s)'), n=1.38334, w0=(563000,'J/mol'), E0=(4656.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.29630937279034325, var=1.3223468183437184, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R',), comment="""BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R
Total Standard Deviation in ln(k): 3.0498077298705226"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R
Total Standard Deviation in ln(k): 3.0498077298705226""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R
Total Standard Deviation in ln(k): 3.0498077298705226
""",
)
entry(
index = 15,
label = "Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H",
kinetics = ArrheniusBM(A=(1.70765e+07,'m^3/(mol*s)'), n=4.66546e-07, w0=(563000,'J/mol'), E0=(56300,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=0.9494596051172368, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.9534182263699047"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.9534182263699047""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.9534182263699047
""",
)
entry(
index = 16,
label = "Root_Ext-1R!H-R_4R->O_N-Sp-5R!H-1R!H",
kinetics = ArrheniusBM(A=(6.03e+06,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(56300,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_N-Sp-5R!H-1R!H',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 17,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R",
kinetics = ArrheniusBM(A=(3.79584e+06,'m^3/(mol*s)'), n=-0.196833, w0=(535591,'J/mol'), E0=(53559.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0037403777898211976, var=0.7799706060483732, Tref=1000.0, N=11, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R',), comment="""BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R
Total Standard Deviation in ln(k): 1.779898653326379"""),
rank = 11,
shortDesc = """BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R
Total Standard Deviation in ln(k): 1.779898653326379""",
longDesc =
"""
BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R
Total Standard Deviation in ln(k): 1.779898653326379
""",
)
entry(
index = 18,
label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H",
kinetics = ArrheniusBM(A=(0.433929,'m^3/(mol*s)'), n=1.96758, w0=(539000,'J/mol'), E0=(70245.9,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.1692521391030103, var=6.079427974794933, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H',), comment="""BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 5.368230882682922"""),
rank = 11,
shortDesc = """BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 5.368230882682922""",
longDesc =
"""
BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 5.368230882682922
""",
)
entry(
index = 19,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H",
kinetics = ArrheniusBM(A=(5958.46,'m^3/(mol*s)'), n=0.568208, w0=(536000,'J/mol'), E0=(-32910.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-3.2867690498643043, var=55.8098478782717, Tref=1000.0, N=16, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H',), comment="""BM rule fitted to 16 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 23.23478535087143"""),
rank = 11,
shortDesc = """BM rule fitted to 16 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 23.23478535087143""",
longDesc =
"""
BM rule fitted to 16 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H
Total Standard Deviation in ln(k): 23.23478535087143
""",
)
entry(
index = 20,
label = "Root_Ext-2R!H-R_2R!H->C_4R->C",
kinetics = ArrheniusBM(A=(50000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(26972.8,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_2R!H->C_4R->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_4R->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_4R->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_4R->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 21,
label = "Root_Ext-2R!H-R_2R!H->C_N-4R->C",
kinetics = ArrheniusBM(A=(7.23e+06,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(97648.2,'J/mol'), Tmin=(300,'K'), Tmax=(2000,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_2R!H->C_N-4R->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_N-4R->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_N-4R->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_2R!H->C_N-4R->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 22,
label = "Root_Ext-2R!H-R_N-2R!H->C_4R->H",
kinetics = ArrheniusBM(A=(480,'m^3/(mol*s)'), n=1.5, w0=(557500,'J/mol'), E0=(46201.9,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_4R->H',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_4R->H
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_4R->H
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_4R->H
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 23,
label = "Root_Ext-2R!H-R_N-2R!H->C_N-4R->H",
kinetics = ArrheniusBM(A=(3.55396e+06,'m^3/(mol*s)'), n=0.0868444, w0=(550833,'J/mol'), E0=(77421.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.000652761627062249, var=0.3548234455961088, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_N-4R->H',), comment="""BM rule fitted to 3 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H
Total Standard Deviation in ln(k): 1.1958018205112724"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H
Total Standard Deviation in ln(k): 1.1958018205112724""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H
Total Standard Deviation in ln(k): 1.1958018205112724
""",
)
entry(
index = 24,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1",
kinetics = ArrheniusBM(A=(240554,'m^3/(mol*s)'), n=0.681806, w0=(599125,'J/mol'), E0=(74070.9,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.11637774989113545, var=0.5068220816987241, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1',), comment="""BM rule fitted to 8 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1
Total Standard Deviation in ln(k): 1.7196061325740737"""),
rank = 11,
shortDesc = """BM rule fitted to 8 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1
Total Standard Deviation in ln(k): 1.7196061325740737""",
longDesc =
"""
BM rule fitted to 8 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1
Total Standard Deviation in ln(k): 1.7196061325740737
""",
)
entry(
index = 25,
label = "Root_4R->H_Sp-2R!H-1R!H_N-2R!H-u1",
kinetics = ArrheniusBM(A=(480,'m^3/(mol*s)'), n=1.5, w0=(534500,'J/mol'), E0=(53450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_N-2R!H-u1',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_N-2R!H-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_N-2R!H-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_N-2R!H-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 26,
label = "Root_4R->H_N-Sp-2R!H-1R!H_1R!H->C",
kinetics = ArrheniusBM(A=(240,'m^3/(mol*s)'), n=1.5, w0=(557500,'J/mol'), E0=(55750,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_N-Sp-2R!H-1R!H_1R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_1R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_1R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_1R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 27,
label = "Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C",
kinetics = ArrheniusBM(A=(16039.5,'m^3/(mol*s)'), n=0.960818, w0=(608000,'J/mol'), E0=(60800,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.4478648794535599, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C
Total Standard Deviation in ln(k): 1.1252886418431154"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C
Total Standard Deviation in ln(k): 1.1252886418431154""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C
Total Standard Deviation in ln(k): 1.1252886418431154
""",
)
entry(
index = 28,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O",
kinetics = ArrheniusBM(A=(4.86137,'m^3/(mol*s)'), n=1.88298, w0=(647000,'J/mol'), E0=(-2980.15,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.2591287082868978, var=3.746723415601589, Tref=1000.0, N=22, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O',), comment="""BM rule fitted to 22 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 4.531533543179482"""),
rank = 11,
shortDesc = """BM rule fitted to 22 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 4.531533543179482""",
longDesc =
"""
BM rule fitted to 22 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 4.531533543179482
""",
)
entry(
index = 29,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O",
kinetics = ArrheniusBM(A=(63.3186,'m^3/(mol*s)'), n=1.4714, w0=(567450,'J/mol'), E0=(-1407.03,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.004295895700507421, var=1.692299905031995, Tref=1000.0, N=40, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O',), comment="""BM rule fitted to 40 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 2.6187220517465106"""),
rank = 11,
shortDesc = """BM rule fitted to 40 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 2.6187220517465106""",
longDesc =
"""
BM rule fitted to 40 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 2.6187220517465106
""",
)
entry(
index = 30,
label = "Root_N-4R->H_N-4CNOS-u1_1R!H->O",
kinetics = ArrheniusBM(A=(6.06985e+07,'m^3/(mol*s)'), n=-0.0727699, w0=(665667,'J/mol'), E0=(73198.9,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=1.0730570062729807, var=4.7084908975045465, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_1R!H->O',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 7.046209272340443"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 7.046209272340443""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O
Total Standard Deviation in ln(k): 7.046209272340443
""",
)
entry(
index = 31,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O",
kinetics = ArrheniusBM(A=(10895.1,'m^3/(mol*s)'), n=1.01432, w0=(582444,'J/mol'), E0=(55106.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.009342386299330369, var=0.25836087721149187, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O',), comment="""BM rule fitted to 9 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.042464370909332"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.042464370909332""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.042464370909332
""",
)
entry(
index = 32,
label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H",
kinetics = ArrheniusBM(A=(1.02976e+08,'m^3/(mol*s)'), n=-1.08436, w0=(563000,'J/mol'), E0=(46128.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.09151076886860776, var=0.2905474551235391, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H',), comment="""BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.3105279586067948"""),
rank = 11,
shortDesc = """BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.3105279586067948""",
longDesc =
"""
BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 1.3105279586067948
""",
)
entry(
index = 33,
label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_N-Sp-5R!H-1R!H",
kinetics = ArrheniusBM(A=(602200,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(40463,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_N-Sp-5R!H-1R!H',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_N-Sp-5R!H-1R!H
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 34,
label = "Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H_Ext-1R!H-R",
kinetics = ArrheniusBM(A=(1.21e+07,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(56300,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H_Ext-1R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 35,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C",
kinetics = ArrheniusBM(A=(3.54362e+06,'m^3/(mol*s)'), n=-0.187345, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0005435713900316853, var=1.2012760832834504, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C',), comment="""BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C
Total Standard Deviation in ln(k): 2.1986103517020235"""),
rank = 11,
shortDesc = """BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C
Total Standard Deviation in ln(k): 2.1986103517020235""",
longDesc =
"""
BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C
Total Standard Deviation in ln(k): 2.1986103517020235
""",
)
entry(
index = 36,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C",
kinetics = ArrheniusBM(A=(4.55971e+06,'m^3/(mol*s)'), n=-0.222135, w0=(526500,'J/mol'), E0=(52650,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.01226519497806337, var=0.009633055594024382, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C',), comment="""BM rule fitted to 3 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C
Total Standard Deviation in ln(k): 0.22757807355262039"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C
Total Standard Deviation in ln(k): 0.22757807355262039""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C
Total Standard Deviation in ln(k): 0.22757807355262039
""",
)
entry(
index = 37,
label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R",
kinetics = ArrheniusBM(A=(1.7265,'m^3/(mol*s)'), n=1.78155, w0=(539000,'J/mol'), E0=(71042.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.2864715707319417, var=8.102447089509363, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R',), comment="""BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.426215660910921"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.426215660910921""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.426215660910921
""",
)
entry(
index = 38,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R",
kinetics = ArrheniusBM(A=(17549.8,'m^3/(mol*s)'), n=0.428311, w0=(534500,'J/mol'), E0=(-14543.2,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-3.582895962257546, var=62.60541265629812, Tref=1000.0, N=13, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R',), comment="""BM rule fitted to 13 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 24.8644332369221"""),
rank = 11,
shortDesc = """BM rule fitted to 13 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 24.8644332369221""",
longDesc =
"""
BM rule fitted to 13 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R
Total Standard Deviation in ln(k): 24.8644332369221
""",
)
entry(
index = 39,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C",
kinetics = ArrheniusBM(A=(4.56235e+06,'m^3/(mol*s)'), n=-0.16, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-4.305460834090209e-17, var=0.26130883078297484, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C
Total Standard Deviation in ln(k): 1.0247880034798968"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C
Total Standard Deviation in ln(k): 1.0247880034798968""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C
Total Standard Deviation in ln(k): 1.0247880034798968
""",
)
entry(
index = 40,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_N-4CHNS->C",
kinetics = ArrheniusBM(A=(1.81e+06,'m^3/(mol*s)'), n=0, w0=(549500,'J/mol'), E0=(54950,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_N-4CHNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 41,
label = "Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_4CNO->O",
kinetics = ArrheniusBM(A=(2.4,'m^3/(mol*s)'), n=2, w0=(571000,'J/mol'), E0=(57100,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_4CNO->O',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_4CNO->O
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_4CNO->O
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_4CNO->O
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 42,
label = "Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O",
kinetics = ArrheniusBM(A=(23777.5,'m^3/(mol*s)'), n=0.682531, w0=(540750,'J/mol'), E0=(64614.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.6075970314782776, var=0.6553537765114736, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O
Total Standard Deviation in ln(k): 3.149537412505626"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O
Total Standard Deviation in ln(k): 3.149537412505626""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O
Total Standard Deviation in ln(k): 3.149537412505626
""",
)
entry(
index = 43,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O",
kinetics = ArrheniusBM(A=(1151.49,'m^3/(mol*s)'), n=1.37766, w0=(657250,'J/mol'), E0=(60077.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.03633496085430713, var=0.028750657654443026, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O',), comment="""BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O
Total Standard Deviation in ln(k): 0.43121712987894956"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O
Total Standard Deviation in ln(k): 0.43121712987894956""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O
Total Standard Deviation in ln(k): 0.43121712987894956
""",
)
entry(
index = 44,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O",
kinetics = ArrheniusBM(A=(358904,'m^3/(mol*s)'), n=0.629663, w0=(579750,'J/mol'), E0=(76357.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.18777365918498876, var=0.5057209361704464, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O',), comment="""BM rule fitted to 6 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.897441595632223"""),
rank = 11,
shortDesc = """BM rule fitted to 6 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.897441595632223""",
longDesc =
"""
BM rule fitted to 6 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O
Total Standard Deviation in ln(k): 1.897441595632223
""",
)
entry(
index = 45,
label = "Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_2R!H->C",
kinetics = ArrheniusBM(A=(240,'m^3/(mol*s)'), n=1.5, w0=(545000,'J/mol'), E0=(54500,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 46,
label = "Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(240,'m^3/(mol*s)'), n=1.5, w0=(671000,'J/mol'), E0=(67100,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_N-Sp-2R!H-1R!H_N-1R!H->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 47,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C",
kinetics = ArrheniusBM(A=(7.43489e+07,'m^3/(mol*s)'), n=-0.0998224, w0=(662885,'J/mol'), E0=(44766.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.7298065405965491, var=4.858257525622271, Tref=1000.0, N=13, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C',), comment="""BM rule fitted to 13 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 6.252412638771265"""),
rank = 11,
shortDesc = """BM rule fitted to 13 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 6.252412638771265""",
longDesc =
"""
BM rule fitted to 13 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 6.252412638771265
""",
)
entry(
index = 48,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C",
kinetics = ArrheniusBM(A=(0.847891,'m^3/(mol*s)'), n=2.07148, w0=(624056,'J/mol'), E0=(-9108.61,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.3066934194206129, var=3.646168388500196, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C',), comment="""BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 4.598616635309227"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 4.598616635309227""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 4.598616635309227
""",
)
entry(
index = 49,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O",
kinetics = ArrheniusBM(A=(336.66,'m^3/(mol*s)'), n=1.32779, w0=(590706,'J/mol'), E0=(-8150.35,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.00558337269774861, var=1.4985896104748897, Tref=1000.0, N=17, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O',), comment="""BM rule fitted to 17 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O
Total Standard Deviation in ln(k): 2.4681630030595403"""),
rank = 11,
shortDesc = """BM rule fitted to 17 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O
Total Standard Deviation in ln(k): 2.4681630030595403""",
longDesc =
"""
BM rule fitted to 17 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O
Total Standard Deviation in ln(k): 2.4681630030595403
""",
)
# ------------------------------------------------------------------------------
# Auto-generated rate-rule entries (indices 50-88).
#
# Each entry() records a modified-Arrhenius fit (ArrheniusBM) for one node of a
# reaction-family decision tree; the node path is the `label`/`correlation`
# string (e.g. "Root_N-4R->H_..."). Per entry:
#   - kinetics: ArrheniusBM with pre-exponential A (m^3/(mol*s)), temperature
#     exponent n, bond-energy parameter w0 (J/mol), intrinsic barrier E0 (J/mol),
#     and a validity window Tmin/Tmax (K).
#   - uncertainty: RateUncertainty(mu, var, Tref, N, ...) where N equals the
#     number of training reactions quoted in the fit comment.
#   - rank = 11 for every entry in this run.
#   - shortDesc / longDesc repeat the fit provenance and the total standard
#     deviation in ln(k).
# NOTE(review): this block is generated data — do not hand-edit the fitted
# values; regenerate from the training set instead. Presumably produced by an
# RMG-style tree-fitting tool — confirm against the generator before editing.
# ------------------------------------------------------------------------------
entry(
    index = 50,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O",
    kinetics = ArrheniusBM(A=(32.606,'m^3/(mol*s)'), n=1.47916, w0=(550261,'J/mol'), E0=(62814.9,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.024499240070432513, var=1.3407175726784197, Tref=1000.0, N=23, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O',), comment="""BM rule fitted to 23 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O
Total Standard Deviation in ln(k): 2.38282578132325"""),
    rank = 11,
    shortDesc = """BM rule fitted to 23 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O
Total Standard Deviation in ln(k): 2.38282578132325""",
    longDesc =
"""
BM rule fitted to 23 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O
Total Standard Deviation in ln(k): 2.38282578132325
""",
)
entry(
    index = 51,
    label = "Root_N-4R->H_N-4CNOS-u1_1R!H->O_4CNOS->C",
    kinetics = ArrheniusBM(A=(1.21e+06,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_1R!H->O_4CNOS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 52,
    label = "Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C",
    kinetics = ArrheniusBM(A=(1.99016e+08,'m^3/(mol*s)'), n=-0.225563, w0=(670750,'J/mol'), E0=(75059.9,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=1.2522028424953557, var=6.0581551465221715, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C
Total Standard Deviation in ln(k): 8.080556867668387"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C
Total Standard Deviation in ln(k): 8.080556867668387""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C
Total Standard Deviation in ln(k): 8.080556867668387
""",
)
entry(
    index = 53,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS",
    kinetics = ArrheniusBM(A=(15938.6,'m^3/(mol*s)'), n=0.994035, w0=(571333,'J/mol'), E0=(58835.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.032911533750999235, var=0.2853175032213902, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS',), comment="""BM rule fitted to 6 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.153523940801922"""),
    rank = 11,
    shortDesc = """BM rule fitted to 6 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.153523940801922""",
    longDesc =
"""
BM rule fitted to 6 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.153523940801922
""",
)
entry(
    index = 54,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS",
    kinetics = ArrheniusBM(A=(11361.3,'m^3/(mol*s)'), n=0.960818, w0=(604667,'J/mol'), E0=(60466.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.02977097491065517, var=1.805559322863034e-35, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.0748014444991336"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.0748014444991336""",
    longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.0748014444991336
""",
)
entry(
    index = 55,
    label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R",
    kinetics = ArrheniusBM(A=(10000,'m^3/(mol*s)'), n=-2.70943e-08, w0=(563000,'J/mol'), E0=(19737.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=6.435288686813596e-10, var=5.162895344862459e-19, Tref=1000.0, N=4, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R',), comment="""BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R
Total Standard Deviation in ln(k): 3.0573748226362496e-09"""),
    rank = 11,
    shortDesc = """BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R
Total Standard Deviation in ln(k): 3.0573748226362496e-09""",
    longDesc =
"""
BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R
Total Standard Deviation in ln(k): 3.0573748226362496e-09
""",
)
entry(
    index = 56,
    label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-1R!H-R",
    kinetics = ArrheniusBM(A=(10974.5,'m^3/(mol*s)'), n=2.73044e-07, w0=(563000,'J/mol'), E0=(19809.2,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-3.2290956255676566e-17, var=0.06917824979652494, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-1R!H-R',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.5272805777722495"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.5272805777722495""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.5272805777722495
""",
)
entry(
    index = 57,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R",
    kinetics = ArrheniusBM(A=(3.28526e+06,'m^3/(mol*s)'), n=-0.168394, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0006212264085765338, var=1.4218690013356239, Tref=1000.0, N=7, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R',), comment="""BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R
Total Standard Deviation in ln(k): 2.3920500512879865"""),
    rank = 11,
    shortDesc = """BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R
Total Standard Deviation in ln(k): 2.3920500512879865""",
    longDesc =
"""
BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R
Total Standard Deviation in ln(k): 2.3920500512879865
""",
)
entry(
    index = 58,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_4HS->H",
    kinetics = ArrheniusBM(A=(904000,'m^3/(mol*s)'), n=0, w0=(549500,'J/mol'), E0=(54950,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_4HS->H',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_4HS->H
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_4HS->H
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_4HS->H
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 59,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H",
    kinetics = ArrheniusBM(A=(1.02405e+07,'m^3/(mol*s)'), n=-0.333202, w0=(515000,'J/mol'), E0=(51500,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.27677043112298655, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H
Total Standard Deviation in ln(k): 0.6954030932738355"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H
Total Standard Deviation in ln(k): 0.6954030932738355""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H
Total Standard Deviation in ln(k): 0.6954030932738355
""",
)
entry(
    index = 60,
    label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-6R!H-R",
    kinetics = ArrheniusBM(A=(84300,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(76690.3,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-6R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-6R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-6R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-6R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 61,
    label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R",
    kinetics = ArrheniusBM(A=(5.7233e-06,'m^3/(mol*s)'), n=3.63493, w0=(539000,'J/mol'), E0=(10543.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.004220313209984355, var=8.94173911161422, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.005311154003196"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.005311154003196""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 6.005311154003196
""",
)
entry(
    index = 62,
    label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Sp-6R!H-4CHNS",
    kinetics = ArrheniusBM(A=(964000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(92497.8,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Sp-6R!H-4CHNS',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 63,
    label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_N-Sp-6R!H-4CHNS",
    kinetics = ArrheniusBM(A=(2.41e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_N-Sp-6R!H-4CHNS',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_N-Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_N-Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_N-Sp-6R!H-4CHNS
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 64,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S",
    kinetics = ArrheniusBM(A=(1.27667e-13,'m^3/(mol*s)'), n=4.8323, w0=(527000,'J/mol'), E0=(25354.4,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=10.238614422136449, var=279.3313223037818, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S
Total Standard Deviation in ln(k): 59.23071623718318"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S
Total Standard Deviation in ln(k): 59.23071623718318""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S
Total Standard Deviation in ln(k): 59.23071623718318
""",
)
entry(
    index = 65,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S",
    kinetics = ArrheniusBM(A=(365253,'m^3/(mol*s)'), n=0.0895424, w0=(535864,'J/mol'), E0=(4633.75,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.8281757095360616, var=15.207890755227291, Tref=1000.0, N=11, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S',), comment="""BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S
Total Standard Deviation in ln(k): 12.411330975974792"""),
    rank = 11,
    shortDesc = """BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S
Total Standard Deviation in ln(k): 12.411330975974792""",
    longDesc =
"""
BM rule fitted to 11 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S
Total Standard Deviation in ln(k): 12.411330975974792
""",
)
entry(
    index = 66,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_4C-u1",
    kinetics = ArrheniusBM(A=(1.15e+07,'m^3/(mol*s)'), n=-0.32, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_4C-u1',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_4C-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_4C-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_4C-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 67,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_N-4C-u1",
    kinetics = ArrheniusBM(A=(1.81e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_N-4C-u1',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_N-4C-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_N-4C-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_4CHNS->C_N-4C-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 68,
    label = "Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_4CN->C",
    kinetics = ArrheniusBM(A=(1.6,'m^3/(mol*s)'), n=1.87, w0=(547000,'J/mol'), E0=(47358.9,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_4CN->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 69,
    label = "Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_N-4CN->C",
    kinetics = ArrheniusBM(A=(1.8,'m^3/(mol*s)'), n=1.94, w0=(534500,'J/mol'), E0=(53450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_N-4CN->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_N-4CN->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_N-4CN->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-2R!H-R_N-2R!H->C_N-4R->H_N-4CNO->O_N-4CN->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 70,
    label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_2R!H->C",
    kinetics = ArrheniusBM(A=(2e+07,'m^3/(mol*s)'), n=0, w0=(666000,'J/mol'), E0=(66600,'J/mol'), Tmin=(295,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 71,
    label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_N-2R!H->C",
    kinetics = ArrheniusBM(A=(480,'m^3/(mol*s)'), n=1.5, w0=(648500,'J/mol'), E0=(59319.9,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_1R!H->O_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 72,
    label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C",
    kinetics = ArrheniusBM(A=(48750,'m^3/(mol*s)'), n=0.958373, w0=(589333,'J/mol'), E0=(58933.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.47580866012507, var=4.807609562897205, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C',), comment="""BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C
Total Standard Deviation in ln(k): 8.103696573709435"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C
Total Standard Deviation in ln(k): 8.103696573709435""",
    longDesc =
"""
BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C
Total Standard Deviation in ln(k): 8.103696573709435
""",
)
entry(
    index = 73,
    label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C",
    kinetics = ArrheniusBM(A=(12490.1,'m^3/(mol*s)'), n=1.0397, w0=(570167,'J/mol'), E0=(68188.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.023704145862347308, var=0.7277245335318464, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C',), comment="""BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C
Total Standard Deviation in ln(k): 1.7697329354950213"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C
Total Standard Deviation in ln(k): 1.7697329354950213""",
    longDesc =
"""
BM rule fitted to 3 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C
Total Standard Deviation in ln(k): 1.7697329354950213
""",
)
entry(
    index = 74,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R",
    kinetics = ArrheniusBM(A=(2.64485e+07,'m^3/(mol*s)'), n=-0.132719, w0=(662045,'J/mol'), E0=(32116.4,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0510078349133705, var=2.263935109688879, Tref=1000.0, N=11, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R',), comment="""BM rule fitted to 11 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R
Total Standard Deviation in ln(k): 3.144560699232883"""),
    rank = 11,
    shortDesc = """BM rule fitted to 11 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R
Total Standard Deviation in ln(k): 3.144560699232883""",
    longDesc =
"""
BM rule fitted to 11 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R
Total Standard Deviation in ln(k): 3.144560699232883
""",
)
entry(
    index = 75,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_4CNOS->C",
    kinetics = ArrheniusBM(A=(8.49e+07,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(298,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_4CNOS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 76,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_N-4CNOS->C",
    kinetics = ArrheniusBM(A=(2.41e+07,'m^3/(mol*s)'), n=0, w0=(679500,'J/mol'), E0=(67950,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_N-4CNOS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_N-4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_N-4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_N-4CNOS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 77,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O",
    kinetics = ArrheniusBM(A=(127.138,'m^3/(mol*s)'), n=1.57348, w0=(653000,'J/mol'), E0=(4452.52,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-3.9155328033681194, var=28.66315200497933, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O
Total Standard Deviation in ln(k): 20.570968575960745"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O
Total Standard Deviation in ln(k): 20.570968575960745""",
    longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O
Total Standard Deviation in ln(k): 20.570968575960745
""",
)
entry(
    index = 78,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O",
    kinetics = ArrheniusBM(A=(0.864362,'m^3/(mol*s)'), n=2.04909, w0=(609583,'J/mol'), E0=(-311.902,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.3524242618291431, var=5.122711251797606, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O',), comment="""BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O
Total Standard Deviation in ln(k): 5.422886644907272"""),
    rank = 11,
    shortDesc = """BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O
Total Standard Deviation in ln(k): 5.422886644907272""",
    longDesc =
"""
BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O
Total Standard Deviation in ln(k): 5.422886644907272
""",
)
entry(
    index = 79,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R",
    kinetics = ArrheniusBM(A=(202.633,'m^3/(mol*s)'), n=1.393, w0=(597000,'J/mol'), E0=(-5139.23,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.06911874917151527, var=4.397702883000043, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R',), comment="""BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 4.377735130161602"""),
    rank = 11,
    shortDesc = """BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 4.377735130161602""",
    longDesc =
"""
BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 4.377735130161602
""",
)
entry(
    index = 80,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS",
    kinetics = ArrheniusBM(A=(625.943,'m^3/(mol*s)'), n=1.27875, w0=(575333,'J/mol'), E0=(57533.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.03990583248034886, var=0.24629950025387834, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS',), comment="""BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.0951872704960075"""),
    rank = 11,
    shortDesc = """BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.0951872704960075""",
    longDesc =
"""
BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.0951872704960075
""",
)
entry(
    index = 81,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS",
    kinetics = ArrheniusBM(A=(330.615,'m^3/(mol*s)'), n=1.27907, w0=(604667,'J/mol'), E0=(60466.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.03980613352150107, var=0.0, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.10001541085804289"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.10001541085804289""",
    longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 0.10001541085804289
""",
)
entry(
    index = 82,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R",
    kinetics = ArrheniusBM(A=(373.459,'m^3/(mol*s)'), n=1.26122, w0=(547200,'J/mol'), E0=(46931,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.09318681774672263, var=0.1421306232878116, Tref=1000.0, N=10, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R',), comment="""BM rule fitted to 10 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.9899271731946009"""),
    rank = 11,
    shortDesc = """BM rule fitted to 10 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.9899271731946009""",
    longDesc =
"""
BM rule fitted to 10 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.9899271731946009
""",
)
entry(
    index = 83,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C",
    kinetics = ArrheniusBM(A=(30.9211,'m^3/(mol*s)'), n=1.45757, w0=(548688,'J/mol'), E0=(78196.5,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.2556974699678075, var=0.6728411382279629, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C',), comment="""BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C
Total Standard Deviation in ln(k): 2.2868778768615523"""),
    rank = 11,
    shortDesc = """BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C
Total Standard Deviation in ln(k): 2.2868778768615523""",
    longDesc =
"""
BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C
Total Standard Deviation in ln(k): 2.2868778768615523
""",
)
entry(
    index = 84,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C",
    kinetics = ArrheniusBM(A=(238.944,'m^3/(mol*s)'), n=1.2433, w0=(558900,'J/mol'), E0=(34984.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.0012581813448375246, var=0.5817728263131192, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C',), comment="""BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C
Total Standard Deviation in ln(k): 1.5322535742673742"""),
    rank = 11,
    shortDesc = """BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C
Total Standard Deviation in ln(k): 1.5322535742673742""",
    longDesc =
"""
BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C
Total Standard Deviation in ln(k): 1.5322535742673742
""",
)
entry(
    index = 85,
    label = "Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_2R!H->C",
    kinetics = ArrheniusBM(A=(9.04e+07,'m^3/(mol*s)'), n=0, w0=(679500,'J/mol'), E0=(67950,'J/mol'), Tmin=(298,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 86,
    label = "Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_N-2R!H->C",
    kinetics = ArrheniusBM(A=(330,'m^3/(mol*s)'), n=1.5, w0=(662000,'J/mol'), E0=(47976.3,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_1R!H->O_N-4CNOS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 87,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C",
    kinetics = ArrheniusBM(A=(33706.5,'m^3/(mol*s)'), n=0.959594, w0=(564500,'J/mol'), E0=(56450,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.1948601205724478, var=0.35256865674022686, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 1.6799597049858794"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 1.6799597049858794""",
    longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 1.6799597049858794
""",
)
entry(
    index = 88,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C",
    kinetics = ArrheniusBM(A=(9237.74,'m^3/(mol*s)'), n=1.04888, w0=(574750,'J/mol'), E0=(56238.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.010334634855692922, var=0.2652013577163164, Tref=1000.0, N=4, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 4 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.058358967033491"""),
    rank = 11,
    shortDesc = """BM rule fitted to 4 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.058358967033491""",
    longDesc =
"""
BM rule fitted to 4 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.058358967033491
""",
)
entry(
index = 89,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_1CNS->C",
kinetics = ArrheniusBM(A=(170,'m^3/(mol*s)'), n=1.5, w0=(571000,'J/mol'), E0=(57100,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 90,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C",
kinetics = ArrheniusBM(A=(11361.3,'m^3/(mol*s)'), n=0.960818, w0=(621500,'J/mol'), E0=(62150,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.4478648794535599, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.1252886418431154"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.1252886418431154""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.1252886418431154
""",
)
entry(
index = 91,
label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R",
kinetics = ArrheniusBM(A=(10000,'m^3/(mol*s)'), n=-2.10203e-08, w0=(563000,'J/mol'), E0=(21006.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.0"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.0""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R
Total Standard Deviation in ln(k): 0.0
""",
)
entry(
index = 92,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_Sp-7R!H#4C",
kinetics = ArrheniusBM(A=(6.03e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_Sp-7R!H#4C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_Sp-7R!H#4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_Sp-7R!H#4C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_Sp-7R!H#4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 93,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C",
kinetics = ArrheniusBM(A=(2.96901e+06,'m^3/(mol*s)'), n=-0.196459, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0007247616677251713, var=0.7512272601497324, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C',), comment="""BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C
Total Standard Deviation in ln(k): 1.7393924065119997"""),
rank = 11,
shortDesc = """BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C
Total Standard Deviation in ln(k): 1.7393924065119997""",
longDesc =
"""
BM rule fitted to 6 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C
Total Standard Deviation in ln(k): 1.7393924065119997
""",
)
entry(
index = 94,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H_Ext-4S-R_Ext-7R!H-R",
kinetics = ArrheniusBM(A=(763000,'m^3/(mol*s)'), n=0, w0=(515000,'J/mol'), E0=(51500,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H_Ext-4S-R_Ext-7R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H_Ext-4S-R_Ext-7R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H_Ext-4S-R_Ext-7R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_N-4CHNS->C_N-4HS->H_Ext-4S-R_Ext-7R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 95,
label = "Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R_Ext-4CHNS-R",
kinetics = ArrheniusBM(A=(2.89e+07,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(85952.7,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R_Ext-4CHNS-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Sp-5R!H=1R!H_Ext-4CHNS-R_Ext-4CHNS-R_Ext-4CHNS-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 96,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S_Ext-2R!H-R",
kinetics = ArrheniusBM(A=(644,'m^3/(mol*s)'), n=1.19, w0=(515000,'J/mol'), E0=(39525.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S_Ext-2R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S_Ext-2R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S_Ext-2R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_6R!H->S_Ext-2R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 97,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C",
kinetics = ArrheniusBM(A=(779957,'m^3/(mol*s)'), n=0.0248232, w0=(537950,'J/mol'), E0=(22331.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.6732134459096422, var=1.1082236828190586, Tref=1000.0, N=10, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C',), comment="""BM rule fitted to 10 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C
Total Standard Deviation in ln(k): 3.8019198602869193"""),
rank = 11,
shortDesc = """BM rule fitted to 10 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C
Total Standard Deviation in ln(k): 3.8019198602869193""",
longDesc =
"""
BM rule fitted to 10 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C
Total Standard Deviation in ln(k): 3.8019198602869193
""",
)
entry(
index = 98,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_N-4CHNS->C",
kinetics = ArrheniusBM(A=(7.19e-07,'m^3/(mol*s)'), n=3.13, w0=(515000,'J/mol'), E0=(51500,'J/mol'), Tmin=(300,'K'), Tmax=(2000,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_N-4CHNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_N-4CHNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 99,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_2R!H->C",
kinetics = ArrheniusBM(A=(3.61e+06,'m^3/(mol*s)'), n=0, w0=(549500,'J/mol'), E0=(54950,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 100,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(48483.4,'m^3/(mol*s)'), n=0.959594, w0=(609250,'J/mol'), E0=(60925,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.6761713695632989, var=0.41113237262952895, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.984351249045606"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.984351249045606""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.984351249045606
""",
)
entry(
index = 101,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_2R!H->C",
kinetics = ArrheniusBM(A=(400,'m^3/(mol*s)'), n=1.5, w0=(564000,'J/mol'), E0=(56400,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 102,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(26.4856,'m^3/(mol*s)'), n=1.82402, w0=(573250,'J/mol'), E0=(52116.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.20147811603791788, var=0.27148248712845674, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.5507732128102802"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.5507732128102802""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.5507732128102802
""",
)
entry(
index = 103,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS",
kinetics = ArrheniusBM(A=(7.38112e+07,'m^3/(mol*s)'), n=-3.24554e-09, w0=(655500,'J/mol'), E0=(7107.41,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.06280608228446091, var=6.8952486502763435, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 5.421999069670699"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 5.421999069670699""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 5.421999069670699
""",
)
entry(
index = 104,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS",
kinetics = ArrheniusBM(A=(1.33617e+07,'m^3/(mol*s)'), n=-0.103414, w0=(663500,'J/mol'), E0=(25127,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.04090083298548687, var=1.1654817577770016, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS',), comment="""BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 2.2670273905941976"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 2.2670273905941976""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS
Total Standard Deviation in ln(k): 2.2670273905941976
""",
)
entry(
index = 105,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R",
kinetics = ArrheniusBM(A=(8.34831e-40,'m^3/(mol*s)'), n=13.7834, w0=(648500,'J/mol'), E0=(-48242.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-2.1311443344629586, var=3.4442370952847847, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 9.075152857245785"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 9.075152857245785""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R
Total Standard Deviation in ln(k): 9.075152857245785
""",
)
entry(
index = 106,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_4CN->C",
kinetics = ArrheniusBM(A=(1.6,'m^3/(mol*s)'), n=1.87, w0=(638000,'J/mol'), E0=(77381.5,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_4CN->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_4CN->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 107,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C",
kinetics = ArrheniusBM(A=(44.6418,'m^3/(mol*s)'), n=1.56985, w0=(603900,'J/mol'), E0=(22091.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.1610562744436888, var=4.093886631501787, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C',), comment="""BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C
Total Standard Deviation in ln(k): 4.46091569890246"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C
Total Standard Deviation in ln(k): 4.46091569890246""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C
Total Standard Deviation in ln(k): 4.46091569890246
""",
)
entry(
index = 108,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0",
kinetics = ArrheniusBM(A=(41.769,'m^3/(mol*s)'), n=1.71947, w0=(595400,'J/mol'), E0=(23393.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.059443513509909215, var=0.4132725572855109, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0',), comment="""BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0
Total Standard Deviation in ln(k): 1.4381251318640216"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0
Total Standard Deviation in ln(k): 1.4381251318640216""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0
Total Standard Deviation in ln(k): 1.4381251318640216
""",
)
entry(
index = 109,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0",
kinetics = ArrheniusBM(A=(564726,'m^3/(mol*s)'), n=-0.242464, w0=(599667,'J/mol'), E0=(42555.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=2.455757428679977, var=11.56486943123057, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0
Total Standard Deviation in ln(k): 12.987779484083884"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0
Total Standard Deviation in ln(k): 12.987779484083884""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0
Total Standard Deviation in ln(k): 12.987779484083884
""",
)
entry(
index = 110,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C",
kinetics = ArrheniusBM(A=(1004.69,'m^3/(mol*s)'), n=1.27744, w0=(576500,'J/mol'), E0=(57650,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=1.2983662469973947, var=5.131928286299004, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 7.803705422168636"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 7.803705422168636""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 7.803705422168636
""",
)
entry(
index = 111,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C",
kinetics = ArrheniusBM(A=(556.026,'m^3/(mol*s)'), n=1.27907, w0=(574750,'J/mol'), E0=(57475,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.03980613384668196, var=0.21353467318894948, Tref=1000.0, N=4, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 4 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.0263997235150666"""),
rank = 11,
shortDesc = """BM rule fitted to 4 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.0263997235150666""",
longDesc =
"""
BM rule fitted to 4 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.0263997235150666
""",
)
entry(
index = 112,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_1CNS->C",
kinetics = ArrheniusBM(A=(1.2,'m^3/(mol*s)'), n=2, w0=(571000,'J/mol'), E0=(57100,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 113,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C",
kinetics = ArrheniusBM(A=(330.615,'m^3/(mol*s)'), n=1.27907, w0=(621500,'J/mol'), E0=(62150,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.5988305691570073, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.5045994199924806"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.5045994199924806""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 1.5045994199924806
""",
)
entry(
index = 114,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_Sp-5R!H#4CCCNNNSSS",
kinetics = ArrheniusBM(A=(3.61e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_Sp-5R!H#4CCCNNNSSS',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 115,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS",
kinetics = ArrheniusBM(A=(369.15,'m^3/(mol*s)'), n=1.26281, w0=(548111,'J/mol'), E0=(46919.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.19390939481463373, var=0.18238905447925002, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS',), comment="""BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 1.3433723769367325"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 1.3433723769367325""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS
Total Standard Deviation in ln(k): 1.3433723769367325
""",
)
entry(
index = 116,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C",
kinetics = ArrheniusBM(A=(265.595,'m^3/(mol*s)'), n=1.19634, w0=(550667,'J/mol'), E0=(55066.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.03752710350769617, var=2.3527370323157824, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C
Total Standard Deviation in ln(k): 3.1692790336585617"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C
Total Standard Deviation in ln(k): 3.1692790336585617""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C
Total Standard Deviation in ln(k): 3.1692790336585617
""",
)
entry(
index = 117,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C",
kinetics = ArrheniusBM(A=(12.3143,'m^3/(mol*s)'), n=1.5703, w0=(547500,'J/mol'), E0=(78127.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.3006228376348046, var=0.597653593422656, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C',), comment="""BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C
Total Standard Deviation in ln(k): 2.3051555325691324"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C
Total Standard Deviation in ln(k): 2.3051555325691324""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C
Total Standard Deviation in ln(k): 2.3051555325691324
""",
)
entry(
index = 118,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(260.548,'m^3/(mol*s)'), n=1.2433, w0=(537333,'J/mol'), E0=(31366,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.010636409468894204, var=1.4552981397233726, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.445151611974016"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.445151611974016""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.445151611974016
""",
)
entry(
index = 119,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(209.849,'m^3/(mol*s)'), n=1.2433, w0=(591250,'J/mol'), E0=(59125,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.5787018105298811, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.4540246495725655"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.4540246495725655""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.4540246495725655
""",
)
entry(
index = 120,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C",
kinetics = ArrheniusBM(A=(3.01e+07,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 121,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(500,'m^3/(mol*s)'), n=1.5, w0=(590000,'J/mol'), E0=(59000,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 122,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->C",
kinetics = ArrheniusBM(A=(330,'m^3/(mol*s)'), n=1.5, w0=(577500,'J/mol'), E0=(57750,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 123,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(4747.78,'m^3/(mol*s)'), n=1.1268, w0=(573833,'J/mol'), E0=(53979.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.015461369399676534, var=0.5264999263267074, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.4934897420245925"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.4934897420245925""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 1.4934897420245925
""",
)
entry(
index = 124,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C",
kinetics = ArrheniusBM(A=(170,'m^3/(mol*s)'), n=1.5, w0=(558500,'J/mol'), E0=(55850,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 125,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C",
kinetics = ArrheniusBM(A=(170,'m^3/(mol*s)'), n=1.5, w0=(684500,'J/mol'), E0=(68450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 126,
label = "Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R_Ext-8R!H-R",
kinetics = ArrheniusBM(A=(10000,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(20706.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R_Ext-8R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R_Ext-8R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R_Ext-8R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_4R->O_Ext-4O-R_Sp-5R!H-1R!H_Ext-5R!H-R_Ext-1R!H-R_Ext-8R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 127,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C",
kinetics = ArrheniusBM(A=(4.90603e+06,'m^3/(mol*s)'), n=-0.235751, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0008697138710928922, var=0.39521319869048593, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C',), comment="""BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C
Total Standard Deviation in ln(k): 1.2624816505233833"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C
Total Standard Deviation in ln(k): 1.2624816505233833""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C
Total Standard Deviation in ln(k): 1.2624816505233833
""",
)
entry(
index = 128,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_N-7R!H->C",
kinetics = ArrheniusBM(A=(241000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_N-7R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_N-7R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_N-7R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_N-7R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 129,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C",
kinetics = ArrheniusBM(A=(6.03e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 130,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C",
kinetics = ArrheniusBM(A=(772178,'m^3/(mol*s)'), n=0.0249446, w0=(537833,'J/mol'), E0=(23950.2,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.5224495907336478, var=0.6251865196623992, Tref=1000.0, N=9, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C',), comment="""BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 2.8978061230720504"""),
rank = 11,
shortDesc = """BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 2.8978061230720504""",
longDesc =
"""
BM rule fitted to 9 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C
Total Standard Deviation in ln(k): 2.8978061230720504
""",
)
entry(
index = 131,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_2NO->N",
kinetics = ArrheniusBM(A=(720,'m^3/(mol*s)'), n=1.5, w0=(576500,'J/mol'), E0=(57650,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 132,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_N-2NO->N",
kinetics = ArrheniusBM(A=(1.81e+07,'m^3/(mol*s)'), n=0, w0=(642000,'J/mol'), E0=(64200,'J/mol'), Tmin=(300,'K'), Tmax=(1000,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_N-2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 133,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_2NO->N",
kinetics = ArrheniusBM(A=(240,'m^3/(mol*s)'), n=1.5, w0=(534500,'J/mol'), E0=(53450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 134,
label = "Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_N-2NO->N",
kinetics = ArrheniusBM(A=(480,'m^3/(mol*s)'), n=1.5, w0=(612000,'J/mol'), E0=(63724.7,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_N-2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_4R->H_Sp-2R!H-1R!H_2R!H-u1_N-1R!H->O_N-1CN->C_N-2R!H->C_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 135,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_5R!H->C",
kinetics = ArrheniusBM(A=(3.01e+07,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 136,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_N-5R!H->C",
kinetics = ArrheniusBM(A=(1.81e+08,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(56853,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_N-5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_Sp-5R!H=4CCNNOOSS_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 137,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R",
kinetics = ArrheniusBM(A=(2.85561e+07,'m^3/(mol*s)'), n=-0.375, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-4.305460834090209e-17, var=0.06912283083491383, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R
Total Standard Deviation in ln(k): 0.5270693322083513"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R
Total Standard Deviation in ln(k): 0.5270693322083513""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R
Total Standard Deviation in ln(k): 0.5270693322083513
""",
)
entry(
index = 138,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-5R!H-R",
kinetics = ArrheniusBM(A=(1.81e+07,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(56999.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-5R!H-R',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-5R!H-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-5R!H-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-5R!H-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 139,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C",
kinetics = ArrheniusBM(A=(5.1898e+06,'m^3/(mol*s)'), n=-3.43792e-07, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.4852771291652849e-07, var=1.4637540075814492, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C
Total Standard Deviation in ln(k): 2.4254431787653163"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C
Total Standard Deviation in ln(k): 2.4254431787653163""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C
Total Standard Deviation in ln(k): 2.4254431787653163
""",
)
entry(
index = 140,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C",
kinetics = ArrheniusBM(A=(2.63176e+08,'m^3/(mol*s)'), n=-0.401293, w0=(679500,'J/mol'), E0=(37186.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.1741100101612896, var=4.186068487337028, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C
Total Standard Deviation in ln(k): 7.051689842241445"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C
Total Standard Deviation in ln(k): 7.051689842241445""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C
Total Standard Deviation in ln(k): 7.051689842241445
""",
)
entry(
index = 141,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_2NOS->N",
kinetics = ArrheniusBM(A=(0.0294,'m^3/(mol*s)'), n=2.69, w0=(662000,'J/mol'), E0=(33464.5,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_2NOS->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 142,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_N-2NOS->N",
kinetics = ArrheniusBM(A=(0.029,'m^3/(mol*s)'), n=2.69, w0=(635000,'J/mol'), E0=(6270.39,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_N-2NOS->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_4CNOS->O_Ext-4O-R_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 143,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R",
kinetics = ArrheniusBM(A=(1.76809e+09,'m^3/(mol*s)'), n=-0.613668, w0=(598500,'J/mol'), E0=(43328.7,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.47302926670052275, var=7.002677954598716, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R
Total Standard Deviation in ln(k): 6.493560675869729"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R
Total Standard Deviation in ln(k): 6.493560675869729""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R
Total Standard Deviation in ln(k): 6.493560675869729
""",
)
entry(
index = 144,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_2NOS->N",
kinetics = ArrheniusBM(A=(1.8,'m^3/(mol*s)'), n=1.94, w0=(625500,'J/mol'), E0=(51859.9,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_2NOS->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 145,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_N-2NOS->N",
kinetics = ArrheniusBM(A=(0.92,'m^3/(mol*s)'), n=1.94, w0=(598500,'J/mol'), E0=(35268.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_N-2NOS->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_N-2NOS->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 146,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(55.3682,'m^3/(mol*s)'), n=1.72068, w0=(573833,'J/mol'), E0=(26330,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.16555948575625073, var=0.14398858117357702, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.1766919183137672"""),
rank = 11,
shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.1766919183137672""",
longDesc =
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 1.1766919183137672
""",
)
entry(
index = 147,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(27.368,'m^3/(mol*s)'), n=1.71766, w0=(627750,'J/mol'), E0=(62775,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.8809875394276374, var=0.011502560091510204, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.4285443457657907"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.4285443457657907""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 2.4285443457657907
""",
)
entry(
index = 148,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(7.23e+06,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(92345.7,'J/mol'), Tmin=(700,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_Sp-2R!H-1CNS',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 149,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS",
kinetics = ArrheniusBM(A=(587946,'m^3/(mol*s)'), n=-0.248363, w0=(618000,'J/mol'), E0=(21389.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=1.394125347182226, var=15.830042173788614, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.479064056591858"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.479064056591858""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS
Total Standard Deviation in ln(k): 11.479064056591858
""",
)
# --- Auto-generated RMG-Py kinetics rate-rule entries (ArrheniusBM) ---
# Each entry() call below is one data record: a Blowers-Masel Arrhenius
# rate rule fitted to training reactions at a single node of a reaction-
# family decision tree.  The `label` string encodes the tree node's
# structural constraints; `uncertainty` carries the fit statistics
# (mu/var at Tref, N = number of training reactions).
# NOTE(review): this section looks machine-generated by RMG's tree-fitting
# routines -- the numeric values and label strings are data, not logic,
# and should be regenerated rather than hand-edited.
entry(
    index = 150,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C",
    kinetics = ArrheniusBM(A=(2.41e+07,'m^3/(mol*s)'), n=0, w0=(563000,'J/mol'), E0=(56300,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 151,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C",
    kinetics = ArrheniusBM(A=(3.6,'m^3/(mol*s)'), n=2, w0=(590000,'J/mol'), E0=(59000,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 152,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N",
    kinetics = ArrheniusBM(A=(467.561,'m^3/(mol*s)'), n=1.27907, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.5988305691570074, var=0.9609060278364029, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N
Total Standard Deviation in ln(k): 3.469757305105129"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N
Total Standard Deviation in ln(k): 3.469757305105129""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N
Total Standard Deviation in ln(k): 3.469757305105129
""",
)
entry(
    index = 153,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N",
    kinetics = ArrheniusBM(A=(661.23,'m^3/(mol*s)'), n=1.27907, w0=(601500,'J/mol'), E0=(60150,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.5988305691570073, var=0.0, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N
Total Standard Deviation in ln(k): 1.5045994199924806"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N
Total Standard Deviation in ln(k): 1.5045994199924806""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N
Total Standard Deviation in ln(k): 1.5045994199924806
""",
)
entry(
    index = 154,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C",
    kinetics = ArrheniusBM(A=(1.2,'m^3/(mol*s)'), n=2, w0=(558500,'J/mol'), E0=(55850,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 155,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C",
    kinetics = ArrheniusBM(A=(1.2,'m^3/(mol*s)'), n=2, w0=(684500,'J/mol'), E0=(68450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_N-Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 156,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_2R!H->S",
    kinetics = ArrheniusBM(A=(979000,'m^3/(mol*s)'), n=0, w0=(537500,'J/mol'), E0=(38210.6,'J/mol'), Tmin=(298,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_2R!H->S',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_2R!H->S
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_2R!H->S
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_2R!H->S
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 157,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S",
    kinetics = ArrheniusBM(A=(363.33,'m^3/(mol*s)'), n=1.26517, w0=(549438,'J/mol'), E0=(41379.5,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.1533575328403071, var=0.10389045665815419, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S',), comment="""BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S
Total Standard Deviation in ln(k): 1.031487497319548"""),
    rank = 11,
    shortDesc = """BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S
Total Standard Deviation in ln(k): 1.031487497319548""",
    longDesc = 
"""
BM rule fitted to 8 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S
Total Standard Deviation in ln(k): 1.031487497319548
""",
)
entry(
    index = 158,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C",
    kinetics = ArrheniusBM(A=(460.84,'m^3/(mol*s)'), n=1.19515, w0=(552500,'J/mol'), E0=(55250,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.1546561267678964, var=0.37853885495848577, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C
Total Standard Deviation in ln(k): 1.6220067411055652"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C
Total Standard Deviation in ln(k): 1.6220067411055652""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C
Total Standard Deviation in ln(k): 1.6220067411055652
""",
)
entry(
    index = 159,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_N-Sp-2R!H-1C",
    kinetics = ArrheniusBM(A=(0.81,'m^3/(mol*s)'), n=1.87, w0=(547000,'J/mol'), E0=(54700,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_N-Sp-2R!H-1C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_N-Sp-2R!H-1C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_N-Sp-2R!H-1C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_N-Sp-2R!H-1C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 160,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C",
    kinetics = ArrheniusBM(A=(69.6524,'m^3/(mol*s)'), n=1.34293, w0=(544000,'J/mol'), E0=(54400,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.43780050013999694, var=0.5999111817527254, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 2.6527474307051118"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 2.6527474307051118""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C
Total Standard Deviation in ln(k): 2.6527474307051118
""",
)
entry(
    index = 161,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C",
    kinetics = ArrheniusBM(A=(0.725143,'m^3/(mol*s)'), n=1.93933, w0=(549833,'J/mol'), E0=(76446.4,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.008417366290780803, var=1.809976781745819, Tref=1000.0, N=3, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C',), comment="""BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.718227067151954"""),
    rank = 11,
    shortDesc = """BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.718227067151954""",
    longDesc = 
"""
BM rule fitted to 3 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C
Total Standard Deviation in ln(k): 2.718227067151954
""",
)
entry(
    index = 162,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N",
    kinetics = ArrheniusBM(A=(207.556,'m^3/(mol*s)'), n=1.2433, w0=(511500,'J/mol'), E0=(51150,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.5787018105298811, var=3.7227133182354435, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 5.322027504076752"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 5.322027504076752""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 5.322027504076752
""",
)
entry(
    index = 163,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_N-2R!H->N",
    kinetics = ArrheniusBM(A=(1.8,'m^3/(mol*s)'), n=1.94, w0=(589000,'J/mol'), E0=(45404.8,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_N-2R!H->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 164,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_1CNS->C",
    kinetics = ArrheniusBM(A=(0.92,'m^3/(mol*s)'), n=1.94, w0=(534500,'J/mol'), E0=(53450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 165,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_N-1CNS->C",
    kinetics = ArrheniusBM(A=(0.92,'m^3/(mol*s)'), n=1.94, w0=(648000,'J/mol'), E0=(64800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 166,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1",
    kinetics = ArrheniusBM(A=(400.418,'m^3/(mol*s)'), n=1.43269, w0=(586750,'J/mol'), E0=(45173.2,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.23284945485790864, var=0.6476388655306268, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.1983797414238784"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.1983797414238784""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.1983797414238784
""",
)
entry(
    index = 167,
    label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_N-2NO-u1",
    kinetics = ArrheniusBM(A=(330,'m^3/(mol*s)'), n=1.5, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_N-2NO-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 168,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R",
    kinetics = ArrheniusBM(A=(5.25814e+07,'m^3/(mol*s)'), n=-0.55, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=3.5036392791388526, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 3.7524652808647927"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 3.7524652808647927""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 3.7524652808647927
""",
)
entry(
    index = 169,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-7C-R",
    kinetics = ArrheniusBM(A=(783000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-7C-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-7C-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-7C-R
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-7C-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 170,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Sp-7C-4C",
    kinetics = ArrheniusBM(A=(843000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Sp-7C-4C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 171,
    label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_N-Sp-7C-4C",
    kinetics = ArrheniusBM(A=(843000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_N-Sp-7C-4C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_N-Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_N-Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_N-Sp-7C-4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 172,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C",
    kinetics = ArrheniusBM(A=(773968,'m^3/(mol*s)'), n=0.0250683, w0=(537688,'J/mol'), E0=(22302.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.6712802518990716, var=0.8880216259988452, Tref=1000.0, N=8, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C',), comment="""BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 3.5757938816348322"""),
    rank = 11,
    shortDesc = """BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 3.5757938816348322""",
    longDesc = 
"""
BM rule fitted to 8 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 3.5757938816348322
""",
)
entry(
    index = 173,
    label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_N-6BrCClFINOPSi->C",
    kinetics = ArrheniusBM(A=(482000,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_N-6BrCClFINOPSi->C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_N-6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_N-6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_N-6BrCClFINOPSi->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 174,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R_Ext-4CNOS-R",
    kinetics = ArrheniusBM(A=(3.47e+08,'m^3/(mol*s)'), n=-0.75, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R_Ext-4CNOS-R',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R_Ext-4CNOS-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R_Ext-4CNOS-R
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_Ext-4CNOS-R_Ext-4CNOS-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 175,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C",
    kinetics = ArrheniusBM(A=(3.40826e+06,'m^3/(mol*s)'), n=-1.95659e-07, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=0.9609060278364027, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C
Total Standard Deviation in ln(k): 1.9651578851126479"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C
Total Standard Deviation in ln(k): 1.9651578851126479""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C
Total Standard Deviation in ln(k): 1.9651578851126479
""",
)
entry(
    index = 176,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_N-Sp-5R!H-4C",
    kinetics = ArrheniusBM(A=(1.20333e+07,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_N-Sp-5R!H-4C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_N-Sp-5R!H-4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_N-Sp-5R!H-4C
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_N-Sp-5R!H-4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 177,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0",
    kinetics = ArrheniusBM(A=(2.78282e+08,'m^3/(mol*s)'), n=-0.359057, w0=(679500,'J/mol'), E0=(75512.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.12167865343175763, var=0.6437215165799257, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0
Total Standard Deviation in ln(k): 1.914169472146607"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0
Total Standard Deviation in ln(k): 1.914169472146607""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0
Total Standard Deviation in ln(k): 1.914169472146607
""",
)
entry(
    index = 178,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_N-5R!H-u0",
    kinetics = ArrheniusBM(A=(5.7209e+06,'m^3/(mol*s)'), n=0, w0=(679500,'J/mol'), E0=(20296,'J/mol'), Tmin=(298,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_N-5R!H-u0',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_N-5R!H-u0
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_N-5R!H-u0
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_N-5R!H-u0
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 179,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_5R!H->N",
    kinetics = ArrheniusBM(A=(0.92,'m^3/(mol*s)'), n=1.94, w0=(598500,'J/mol'), E0=(24711.5,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_5R!H->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_5R!H->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_5R!H->N
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_5R!H->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
    index = 180,
    label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N",
    kinetics = ArrheniusBM(A=(39.3893,'m^3/(mol*s)'), n=1.71766, w0=(598500,'J/mol'), E0=(7731.75,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.8611054555760191, var=1.2141850405896415, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N
Total Standard Deviation in ln(k): 4.372600429820919"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N
Total Standard Deviation in ln(k): 4.372600429820919""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N
Total Standard Deviation in ln(k): 4.372600429820919
""",
)
entry(
    index = 181,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N",
    kinetics = ArrheniusBM(A=(55.3682,'m^3/(mol*s)'), n=1.72068, w0=(548000,'J/mol'), E0=(41967.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.0426036023134155, var=0.11276809873671895, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 3.2928163591177713"""),
    rank = 11,
    shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 3.2928163591177713""",
    longDesc = 
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N
Total Standard Deviation in ln(k): 3.2928163591177713
""",
)
entry(
    index = 182,
    label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_N-2R!H->N",
    kinetics = ArrheniusBM(A=(0.029,'m^3/(mol*s)'), n=2.69, w0=(625500,'J/mol'), E0=(26705.8,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_N-2R!H->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
    rank = 11,
    shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994""",
    longDesc = 
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_N-2R!H->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 183,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C",
kinetics = ArrheniusBM(A=(0.014,'m^3/(mol*s)'), n=2.69, w0=(571000,'J/mol'), E0=(57100,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 184,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C",
kinetics = ArrheniusBM(A=(0.014,'m^3/(mol*s)'), n=2.69, w0=(684500,'J/mol'), E0=(68450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 185,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C",
kinetics = ArrheniusBM(A=(2.6e+09,'m^3/(mol*s)'), n=-1.26, w0=(551500,'J/mol'), E0=(34913.7,'J/mol'), Tmin=(300,'K'), Tmax=(2000,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 186,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C",
kinetics = ArrheniusBM(A=(1.2e+06,'m^3/(mol*s)'), n=-0.34, w0=(684500,'J/mol'), E0=(61174.3,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_N-5R!H-u0_N-Sp-2R!H-1CNS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 187,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_2N-u1",
kinetics = ArrheniusBM(A=(1.2,'m^3/(mol*s)'), n=2, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 188,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_N-2N-u1",
kinetics = ArrheniusBM(A=(2.4,'m^3/(mol*s)'), n=2, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_N-2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 189,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_2CO->C",
kinetics = ArrheniusBM(A=(2.4,'m^3/(mol*s)'), n=2, w0=(577500,'J/mol'), E0=(57750,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_2CO->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_2CO->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_2CO->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_2CO->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 190,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_N-2CO->C",
kinetics = ArrheniusBM(A=(2.4,'m^3/(mol*s)'), n=2, w0=(625500,'J/mol'), E0=(55947.8,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_N-2CO->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_N-2CO->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_N-2CO->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->N_N-2CO->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 191,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O",
kinetics = ArrheniusBM(A=(334.46,'m^3/(mol*s)'), n=1.27744, w0=(593500,'J/mol'), E0=(32251.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.2786278929612721, var=0.671686074395935, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O
Total Standard Deviation in ln(k): 2.3430799122511208"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O
Total Standard Deviation in ln(k): 2.3430799122511208""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O
Total Standard Deviation in ln(k): 2.3430799122511208
""",
)
entry(
index = 192,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O",
kinetics = ArrheniusBM(A=(1.47985e+07,'m^3/(mol*s)'), n=-0.311932, w0=(534750,'J/mol'), E0=(37491,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.14761419944759951, var=0.2702865095049491, Tref=1000.0, N=6, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O',), comment="""BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O
Total Standard Deviation in ln(k): 1.4131333979764096"""),
rank = 11,
shortDesc = """BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O
Total Standard Deviation in ln(k): 1.4131333979764096""",
longDesc =
"""
BM rule fitted to 6 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O
Total Standard Deviation in ln(k): 1.4131333979764096
""",
)
entry(
index = 193,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_2R!H->C",
kinetics = ArrheniusBM(A=(2.19e+08,'m^3/(mol*s)'), n=-0.68, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 194,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_N-2R!H->C",
kinetics = ArrheniusBM(A=(2.4,'m^3/(mol*s)'), n=1.87, w0=(566000,'J/mol'), E0=(56600,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_N-2R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_1CNS->C_Sp-2R!H-1C_N-2R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 195,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_Sp-2C-1N",
kinetics = ArrheniusBM(A=(1.6,'m^3/(mol*s)'), n=1.87, w0=(553500,'J/mol'), E0=(55350,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_Sp-2C-1N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 196,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_N-Sp-2C-1N",
kinetics = ArrheniusBM(A=(0.82,'m^3/(mol*s)'), n=1.87, w0=(534500,'J/mol'), E0=(53450,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_N-Sp-2C-1N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_N-Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_N-Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_2R!H->C_N-Sp-2C-1N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 197,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1",
kinetics = ArrheniusBM(A=(1.9471e-05,'m^3/(mol*s)'), n=3.27783, w0=(562750,'J/mol'), E0=(48062.1,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.8568478925825855, var=0.083327900484, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.73158245570468"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.73158245570468""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1
Total Standard Deviation in ln(k): 2.73158245570468
""",
)
entry(
index = 198,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_N-2NO-u1",
kinetics = ArrheniusBM(A=(1.6,'m^3/(mol*s)'), n=1.87, w0=(524000,'J/mol'), E0=(52400,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_N-2NO-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_N-2NO-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 199,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_2N-u1",
kinetics = ArrheniusBM(A=(0.46,'m^3/(mol*s)'), n=1.94, w0=(511500,'J/mol'), E0=(51150,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 200,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_N-2N-u1",
kinetics = ArrheniusBM(A=(1.8,'m^3/(mol*s)'), n=1.94, w0=(511500,'J/mol'), E0=(51150,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_N-2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_N-4CNS->C_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 201,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N",
kinetics = ArrheniusBM(A=(170,'m^3/(mol*s)'), n=1.5, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 202,
label = "Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N",
kinetics = ArrheniusBM(A=(330,'m^3/(mol*s)'), n=1.5, w0=(625500,'J/mol'), E0=(52128.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_N-4CNOS-u1_N-1R!H->O_Sp-2R!H-1CNS_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 203,
label = "Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R_Ext-4C-R",
kinetics = ArrheniusBM(A=(1.08e+08,'m^3/(mol*s)'), n=-0.75, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R_Ext-4C-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_Ext-1R!H-R_4CHNS->C_Ext-4C-R_N-Sp-7R!H#4C_7R!H->C_Ext-4C-R_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 204,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_1R!H-inRing",
kinetics = ArrheniusBM(A=(0.000675,'m^3/(mol*s)'), n=2.7, w0=(483500,'J/mol'), E0=(35785.7,'J/mol'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_1R!H-inRing',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_1R!H-inRing
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_1R!H-inRing
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_1R!H-inRing
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 205,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing",
kinetics = ArrheniusBM(A=(6.31109e+06,'m^3/(mol*s)'), n=-0.199393, w0=(545429,'J/mol'), E0=(32200.5,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.1700346401571909, var=0.5832966950308113, Tref=1000.0, N=7, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing',), comment="""BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing
Total Standard Deviation in ln(k): 1.9583163355851787"""),
rank = 11,
shortDesc = """BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing
Total Standard Deviation in ln(k): 1.9583163355851787""",
longDesc =
"""
BM rule fitted to 7 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing
Total Standard Deviation in ln(k): 1.9583163355851787
""",
)
entry(
index = 206,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_5R!H->C",
kinetics = ArrheniusBM(A=(2.41e+06,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 207,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_N-5R!H->C",
kinetics = ArrheniusBM(A=(4.82e+06,'m^3/(mol*s)'), n=0, w0=(655500,'J/mol'), E0=(65550,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_N-5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_4CNOS->C_Sp-5R!H-4C_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 208,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_5R!H->C",
kinetics = ArrheniusBM(A=(2.41e+07,'m^3/(mol*s)'), n=0, w0=(679500,'J/mol'), E0=(67950,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 209,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_N-5R!H->C",
kinetics = ArrheniusBM(A=(1.21e+07,'m^3/(mol*s)'), n=0, w0=(679500,'J/mol'), E0=(55695.5,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_N-5R!H->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_2R!H->C_Ext-4CNOS-R_N-Sp-5R!H=4CCNNOOSS_N-4CNOS->C_5R!H-u0_N-5R!H->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 210,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_5BrCClFIOPSSi->C",
kinetics = ArrheniusBM(A=(0.014,'m^3/(mol*s)'), n=2.69, w0=(598500,'J/mol'), E0=(17330.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_5BrCClFIOPSSi->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 211,
label = "Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_N-5BrCClFIOPSSi->C",
kinetics = ArrheniusBM(A=(0.029,'m^3/(mol*s)'), n=2.69, w0=(598500,'J/mol'), E0=(13374.2,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_N-5BrCClFIOPSSi->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_N-5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_N-5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_1R!H->O_N-2R!H->C_N-4CNOS->O_N-4CN->C_Ext-4N-R_N-5R!H->N_N-5BrCClFIOPSSi->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 212,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_2N-u1",
kinetics = ArrheniusBM(A=(0.029,'m^3/(mol*s)'), n=2.69, w0=(548000,'J/mol'), E0=(39565.9,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 213,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_N-2N-u1",
kinetics = ArrheniusBM(A=(0.029,'m^3/(mol*s)'), n=2.69, w0=(548000,'J/mol'), E0=(54800,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_N-2N-u1',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_4CNOS->O_Ext-4O-R_5R!H-u0_Sp-2R!H-1CNS_2R!H->N_N-2N-u1
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 214,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_1CNS->C",
kinetics = ArrheniusBM(A=(2.89e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 215,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_N-1CNS->C",
kinetics = ArrheniusBM(A=(1.2,'m^3/(mol*s)'), n=2, w0=(648000,'J/mol'), E0=(54170.7,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_N-1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_5R!H->O_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 216,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS",
kinetics = ArrheniusBM(A=(1.05319e+07,'m^3/(mol*s)'), n=-0.250519, w0=(533900,'J/mol'), E0=(39536.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.1636050951277374, var=0.25119831907169365, Tref=1000.0, N=5, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS',), comment="""BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 1.4158350573264697"""),
rank = 11,
shortDesc = """BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 1.4158350573264697""",
longDesc =
"""
BM rule fitted to 5 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 1.4158350573264697
""",
)
entry(
index = 217,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_N-Sp-5CS-4CCNSS",
kinetics = ArrheniusBM(A=(1.52e+08,'m^3/(mol*s)'), n=-0.7, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_N-Sp-5CS-4CCNSS',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_N-Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_N-Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_N-Sp-5CS-4CCNSS
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 218,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N",
kinetics = ArrheniusBM(A=(0.82,'m^3/(mol*s)'), n=1.87, w0=(524000,'J/mol'), E0=(52400,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 219,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N",
kinetics = ArrheniusBM(A=(1.6,'m^3/(mol*s)'), n=1.87, w0=(601500,'J/mol'), E0=(75336.7,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_4CNS->C_N-1CNS->C_N-2R!H->C_2NO-u1_N-2NO->N
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 220,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R",
kinetics = ArrheniusBM(A=(2.36058e+07,'m^3/(mol*s)'), n=-0.372184, w0=(550250,'J/mol'), E0=(35580.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.25457934710988045, var=1.6100541888330673, Tref=1000.0, N=4, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R',), comment="""BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R
Total Standard Deviation in ln(k): 3.1834130560690546"""),
rank = 11,
shortDesc = """BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R
Total Standard Deviation in ln(k): 3.1834130560690546""",
longDesc =
"""
BM rule fitted to 4 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R
Total Standard Deviation in ln(k): 3.1834130560690546
""",
)
entry(
index = 221,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C",
kinetics = ArrheniusBM(A=(1.97085e+06,'m^3/(mol*s)'), n=-0.0393785, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.0327092327690802, var=0.00213978781668374, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C
Total Standard Deviation in ln(k): 0.1749187175813773"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C
Total Standard Deviation in ln(k): 0.1749187175813773""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C
Total Standard Deviation in ln(k): 0.1749187175813773
""",
)
entry(
index = 222,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_N-Sp-6C-4C",
kinetics = ArrheniusBM(A=(1.21e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_N-Sp-6C-4C',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_N-Sp-6C-4C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_N-Sp-6C-4C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_N-Sp-6C-4C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 223,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R",
kinetics = ArrheniusBM(A=(7.76827e+08,'m^3/(mol*s)'), n=-0.9, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-1.0763652085225523e-17, var=0.04891149884417046, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.4433660913390881"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.4433660913390881""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R
Total Standard Deviation in ln(k): 0.4433660913390881
""",
)
entry(
index = 224,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C",
kinetics = ArrheniusBM(A=(3.11937e+07,'m^3/(mol*s)'), n=-0.389378, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.032709232769080235, var=0.0016076635746038646, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C',), comment="""BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C
Total Standard Deviation in ln(k): 0.16256521857885725"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C
Total Standard Deviation in ln(k): 0.16256521857885725""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C
Total Standard Deviation in ln(k): 0.16256521857885725
""",
)
entry(
index = 225,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_N-1CNS->C",
kinetics = ArrheniusBM(A=(644,'m^3/(mol*s)'), n=1.19, w0=(513500,'J/mol'), E0=(42507.8,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_N-1CNS->C',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_N-1CNS->C
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 226,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C",
kinetics = ArrheniusBM(A=(1.05265e+08,'m^3/(mol*s)'), n=-0.55, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=3.513977146582821, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C
Total Standard Deviation in ln(k): 3.75799723098171"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C
Total Standard Deviation in ln(k): 3.75799723098171""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C
Total Standard Deviation in ln(k): 3.75799723098171
""",
)
entry(
index = 227,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C",
kinetics = ArrheniusBM(A=(1.9053e+06,'m^3/(mol*s)'), n=-0.0575531, w0=(561500,'J/mol'), E0=(16527.3,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=-0.2144940523654643, var=1.5169644222011907, Tref=1000.0, N=2, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.008063935012343"""),
rank = 11,
shortDesc = """BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.008063935012343""",
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C
Total Standard Deviation in ln(k): 3.008063935012343
""",
)
entry(
index = 228,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C_Ext-6C-R",
kinetics = ArrheniusBM(A=(1.45e+06,'m^3/(mol*s)'), n=0, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C_Ext-6C-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Sp-6C-4C_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 229,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R_Ext-4CNS-R",
kinetics = ArrheniusBM(A=(2.86e+09,'m^3/(mol*s)'), n=-1.1, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R_Ext-4CNS-R',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R_Ext-4CNS-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R_Ext-4CNS-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_Ext-4CNS-R_Ext-4CNS-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 230,
label = "Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C_Ext-5CS-R",
kinetics = ArrheniusBM(A=(2.29e+07,'m^3/(mol*s)'), n=-0.35, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C_Ext-5CS-R',), comment="""BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C_Ext-5CS-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C_Ext-5CS-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_N-4R->H_4CNOS-u1_N-1R!H->O_N-4CNOS->O_Ext-4CNS-R_N-Sp-5R!H#4CCCNNNSSS_N-2R!H->S_N-5R!H->O_Sp-5CS-4CCNSS_1CNS->C_Ext-5CS-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 231,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C_Ext-4C-R",
kinetics = ArrheniusBM(A=(2.16e+08,'m^3/(mol*s)'), n=-0.75, w0=(539000,'J/mol'), E0=(53900,'J/mol'), Tmin=(300,'K'), Tmax=(2500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C_Ext-4C-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_2R!H->C_Ext-4C-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
entry(
index = 232,
label = "Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C_Ext-7R!H-R_Ext-6C-R",
kinetics = ArrheniusBM(A=(1.94e+06,'m^3/(mol*s)'), n=0, w0=(561500,'J/mol'), E0=(36677.6,'J/mol'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.13686319048999, Tref=1000.0, N=1, data_mean=0.0, correlation='Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C_Ext-7R!H-R_Ext-6C-R',), comment="""BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C_Ext-7R!H-R_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994"""),
rank = 11,
shortDesc = """BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C_Ext-7R!H-R_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994""",
longDesc =
"""
BM rule fitted to 1 training reactions at node Root_Ext-1R!H-R_N-4R->O_N-Sp-5R!H=1R!H_Ext-4CHNS-R_N-6R!H->S_4CHNS->C_N-Sp-6BrBrBrCCCClClClFFFIIINNNOOOPPPSiSiSi#4C_6BrCClFINOPSi->C_N-1R!H-inRing_Ext-4C-R_N-2R!H->C_Ext-7R!H-R_Ext-6C-R
Total Standard Deviation in ln(k): 11.540182761524994
""",
)
|
import argparse
import os
import time
import googleapiclient.discovery
# Point the Google auth libraries at a local service-account key file.
# NOTE(review): hard-coded relative path — assumes the process is started
# from the directory containing MyProject.json; confirm before deploying.
credential_path = "./MyProject.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
def delete_instance(compute, project_id, zone, name):
    """Submit a delete request for a Compute Engine instance.

    :param compute: googleapiclient Compute service object.
    :param project_id: Google Cloud project id.
    :param zone: zone the instance lives in.
    :param name: name of the instance to delete.
    :return: the zone-operation resource returned by the API call.
    """
    request = compute.instances().delete(
        project=project_id, zone=zone, instance=name)
    return request.execute()
def wait_for_operation(compute, project, zone, operation):
    """Poll a zone operation once per second until it reports DONE.

    :param compute: googleapiclient Compute service object.
    :param project: Google Cloud project id.
    :param zone: zone of the operation.
    :param operation: operation name to poll.
    :return: the final operation resource once its status is DONE.
    :raises Exception: if the finished operation carries an 'error' field.
    """
    print('Waiting for delete operation to finish...')
    while True:
        result = compute.zoneOperations().get(
            project=project, zone=zone, operation=operation).execute()
        if result['status'] != 'DONE':
            # Not finished yet — back off briefly before the next poll.
            time.sleep(1)
            continue
        print('Deletion has done. ')
        if 'error' in result:
            raise Exception(result['error'])
        return result
def main(project, zone, instance_name):
    """Build the Compute API client, delete the instance, and block until done.

    :param project: Google Cloud project id.
    :param zone: zone of the instance.
    :param instance_name: name of the instance to delete.
    """
    service = googleapiclient.discovery.build('compute', 'v1')
    print('Deleting instance.')
    op = delete_instance(service, project, zone, instance_name)
    wait_for_operation(service, project, zone, op['name'])
if __name__ == '__main__':
    # CLI entry point: one positional project id plus optional zone/name
    # overrides with sensible defaults.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('project_id', help='Google Cloud Project ID')
    parser.add_argument(
        '--zone', default='us-central1-a',
        help='Zone of the instance to delete')
    parser.add_argument(
        '--name', default='instance-1',
        help='Instance name')
    cli_args = parser.parse_args()
    main(cli_args.project_id, cli_args.zone, cli_args.name)
|
# Shared message and choice-list constants for forms/models across the app.
# NOTE(review): ugettext_lazy was deprecated in Django 3.0 and removed in
# 4.0; gettext_lazy is a drop-in rename — confirm the pinned Django version
# before migrating.
from django.utils.translation import ugettext_lazy as _
# Action code used to flag edit operations.
CODE_TEXT_EDIT = 'EDIT'
# Lazily-translated user-facing status messages.
TEXT_MSG_SUCCESS = _('Data saved correctly')
TEXT_MSG_ERROR = _('There was an error saving information')
TEXT_MSG_WARNING = _('It required to complete required fields')
# Maps a message-tag keyword to its translated text.
STATUS_MSG_TAGS = {
    'success': TEXT_MSG_SUCCESS,
    'error': TEXT_MSG_ERROR,
    'warning': TEXT_MSG_WARNING,
}
# Placeholder first entry for <select> widgets; list and tuple variants
# are kept because callers concatenate them with both sequence types.
NAME_SELECT_DEFAULT = _('--Choose--')
SELECT_DEFAULT_ = [('', NAME_SELECT_DEFAULT)]
SELECT_DEFAULT = (('', NAME_SELECT_DEFAULT),)
# Generic enabled/disabled status pair used as model choices.
DISABLED = 'DISABLED'
ENABLED = 'ENABLED'
STATUS_MODEL1 = (
    (ENABLED, _('Enabled')),
    (DISABLED, _('Disabled')),
)
# ITEMS IDENTITY DOCUMENT
# Identity-document type codes and their (code, label) choice tuples.
CODE_DOCUMENT_DNI = 'DOC-1'
CODE_DOCUMENT_RUC = 'DOC-2'
CODE_DOCUMENT_PASSPORT = 'DOC-3'
DOCUMENT_DNI_STRING = (CODE_DOCUMENT_DNI, _('DNI'))
DOCUMENT_RUC_STRING = (CODE_DOCUMENT_RUC, _('RUC'))
DOCUMENT_PASSPORT_STRING = (CODE_DOCUMENT_PASSPORT, _('PASSPORT'))
TYPE_IDENTITY_DOCUMENT_OPTIONS = (
    DOCUMENT_DNI_STRING, DOCUMENT_RUC_STRING, DOCUMENT_PASSPORT_STRING,
)
# ITEMS TRIBUTE PERSON
# Tax-person kinds: natural person vs. legal (juridical) entity.
CODE_TRIBUTE_PERSON_NATURAL = 'PERS-1'
CODE_TRIBUTE_PERSON_JURIDICAL = 'PERS-2'
TRIBUTE_PERSON_NATURAL_STRING = (CODE_TRIBUTE_PERSON_NATURAL, _('Natural'))
TRIBUTE_PERSON_JURIDICAL_STRING = (CODE_TRIBUTE_PERSON_JURIDICAL, _('Legal'))
TRIBUTE_PERSON_OPTIONS = (
    TRIBUTE_PERSON_NATURAL_STRING, TRIBUTE_PERSON_JURIDICAL_STRING,
)
# ITEMS GENDER
CODE_GENDER_MALE = 'GEN-1'
CODE_GENDER_FEMALE = 'GEN-2'
GENDER_MALE_STRING = (CODE_GENDER_MALE, _('Male'))
GENDER_FEMALE_STRING = (CODE_GENDER_FEMALE, _('Female'))
TYPE_GENDER_OPTIONS = (
    GENDER_MALE_STRING, GENDER_FEMALE_STRING,
)
# ITEMS CIVIL STATUS
CODE_CIVIL_STATUS_SINGLE = 'CVS-1'
CODE_CIVIL_STATUS_MARRIED = 'CVS-2'
CODE_CIVIL_STATUS_WIDOWED = 'CVS-3'
CODE_CIVIL_STATUS_DIVORCED = 'CVS-4'
CODE_CIVIL_STATUS_SEPARATED = 'CVS-5'
CIVIL_STATUS_SINGLE_STRING = (CODE_CIVIL_STATUS_SINGLE, _('Single'))
CIVIL_STATUS_MARRIED_STRING = (CODE_CIVIL_STATUS_MARRIED, _('Married'))
CIVIL_STATUS_WIDOWED_STRING = (CODE_CIVIL_STATUS_WIDOWED, _('Widowed'))
CIVIL_STATUS_DIVORCED_STRING = (CODE_CIVIL_STATUS_DIVORCED, _('Divorced'))
CIVIL_STATUS_SEPARATED_STRING = (CODE_CIVIL_STATUS_SEPARATED, _('Separated'))
TYPE_CIVIL_STATUS_OPTIONS = (
    CIVIL_STATUS_SINGLE_STRING, CIVIL_STATUS_MARRIED_STRING,
    CIVIL_STATUS_WIDOWED_STRING, CIVIL_STATUS_DIVORCED_STRING,
    CIVIL_STATUS_SEPARATED_STRING,
)
# ITEMS PLAN COMMERCIAL
# Commercial plan tiers, ordered from free to premium.
CODE_PLAN_ONE = 'PLN-1'
CODE_PLAN_TWO = 'PLN-2'
CODE_PLAN_THREE = 'PLN-3'
CODE_PLAN_FOUR = 'PLN-4'
CODE_PLAN_FIVE = 'PLN-5'
PLAN_ONE_STRING = (CODE_PLAN_ONE, _('Free'))
PLAN_TWO_STRING = (CODE_PLAN_TWO, _('Basic'))
PLAN_THREE_STRING = (CODE_PLAN_THREE, _('Bronze'))
PLAN_FOUR_STRING = (CODE_PLAN_FOUR, _('Silver'))
PLAN_FIVE_STRING = (CODE_PLAN_FIVE, _('Gold'))
TYPE_PLAN_OPTIONS = (
    PLAN_ONE_STRING, PLAN_TWO_STRING, PLAN_THREE_STRING,
    PLAN_FOUR_STRING, PLAN_FIVE_STRING
)
# ITEMS ROLES COMPANY
CODE_ROLE_ONE = 'ROL-1'
CODE_ROLE_TWO = 'ROL-2'
CODE_ROLE_THREE = 'ROL-3'
ROLE_ONE_STRING = (CODE_ROLE_ONE, _('ADMIN'))
ROLE_TWO_STRING = (CODE_ROLE_TWO, _('DATOS'))
ROLE_THREE_STRING = (CODE_ROLE_THREE, _('OPERADOR'))
TYPE_ROLE_OPTIONS = (
    ROLE_ONE_STRING, ROLE_TWO_STRING, ROLE_THREE_STRING,
)
|
# QUESTION:
#
# This problem was asked by Stripe.
#
# Given an array of integers, find the first missing positive integer in
# linear time and constant space. In other words, find the lowest positive
# integer that does not exist in the array. The array can contain duplicates
# and negative numbers as well.
#
# For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0]
# should give 3.
#
# You can modify the input array in-place.
def solution(L):
    """
    Return the first missing positive integer from L in O(n) time and O(1)
    extra space, mutating L in place (permitted by the problem statement).

    Fixes over the previous version: it returned the *list* itself when
    len(L) <= 1, and its sum-of-arithmetic-series trick was wrong for
    duplicates (e.g. [1, 1] -> 1 instead of 2) and for values larger than
    the count of non-negative entries.

    :param L: list of integers (may contain duplicates and negatives)
    :return: smallest positive integer absent from L; None for an empty
        list (preserves the historical contract of this function)
    """
    if not L:
        return None  # historical behavior for empty input
    n = len(L)
    # Pass 1: values outside 1..n can never be the answer's witness;
    # overwrite them with the sentinel n + 1 so pass 2 can ignore them.
    for i in range(n):
        if L[i] <= 0 or L[i] > n:
            L[i] = n + 1
    # Pass 2: record "value v is present" by negating L[v - 1].
    # abs() is needed because the slot may already have been flipped.
    for i in range(n):
        v = abs(L[i])
        if v <= n and L[v - 1] > 0:
            L[v - 1] = -L[v - 1]
    # Pass 3: the first slot left positive marks the missing integer.
    for i in range(n):
        if L[i] > 0:
            return i + 1
    # Every value 1..n is present, so the answer is n + 1.
    return n + 1
"""
class for storing static results of a tarjan ordering
"""
from scipy.sparse.csc import csc_matrix
from scipy.sparse.csr import csr_matrix
from scipy.sparse.linalg import inv, factorized, spsolve
from scipy.sparse import eye
from scipy.io import savemat, loadmat
import os
from collections import namedtuple, defaultdict
from antelope import CONTEXT_STATUS_, comp_dir # , num_dir
from ..engine import BackgroundEngine
from antelope_core import from_json, to_json
from antelope_core.contexts import Context
# File extensions accepted by FlatBackground.write_to_file / from_file.
SUPPORTED_FILETYPES = ('.mat', )
# Module switch: when True, SCCs are flattened out of Af at construction time.
_FLATTEN_AF = False
class NoLciDatabase(Exception):
    """Raised when an LCI computation needs the (A, B) database but none is loaded."""
    pass
class TermRef(object):
    """
    Serializable reference for one row/column of an ordered background.

    Bundles a flow reference, a direction code, a termination reference and
    an optional SCC marker; iterating a TermRef yields the four fields in
    constructor order, which is how orderings get written back out.
    """

    # canonical decoding of the internal 0/1 direction code
    _DIRS = ('Input', 'Output')

    def __init__(self, flow_ref, direction, term_ref, scc_id=None):
        """
        :param flow_ref:
        :param direction: direction w.r.t. term
        :param term_ref:
        :param scc_id: None or 0 for singleton /emission; external_ref of a contained process for SCC
        """
        self._f = str(flow_ref)  # guard: some flows were serialized with integer refs
        self._d = {'Input': 0, 'Output': 1, 0: 0, 1: 1}[direction]
        self._t = term_ref
        self._s = 0
        self.scc_id = scc_id  # setter normalizes None -> 0 sentinel

    @property
    def flow_ref(self):
        return self._f

    @property
    def term_ref(self):
        return self._t

    @property
    def direction(self):
        return TermRef._DIRS[self._d]

    @property
    def scc_id(self):
        # the zero sentinel reads back as an empty list (historical contract)
        return [] if self._s == 0 else self._s

    @scc_id.setter
    def scc_id(self, item):
        self._s = 0 if item is None else item

    def __array__(self):
        # NOTE: returns the 4-tuple of serializable fields, not an ndarray
        return self.flow_ref, self._d, self.term_ref, self._s

    def __iter__(self):
        return iter(self.__array__())
ExchDef = namedtuple('ExchDef', ('process', 'flow', 'direction', 'term', 'value'))
def _iterate_a_matrix(a, y, threshold=1e-8, count=100, quiet=False, solver=None):
if solver == 'spsolve':
ima = eye(a.shape[0]) - a
x = spsolve(ima, y)
return csr_matrix(x).T
y = csr_matrix(y) # tested this with ecoinvent: convert to sparse: 280 ms; keep full: 4.5 sec
total = csr_matrix(y.shape)
if a is None:
return total
mycount = 0
sumtotal = 0.0
while mycount < count:
total += y
y = a.dot(y)
inc = sum(abs(y).data)
if inc == 0:
if not quiet:
print('exact result')
break
sumtotal += inc
if inc / sumtotal < threshold:
break
mycount += 1
if not quiet:
print('completed %d iterations' % mycount)
return total
def _unit_column_vector(dim, inx):
return csr_matrix(((1,), ((inx,), (0,))), shape=(dim, 1))
def split_af(_af, _inds):
    """
    splits the input matrix into diagonal and off-diagonal portions, with the split being determined by _inds
    :param _af:
    :param _inds:
    :return: (_af_non, _af_scc) where _af_scc holds entries whose row AND
        column are both in _inds, and _af_non holds everything else
    """
    _af = _af.tocoo()
    shape = _af.shape
    rows, cols, vals = _af.row, _af.col, _af.data
    non_data = []
    scc_data = []
    # route each stored entry to exactly one of the two output matrices
    for r, c, v in zip(rows, cols, vals):
        if r in _inds and c in _inds:
            non_data.append(0)
            scc_data.append(v)
        else:
            non_data.append(v)
            scc_data.append(0)
    _af_non = csc_matrix((non_data, (rows, cols)), shape=shape)
    _af_scc = csc_matrix((scc_data, (rows, cols)), shape=shape)
    # sanity: the two parts must reconstruct the input exactly
    assert (_af_non + _af_scc - _af).nnz == 0
    return _af_non, _af_scc
def _determine_scc_inds(ts):
    """
    Collect foreground indices of nodes in nontrivial, non-background SCCs.

    :param ts: a tarjan-stack ordering exposing nontrivial_sccs(),
        is_background_scc(), scc() and fg_dict()
    :return: set of foreground matrix indices
    """
    inds = set()
    for scc_key in ts.nontrivial_sccs():
        if ts.is_background_scc(scc_key):
            continue
        inds.update(ts.fg_dict(node.index) for node in ts.scc(scc_key))
    return inds
def flatten(af, ad, bf, ts):
    """
    Accepts a fully populated background engine as argument

    Removes foreground SCCs from Af by right-multiplying all three matrices
    with the inverse of (I - Af_scc).
    :param af:
    :param ad:
    :param bf:
    :param ts:
    :return: af_flat, ad_flat, bf_flat
    """
    non, scc = split_af(af, _determine_scc_inds(ts))
    identity = eye(ts.pdim).tocsc()
    scc_inv = inv(identity - scc)
    return non.dot(scc_inv), ad.dot(scc_inv), bf.dot(scc_inv)
ORDERING_SUFFIX = '.ordering.json.gz'
class FlatBackground(object):
    """
    Static, ordered background stored in an easily serializable way

    Holds the partially-ordered technology matrices (Af foreground, Ad
    background dependency, Bf foreground emissions) plus an optional complete
    (A, B) LCI database, together with the TermRef orderings that name each
    row/column.  Supports LCI computation and (de)serialization to .mat files
    with a JSON ordering sidecar.
    """
    @classmethod
    def from_index(cls, index, quiet=True, **kwargs):
        """
        Build a FlatBackground by running a BackgroundEngine over an index.

        :param index: an index interface with operable processes() and terminate()
        :param quiet: passed to cls
        :param kwargs: passed to add_all_ref_products()
        :return:
        """
        be = BackgroundEngine(index)
        be.add_all_ref_products(**kwargs)
        return cls.from_background_engine(be, quiet=quiet)

    @classmethod
    def from_background_engine(cls, be, **kwargs):
        """Construct from a fully populated, Tarjan-ordered BackgroundEngine."""
        af, ad, bf = be.make_foreground()
        if _FLATTEN_AF:
            # collapse non-background SCCs out of the foreground matrices
            af, ad, bf = flatten(af, ad, bf, be.tstack)
        # map each nontrivial SCC key -> external_ref of one contained process
        _map_nontrivial_sccs = {k: be.product_flow(k).process.external_ref for k in be.tstack.nontrivial_sccs()}

        def _make_term_ref(pf):
            # TermRef constructor args for a product flow; scc_id 0 = singleton
            try:
                _scc_id = _map_nontrivial_sccs[be.tstack.scc_id(pf)]
            except KeyError:
                _scc_id = 0
            return pf.flow.external_ref, pf.direction, pf.process.external_ref, _scc_id

        def _make_term_ext(em):
            """
            Here we decide to store contexts as '; '-concatenated strings -- which we must do bc it is serializable
            gets undone in generate_em_defs which indicates we should use a method of TermRef to produce the ExchDef
            middleman
            Note also the directionality here: comp_dir(em.direction) em is coming from the BackgroundEngine so it
            is an Emission type, which is created from an exterior exchange using its native flow and direction [w/r/t
            the parent]. We take direction w.r.t. the context so the declaration is self-consistent, but that is not
            really sensible. But it's serialized. Thus we take comp-dir.
            Not investigated: whether problems arise when an exchange with a non-complementary context is used
            as the source for a BackgroundEngine emission, the BackgroundEngine is flattened, serialized to .mat, and
            deserialized for computation. Not sure what the problem would be, but we should probably test it.
            [LciaResult negates value when it detects a conflicting exchange-context pairing.]
            :param em:
            :return:
            """
            ''' # <<< master
            try:
                comp = em.compartment[-1]
            except IndexError:
                comp = None
            return em.flow.external_ref, comp_dir(em.direction), comp, 0
            >>>>>>> preferred_product
            '''
            return em.flow.external_ref, comp_dir(em.direction), '; '.join(em.context.as_list()), 0

        return cls([_make_term_ref(x) for x in be.foreground_flows(outputs=False)],
                   [_make_term_ref(x) for x in be.background_flows()],
                   [_make_term_ext(x) for x in be.emissions],
                   af, ad, bf,
                   lci_db=be.lci_db,
                   **kwargs)

    @classmethod
    def from_file(cls, file, **kwargs):
        """Dispatch deserialization on file extension (only '.mat' is implemented)."""
        ext = os.path.splitext(file)[1]
        if ext == '.mat':
            return cls.from_matfile(file, **kwargs)
        elif ext == '.hdf':
            return cls.from_hdf5(file, **kwargs)
        else:
            raise ValueError('Unsupported file type %s' % ext)

    @classmethod
    def from_hdf5(cls, fle, quiet=True):
        # placeholder: HDF5 deserialization not implemented
        raise NotImplementedError

    @classmethod
    def from_matfile(cls, file, quiet=True):
        """Load matrices from a .mat file and orderings from its JSON sidecar."""
        d = loadmat(file)
        if 'A' in d:
            # complete LCI database was saved alongside the foreground matrices
            lci_db = (d['A'].tocsr(), d['B'].tocsr())
        else:
            lci_db = None
        try:
            ordr = from_json(file + ORDERING_SUFFIX)
        except FileNotFoundError:  # legacy
            ordr = from_json(file + '.index.json.gz')
        '''
        def _unpack_term_ref(arr):
            _xt = arr[3][0]
            if len(_xt) == 1:
                _xt = _xt[0]
            return arr[0][0], arr[1][0][0], arr[2][0], _xt
        return cls((_unpack_term_ref(f) for f in d['foreground']),
        (_unpack_term_ref(f) for f in d['background']),
        (_unpack_term_ref(f) for f in d['exterior']),
        d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
        lci_db=lci_db,
        quiet=quiet)
        '''
        return cls(ordr['foreground'], ordr['background'], ordr['exterior'],
                   d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
                   lci_db=lci_db,
                   quiet=quiet)

    def __init__(self, foreground, background, exterior, af, ad, bf, lci_db=None, quiet=True):
        """
        :param foreground: iterable of foreground Product Flows as TermRef params
        :param background: iterable of background Product Flows as TermRef params
        :param exterior: iterable of Exterior flows as TermRef params
        :param af: sparse, flattened Af
        :param ad: sparse, flattened Ad
        :param bf: sparse, flattened Bf
        :param lci_db: [None] optional (A, B) 2-tuple
        :param quiet: [True] does nothing for now
        """
        self._fg = tuple([TermRef(*f) for f in foreground])
        self._bg = tuple([TermRef(*x) for x in background])
        self._ex = tuple([TermRef(*x) for x in exterior])
        self._af = af
        self._ad = ad
        self._bf = bf
        if lci_db is None:
            self._A = None
            self._B = None
        else:
            self._A = lci_db[0].tocsr()
            self._B = lci_db[1].tocsr()
        self._lu = None  # store LU decomposition
        # reverse lookups: key tuple -> ordinal index within each ordering
        self._fg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._fg)}
        self._bg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._bg)}
        self._ex_index = {(k.term_ref, k.flow_ref, k.direction): i for i, k in enumerate(self._ex)}
        self._quiet = quiet

    def index_of(self, term_ref, flow_ref):
        """Ordinal index of a product flow in either ordering; KeyError if unknown."""
        key = (term_ref, flow_ref)
        if key in self._fg_index:
            return self._fg_index[key]
        elif key in self._bg_index:
            return self._bg_index[key]
        else:
            raise KeyError('Unknown termination %s, %s' % key)

    @property
    def _complete(self):
        # True when the full (A, B) LCI database is loaded
        return self._A is not None and self._B is not None

    @property
    def ndim(self):
        # number of background product flows (rows/cols of A)
        return len(self._bg)

    @property
    def pdim(self):
        # number of foreground product flows (rows/cols of Af)
        return len(self._fg)

    @property
    def mdim(self):
        # number of exterior (emission) flows (rows of Bf / B)
        return len(self._ex)

    @property
    def fg(self):
        return self._fg

    @property
    def bg(self):
        return self._bg

    @property
    def ex(self):
        return self._ex

    def is_in_scc(self, process, ref_flow):
        """True if the product flow belongs to a nontrivial SCC (scc_id set)."""
        if self.is_in_background(process, ref_flow):
            tr = self._bg[self._bg_index[(process, ref_flow)]]
        else:
            tr = self._fg[self._fg_index[(process, ref_flow)]]
        return len(tr.scc_id) > 0

    def is_in_background(self, process, ref_flow):
        """True if (process, ref_flow) is a background product flow."""
        return (process, ref_flow) in self._bg_index

    def foreground(self, process, ref_flow, traverse=False, exterior=False):
        """
        Most of the way toward making exchanges. yields a sequence of 5-tuples defining terminated exchanges.
        NOTE: traverse=True differs from the prior implementation because the old BackgroundEngine returned an Af
        matrix and the foreground routine generated one exchange per matrix entry.
        In contrast, the current implementation traverses the foreground and creates one exchange per traversal link.
        If a fragment references the same subfragment multiple times, this will result in redundant entries for the
        same fragment. At the moment this is by design but it may be undesirable.
        An easy solution would be to keep a log of nonzero Af indices and 'continue' if one is encountered.
        :param process:
        :param ref_flow:
        :param traverse: [False] if True, generate one exchange for every traversal link. Default is to create one
        exchange for every matrix entry. traverse=True will produce duplicate exchanges in cases where sub-fragments
        are traversed multiple times.
        :param exterior: [False] return entries for exterior flows
        :return:
        """
        if _FLATTEN_AF is False and traverse is True:
            # an un-flattened Af may contain cycles, so the queue never empties
            print('Warning: traversal of foreground SCC will never terminate')
        index = self._fg_index[process, ref_flow]
        # first yield the reference exchange itself (unit value, no termination)
        yield ExchDef(process, ref_flow, self._fg[index].direction, None, 1.0)
        cols_seen = set()
        cols_seen.add(index)
        q = [index]  # BFS queue of foreground column indices
        while len(q) > 0:
            current = q.pop(0)
            node = self._fg[current]
            fg_deps = self._af[:, current]
            rows, cols = fg_deps.nonzero()
            for i in range(len(rows)):
                assert cols[i] == 0  # 1-column slice
                if _FLATTEN_AF:
                    assert rows[i] > current  # well-ordered and flattened
                if rows[i] in cols_seen:
                    if traverse:
                        q.append(rows[i])  # allow fragment to be traversed multiple times
                else:
                    cols_seen.add(rows[i])
                    q.append(rows[i])
                term = self._fg[rows[i]]
                dat = fg_deps.data[i]
                # negative entries encode flows whose sense is reversed
                if dat < 0:
                    dat *= -1
                    dirn = term.direction  # comp directions w.r.t. parent node
                else:
                    dirn = comp_dir(term.direction)  # comp directions w.r.t. parent node
                yield ExchDef(node.term_ref, term.flow_ref, dirn, term.term_ref, dat)
            # background dependencies of the current foreground node
            bg_deps = self._ad[:, current]
            for dep in self._generate_exch_defs(node.term_ref, bg_deps, self._bg):
                yield dep
            if exterior:
                # direct exterior (emission) flows of the current node
                ems = self._bf[:, current]
                for ext in self._generate_em_defs(node.term_ref, ems):
                    yield ext

    @staticmethod
    def _generate_exch_defs(node_ref, data_vec, enumeration):
        """Yield ExchDefs for every nonzero in a 1-column dependency vector."""
        rows, cols = data_vec.nonzero()
        assert all(cols == 0)
        for i in range(len(rows)):
            term = enumeration[rows[i]]
            dat = data_vec.data[i]
            # negative entries flip sign and keep the term's native direction
            if dat < 0:
                dat *= -1
                dirn = term.direction
            else:
                dirn = comp_dir(term.direction)
            yield ExchDef(node_ref, term.flow_ref, dirn, term.term_ref, dat)

    def _generate_em_defs(self, node_ref, data_vec):
        """
        Emissions have a natural direction which should not be changed.
        :param node_ref:
        :param data_vec: 1-column sparse vector over the exterior ordering
        :return:
        """
        rows, cols = data_vec.nonzero()
        assert all(cols == 0)
        for i in range(len(rows)):
            term = self._ex[rows[i]]
            dat = data_vec.data[i]
            dirn = comp_dir(term.direction)
            if CONTEXT_STATUS_ == 'compat':
                _term = None
            else:
                _term = tuple(term.term_ref.split('; '))  # here we undo the '; '-join
            yield ExchDef(node_ref, term.flow_ref, dirn, _term, dat)

    def consumers(self, process, ref_flow):
        """
        Yield TermRefs of product flows that depend on the given product flow.
        NOTE(review): the background branch reads self._A -- assumes the
        complete LCI database is loaded; confirm callers check _complete.
        """
        idx = self.index_of(process, ref_flow)
        if self.is_in_background(process, ref_flow):
            for i in self._ad[idx, :].nonzero()[1]:
                yield self.fg[i]
            for i in self._A[idx, :].nonzero()[1]:
                yield self.bg[i]
        else:
            for i in self._af[idx, :].nonzero()[1]:
                yield self.fg[i]

    def emitters(self, flow_ref, direction, context):
        """
        Yield TermRefs of product flows that emit the given exterior flow,
        optionally filtered by direction and context (joined with '; ').
        NOTE(review): reads self._B -- assumes the complete database is loaded.
        """
        if context is None:
            term = None
        else:
            term = '; '.join(context)
        yielded = set()  # de-duplicate across multiple matching exterior rows
        for idx, ex in enumerate(self.ex):  # termination, flow_ref, direction
            if ex.flow_ref != flow_ref:
                continue
            if direction:
                if ex.direction != direction:
                    continue
            if term:
                if ex.term_ref != term:
                    continue
            # found an eligible external flow
            for i in self._bf[idx, :].nonzero()[1]:
                yielded.add(self.fg[i])
            for i in self._B[idx, :].nonzero()[1]:
                yielded.add(self.bg[i])
        for rx in yielded:
            yield rx

    def dependencies(self, process, ref_flow):
        """Yield direct terminated dependencies (foreground then background)."""
        if self.is_in_background(process, ref_flow):
            index = self._bg_index[process, ref_flow]
            fg_deps = csr_matrix([])  # background rows have no foreground deps
            bg_deps = self._A[:, index]
        else:
            index = self._fg_index[process, ref_flow]
            fg_deps = self._af[:, index]
            bg_deps = self._ad[:, index]
        for x in self._generate_exch_defs(process, fg_deps, self._fg):
            yield x
        for x in self._generate_exch_defs(process, bg_deps, self._bg):
            yield x

    def exterior(self, process, ref_flow):
        """Yield the direct exterior (emission) exchanges of a product flow."""
        if self.is_in_background(process, ref_flow):
            index = self._bg_index[process, ref_flow]
            ems = self._B[:, index]
        else:
            index = self._fg_index[process, ref_flow]
            ems = self._bf[:, index]
        for x in self._generate_em_defs(process, ems):
            yield x

    def _x_tilde(self, process, ref_flow, quiet=True, **kwargs):
        """Foreground activity vector: (I - Af)^-1 applied to a unit demand."""
        index = self._fg_index[process, ref_flow]
        return _iterate_a_matrix(self._af, _unit_column_vector(self.pdim, index), quiet=quiet, **kwargs)

    def ad(self, process, ref_flow, **kwargs):
        """Yield aggregated background dependencies of a unit of the product flow."""
        if self.is_in_background(process, ref_flow):
            for x in self.dependencies(process, ref_flow):
                yield x
        else:
            ad_tilde = self._ad.dot(self._x_tilde(process, ref_flow, **kwargs))
            for x in self._generate_exch_defs(process, ad_tilde, self._bg):
                yield x

    def bf(self, process, ref_flow, **kwargs):
        """Yield aggregated foreground emissions of a unit of the product flow."""
        if self.is_in_background(process, ref_flow):
            for x in self.exterior(process, ref_flow):
                yield x
        else:
            bf_tilde = self._bf.dot(self._x_tilde(process, ref_flow, **kwargs))
            for x in self._generate_em_defs(process, bf_tilde):
                yield x

    def _compute_bg_lci(self, ad, solver=None, **kwargs):
        """
        Background inventory B.(I - A)^-1.ad; 'factorize' caches an LU
        decomposition of (I - A) for repeated solves.
        """
        if solver == 'factorize':
            if self._lu is None:
                ima = eye(self._A.shape[0]) - self._A
                self._lu = factorized(ima.tocsc())
        if self._lu is None:
            bx = _iterate_a_matrix(self._A, ad, solver=solver, **kwargs)
        else:
            bx = csr_matrix(self._lu(ad.toarray().flatten())).T
        return self._B.dot(bx)

    def _compute_lci(self, process, ref_flow, **kwargs):
        """Full inventory column vector for a unit of the named product flow."""
        if self.is_in_background(process, ref_flow):
            if not self._complete:
                raise NoLciDatabase
            ad = _unit_column_vector(self.ndim, self._bg_index[process, ref_flow])
            bx = self._compute_bg_lci(ad, **kwargs)
            return bx
        else:
            x_tilde = self._x_tilde(process, ref_flow, **kwargs)
            ad_tilde = self._ad.dot(x_tilde)
            bf_tilde = self._bf.dot(x_tilde)
            if self._complete:
                bx = self._compute_bg_lci(ad_tilde, **kwargs)
                return bx + bf_tilde
            else:
                # best effort without the background database: foreground only
                return bf_tilde

    def lci(self, process, ref_flow, **kwargs):
        """Yield the LCI of a unit of the product flow as ExchDefs."""
        for x in self._generate_em_defs(process,
                                        self._compute_lci(process, ref_flow, **kwargs)):
            yield x

    @staticmethod
    def _check_dirn(term_ref, exch):
        # +1 when the exchange direction complements the TermRef's; else flip sign
        if comp_dir(exch.direction) == term_ref.direction:
            return 1
        return -1

    def sys_lci(self, demand, quiet=None, **kwargs):
        """
        :param demand: an iterable of exchanges, each of which must be mapped to a foreground, interior, or exterior
        TermRef
        :return: yields the combined system LCI as ExchDefs; unmappable input
            exchanges are passed through unchanged at the end
        """
        # sort the demand exchanges into fg / bg / exterior index+value lists
        data = defaultdict(list)
        for x in demand:
            if isinstance(x.termination, Context):
                key = ('; '.join(x.termination.as_list()), x.flow.external_ref, comp_dir(x.direction))
                try:
                    ind = self._ex_index[key]
                    data['ex_ind'].append(ind)
                    data['ex_val'].append(x.value * self._check_dirn(self._ex[ind], x))
                except KeyError:
                    data['missed'].append(x)
            elif x.termination is None:
                data['missed'].append(x)
            else:
                key = (x.termination, x.flow.external_ref)
                if key in self._fg_index:
                    ind = self._fg_index[key]
                    data['fg_ind'].append(ind)
                    data['fg_val'].append(x.value * self._check_dirn(self._fg[ind], x))
                elif key in self._bg_index:
                    ind = self._bg_index[key]
                    data['bg_ind'].append(ind)
                    data['bg_val'].append(x.value * self._check_dirn(self._bg[ind], x))
                else:
                    data['missed'].append(x)
        # compute ad_tilde # csr_matrix(((1,), ((inx,), (0,))), shape=(dim, 1))
        x_dmd = csr_matrix((data['fg_val'], (data['fg_ind'], [0]*len(data['fg_ind']))), shape=(self.pdim, 1))
        x_tilde = _iterate_a_matrix(self._af, x_dmd, quiet=True, **kwargs)
        ad_tilde = self._ad.dot(x_tilde).todense()
        bf_tilde = self._bf.dot(x_tilde).todense()
        # consolidate bg dependencies
        for i in range(len(data['bg_ind'])):
            ad_tilde[data['bg_ind'][i]] += data['bg_val'][i]
        # compute b
        bx = self._compute_bg_lci(ad_tilde, quiet=quiet, **kwargs) + bf_tilde
        # consolidate direct emissions
        for i in range(len(data['ex_ind'])):
            bx[data['ex_ind'][i]] += data['ex_val'][i]
        for x in self._generate_em_defs(None, csr_matrix(bx)):
            yield x
        # pass through anything we could not map to a TermRef
        for x in data['missed']:
            yield ExchDef(None, x.flow, x.direction, x.termination, x.value)

    def _write_ordering(self, filename):
        """Serialize the three TermRef orderings to the gzip JSON sidecar."""
        if not filename.endswith(ORDERING_SUFFIX):
            filename += ORDERING_SUFFIX
        ordr = {'foreground': [tuple(f) for f in self._fg],
                'background': [tuple(f) for f in self._bg],
                'exterior': [tuple(f) for f in self._ex]}
        to_json(ordr, filename, gzip=True)

    def _write_mat(self, filename, complete=True):
        """Save matrices to .mat; empty placeholders stand in for missing ones."""
        d = {'Af': csr_matrix((self.pdim, self.pdim)) if self._af is None else self._af,
             'Ad': csr_matrix((self.ndim, self.pdim)) if self._ad is None else self._ad,
             'Bf': csr_matrix((self.mdim, self.pdim)) if self._bf is None else self._bf}
        if complete and self._complete:
            d['A'] = self._A
            d['B'] = self._B
        savemat(filename, d)

    def write_to_file(self, filename, complete=True):
        """
        Serialize to a supported file type plus the ordering sidecar.

        :param filename: target path; the ordering suffix, if given, is stripped
        :param complete: also store the full (A, B) database when available
        """
        if filename.endswith(ORDERING_SUFFIX):
            filename = filename[:-len(ORDERING_SUFFIX)]
        filetype = os.path.splitext(filename)[1]
        if filetype not in SUPPORTED_FILETYPES:
            raise ValueError('Unsupported file type %s' % filetype)
        if filetype == '.mat':
            self._write_mat(filename, complete=complete)
        else:
            raise ValueError('Unsupported file type %s' % filetype)
        self._write_ordering(filename)
|
# -*- coding: utf-8 -*-
# Read "<item_code> <quantity>" and print the order total for a fixed menu.
value = input().split()
a, b = value
x = int(a)  # item code 1..5
y = int(b)  # quantity
# unit price per item code
prices = {1: 4.00, 2: 4.50, 3: 5.00, 4: 2.00, 5: 1.50}
if x in prices:
    snack = float(y * prices[x])
    print(f'Total: R$ {snack:.2f}')
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
|
from PySide2 import QtWidgets, QtCore
from Elegan import Ui_MainWindow
import connection
import serial
class MyApp(Ui_MainWindow, QtWidgets.QMainWindow):
    """
    Main window of a G-code driven lab-automation controller.

    Loads G-code template files, substitutes user-selected parameters
    (tube, slide, amount, pump, spreading speed), and streams the result to
    an Arduino over a serial connection.  Relies on module-level globals
    created in the __main__ block: `timer`, `times`, `parsed_code`,
    `code_index`, `time_counter`, `connection_object`, `qt_app`.
    """
    def __init__(self):
        super(MyApp, self).__init__()
        self.setupUi(self)
        self.setWindowTitle("MyApp 3.7")
        self.progressBar.setValue(0)
        self.set_spinbox()
        self.fill_comboxes()
        # probe for a connected device and surface its port in the combo box
        connection_object.get_comport()
        self.comboBox_Port.addItem(connection_object.com_port)
        if connection_object.com_port != "No device":
            connection_object.serial_connection = serial.Serial(
                baudrate=115200,
                port=connection_object.com_port)
            print("open")
            connection_object.serial_connection.close()
            # NOTE(review): setDTR is called after close() -- presumably to
            # prevent the Arduino auto-reset on the next open; confirm intent.
            connection_object.serial_connection.setDTR(False)
        self.checkBox_Lam.stateChanged.connect(lambda: self.lam_checkbox())
        self.checkBox_Yayma.stateChanged.connect(lambda: self.yayma_checkbox())
        self.set_button_clicks()
        # parameter widgets start disabled until their checkboxes are ticked
        self.doubleSpinBox_Lam.setDisabled(1)
        self.spinBox_Aci.setDisabled(1)
        self.spinBox_Rampa.setDisabled(1)
        self.spinBox_Yayma_hizi.setDisabled(1)
        timer.timeout.connect(lambda: self.update_bar())
        times.timeout.connect(lambda: self.handle_time())

    def button_action(self, file_path, button, pressed_text, time_takes):
        """
        Shared click handler: load a G-code template, apply the substitution
        appropriate to the pressed button, then start streaming and progress.

        :param file_path: template path (Qt resource or filesystem path)
        :param button: the QPushButton that was pressed
        :param pressed_text: label shown on the button while running
        :param time_takes: progress-timer interval in ms
        """
        if connection_object.check_connection():
            self.progressBar.setValue(0)
            son = self.load_file(file_path)
            # pick the parameter substitution matching the pressed button
            if button is self.pushButton_Yayma:
                code = self.code_replace_yayma(son)
            elif button is self.pushButton_Lam_Al:
                code = self.code_replace_lam_birak(son)
            elif button is self.pushButton_Yikama:
                code = self.code_replace_yikama(son)
            else:
                code = son
            timer.start(time_takes)
            button.setText(pressed_text)
            self.disable_buttons(1)
            self.parse_code(code)
            times.start(100)
        else:
            QtWidgets.QMessageBox.warning(self, "Error", "Device is not connected!")

    def yayma_checkbox(self):
        """Enable/disable the spreading parameter spinboxes with the checkbox."""
        if self.checkBox_Yayma.isChecked():
            self.spinBox_Yayma_hizi.setEnabled(1)
            self.spinBox_Rampa.setEnabled(1)
            self.spinBox_Aci.setEnabled(1)
        else:
            self.spinBox_Yayma_hizi.setDisabled(1)
            self.spinBox_Rampa.setDisabled(1)
            self.spinBox_Aci.setDisabled(1)

    def lam_checkbox(self):
        """Enable/disable the manual slide-position spinbox with the checkbox."""
        if self.checkBox_Lam.isChecked():
            self.doubleSpinBox_Lam.setEnabled(1)
        else:
            self.doubleSpinBox_Lam.setDisabled(1)

    def code_replace_yikama(self, code):
        """Rewrite the template's pump selector tokens for the chosen pump."""
        pump = self.comboBox_Pump.currentText()
        if pump == "Pump 1":
            son = code
        elif pump == "Pump 2":
            son = code.replace("= 1 :", "= 2 :")
        elif pump == "Pump 3":
            # NOTE(review): Pump 3 maps to '= 2 :' + T1, same selector digit as
            # Pump 2 -- looks intentional (tool change distinguishes), confirm.
            son = code.replace("= 1 :", "= 2 :").replace("T0", "T1")
        elif pump == "Pump 4":
            son = code.replace("= 1 :", "= 3 :").replace("T0", "T1")
        elif pump == "Pump 5":
            son = code.replace("= 1 :", "= 4 :")
        else:
            son = code.replace("= 1 :", "= 4 :").replace("T0", "T1")
        return son

    def code_replace_lam_birak(self, code):
        """Substitute the LAM placeholder with a Y coordinate for the slide."""
        if self.checkBox_Lam.isChecked():
            # manual override from the spinbox
            lam = self.doubleSpinBox_Lam.value()
            son = code.replace("LAM", "Y" + str(lam))
        else:
            # preset Y coordinates per slide holder selection
            sale = self.comboBox_Lam.currentText()
            if sale == "Şale 1":
                son = code.replace("LAM", "Y46")
            elif sale == "Şale 2":
                son = code.replace("LAM", "Y41")
            elif sale == "Şale 3":
                son = code.replace("LAM", "Y34.5")
            elif sale == "Şale 4":
                son = code.replace("LAM", "Y28")
            elif sale == "Şale 5":
                son = code.replace("LAM", "Y22.5")
            else:
                son = code.replace("LAM", "Y16.5")
        return son

    def code_replace_yayma(self, code):
        """
        Substitute the spreading template placeholders: speed (HIZ), angle
        (AÇI), ramp (RAMPA_X), tube Y position (TÜP) and amount (X_MIKTAR /
        MİKTAR) from the current widget selections.
        """
        miktar = self.comboBox_Miktar.currentText()
        tup = self.comboBox_Tup.currentText()
        if self.checkBox_Yayma.isChecked():
            # custom spreading parameters from the spinboxes
            yayma_hizi = self.spinBox_Yayma_hizi.text()
            rampa = self.spinBox_Rampa.text()
            yayma_aci = self.spinBox_Aci.text()
            x = code.replace("HIZ", "F" + yayma_hizi).replace("AÇI", "Z" + yayma_aci).replace("RAMPA_X", "X" + rampa)
        else:
            # factory defaults
            x = code.replace("HIZ", "F10000").replace("AÇI", "Z5").replace("RAMPA_X", "X1000")
        # tube positions are spaced 18 mm apart in Y
        if tup == "Tüp 1":
            y = x.replace("TÜP", "Y198.5")
        elif tup == "Tüp 2":
            y = x.replace("TÜP", "Y180.5")
        elif tup == "Tüp 3":
            y = x.replace("TÜP", "Y162.5")
        elif tup == "Tüp 4":
            y = x.replace("TÜP", "Y144.5")
        elif tup == "Tüp 5":
            y = x.replace("TÜP", "Y126.5")
        elif tup == "Tüp 6":
            y = x.replace("TÜP", "Y108.5")
        elif tup == "Tüp 7":
            y = x.replace("TÜP", "Y90.5")
        elif tup == "Tüp 8":
            y = x.replace("TÜP", "Y72.5")
        elif tup == "Tüp 9":
            y = x.replace("TÜP", "Y54.5")
        else:
            y = x.replace("TÜP", "Y36.5")
        # amount (microliters) maps to an X pickup depth and an X travel limit
        if miktar == "3 UL":
            son = y.replace("X_MIKTAR", "X32.5").replace("MİKTAR", "X168")
        elif miktar == "4 UL":
            son = y.replace("X_MIKTAR", "X30").replace("MİKTAR", "X168")
        elif miktar == "5 UL":
            son = y.replace("X_MIKTAR", "X27.5").replace("MİKTAR", "X168")
        elif miktar == "6 UL":
            son = y.replace("X_MIKTAR", "X25").replace("MİKTAR", "X168")
        elif miktar == "7 UL":
            son = y.replace("X_MIKTAR", "X20").replace("MİKTAR", "X169")
        elif miktar == "8 UL":
            son = y.replace("X_MIKTAR", "X19").replace("MİKTAR", "X169")
        elif miktar == "9 UL":
            son = y.replace("X_MIKTAR", "X18").replace("MİKTAR", "X169")
        else:
            son = y.replace("X_MIKTAR", "X17").replace("MİKTAR", "X169")
        return son

    def update_bar(self):
        """Advance the progress bar; at 100 restore all button labels and re-enable."""
        val = self.progressBar.value()
        self.progressBar.setValue(val + 1)
        if val == 100:
            # self.pushButton_Yayma.setText(button_text)
            self.pushButton_Reset.setText("Reset")
            self.pushButton_Yikama.setText("Yıkama")
            self.pushButton_Lam_Al.setText("Lam Bırak")
            self.pushButton_Yayma.setText("Kanı Yay")
            self.pushButton_4_Methanol.setText("Methanol")
            self.pushButton_May_G.setText("May Grunwald")
            self.pushButton_Kurutma.setText("Kurutma")
            self.pushButton_Yikama_after_May_G.setText("Yıkama 1")
            self.pushButton_May_G_Cozelti.setText("May Grunwald\n Çözeltisi")
            self.pushButton_Yikama_After_Cozelti.setText("Yıkama 2")
            self.pushButton_Giemsa.setText("Giemsa")
            self.pushButton_yikama_after_giemsa.setText("Yıkama 3")
            self.pushButton_Birakma.setText("Bırakma")
            self.pushButton_Igne_Yikama.setText("İğne Yıkama")
            self.pushButton_start.setText("START")
            self.disable_buttons(0)

    def set_spinbox(self):
        """Configure ranges/steps for angle, ramp and spreading-speed spinboxes."""
        self.spinBox_Aci.setMinimum(0)
        self.spinBox_Aci.setMaximum(130)
        self.spinBox_Aci.setSingleStep(1)
        self.spinBox_Aci.setValue(0)
        self.spinBox_Rampa.setMinimum(0)
        self.spinBox_Rampa.setMaximum(25000)
        self.spinBox_Rampa.setSingleStep(1)
        self.spinBox_Rampa.setValue(0)
        self.spinBox_Yayma_hizi.setMinimum(0)
        self.spinBox_Yayma_hizi.setMaximum(25000)
        self.spinBox_Yayma_hizi.setSingleStep(1)
        self.spinBox_Yayma_hizi.setValue(0)

    def load_file(self, path):
        """Read a text file (Qt resource or filesystem) and return its contents.

        Returns None (implicitly) and pops a warning dialog if the file
        cannot be opened.
        """
        file = QtCore.QFile(path)
        if not file.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text):
            QtWidgets.QMessageBox.warning(self, "Error!", file.errorString())
            return
        stream = QtCore.QTextStream(file)
        code = stream.readAll()
        file.close()
        return code

    def parse_code(self, code):
        """
        Bucket G-code lines by their leading integer index.

        Input lines look like '<index> : <gcode>' (cf. the M112/M18 strings in
        disable_motors_and_stop); tokens after the second are joined into
        parsed_code[<index>].  NOTE: appends into the module-level
        `parsed_code` list, which persists until initiliaze_variables().
        """
        rows = code.splitlines()
        for lines in rows:
            if lines:
                a = lines.split()
                # grow the bucket list up to the referenced index
                while len(parsed_code) - 1 < int(a[0]):
                    parsed_code.append("")
                parsed_code[int(a[0])] += str(' '.join(a[2:])) + "\n"
        return parsed_code

    def initiliaze_variables(self):
        """Reset the streaming state (buckets and counters) and stop the timer."""
        global code_index
        global time_counter
        del parsed_code[:]
        times.stop()
        time_counter = 0
        code_index = 0

    def handle_time(self):
        """
        Timer slot: send the next non-empty G-code bucket once per second,
        logging each transmission with a timestamp; reset when all sent.
        """
        global code_index
        global time_counter
        if code_index < len(parsed_code):
            # skip empty buckets
            while parsed_code[code_index] == '':
                code_index += 1
            if time_counter == code_index:
                exact_time = QtCore.QTime.currentTime()
                connection_object.write_data_to_port(parsed_code[time_counter])
                self.textBrowser.append("**************" + exact_time.toString("hh : mm : ss : ms") +
                                        "**************\n\n" + parsed_code[time_counter])
                code_index += 1
            time_counter += 1
            times.start(1000)
        else:
            self.initiliaze_variables()

    def fill_comboxes(self):
        """Populate tube (1-10), amount (3-10 UL), slide and pump (1-6) combos."""
        for i in range(1, 11):
            self.comboBox_Tup.addItem("Tüp " + str(i))
        for k in range(3, 11):
            self.comboBox_Miktar.addItem(str(k) + " UL")
        for m in range(1, 7):
            self.comboBox_Lam.addItem("Şale " + str(m))
            self.comboBox_Pump.addItem("Pump " + str(m))

    def disable_buttons(self, val):
        """Disable (val == 1) or enable (otherwise) every action button."""
        buttons = {self.pushButton_Yikama, self.pushButton_Connect, self.pushButton_Yayma,
                   self.pushButton_Lam_Al, self.pushButton_Reset, self.pushButton_4_Methanol,
                   self.pushButton_Disconnect, self.pushButton_May_G, self.pushButton_start,
                   self.pushButton_Kurutma, self.pushButton_Yikama_after_May_G, self.pushButton_May_G_Cozelti,
                   self.pushButton_Yikama_After_Cozelti, self.pushButton_Giemsa,
                   self.pushButton_yikama_after_giemsa, self.pushButton_Birakma, self.pushButton_Igne_Yikama}
        for button in buttons:
            if val == 1:
                button.setDisabled(True)
            else:
                button.setEnabled(True)

    def set_button_clicks(self):
        """Wire every button to its G-code template via button_action.

        NOTE(review): the START button loads from a filesystem path
        ('resources/G_code/...') while all others use Qt resource paths
        (':G_code/G_code/...') -- confirm this asymmetry is intended.
        """
        self.pushButton_Disconnect.clicked.connect(lambda: connection_object.dis_connect(parsed_code, qt_app))
        self.pushButton_Connect.clicked.connect(lambda: connection_object.connect(parsed_code, qt_app))
        self.pushButton_start.clicked.connect(
            lambda: self.button_action("resources/G_code/Start.txt", self.pushButton_start, "STARTED", 100))
        self.pushButton_May_G.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Kurutma Al May.G Koy.txt",
                                       self.pushButton_May_G,
                                       "May Grunwald İşlemi\n Yapılıyor", 100))
        self.pushButton_4_Methanol.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Şale Alma Methanol Koy.txt",
                                       self.pushButton_4_Methanol, "Methanol İşlemi\n Yapılıyor", 100))
        self.pushButton_Yayma.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Yayma.txt",
                                       self.pushButton_Yayma, "Yayma Devam Ediyor",
                                       100))
        self.pushButton_Reset.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Başlangıç Home.txt",
                                       self.pushButton_Reset, "Resetleniyor", 100))
        self.pushButton_Lam_Al.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Lam Alma 1.txt",
                                       self.pushButton_Lam_Al, "Lam Bırakılıyor",
                                       100))
        self.pushButton_Yikama.clicked.connect(
            lambda: self.button_action(":G_code/G_code/1. Pump yıkama.txt",
                                       self.pushButton_Yikama, "Yıkanıyor", 100))
        self.pushButton_Kurutma.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Metanol Al Kurutma İçin Beklet.txt",
                                       self.pushButton_Kurutma, "Kurutma İşlemi\n Yapılıyor", 100))
        self.pushButton_Yikama_after_May_G.clicked.connect(
            lambda: self.button_action(":G_code/G_code/May G. Al Yıkama Git.txt",
                                       self.pushButton_Yikama_after_May_G, "Yıkama 1 İşlemi\n Yapılıyor", 100))
        self.pushButton_May_G_Cozelti.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Yıkama Al May G. Çözelti Git.txt",
                                       self.pushButton_May_G_Cozelti, "May Grunwald Çözeltisi\n İşlemi Yapılıyor", 100))
        self.pushButton_Yikama_After_Cozelti.clicked.connect(
            lambda: self.button_action(":G_code/G_code/May G. Çözeltisi Al Yıkama Git.txt",
                                       self.pushButton_Yikama_After_Cozelti, "Yıkama 2 İşlemi\nYapılıyor", 100))
        self.pushButton_Giemsa.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Yıkama Al Geimsa Gİt.txt",
                                       self.pushButton_Giemsa, "Giemsa İşlemi\nYapılıyor", 100))
        self.pushButton_yikama_after_giemsa.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Giemsa Al Yıkama Git.txt",
                                       self.pushButton_yikama_after_giemsa, "Yikama 3 İşlemi\nYapılıyor", 100))
        self.pushButton_Birakma.clicked.connect(
            lambda: self.button_action(":G_code/G_code/Yıkama Al Finish.txt",
                                       self.pushButton_Birakma, "Bırakma İşlemi\nYapılıyor", 100))
        self.pushButton_Igne_Yikama.clicked.connect(
            lambda: self.button_action(":G_code/G_code/İğne Yıkama.txt",
                                       self.pushButton_Igne_Yikama, "İğne Yıkama\n İşlemi Yapılıyor", 100))
        self.pushButton_Stop.clicked.connect(lambda: self.disable_motors_and_stop("1 : M112\n2 : M112\n3 : M112\n4 : M112"))
        self.pushButton_disable_motors.clicked.connect(lambda: self.disable_motors_and_stop("1 : M18\n2 : M18\n3 : M18\n4 : M18"))

    def disable_motors_and_stop(self, code):
        """Send an immediate command (M112 e-stop / M18 motors off) and reset state."""
        if connection_object.check_connection():
            connection_object.write_data_to_port(code)
            self.initiliaze_variables()
        else:
            QtWidgets.QMessageBox.warning(self, "Error", "Device is not connected!")
if __name__ == '__main__':
    # Module-level state shared with MyApp methods via globals.
    timer = QtCore.QTimer()  # drives the progress bar
    times = QtCore.QTimer()  # paces G-code transmission (1 bucket/second)
    parsed_code = []         # indexed buckets of G-code lines (see parse_code)
    code_index = 0           # index of the next bucket to transmit
    time_counter = 0         # seconds elapsed since streaming started
    connection_object = connection.Arduino()
    app = QtWidgets.QApplication()
    qt_app = MyApp()
    qt_app.show()
    app.exec_()
|
#!/usr/bin/env python
'''
Created on 12/lug/2013
@author: davide
'''
import sys
import csv
import numpy as np
import os
from optparse import OptionParser
# Default file names; parse_args resolves them relative to the cwd.
def_out_file = 'out_stat_overhead.csv'
def_release = 'ft_release.csv'
def_schedule = 'ft_schedule.csv'
def parse_args():
    """Build the option parser and return (options, args) parsed from sys.argv."""
    cwd = os.getcwd()
    parser = OptionParser("usage: %prog [options]")
    parser.add_option('-o', '--out-file', dest='out_file',
                      help='file for data output',
                      default=("%s/%s" % (cwd, def_out_file)))
    parser.add_option('-r', '--release', dest='ft_release',
                      help='ft release csv file',
                      default=("%s/%s" % (cwd, def_release)))
    parser.add_option('-s', '--schedule', dest='ft_schedule',
                      help='ft schedule csv file',
                      default=("%s/%s" % (cwd, def_schedule)))
    return parser.parse_args()
def main():
opts, args = parse_args()
dirname = os.path.dirname(opts.out_file)
if not os.path.exists(dirname):
raise Exception(dirname + ' not found')
files = {
'release': None,
'schedule': None
}
if os.path.exists(opts.ft_release):
files['release'] = opts.ft_release
if os.path.exists(opts.ft_schedule):
files['schedule'] = opts.ft_schedule
# files['release'] = [s for s in args if 'release' in s]
# files['schedule'] = [s for s in args if 'schedule' in s]
data = []
write = False
for k in files.keys():
if files[k] != None:
try:
with open(files[k], 'rb') as f:
tmp_data = []
csv_data = csv.reader(f)
for row in csv_data:
tmp_data.append(long(row[2].strip()))
max_value = max(tmp_data)
min_value = min(tmp_data)
avg_value = np.mean(tmp_data)
std_value = np.std(tmp_data)
sum_value = sum(tmp_data)
data.append(max_value)
data.append(min_value)
data.append(long(avg_value))
data.append(long(std_value))
data.append(sum_value)
write = True
except IOError:
print k + ' file not found!'
if write:
with open(opts.out_file, 'wb') as f:
writer = csv.writer(f)
writer.writerow(data)
else:
print 'Nothing to write. You are probably missing some input files.'
if __name__ == '__main__':
main() |
# 使用SimpleTagBased算法对Delicious2K数据进行推荐
# 原始数据集:https://grouplens.org/datasets/hetrec-2011/
# 数据格式:userID bookmarkID tagID timestamp
import random
import math
import operator
import pandas as pd
"""0.数据准备,变量定义"""
file_path = "./user_taggedbookmarks-timestamps.dat"
records = {} # 从原始数据生成user->item->tag记录,保存了user对item的tag,即{userid: {item1:[tag1, tag2], ...}}
train_data = dict() # 训练集
test_data = dict() # 测试集
user_tags = dict() # 用户user及其使用过tags和次数
tag_items = dict() # 标签tag及其标记过item和次数
user_items = dict() # 用户user及其标记过的item和次数
tag_users = dict() # 标签tag及使用过它的user和次数
item_tags = dict() # 商品item及标记过它的tag和次数
item_users = dict() # 商品item及标记过它用户user和次数
"""1. 数据加载,生成records"""
def load_data():
print("开始数据加载...")
df = pd.read_csv(file_path, sep='\t')
for i in range(len(df)):
uid = df['userID'][i]
iid = df['bookmarkID'][i]
tag = df['tagID'][i]
records.setdefault(uid,{}) # uid键不存在时,设置默认值{}
records[uid].setdefault(iid,[]) # 嵌套字典,iid键不存在时,设置默认值[]
records[uid][iid].append(tag)
print("数据集大小为 %d." % (len(df)))
print("设置tag的人数 %d." % (len(records)))
print("数据加载完成!\n")
"""2. 数据处理,将数据集拆分为训练集和测试集"""
def train_test_split(ratio=0.2, seed=100):
    """Split the global `records` into `train_data` and `test_data`.

    Each (user, item) pair is sent to the test set with probability `ratio`;
    the RNG is seeded for reproducibility.
    """
    random.seed(seed)
    for user in records.keys():
        for item in records[user].keys():
            # One random draw per (user, item), same as the original order.
            target = test_data if random.random() < ratio else train_data
            bucket = target.setdefault(user, {}).setdefault(item, [])
            bucket.extend(records[user][item])
    print("训练集样本数 %d, 测试集样本数 %d" % (len(train_data), len(test_data)))
"""3. 数据初始化,使用records生成user_tags, tag_items, user_items, tag_users, item_tags, item_users"""
# 设置矩阵 mat[index, item] = 1
# Accumulate `value` into the nested counter mat[index][item],
# creating the inner dict / entry on first use.
def addValueToMat(mat, index, item, value=1):
    inner = mat.setdefault(index, {})
    inner[item] = inner.get(item, 0) + value
# 使用训练集,初始化user_tags, tag_items, user_items
# Populate the six global co-occurrence counters from the training split.
def initStat():
    records = train_data
    for user, items in records.items():
        for item, tags in items.items():
            for tag in tags:
                addValueToMat(user_tags, user, tag, 1)
                addValueToMat(tag_items, tag, item, 1)
                addValueToMat(user_items, user, item, 1)
                addValueToMat(tag_users, tag, user, 1)
                addValueToMat(item_tags, item, tag, 1)
                addValueToMat(item_users, item, user, 1)
    print("user_tags, tag_items, user_items, tag_users, item_tags, item_users初始化完成.")
    print("user_tags大小 %d, tag_items大小 %d, user_items大小 %d" % (len(user_tags), len(tag_items), len(user_items)))
    print("tag_users大小 %d, item_tags大小 %d, item_users大小 %d" % (len(tag_users), len(item_tags), len(item_users)))
"""4. 生成推荐列表,Top-N"""
# 对用户user推荐Top-N
# Top-N recommendation for one user.
def recommend(user, N, norm_type):
    """Score unseen items for `user` and return the top-N (item, score) pairs.

    Score for an item is sum over the user's tags of
    wut (times user used tag) * wti (times tag marks item) / norm, where
    `norm` selects the algorithm variant:
      SimpleTagBased  norm = 1
      NormTagBased-1  norm = |users of tag| * |tags of user|
      NormTagBased-2  norm = |tags of user| * |items of tag|
      TagBased-IDF    norm = log(|users of tag| + 1)

    Fixes over the original: `norm` depends only on (user, tag), so it is
    computed once per tag instead of once per candidate item, and an invalid
    `norm_type` now prints the error a single time and stops (the original
    printed it once per tag via an inner-loop break).
    """
    recommend_items = dict()
    tagged_items = user_items[user]  # items the user already marked — excluded
    for tag, wut in user_tags[user].items():
        if norm_type == "SimpleTagBased":
            norm = 1
        elif norm_type == "NormTagBased-1":
            norm = len(tag_users[tag]) * len(user_tags[user])
        elif norm_type == "NormTagBased-2":
            norm = len(user_tags[user]) * len(tag_items[tag])
        elif norm_type == "TagBased-IDF":
            norm = math.log(len(tag_users[tag]) + 1)
        else:
            print("norm_type参数错误!")
            break
        for item, wti in tag_items[tag].items():
            if item in tagged_items:
                continue
            recommend_items[item] = recommend_items.get(item, 0) + wut * wti / norm
    return sorted(recommend_items.items(), key=operator.itemgetter(1), reverse=True)[0:N]
"""注:SimpleTagBased推荐分数计算过程"""
# user=8的推荐结果[(1416, 61), (1526, 50), (4535, 47), (4639, 46), (23964, 46)]
# 其中item = 1416的得分计算过程
#for key in user_tags[8]:
# wti = str(tag_items[key].get(1416,0))
# if wti != '0':
# wut = str(a[key])
# print("str(key) + ":" + wut "+ "*" + wti)
"""5. 使用测试集,计算准确率和召回率"""
def precisionAndRecall(N, norm_type):
    """Evaluate Top-N recommendations on the test split.

    Returns (precision, recall): hits over N-per-user recommendations, and
    hits over the users' true tagged-item counts.
    """
    hit = 0
    recall_denom = 0
    precision_denom = 0
    for user, true_items in test_data.items():
        if user not in train_data:
            continue  # cannot recommend for users absent from training
        ranked = recommend(user, N, norm_type)
        hit += sum(1 for item, _score in ranked if item in true_items)
        recall_denom += len(true_items)
        precision_denom += N
    return (hit / (precision_denom * 1.0)), (hit / (recall_denom * 1.0))
# 使用测试集,对推荐结果进行评估
# Print a precision/recall table for several list lengths N.
def testRecommend(norm_type):
    print("推荐结果评估")
    print("%3s %10s %10s" % ('N', "精确率", '召回率'))
    for top_n in [5, 10, 20, 40, 60, 80, 100]:
        precision, recall = precisionAndRecall(top_n, norm_type)
        print("%3d %10.3f%% %10.3f%%" % (top_n, precision * 100, recall * 100))
"""--------------分割线--------------------"""
# Run the full pipeline: load, split, build counters, evaluate.
load_data()
train_test_split(ratio = 0.2)
initStat()
# norm_type options: "SimpleTagBased", "NormTagBased-1", "NormTagBased-2", "TagBased-IDF"
testRecommend(norm_type = "TagBased-IDF")
|
import array
# 1,[mutable/immutable]: item assignment, item deletion
# 2,hold arbitrary data types
if __name__ == '__main__':
    # Standard list: mutable, and holds arbitrary element types.
    print('-' * 20, " list, mutable dynmaic arrays ")
    arr = ['gold', 'pogoda', 'abudo']
    print(arr)
    del(arr[1])          # item deletion works: lists are mutable
    print(arr)
    arr.append(3.14115)  # mixed element types are allowed
    print(arr)
    tuple_list = tuple(arr)
    print("list2tuple: ", tuple_list)

    print('-' * 20, " tuple, immutable containers ")
    #tpl = ('bagger', 'vanish', 'puff')
    tpl = 'bagger', 'vanish', 'puff'  # the commas, not parentheses, make the tuple
    print(tpl)
    #tpl[1] = 'release' # immutable, does not support item assignment
    #del(tpl[1]) # nor item deletion
    # tuple concatenation !!! (1) is of type int, not tuple
    tpl = tpl + (33.0,) # can only concatenate tuple (not "float") to tuple
    tpl = tpl + (33,) # so, must use (int/float,), not (int/float)
    print(tpl)
    list_tpl = list(tpl)
    print("tuple2list: ", list_tpl)

    print('-' * 20, " array, basic typed arrays")
    # 'f' = C float; all elements share one machine type.
    tarr = array.array('f', [-9.0, 8.0, -7.0, 6.0])
    print('nice repr: ', tarr)
    tarr[3] = -5.0
    print('mutable: ', tarr)
    del(tarr[1])
    print('mutable: ', tarr)
    tarr.append(4.0)
    print('mutable: ', tarr)
    #tarr[3] = 'typed' # elements must match the declared type, here float

    print('-' * 20, " str, immutable arrays of unicode charactors")
    sarr = 'remind us people of night'
    #sarr[1] = 'A' # immutable
    #del(sarr[3]) # immutable
    larr = list(sarr)
    print('can be unpacked into a list, to get a mutable repr: ')
    print(larr)
    marr = ''.join(list(sarr))
    print('back to immutable string: ', marr)
    #del(marr[0])
    # Indexing a str yields another (length-1) str.
    print("strings are recursivee data structures: ", type(sarr), type(sarr[0]))

    # bytes elements are unsigned chars in [0, 255]
    print('-' * 20, " bytes, immutable arrays of single bytes")
    barr = bytes((255, 254))
    print('immutable: ', barr)
    #barr[0]
    #del(barr[1])

    print('-' * 20, " bytearray, dedicated mutable arrays of single bytes")
    darr = bytearray((2, 24, 241))
    print('bad repr: ', darr)
    darr[1] = 255
    print('mutable: ', darr)
    del(darr[2])
    print('mutable: ', darr)
    darr.append(23)
    print('mutable: ', darr)
    #darr[2] = 'should-not-str' # an integer is required
    #darr[2] = 256 # valid range is [0, 256)
    carr = bytes(darr)
    print('can be converted back to into bytes:')
    print(carr)
    bdarr = bytearray(carr)
    print('can be converted back to into bytearray:')
    print(bdarr)
|
# # F = C * 9/5 +32
# celsius = 0
# fahrenheit = celsius * 9/5 + 32
# print(fahrenheit)
# celsius = 100
# fahrenheit = celsius * 9/5 + 32
# print(fahrenheit)
# celsius = 23
# fahrenheit = celsius * 9/5 + 32
# print(fahrenheit)
c_list = [0, 23, 100]  # Celsius temperatures to convert
def celsius_to_fahrenheit(celsius):
    """Convert a Celsius temperature to Fahrenheit (F = C * 9/5 + 32)."""
    return celsius * 9 / 5 + 32
def c_to_f_and_print(celsius):
    """Convert `celsius` to Fahrenheit and print the result."""
    print(celsius_to_fahrenheit(celsius))
# Convert and print every temperature in the list.
for celsius_value in c_list:
    c_to_f_and_print(celsius_value)
# c_to_f_and_print(c_list[0])
# c_to_f_and_print(c_list[1])
# c_to_f_and_print(c_list[2])
# value_0 = celsius_to_fahrenheit(c_list[0])
# print(value_0)
# value_0 = celsius_to_fahrenheit(c_list[1])
# print(value_0)
# value_0 = celsius_to_fahrenheit(c_list[2])
# print(value_0)
# # print(celsius_to_fahrenheit(100)
# def happy():
# print('Happy birthday to you!')
# def happy_john():
# print('Happy birthday, dear John!')
# def happy_jane():
# print('Happy birthday, dear Jane!')
# def happy_name(name):
# print(f'Happy birthday, dear {name}')
# print('Happy birthday to you!')
# print('Happy birthday to you!')
# print('Happy birthday, dear John!')
# print('Happy birthday to you!')
# print()
# happy()
# happy()
# happy_john()
# happy()
# happy()
# happy()
# happy_jane()
# happy()
# print()
# happy()
# happy()
# happy_name('Wei')
# happy()
# def sing_song(name):
# happy()
# happy()
# happy_name(name)
# happy()
# print()
# sing_song('Ashish')
|
from django_quill.fields import QuillField
from django.db import models
class Core(models.Model):
    """Minimal model holding one rich-text document."""
    # Rich-text (WYSIWYG) content stored through django-quill-editor.
    content = QuillField()
import tkinter as tk
from tkinter import *
from tkinter import font
from tkinter.filedialog import askopenfilename
from tkinter import ttk
import textwrap
from AppOperations import AppOperations as ao # the class build for this purpose
from CommandsGUI import CommandsGUI
from CommandsGUI import ScreenGUI
#from Generic import Generic
from GUIfunctions import GUIfunctions
from DBOperations import DBOperations
from tkinter import messagebox
# last parent5
# last parent5
data_valid = 0  # flag: set when a record was successfully inserted
# (label text, form row) pairs for the data-entry screen; the suffix in the
# label documents the expected column type.
info = [
    ("Name (TEXT):", 1),
    ("e-mail (TEXT):", 2),
    ("Flat no. (TEXT):", 3),
    ("Tower no. (TEXT):", 4),
    ("Area (NUMBER):", 5),
    ("Parking (TEXT):", 6),
    ("Recpt. Fess (NUMBER):", 7),
    ("Address (TEXT):", 8),
    ("Contact number (TEXT):", 9)
]
e = ["", "", "", "", "", "", "", "", "", ""]  # slots for the entry widgets' values
root = Tk()
menu = Menu(root)
root.config(menu=menu)
root.title("FLAT-INVENTORY JIMSOFT")
root.geometry("1000x600+200+200")
print("Starting your application... This may take some time... hold on!")
class Generic:  # this class is used to combine multiple functions to return into 1 function
    """Grab-bag of UI helper callbacks, always used via ``Generic.<name>``."""

    @staticmethod
    def combine_funcs(*funcs):
        """Return one callable that invokes every function in `funcs` with
        the same arguments, e.g. command = combine_funcs(func1, func2)."""
        def combined_func(*args, **kwargs):
            for f in funcs:
                f(*args, **kwargs)
        return combined_func

    @staticmethod
    def answer():
        """Placeholder handler reporting that no answer is available."""
        # BUG FIX: `showerror` was called unqualified, raising NameError at
        # runtime — only the `messagebox` module is imported.
        messagebox.showerror("Answer", "Sorry, no answer available")

    @staticmethod
    def callback():
        """Confirm-quit handler wired to WM_DELETE_WINDOW."""
        if messagebox.askyesno('Verify', 'Really quit?'):
            # NOTE(review): showwarning() always returns 'ok' (truthy), so
            # this branch is taken unconditionally — was askyesno intended?
            if messagebox.showwarning('Save Changes?', 'Commit all existing data ?'):
                print('Changes saved!')
                ao.save_root()
            root.destroy()
        else:
            messagebox.showinfo('No', 'Quit has been cancelled')

    @staticmethod
    def delete_dummy():
        """Forward to the multi-delete GUI routine."""
        GUIfunctions.delete_multiple()
# Ask for confirmation (and offer to save) when the window close button is used.
root.protocol("WM_DELETE_WINDOW", Generic.callback)
if __name__ == "__main__":
ScreenGUI.detailsMenu()
ScreenGUI.manipulateMenu()
ScreenGUI.billMenu()
mainloop()
|
""" This class keeps global parameters, used
in the whole DHCP server
"""
# socket parameters
PORT = 67
BROADCAST_IP_ADDRESS = '0.0.0.0'
# DHCP message constant values
DHCP_EMPTY_BYTE = 0
NUM_CHADDR_UNUSED_BYTES = 10
NUM_BOOTP_LEGACY_BYTES = 192
# message options codes
END_OPTION_CODE = 255
DHCP_MESSAGE_TYPE = 53
REQUESTED_IP_CODE = 50
PARAMETER_REQUEST_LIST_CODE = 55
DHCP_SERVER_IDENTIFIER = 54
# requested parameters list codes
REQUEST_SUBNET_MASK = 1
ROUTER = 3
DOMAIN_NAME = 15
DOMAIN_NAME_SERVER = 6
# DHCP message types
DHCP_DISCOVERY = 1
DHCP_OFFER = 2
DHCP_REQUEST = 3
DHCP_ACK = 5
DHCP_NACK = 6
# message type byte index
MESSAGE_TYPE_BYTE_INDEX = 242
# Offer options types
OFFER_SUBNET_MASK = 1
OFFER_ROUTER = 3
OFFER_LEASE_TIME = 51
OFFER_DHCP_SERVER = 54
OFFER_DNS_SERVERS = 6
# ACK options types
ACK_SUBNET_MASK = 1
ACK_ROUTER = 3
ACK_LEASE_TIME = 51
ACK_DHCP_SERVER = 54
ACK_DNS_SERVERS = 6
# global parameters for config file
CONFIG_IP_ADDRESSES_RANGE = "range"
CONFIG_RANGE_FROM = "from"
CONFIG_RANGE_TO = "to"
CONFIG_GATEWAY_IP_ADDRESS = "default-gw"
CONFIG_SUBNET_MASK = "netmask"
CONFIG_LEASE = "lease"
CONFIG_DNS = "dns"
CONFIG_RULES = "rules"
CONFIG_RULES_MAC = "mac"
CONFIG_RULES_FIX_ADDRESS = "fix-address"
MY_SERVER_IP = '192.168.0.100'
|
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
import tqdm
import argparse
import time
from emotion_classification.dataset.emotion_dataset import PredictDataSet
from emotion_classification.model.model import *
classes = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
def predict(device, image_path, weight_path, batch_size):
    """Classify the emotion of every image under `image_path`.

    Loads a darknet classifier from `weight_path`, runs it batch-wise over a
    PredictDataSet and returns the predicted class names in dataset order.
    An empty `device` string auto-selects CUDA when available.
    """
    if device == '':
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Device: {}'.format(device))
    # 48x48 is the input resolution the network was trained on — confirm.
    transform = transforms.Compose([transforms.Resize([48, 48]), transforms.ToTensor()])
    dataset = PredictDataSet(image_path, transform=transform)
    print('DataSet: PredictDataSet')
    print(f'DataSet size: {len(dataset)}')
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    net = darknet(num_classes=7, num_blocks=[1, 2, 8])
    net.to(device)
    net.load_state_dict(torch.load(weight_path, map_location=device))
    net.eval()
    progress = tqdm.tqdm(loader)
    class_indices = []
    started = time.time()
    for batch in progress:
        batch = batch.to(device)
        logits = net(batch)
        class_indices.extend(torch.argmax(logits, dim=1).tolist())
    print('Time: {:.2f}s'.format(time.time() - started))
    return [classes[idx] for idx in class_indices]
if __name__ == '__main__':
    # Smoke-run the predictor on the bundled sample images, CPU only.
    image_path = 'inference/images'
    weight_path = 'weights/model_epoch_53.pt'
    batch_size = 8
    pred = predict('cpu', image_path, weight_path, batch_size)
    print(pred)
|
# DC motor cqparts model
# 2018 Simon Kirkby obeygiantrobot@gmail.com
import cadquery as cq
import cqparts
from cqparts.params import *
from cqparts.display import render_props
from cqparts.constraint import Fixed, Coincident
from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem
import math
from cqparts_motors import shaft
from cqparts.catalogue import JSONCatalogue
# defines the profile of the motor , returns a wire
# defines the profile of the motor , returns a wire
def _profile(shape, diam, thickness):
    """Build the 2D cross-section wire for a motor body.

    `shape` selects circle, flat (circle with two parallel flats `thickness`
    apart) or rect; an unrecognised shape yields None.
    """
    workplane = cq.Workplane("XY")
    radius = diam / 2
    if shape == "circle":
        return workplane.circle(radius)
    if shape == "flat":
        half_t = thickness / 2
        # x where the flat (y = +/- half_t) meets the circle of `radius`
        chord_x = math.sqrt(radius * radius - half_t * half_t)
        return (
            workplane.moveTo(0, half_t)
            .lineTo(chord_x, half_t)
            .threePointArc((radius, 0), (chord_x, -half_t))
            .lineTo(0, -half_t)
            .mirrorY()
        )
    if shape == "rect":
        # NOTE(review): half-size rectangle (thickness/2 x diam/2) — confirm
        # this is intentional rather than thickness x diam.
        return workplane.rect(thickness / 2, radius)
    return None  # unknown shape (the original fell through implicitly)
# the motor cup
class _Cup(cqparts.Part):
height = PositiveFloat(25.1, doc="cup length")
diam = PositiveFloat(20.4, doc="cup diameter")
thickness = PositiveFloat(15.4, doc="cup thickness for flat profile")
hole_spacing = PositiveFloat(12.4, doc="distance between the holes")
hole_size = PositiveFloat(2, doc="hole size")
step_diam = PositiveFloat(12, doc="step diameter")
step_height = PositiveFloat(0, doc="height if step, if zero no step")
bush_diam = PositiveFloat(6.15, doc="diameter of the bush")
bush_height = PositiveFloat(1.6, doc="height of the bush")
profile = String("flat", doc="profile shape (circle|flat|rect)")
def make(self):
# grab the correct profile
s = cq.Workplane("XY")
cup = _profile(self.profile, self.diam, self.thickness).extrude(-self.height)
if self.step_height > 0:
st = s.circle(self.step_diam / 2).extrude(self.step_height)
cup = cup.union(st)
bush = (
s.workplane(offset=self.step_height)
.circle(self.bush_diam / 2)
.extrude(self.bush_height)
)
cup = cup.union(bush)
return cup
def get_cutout(self, clearance=0):
return (
cq.Workplane("XY", origin=(0, 0, 0))
.circle((self.diam / 2) + clearance)
.extrude(10)
)
@property
def mate_bottom(self):
return Mate(
self,
CoordSystem(origin=(0, 0, -self.height), xDir=(1, 0, 0), normal=(0, 0, 1)),
)
class BackCover(cqparts.Part):
    """Rear cover of the motor: a small bush plus (optionally) a short
    extrusion of the body profile, built below the cup's bottom face."""
    height = PositiveFloat(6, doc="back length")
    diam = PositiveFloat(20.4, doc="back diameter")
    thickness = PositiveFloat(15.4, doc="back thickness for flat profile")
    profile = String("flat", doc="profile shape (circle|flat|rect)")
    bush_diam = PositiveFloat(6.15, doc="diameter of the bush")
    bush_height = PositiveFloat(1.6, doc="height of the bush")
    _render = render_props(color=(50, 255, 255))

    def make(self):
        """Return the back-cover solid."""
        # grab the correct profile
        s = cq.Workplane("XY")
        # rear bush, extruded further down from the cover's bottom
        back = (
            s.workplane(offset=-self.height)
            .circle(self.bush_diam / 2)
            .extrude(-self.bush_height)
        )
        if self.height > 0:
            # cover body using the same outline as the cup
            b = _profile(self.profile, self.diam, self.thickness).extrude(-self.height)
            back = back.union(b)
        return back
class DCMotor(cqparts.Assembly):
    """DC motor assembly: cup body + shaft + back cover, sharing the profile
    and bush parameters so the three parts line up."""
    height = PositiveFloat(25.1, doc="back length")
    diam = PositiveFloat(20.4, doc="back diameter")
    thickness = PositiveFloat(15.4, doc="back thickness for flat profile")
    profile = String("flat", doc="profile shape (circle|flat|rect)")
    bush_diam = PositiveFloat(6.15, doc="diameter of the bush")
    bush_height = PositiveFloat(1.6, doc="height of the bush")
    shaft_type = shaft.Shaft  # replace with other shaft
    shaft_length = PositiveFloat(11.55, doc="length of the shaft")
    shaft_diam = PositiveFloat(2, doc="diameter of the shaft")
    cover_height = PositiveFloat(0, doc="back cover height")

    def make_components(self):
        """Instantiate body, shaft and back cover with shared dimensions."""
        return {
            "body": _Cup(
                height=self.height,
                thickness=self.thickness,
                diam=self.diam,
                profile=self.profile,
                bush_diam=self.bush_diam,
                bush_height=self.bush_height,
            ),
            "shaft": self.shaft_type(length=self.shaft_length, diam=self.shaft_diam),
            "back": BackCover(
                height=self.cover_height,
                thickness=self.thickness,
                diam=self.diam,
                profile=self.profile,
                bush_diam=self.bush_diam,
                bush_height=self.bush_height,
            ),
        }

    def make_constraints(self):
        """Fix the body at the origin; shaft at the origin; cover at the
        body's bottom mate."""
        return [
            Fixed(self.components["body"].mate_origin),
            Coincident(
                self.components["shaft"].mate_origin,
                self.components["body"].mate_origin,
            ),
            Coincident(
                self.components["back"].mate_origin, self.components["body"].mate_bottom
            ),
        ]
if __name__ == "__main__":
from cqparts.display import render_props, display
dc = DCMotor()
display(dc)
|
from typing import Dict, List, NamedTuple, Tuple
import os
import h5py
import numpy as np
import pandas as pd
from PIL import Image
from ..logging import DEBUG, WARNING, log
class Spot(NamedTuple):
    r"""
    Data type for encoding circular capture spots, which are used in
    technologies based on the Spatial Transcriptomics method
    (https://doi.org/10.1126/science.aaf2403 ).
    """
    # Center coordinates (x, y) and radius r, in image pixel units.
    x: float
    y: float
    r: float
def rescale(
    image: np.ndarray, scaling_factor: float, resample: int = Image.NEAREST
) -> np.ndarray:
    r"""
    Rescales image

    :param image: Image array
    :param scaling_factor: Scaling factor
    :param resample: Resampling filter
    :returns: The rescaled image
    """
    pil_image = Image.fromarray(image)
    target_size = [round(dim * scaling_factor) for dim in pil_image.size]
    return np.array(pil_image.resize(target_size, resample=resample))
def labels_from_spots(dst: np.ndarray, spots: List[Spot]) -> None:
    r"""Fills `dst` with labels enumerated from `spots`"""
    for label, spot in enumerate(spots, 1):
        cx, cy, radius = (int(round(v)) for v in (spot.x, spot.y, spot.r))
        ys, xs = [], []
        # Collect every pixel of the bounding box that lies inside the
        # (un-rounded) spot radius.
        for dy in range(-radius, radius + 1):
            for dx in range(-radius, radius + 1):
                if dy ** 2 + dx ** 2 <= spot.r ** 2:
                    ys.append(cy - dy)
                    xs.append(cx - dx)
        dst[ys, xs] = label
def crop_image(
    image: np.ndarray, spots: List[Spot], margin: float = 0.12
) -> np.ndarray:
    r"""Crops `image`, keeping a fixed minimum margin to the `spots`."""
    centers = [[s.x, s.y] for s in spots]
    xmin, ymin = np.min(centers, 0)
    xmax, ymax = np.max(centers, 0)
    # Margin is a fraction of the larger spot-extent dimension.
    pad = margin * max(xmax - xmin, ymax - ymin)
    x0 = max(int(round(xmin - pad)), 0)
    y0 = max(int(round(ymin - pad)), 0)
    x1 = int(round(xmax + pad))
    y1 = int(round(ymax + pad))
    return image[y0:y1, x0:x1]
def mask_tissue(
    image: np.ndarray, counts: pd.DataFrame, label: np.ndarray
) -> Tuple[pd.DataFrame, np.ndarray]:
    r"""
    Detects the tissue in `image`. The area outside of the tissue is given a
    new label with zero counts everywhere.

    Returns the (counts, label) pair with all pre-existing labels/indices
    shifted up by one and label/index 1 reserved for the background region.
    Skipped (inputs returned unchanged) when the optional tissue-recognition
    library is not installed.
    """
    try:
        # pylint: disable=import-outside-toplevel
        import tissue_recognition as tr
    except ImportError:
        log(
            WARNING,
            "Tissue masking requires the ST Tissue Recognition library"
            # pylint: disable=line-too-long
            " (https://github.com/SpatialTranscriptomicsResearch/st_tissue_recognition)."
            " This step will be skipped.",
        )
        return counts, label
    # Binary tissue mask computed over the full-resolution image.
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    tr.recognize_tissue(image.copy(), mask)
    mask = tr.get_binary_mask(mask)
    # Shift every existing label (and count row) up by one so that label 1
    # is free to act as the background region.
    counts.index += 1
    label[label != 0] += 1
    # Labels touching the tissue mask anywhere are kept; pixels of all other
    # labels outside the mask are re-assigned to the background label 1.
    in_mask = np.unique(label[mask & (label != 0)])
    label[~mask.astype(bool) & ~np.isin(label, in_mask)] = 1
    # Prepend an all-zero (sparse) count row for the background label.
    counts = pd.concat(
        [
            pd.DataFrame(
                [np.repeat(0, counts.shape[1])],
                columns=counts.columns,
                index=[1],
            ).astype(pd.SparseDtype("float", 0)),
            counts,
        ]
    )
    return counts, label
def write_data(
    counts: pd.DataFrame,
    image: np.ndarray,
    label: np.ndarray,
    annotation: Dict[str, np.ndarray],
    type_label: str,
    path: str = "data.h5",
) -> None:
    r"""Writes data to the format used by XFuse.

    :param counts: Count matrix; the index is written as integers
    :param image: Image array; its first two axes must match `label`
    :param label: Label image (int16)
    :param annotation: Named annotation label images (uint16)
    :param type_label: Value stored in the ``type`` dataset
    :param path: Output HDF5 file path
    :raises RuntimeError: if `image` and `label` differ in height/width
    """
    if image.shape[:2] != label.shape[:2]:
        raise RuntimeError(
            f"Image shape ({image.shape[:2]}) is not equal to"
            f" the shape of the label image ({label.shape[:2]})."
        )
    if np.max(image.shape[:2]) > 5000:
        log(
            WARNING,
            "The image resolution is very large! 😱"
            " XFuse typically works best on medium resolution images"
            " (approximately 1000x1000 px)."
            " If you experience performance issues, please consider reducing"
            " the resolution.",
        )
    if counts.columns.duplicated().any():
        log(
            WARNING,
            "Count matrix contains duplicated columns."
            " Counts will be summed by column name.",
        )
        # NOTE(review): DataFrame.sum(axis=1, level=0) is deprecated in
        # recent pandas; groupby(level=0, axis=1).sum() is the replacement.
        counts = counts.sum(axis=1, level=0)
    log(DEBUG, "writing data to %s", path)
    os.makedirs(os.path.normpath(os.path.dirname(path)), exist_ok=True)
    with h5py.File(path, "w") as data_file:
        # Store the count matrix in CSR layout (data / indices / indptr).
        data = (
            counts.astype(pd.SparseDtype("float", 0.0)).sparse.to_coo().tocsr()
        )
        data_file.create_dataset(
            "counts/data", data.data.shape, float, data.data.astype(float)
        )
        data_file.create_dataset(
            "counts/indices",
            data.indices.shape,
            data.indices.dtype,
            data.indices,
        )
        data_file.create_dataset(
            "counts/indptr", data.indptr.shape, data.indptr.dtype, data.indptr
        )
        # Column names and (integer) row index for reconstructing the frame.
        data_file.create_dataset(
            "counts/columns",
            counts.columns.shape,
            h5py.string_dtype(),
            counts.columns.values,
        )
        data_file.create_dataset(
            "counts/index", counts.index.shape, int, counts.index.astype(int)
        )
        data_file.create_dataset("image", image.shape, np.uint8, image)
        data_file.create_dataset("label", label.shape, np.int16, label)
        data_file.create_group("annotation", track_order=True)
        for k, v in annotation.items():
            data_file.create_dataset(f"annotation/{k}", v.shape, np.uint16, v)
        data_file.create_dataset(
            "type", data=type_label, dtype=h5py.string_dtype()
        )
|
#!/bin/python3
# Idea: keep two heap, lower is max heap, higher is min heap.
import sys
from heapq import heappush as push, heappushpop as pushpop
class Spliter:
    """Running-median tracker built on two heaps.

    `upper` is a min-heap over the larger half of the values; `lower` is a
    max-heap over the smaller half (stored negated). `upper` may hold at
    most one element more than `lower`.
    """

    def __init__(self):
        self.upper = []  # min-heap: upper half
        self.lower = []  # max-heap (negated values): lower half

    def median(self):
        """Current median: top of `upper` for odd counts, otherwise the mean
        of the two middle elements."""
        if len(self.upper) > len(self.lower):
            return self.upper[0]
        return (self.upper[0] - self.lower[0]) / 2.

    def add(self, value):
        """Insert `value`, keeping the two heaps balanced."""
        # Rotate the new value through both heaps so the smallest upper
        # element and largest lower element end up in the right halves.
        smallest_upper = pushpop(self.upper, value)
        largest_lower = -pushpop(self.lower, -smallest_upper)
        if len(self.lower) >= len(self.upper):
            push(self.upper, largest_lower)
        else:
            push(self.lower, -largest_lower)
# Read n values from stdin and print the running median after each insert.
n = int(input().strip())
a = []   # NOTE(review): unused
a_i = 0  # NOTE(review): unused initial value (rebound by the loop)
s = Spliter()
for a_i in range(n):
    a_t = int(input().strip())
    s.add(a_t)
    print(s.median()*1.0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_tg_react
------------
Tests for `tg_react` models module.
"""
import unittest
class TestTgReact(unittest.TestCase):
    """Placeholder test case for the tg_react models module."""
    def test_something(self):
        # TODO: Add more tests
        pass
|
from django.db import models
from django.contrib.auth.models import User
class TaskModel(models.Model):
    """A single to-do item owned by a user."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # tasks die with their owner
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)  # optional free text
    complete = models.BooleanField(default=False)
    created = models.DateField(auto_now_add=True)  # set once on creation
    updated = models.DateField(auto_now=True)      # refreshed on every save
    def __str__(self):
        # Truncate long titles so list/admin displays stay readable.
        if len(self.title) > 36:
            return self.title[:36] + "..."
        return self.title
    class Meta:
        order_with_respect_to = 'user'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Mar 6, 2013
@author: manuel
'''
from __future__ import print_function
from scipy.stats import poisson
from hmmlearn.hmm import _BaseHMM
import string , numpy as np
import sys
from time import time
from math import fabs
from help_hmm import _init, _add_pseudo_counts, _valid_posteriors
# import cProfile
# import trace
# Module-level memoisation caches shared by all PoissonHMM2d3s instances:
lookup_poisson = {}        # (x, p) -> poisson.pmf(x, p)
lookup_state = {}          # (state, symbol, c..., p...) -> mixture emission prob
lookup_poisson_state = {}  # (symbol, p per state) -> per-state pmf array
lookup_denum = {}          # NOTE(review): never written in this file — presumably unused
def get_init_parameters(s1, s2, **info):
    """Derive initial mixture coefficients and Poisson means from two signals.

    :param s1: sequence of 2-tuples; the first components form dimension 0
    :param s2: sequence of 2-tuples; the second components form dimension 1
    :param info: must provide 'distr_magnitude', 'n_components', 'n_features'
    :returns: (initial_c, initial_p) float64 arrays, both of shape
        (n_features, distr_magnitude, n_components)
    """
    distr_magnitude = int(info['distr_magnitude'])
    n_components = int(info['n_components'])
    n_features = int(info['n_features'])
    # Empirical mean of each dimension. Fixed: np.mean(map(...)) only works
    # on Python 2 — map() is lazy on Python 3, so materialise a list first.
    emp_mean = [np.mean([x[0] for x in s1]), np.mean([x[1] for x in s2])]
    # Mixture coefficients start uniform over the components.
    initial_c = [[[1 / float(info['distr_magnitude']) for _ in range(n_components)] for _ in range(distr_magnitude)] for _ in range(n_features)]
    initial_p = [[[0 for _ in range(n_components)] for _ in range(distr_magnitude)] for _ in range(n_features)]
    for dim in range(n_features):
        for comp in range(distr_magnitude):
            for state in range(n_components):
                if state == 0:
                    # Background state: a small fraction of the empirical mean.
                    background_value = max(1, emp_mean[dim] / 100.)
                    if comp == 0:
                        initial_p[dim][comp][state] = background_value
                    elif comp > 0:
                        initial_p[dim][comp][state] = (comp + 1) * initial_p[dim][0][state]
                # Dimension 0 peaks in state 1, dimension 1 in state 2.
                indices = [1, 2] if dim == 0 else [2, 1]
                if state > 0:
                    if comp == 0:
                        # NOTE(review): these two assignments do not depend on
                        # `state`, so states 1 and 2 write identical values —
                        # harmless but redundant, kept for fidelity.
                        initial_p[dim][comp][indices[0]] = distr_magnitude * emp_mean[dim] / float(sum(range(1, distr_magnitude + 1)))
                        initial_p[dim][comp][indices[1]] = initial_p[dim][comp][0]
                    elif comp > 0:
                        initial_p[dim][comp][state] = (comp + 1) * initial_p[dim][0][state]
    return np.array(initial_c, np.float64), np.array(initial_p, np.float64)
class PoissonHMM2d3s(_BaseHMM):
def __init__(self, distr_magnitude, factors, init_state_seq=None, p = [[[3, 2, 1], [12, 15, 20], [2, 1, 1]], [[3, 2, 3], [4, 2, 1], [15, 16, 18]]], \
c = [[[0.5, 0.4, 0.1], [0.5, 0.4, 0.1], [0.5, 0.4, 0.1]], [[0.5, 0.4, 0.1], [0.5, 0.4, 0.1], [0.5, 0.4, 0.1]]], n_components=3, covariance_type='diag', startprob=[1,0,0],
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self.c = c # 1) dim 2) component 3) state
self.p = p # 1) dim 2) component, parameter of Poisson distribution 3) state
self.n_features = 2 #emission dimension
self.init_state_seq = init_state_seq
self.distr_magnitude = distr_magnitude
self.factors = factors #weight of the posteriors
assert len(self.factors) == self.distr_magnitude
self.weights = 0 #weigths for merging distributions
def save_setup(self, tracker, n, p):
tracker.write(text=self.p, header='Poisson P')
tracker.write(text=self.c, header='Poisson C')
tracker.write(text=str(n), header='Poisson p-value settings (n, p)')
tracker.write(text=str(p))
tracker.write(text=self._get_transmat(), header="Transmission matrix")
tracker.write(text=self.distr_magnitude, header="Distribution magnitude")
def _compute_log_likelihood(self, X):
matrix = []
lookup = {}
for x in X:
row = []
for state in range(self.n_components): #state
res = 0
for dim in range(self.n_features): #dim
for comp in range(self.distr_magnitude):
index = (x[dim], self.p[dim][comp][state])
if lookup.has_key( index ):
res += lookup[index] * self.c[dim][comp][state]
else:
y = poisson.logpmf(x[dim], self.p[dim][comp][state])
#lookup[index] = y
res += y * self.c[dim][comp][state]
#print(y, self.c[dim][comp][state])
row.append(res)
#print(self.c)
#print(x, row)
matrix.append(row)
return np.asarray(matrix)
def _generate_sample_from_state(self, state, random_state=None):
res = []
for dim in range(self.n_features):
erg = round(sum([poisson.rvs(self.p[dim][comp][state]) * self.c[dim][comp][state] for comp in range(self.distr_magnitude)]))
res.append(erg)
return np.array(res)
def _initialize_sufficient_statistics(self):
stats = super(PoissonHMM2d3s, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['post_sum_l'] = np.zeros([self.n_features, self.distr_magnitude, self.n_components])
stats['post_sum_l_emisson'] = np.zeros([self.n_features, self.distr_magnitude, self.n_components])
stats['post_sum_l_factor'] = np.zeros([self.n_features, self.distr_magnitude, self.n_components])
stats['weights'] = [[[1, 1] for _ in range(self.n_components)] for _ in range(self.n_features)]
return stats
def _get_poisson(self, x, p):
if lookup_poisson.has_key((x, p)):
value_poisson = lookup_poisson[(x, p)]
else:
value_poisson = poisson.pmf(x, p)
lookup_poisson[(x, p)] = value_poisson
return value_poisson
def _get_value(self, state, symbol, dim):
help_i = [self.c[dim][i][state] for i in range(self.distr_magnitude)]
help_j = [self.p[dim][i][state] for i in range(self.distr_magnitude)]
index = (state, symbol[dim], tuple(help_i), tuple(help_j))
if index not in lookup_state:
res = 0
for comp in range(self.distr_magnitude):
res += self.c[dim][comp][state] * self._get_poisson(symbol[dim], self.p[dim][comp][state])
lookup_state[index] = res
return lookup_state[index]
def _help_accumulate_sufficient_statistics(self, obs, stats, posteriors):
posteriors = _valid_posteriors(posteriors, obs)
i = 0
print("run...! start at " + str(time()), file=sys.stderr)
for t, symbol in enumerate(obs):
stats['post'] += posteriors[t]
for dim in range(self.n_features):
for comp in range(self.distr_magnitude):
#lookup
index = (symbol[dim], tuple([self.p[dim][comp][state] for state in range(self.n_components)]))
if index not in lookup_poisson_state:
tmp = np.array([self._get_poisson(symbol[dim], self.p[dim][comp][state]) for state in range(self.n_components)])
lookup_poisson_state[index] = tmp
h = lookup_poisson_state[index]
enum = self.c[dim][comp] * h
denum = np.array([self._get_value(state, symbol, dim) for state in range(self.n_components)])
i += 1
try:
help = posteriors[t] * enum / _add_pseudo_counts(denum)
except:
print("%s \n" %i, file=sys.stderr)
print("%s %s %s \n" %(denum, symbol, dim), file=sys.stderr)
print("%s \n" %(self.c), file=sys.stderr)
print("%s \n" %(self.p), file=sys.stderr)
print("%s \n" %(posteriors[t]), file=sys.stderr)
print("%s \n" %(enum), file=sys.stderr)
help = np.array([1.0/self.distr_magnitude, 1.0/self.distr_magnitude, 1.0/self.distr_magnitude])
stats['post_sum_l'][dim][comp] += help
stats['post_sum_l_emisson'][dim][comp] += help * symbol[dim]
stats['post_sum_l_factor'][dim][comp] += help * self.factors[comp]
if posteriors[t][1] > 0.5 or posteriors[t][2] > 0.5:
if posteriors[t][1] >= posteriors[t][2]:
stats['weights'][dim][state][0] += 1
if posteriors[t][2] > posteriors[t][1]:
stats['weights'][dim][state][1] += 1
#print(self.p)
stats['posterior'] = np.copy(posteriors)
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(PoissonHMM2d3s, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
self._help_accumulate_sufficient_statistics(obs, stats, posteriors)
def _help_do_mstep(self, stats):
    """M-step: re-estimate mixture weights ``c`` and Poisson rates ``p``.

    Both are computed from the accumulated sufficient statistics and kept
    strictly positive via ``_add_pseudo_counts``; afterwards the signal
    states are tied across dimensions with ``merge_distr``.
    """
    for dim in range(self.n_features):
        for comp in range(self.distr_magnitude):
            for state in range(self.n_components):
                # Mixture weight: expected responsibility / state occupancy.
                self.c[dim][comp][state] = stats['post_sum_l'][dim][comp][state] / _add_pseudo_counts(stats['post'][state])
                # if comp == 0:
                # Poisson rate: weighted emission sum / weighted factor sum.
                self.p[dim][comp][state] = stats['post_sum_l_emisson'][dim][comp][state] / (_add_pseudo_counts(stats['post_sum_l_factor'][dim][comp][state]))
                self.p[dim][comp][state] = _add_pseudo_counts(self.p[dim][comp][state])
                # else:
                # self.p[dim][comp][state] = self.factors[comp] * self.p[dim][0][state]
    self.merge_distr(stats['weights'])
def _do_mstep(self, stats, params):
    """Run the base-class M-step, then this model's custom parameter updates."""
    super(PoissonHMM2d3s, self)._do_mstep(stats, params)
    self._help_do_mstep(stats)
def get_mean(self, state, dim):
    """Return the mixture mean for *state* in dimension *dim*.

    The mean of a mixture of Poissons is the weight-rate dot product:
    sum_k c[dim][k][state] * p[dim][k][state].
    """
    return sum(
        self.c[dim][comp][state] * self.p[dim][comp][state]
        for comp in range(self.distr_magnitude)
    )
def merge_distr(self, weights):
    """Tie corresponding states of the two dimensions to common parameters.

    For every component, state 1 of one dimension is merged with state 2
    of the opposite dimension (and state 0 with itself). ``weights`` holds
    dominance counts per (dim, state) and sets the blend factor ``f``.
    """
    for dim in range(self.n_features):
        for comp in range(self.distr_magnitude):
            for state1, state2 in [[1, 2], [2, 1], [0, 0]]:
                dim2 = 1 if dim == 0 else 0  # opposite dimension (assumes n_features == 2 -- TODO confirm)
                c1, c2 = weights[dim][state1][0], weights[dim][state2][1]
                # NOTE(review): raises ZeroDivisionError when c1 + c2 == 0;
                # confirm weights are always populated before merging.
                f = c1 / float(c1 + c2)
                # Pull both parameter sets towards a shared value.
                p_norm = self.p[dim2][comp][state2] + f * fabs(self.p[dim][comp][state1] - self.p[dim2][comp][state2])
                c_norm = self.c[dim2][comp][state2] + f * fabs(self.c[dim][comp][state1] - self.c[dim2][comp][state2])
                self.p[dim][comp][state1] = p_norm
                self.p[dim2][comp][state2] = p_norm
                self.c[dim][comp][state1] = c_norm
                self.c[dim2][comp][state2] = c_norm
def test(c = 30, verbose=False):
    """Smoke-test fit/predict for 3, 2 and 1 mixture components.

    For each distr_magnitude, samples *c* observations from a reference
    PoissonHMM2d3s, fits a second model started from perturbed parameters
    and compares the predicted state path against the true one.
    Returns (res, error_rate): res[i] is True iff the run with
    distr_magnitude == i + 1 recovered every hidden state.
    """
    res = [True] * 4  # indexed by distr_magnitude; slot 0 is trimmed at the end
    errors = 0.
    #3 components
    distr_magnitude = 3
    factors = [1,2,3]
    tmp1 = np.array([[[3, 12, 2], [2, 15, 1], [1, 20, 1]], [[3, 4, 15], [2, 2, 16], [3, 1, 18]]], np.float64)
    c1 = np.array([[[0.2, 0.3, 0.4], [0.3, 0.4, 0.3], [0.5, 0.3, 0.3]], [[0.5, 0.4, 0.6], [0.4, 0.4, 0.3], [0.1, 0.2, 0.1]]], np.float64)
    tmp2 = np.array([[[2, 10, 4], [2, 11, 3], [3, 14, 1]], [[1, 4, 14], [3, 3, 15], [2, 12, 20]]], np.float64)
    c2 = np.array([[[0.1, 0.5, 0.3], [0.4, 0.3, 0.4], [0.5, 0.2, 0.3]], [[0.4, 0.3, 0.6], [0.4, 0.5, 0.3], [0.2, 0.2, 0.1]]], np.float64)
    m = PoissonHMM2d3s(p=tmp1, c=c1, distr_magnitude=distr_magnitude, factors = factors)
    X, Z = m.sample(c) #returns (obs, hidden_states)
    m2 = PoissonHMM2d3s(p=tmp2, c=c2, distr_magnitude=distr_magnitude, factors = factors)
    m2.fit([X])
    e = m2.predict(X)
    for i, el in enumerate(X):
        if verbose:
            print(el, Z[i], e[i], Z[i] == e[i], sep='\t', file=sys.stderr)
        if Z[i] != e[i]:
            res[distr_magnitude] = False
            errors += 1
    print("test", distr_magnitude, res[distr_magnitude], file=sys.stderr)
    #2 components
    distr_magnitude = 2
    factors = [1,2]
    tmp1 = np.array([[[1, 15, 2], [2, 16, 1]], [[3, 1, 15], [2, 1, 16]]], np.float64)
    c1 = np.array([[[0.6, 0.7, 0.5], [0.4, 0.3, 0.5]], [[0.6, 0.8, 0.5], [0.4, 0.2, 0.5]]], np.float64)
    tmp2 = np.array([[[2, 14, 1], [1, 14, 2]], [[4, 2, 17], [2, 2, 19]]], np.float64)
    c2 = np.array([[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]], np.float64)
    m = PoissonHMM2d3s(p=tmp1, c=c1, distr_magnitude=distr_magnitude, factors = factors)
    X, Z = m.sample(c) #returns (obs, hidden_states)
    m2 = PoissonHMM2d3s(p=tmp2, c=c2, distr_magnitude=distr_magnitude, factors = factors)
    m2.fit([X])
    e = m2.predict(X)
    for i, el in enumerate(X):
        if verbose:
            print(el, Z[i], e[i], Z[i] == e[i], sep='\t', file=sys.stderr)
        if Z[i] != e[i]:
            res[distr_magnitude] = False
            errors += 1
    print("test", distr_magnitude, res[distr_magnitude], file=sys.stderr)
    #1 compnent
    distr_magnitude = 1
    factors = [1]
    tmp1 = np.array([[[1, 15, 2]], [[2, 1, 16]]], np.float64)
    c1 = np.array([[[1, 1, 1]], [[1, 1, 1]]], np.float64)
    tmp2 = np.array([[[2, 14, 1]], [[4, 2, 17]]], np.float64)
    c2 = np.array([[[1, 1, 1]], [[1, 1, 1]]], np.float64)
    m = PoissonHMM2d3s(p=tmp1, c=c1, distr_magnitude=distr_magnitude, factors = factors)
    X, Z = m.sample(c) #returns (obs, hidden_states)
    m2 = PoissonHMM2d3s(p=tmp2, c=c2, distr_magnitude=distr_magnitude, factors = factors)
    m2.fit([X])
    e = m2.predict(X)
    for i, el in enumerate(X):
        if verbose:
            print(el, Z[i], e[i], Z[i] == e[i], sep='\t', file=sys.stderr)
        if Z[i] != e[i]:
            res[distr_magnitude] = False
            errors += 1
    print("test", distr_magnitude, res[distr_magnitude], file=sys.stderr)
    res = res[1:]  # drop the unused slot 0
    return res, errors/c
if __name__ == '__main__':
    # Run the 3/2/1-component smoke test with verbose per-sample output.
    print(test(c=100, verbose=True))
    #c, p = get_init_parameters([(10,1), (50,3), (50,2), (40,2)], [(0,10),(2,12),(10,16)], distr_magnitude=3, n_components=3, n_features=2)
    #print(p)
    #3 components
    c= 100
    distr_magnitude = 3
    # NOTE(review): on Python 3 ``map`` returns a lazy iterator, not a list;
    # confirm downstream consumers can handle that (or wrap in list()).
    factors = map(lambda x: x/(float(distr_magnitude)), [1,2,3])
#factors = np.array(factors) + np.array([2/float(distr_magnitude)]*3)
#factors = map(lambda x: x*float(distr_magnitude), [1,2,3])
#factors[0]=1
#factors = np.array(factors) + np.array([2/float(distr_magnitude)]*3)
# factors = [1,2,3]
#
#
# tmp1 = np.array([[[3, 12, 2], [2, 15, 1], [1, 20, 1]], [[3, 4, 15], [2, 2, 16], [3, 1, 18]]], np.float64)
# c1 = np.array([[[0.2, 0.3, 0.4], [0.3, 0.4, 0.3], [0.5, 0.3, 0.3]], [[0.5, 0.4, 0.6], [0.4, 0.4, 0.3], [0.1, 0.2, 0.1]]], np.float64)
#
# tmp2 = np.array([[[2, 10, 4], [2, 11, 3], [3, 14, 1]], [[1, 4, 14], [3, 3, 15], [2, 12, 20]]], np.float64)
# c2 = np.array([[[0.1, 0.5, 0.3], [0.4, 0.3, 0.4], [0.5, 0.2, 0.3]], [[0.4, 0.3, 0.6], [0.4, 0.5, 0.3], [0.2, 0.2, 0.1]]], np.float64)
#
# m = PoissonHMM2d3s(p=tmp1, c=c1, distr_magnitude=distr_magnitude, factors = [1,2,3])
#
# X, Z = m.sample(c) #returns (obs, hidden_states)
# m2 = PoissonHMM2d3s(p=tmp2, c=c2, distr_magnitude=distr_magnitude, factors = factors)
#
# m2.fit([X])
#
# e = m2.predict(X)
# s1 = []
# s2 = []
# for i, el in enumerate(X):
# if e[i] == 1:
# s1.append(el[0])
# s2.append(el[1])
#
# #print(el, Z[i], e[i], Z[i] == e[i], sep='\t', file=sys.stderr)
# print(np.mean(s1), np.mean(s2))
# print(m2.c)
# print(m2.p)
|
from scipy.io import netcdf
import numpy as np
# for skua - TODO
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import matplotlib.ticker as ticker
from argparse import ArgumentParser
import os
import pdb
# names of variable to plot
# NOTE(review): "number_od_SDs" looks like a typo for "number_of_SDs", but it
# must match the variable name inside the netcdf file -- confirm before fixing.
Variable_name_l = ["theta", "vapor", "RH", "number_od_SDs", "aerosol_number", "w", "dtheta_mphys", "dqv_mphys", "cloud_number_r20um", "rain_number_r20um", "cloud_mass_r20um", "rain_mass_r20um"]
prsr = ArgumentParser(add_help=True, description='TODO')
prsr.add_argument('--outdir', default="", help='output directory from kid_a_setup/output')
# NOTE(review): ``required=None`` is falsy -- probably meant required=False.
prsr.add_argument('--it_l', nargs='+', type=int, required=None, help='time indexes for plotting, e.g., --it_l 0 2 -1 ')
args = prsr.parse_args()
# reading variables from the netcdf file
def reading_netcdf(netcdf_file, var_l):
    """Read the requested variables plus the grid/time axes from *netcdf_file*.

    Returns a dict mapping each name in *var_l* (and "z", "x", "time")
    to a full slice of the corresponding netcdf variable.
    """
    wanted = list(var_l) + ["z", "x", "time"]
    return {name: netcdf_file.variables[name][:] for name in wanted}
def contour_plot(outdir_path, var_name_l, var_d, it, nr_fig):
    """Draw a 2x2 grid of filled contour plots for one output time *it*.

    Saves the figure as a PDF into *outdir_path* and shows it.
    Python 2 module (print statements).
    """
    fig = plt.figure(nr_fig, figsize = (8,8))
    fig.suptitle("time = " + str(var_d["time"][it]/60) + " min")
    x_range = var_d["x"][:]
    z_range = var_d["z"][1:]  # skip the lowest model level
    #pdb.set_trace()
    X, Y = np.meshgrid(x_range, z_range)
    nr_pl = 1
    for var in var_name_l:
        print var
        ax = plt.subplot(2,2,nr_pl)
        var_domain = var_d[var][it,1:,:]
        var_min, var_max = var_domain.min(), var_domain.max()
        # Only contour non-constant fields (contourf needs distinct levels).
        if var_min < var_max:
            if var_min == 0.:
                # avoid an all-zero lowest contour level
                levels_var = np.linspace(var_max * 0.1, var_max, 6)
            else:
                levels_var = np.linspace(var_min, var_max, 6)
            print levels_var
            CS = plt.contourf(X, Y, var_domain, cmap=plt.cm.Blues, alpha=0.7, levels=levels_var)
            plt.xlabel(var + "; min = " + '%s' % float('%.3g' % var_domain.min()) +
                       ", max = " + '%s' % float('%.3g' % var_domain.max()) + "\n" +
                       "min_level = " + '%s' % float('%.3g' % levels_var[0]),
                       fontsize=10)
        if nr_pl in [1, 3]:
            # y-axis label only on the left column
            plt.ylabel(r'height $[m]$', fontsize=10)
        nr_pl += 1
    plt.savefig(os.path.join(outdir_path, "contour_" + str(var_d["time"][it]) + "-".join(var_name_l) + ".pdf"))
    plt.show()
def main(outdir_path, filename="SC_2D_out.nc", variable_name_l=Variable_name_l):
    """Read the netcdf output and plot all variables in groups of four."""
    nf = netcdf.netcdf_file(os.path.join(outdir_path, filename), 'r')
    var_d = reading_netcdf(nf, variable_name_l)
    # Chunk the variable list into groups of 4 (one 2x2 figure each).
    # Python 2 module: xrange.
    variable_name_lpl = [variable_name_l[i:i+4] for i in xrange(0, len(variable_name_l), 4)]
    # Default time indexes: every 12th output step.
    it_l = args.it_l if args.it_l!=None else range(0, var_d["time"].shape[0], 12)
    nr_fig = 1
    for it in it_l:
        for var_name in variable_name_lpl:
            contour_plot(outdir_path, var_name, var_d, it, nr_fig)
            nr_fig += 1
# Entry point: plot from kid_a_setup/output/<outdir>.
main(os.path.join("../kid_a_setup/output", args.outdir))
|
import math
# Python 2 input: read N; n = N + 1 terms are used below.
s = int(raw_input('ENTER THE VALUE OF N: '))
n = s + 1
def cat(n):
    """Return the n-th Catalan number (C0 = 1).

    Uses the recurrence C_n = (4n - 2) / (n + 1) * C_{n-1}.
    Bug fix: the original computed (4*n - 2)/(n + 1) first, which truncates
    under Python 2 integer division (e.g. cat(3) gave 4 instead of 5) and
    yields floats under Python 3. Multiplying before dividing keeps the
    arithmetic exact because the result is always an integer.
    """
    if n == 0:
        return 1
    else:
        return (4*n - 2) * cat(n-1) // (n + 1)
def print_cat(n):
    """Print a sequence of values not exceeding cat(n).

    NOTE(review): the update multiplies by the FIXED ratio (4n-2)/(n+1),
    so this is a geometric-like sequence, not the Catalan numbers; it also
    truncates under Python 2 integer division. Confirm intended behavior.
    """
    c1, c2 = 1, 1
    # NOTE(review): cat(n) is recomputed on every loop iteration.
    while c1 <= cat(n):
        print(c1)
        c1, c2 = c2, (4*n - 2)/ ( n + 1 ) * c2
# Python 2 print statement. NOTE(review): print_cat returns None, so this
# also prints "None" after the sequence.
print print_cat(n)
|
import sys
import os
import string
import glob
import optparse
import shlex
import time
import getopt
import shutil
import fnmatch
import cmd
import django
from os.path import join
from datetime import datetime, timedelta
from django.utils import timezone
sys.path.append(os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
django.setup()
from django.conf import settings
from fae2.settings import APP_DIR
from django.db import models
from reports.models import WebsiteReport
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# Module-wide verbosity switches.
DEBUG=False
INFO=True
def debug(s):
    """Print a debug-level line when the module DEBUG flag is enabled."""
    if DEBUG:
        print("[ARCHIVED REPORTS][DEBUG]: " + s)
def info(s):
    """Print an info-level line when the module INFO flag is enabled."""
    if INFO:
        print("[ARCHIVED REPORTS][INFO]: " + s)
def error(s):
    """Print an error-level line (always emitted, regardless of flags)."""
    prefix = "[ARCHIVED REPORTS][**ERROR**]: "
    print(prefix + s)
def main():
    """Daemon loop: periodically prune old/errored WebsiteReports per user.

    Every cycle: delete reports in error state older than one day, then
    demote the oldest completed, non-archived reports older than two days
    that exceed the user's max_saved quota. Sleeps 12 hours between cycles
    (20 seconds in DEBUG mode). Never returns.
    """
    message_flag = True  # NOTE(review): set but never used
    while True:
        ws_eval_results = []  # NOTE(review): never populated -- dead variable?
        now = timezone.now();
        error_date = now - timedelta(days=1)
        delete_date = now - timedelta(days=2)
        info("=================================================")
        info("  Now:         " + str(now))
        info("  Error date:  " + str(error_date ))
        info("  Delete date: " + str(delete_date))
        info("=================================================")
        for user in User.objects.all():
            if user.username == 'anonymous':
                continue
            else:
                # Delete any reports that had errors
                # NOTE(review): bare excepts below swallow all exceptions,
                # including programming errors -- consider narrowing.
                try:
                    ws_reports = WebsiteReport.objects.filter(user=user, created__lt=error_date, status='E')
                    if len(ws_reports):
                        for wsr in ws_reports:
                            try:
                                wsr.delete()
                            except:
                                error("Error deleted: " + str(wsr))
                except:
                    error("Error accessing the database for status='E'")
                # Delete the oldest reports over the saved limit
                # NOTE(review): if this query raises, ws_reports keeps its
                # previous value (the status='E' queryset) -- confirm intent.
                try:
                    ws_reports = WebsiteReport.objects.filter(user=user, created__lt=delete_date, status='C').exclude(archive=True).order_by('-created')
                except:
                    error("Error accessing the database for status='E' and archived=True ")
                profile = False
                try:
                    profile = user.profile
                except:
                    error("Error accessing profile: " + str(user))
                if profile:
                    # NOTE(review): ``max`` shadows the builtin.
                    max = profile.max_saved
                    num = len(ws_reports)
                    diff = num-max
                    info("  User " + str(user) + " " + str(num) + " unsaved reports old enough for deletion (Buffer " + str(max) + " reports)")
                    if diff:
                        i = 0;
                        # Demote the first ``diff`` (newest-first) reports.
                        for wsr in ws_reports:
                            if i < diff:
                                try:
                                    wsr.set_status_summary()
                                except:
                                    error("Error setting website report to summary: " + str(wsr))
                            i = i + 1
        if DEBUG:
            time.sleep(20) # wait 20 seconds between checks if in DEBUG mode
        else:
            time.sleep(43200) # wait 12 hours between checks
if __name__ == "__main__":
    # Runs forever; intended to be managed as a daemon/service.
    main()
|
import sys
import BayesDB
# CLI (Python 2): train.py <classifier|NONE> <training-file> <db-file>
# Feeds each non-empty line of the training file to the Bayes classifier DB.
classifier = sys.argv[1].strip()
if classifier == 'NONE':
    classifier = None
# NOTE(review): file handle is never closed explicitly.
training_data = open(sys.argv[2], 'r')
db_file = sys.argv[3]
db = BayesDB.createOrLoad(db_file)
docs = 0
for line in training_data:
    line = line.strip()
    ## assume non-trivial sanitisation done upstream
    if line == "":
        continue
    db.addDoc(line, classifier)
    docs+=1
print 'Classified %d docs as %s' % (docs, classifier)
print 'Writing DB file %s ...' % db_file
db.dump()
print 'New DB state:'
db.describe()
print 'Done'
|
from flask import Flask, request, render_template
from werkzeug.exceptions import NotFound
from cart_views import cart_app
from products_views import products_app
# Flask app with product and cart blueprints mounted under their own prefixes.
app = Flask(__name__)
app.register_blueprint(products_app, url_prefix='/products/')
app.register_blueprint(cart_app, url_prefix='/cart/')
@app.route('/')
@app.route('/<int:user_id>')
@app.route('/<name>', methods=['GET', 'POST', 'PUT'])
def index_page(name=None, user_id=None):
    """Render the index page on GET; echo the HTTP method otherwise.

    Matched as "/", "/<int user_id>" or "/<name>"; only the name route
    accepts POST/PUT.
    """
    if request.method == 'GET':
        # justname = name if name else 'DefaultName'
        justname = name or "DefaultName"
        response = render_template('index.html',
                                   name='World',
                                   args=request.args,
                                   products=["apple", "banana"],
                                   justname=justname,
                                   user_id=user_id)
        return response
    return f'hello {request.method} request'
@app.route('/404/')
def not_found():
    """Deliberately trigger a 404 (demo endpoint)."""
    raise NotFound
# NOTE(review): runs on import because there is no __main__ guard, and
# debug=True enables the interactive debugger -- do not use in production.
app.run('localhost', 8000, debug=True)
|
#!/usr/bin/env python3
# encoding: utf-8
import json
import argparse
if __name__ == '__main__':
    # Build a JSON manifest of utterances: feature path, frame count,
    # transcript and (optionally) token / phone sequences.
    parser = argparse.ArgumentParser()
    parser.add_argument('--feat', type=str)
    parser.add_argument('--num_frames', type=str)
    parser.add_argument('--trans', type=str)
    parser.add_argument('--tokens', type=str)
    parser.add_argument('--phones', type=str, default=None)
    parser.add_argument('--output', type=str)
    args = parser.parse_args()
    # uttid -> feature file path
    uttid2feat = {}
    with open(args.feat) as f:
        for line in f:
            uttid, feat = line.strip().split()
            uttid2feat[uttid] = feat
    # uttid -> number of frames
    uttid2num_frames = {}
    with open(args.num_frames) as f:
        for line in f:
            uttid, num_frames = line.strip().split()
            uttid2num_frames[uttid] = int(num_frames)
    # uttid -> transcript; the bare except skips lines without a transcript.
    uttid2trans = {}
    with open(args.trans) as f:
        for line in f:
            try:
                uttid, trans = line.strip().split(maxsplit=1)
                uttid2trans[uttid] = trans
            except:
                pass
    if args.tokens:
        uttid2tokens = {}
        with open(args.tokens) as f:
            for line in f:
                try:
                    uttid, tokens = line.strip().split(maxsplit=1)
                    uttid2tokens[uttid] = tokens
                except:
                    pass
    if args.phones:
        uttid2phones = {}
        with open(args.phones) as f:
            for line in f:
                try:
                    uttid, phones = line.strip().split(maxsplit=1)
                    uttid2phones[uttid] = phones
                except:
                    pass
    samples = []
    for i, uttid in enumerate(uttid2feat.keys()):
        try:
            sample = {}
            sample['uttid'] = uttid
            sample['feat'] = uttid2feat[uttid]
            sample['feat_length'] = uttid2num_frames[uttid]
            if args.tokens:
                sample['tokens'] = uttid2tokens[uttid]
                sample['token_length'] = len(sample['tokens'].split())
            else:
                # Fall back to space-separated characters of the transcript;
                # token_length then counts characters (incl. the spaces).
                sample['tokens'] = ' '.join(uttid2trans[uttid])
                sample['token_length'] = len(sample['tokens'])
            if args.phones:
                sample['phones'] = uttid2phones[uttid]
                sample['phone_length'] = len(sample['phones'].split())
            sample['trans'] = uttid2trans[uttid]
        except:
            # Any missing field for this utterance: drop it entirely.
            print('skip', uttid)
            continue
        samples.append(sample)
    # NOTE(review): raises NameError on ``i`` if uttid2feat is empty -- confirm.
    print('saved {}/{} samples'.format(len(samples), i+1))
    jsonstring = json.dumps(samples, indent=2, ensure_ascii=False)
    with open(args.output, 'w') as fw:
        fw.write(jsonstring)
|
import json
import os
import pickle
import sys
import model
# Keep track of the last loaded version of each channel
MEMORY = 'pickle.db'
try:
    with open(MEMORY, 'rb') as f:
        memory = pickle.load(f)
except IOError:
    # First run: no state file yet.
    # NOTE(review): a corrupt pickle raises a non-IOError and is not caught.
    memory = {}
def get_last_loaded(channel):
    """Return the last loaded date string for *channel* (None if never loaded)."""
    return memory.get(channel)
def set_last_loaded(channel, last_loaded):
    """Record *last_loaded* (date string) for *channel* in the in-memory state."""
    memory[channel] = last_loaded
def save_memory():
    """Persist the per-channel load state to the pickle file."""
    with open(MEMORY, 'wb') as f:
        pickle.dump(memory, f)
# Single module-wide DB session used by parse_file below.
session = model.get_session()
def get_channel_name_and_date(filename):
    """Derive (channel, date) from a path like ``.../<channel>/<date>.json``."""
    parent, leaf = os.path.split(filename)
    channel = os.path.basename(parent)
    date = leaf.split('.json')[0]
    return channel, date
def parse_file(filename):
    """Load one day's Slack JSON export and update word-transition counts.

    For each human message, increments WordEntry rows keyed by
    (user, word_prev, word_next) for both single-word and two-word
    prefixes. Commits once per file. Python 2 module (xrange).
    """
    with open(filename, 'rb') as f:
        data = json.loads(f.read())
    for item in data:
        """
        {
            "type": "message",
            "user": "U02FVR4ND",
            "text": "isaac: we're heading to dinner around 7pm",
            "ts": "1409746135.000671"
        }
        """
        # Only messages
        if item['type'] != 'message':
            continue
        # Skip bots
        if 'bot_id' in item:
            continue
        # Ignore edits
        if 'subtype' in item:
            continue
        words = item['text'].split()
        # Single-word transitions, including start ('' -> first word)
        # and end (last word -> ''); words truncated to 254 chars.
        for i in xrange(len(words) + 1):
            user = item['user']
            word_prev = words[i - 1].lower()[:254] if i > 0 else ''
            word_next = words[i].lower()[:254] if i < len(words) else ''
            word_entry = session.query(model.WordEntry).filter(
                model.WordEntry.user==user,
                model.WordEntry.word_prev==word_prev,
                model.WordEntry.word_next==word_next).first()
            if not word_entry:
                word_entry = model.WordEntry()
                word_entry.user = user
                word_entry.word_prev = word_prev
                word_entry.word_next = word_next
                word_entry.count = 0
            word_entry.count += 1
            session.add(word_entry)
        #two word combos
        # NOTE(review): ``user`` here reuses the value assigned in the loop
        # above -- correct for the current item, but fragile.
        for i, word_next in enumerate(words):
            word_next = word_next.lower()[:254]
            if i<2:
                continue
            word_prev = '%s %s' % (words[i-2].lower()[:254], words[i-1].lower()[:254] )
            word_entry = session.query(model.WordEntry).filter(
                model.WordEntry.user==user,
                model.WordEntry.word_prev==word_prev,
                model.WordEntry.word_next==word_next).first()
            if not word_entry:
                word_entry = model.WordEntry()
                word_entry.user = user
                word_entry.word_prev = word_prev
                word_entry.word_next = word_next
                word_entry.count = 0
            word_entry.count += 1
            session.add(word_entry)
    session.commit()
if __name__ == '__main__':
    # Usage (Python 2): parse.py <export-file>... where files are
    # <channel>/<date>.json; skips files already loaded for the channel.
    filenames = sys.argv[1:]
    for filename in filenames:
        channel, date = get_channel_name_and_date(filename)
        last_loaded = get_last_loaded(channel)
        # ISO dates compare correctly as strings; in Python 2, None < any str.
        if last_loaded >= date:
            print "Skipping file: {}. Already loaded up to {} for channel {}"\
                .format(filename, last_loaded, channel)
        else:
            print "Parsing file: {}".format(filename)
            try:
                parse_file(filename)
            except Exception as e:
                # possible errors when previous parsing was interrupted
                print "Found error. Doing a session rollback"
                print e
                session.rollback()
                continue
            set_last_loaded(channel, date)
    save_memory()
|
'''
Created on 6 feb. 2021
@author: David
'''
from pyb import Pin
from micropython import schedule
class Stepper(object):
    '''
    Counts signals received on a pin and counts them.
    Also rises an event on a concrete step.
    '''
    def __init__(self, pin, pullMode=None, stepLevel=1):
        '''
        Constructor
        @param pin: Pin where the signal is received
        @param pullMode: (default: None) specifies if the pin has a (weak) pull resistor attached.
        @param stepLevel: value on the pin indicating a step (values: 0, 1)
        @see http://docs.micropython.org/en/latest/library/machine.Pin.html?highlight=pin#machine.Pin
        '''
        self._pin = Pin(pin, Pin.IN, pullMode)
        self._state = 0          # last observed pin level (for edge detection)
        self._counter = 0        # steps counted since start/reset
        self._stepTrigger = 0    # step number that fires the callback (0 = never)
        assert stepLevel in (0,1)
        self._stepLevel = stepLevel
        self._callback = None
        self._callbackArgs = None
    def setCallback(self, callback, args=None):
        '''
        Set the callback when the trigger step is reached
        @param callback: Callback method with at least one parameter: this object itself
        @param args: (optional) additional object passed to the callback
        @return: self
        '''
        self._callback = callback
        self._callbackArgs = args
        return self
    def setStepTrigger(self, stepTrigger):
        '''
        Set the step trigger
        @param stepTrigger: Positive integer. On this step the callback will be called
        @return: self
        '''
        self._stepTrigger = stepTrigger
        return self
    def resetCount(self):
        '''
        Set the step counter to zero
        @return: self
        '''
        self._counter = 0
        return self
    def startCounting(self):
        '''
        Starts to count steps
        @return: self
        '''
        # Snapshot the current level so the first IRQ is a real edge.
        self._state = self._pin.value()
        self._pin.irq(self._onStep, Pin.IRQ_RISING if self._stepLevel == 1 else Pin.IRQ_FALLING)
        return self
    def stopCounting(self):
        '''
        Stops to count steps
        '''
        # NOTE(review): passing None as the handler to disable the IRQ is
        # MicroPython-port specific -- confirm on the target board.
        self._pin.irq(None)
    def steps(self):
        '''
        @return: Number of steps after starting or reset
        '''
        return self._counter
    def _execCallback(self):
        '''
        Executes the callback
        '''
        if self._callback:
            if self._callbackArgs:
                self._callback(self, self._callbackArgs)
            else:
                self._callback(self)
    def _onStep(self, pin):
        '''
        Counts steps. It's used on the pin's IRQ
        @param pin: The pin which received the signal
        '''
        # Debounce: only count when the level actually changed to stepLevel.
        if pin.value() != self._state:
            self._state = pin.value()
            if self._state == self._stepLevel:
                self._counter+=1
                if self._counter == self._stepTrigger:
                    # Defer the callback out of IRQ context.
                    schedule(Stepper._execCallback, self)
|
#!/usr/bin/env python
# wxPython module
import wx
# Matplotlib Figure object
from matplotlib.figure import Figure
# Numpy functions for image creation
import numpy as np
# import the WxAgg FigureCanvas object, that binds Figure to
# WxAgg backend. In this case, this is also a wxPanel
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
# import the NavigationToolbar WxAgg widget
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
class MplCanvasFrame(wx.Frame):
    """Class to represent a Matplotlib Figure as a wxFrame"""
    def __init__(self):
        """Build the frame: a demo plot, its canvas, and a navigation toolbar."""
        wx.Frame.__init__(self, None, wx.ID_ANY,
            title='Matplotlib Figure with Navigation Toolbar', size=(600, 400))
        # usual Matplotlib functions
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        x = np.arange(0, 6, .01)
        y = np.sin(x**2)*np.exp(-x)  # damped oscillation demo curve
        self.axes.plot(x, y)
        # initialize the FigureCanvas, mapping the figure to
        # the WxAgg backend
        self.canvas = FigureCanvas(self, wx.ID_ANY, self.figure)
        # create an BoxSizer, to define the layout of our window
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # add the figure canvas
        self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.EXPAND)
        # instantiate the Navigation Toolbar
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        # needed to support Windows systems
        self.toolbar.Realize()
        # add it to the sizer
        self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        # explicitly show the toolbar
        self.toolbar.Show()
        # sets the window to have the given layout sizer
        self.SetSizer(self.sizer)
        # adapt sub-widget sizes to fit the window size,
        # following sizer specification
        self.Fit()
class MplApp(wx.App):
    """Define customized wxApp for MplCanvasFrame"""
    def OnInit(self):
        """wx startup hook: create and show the main frame."""
        # instantiate our custom wxFrame
        frame = MplCanvasFrame()
        # set it at the top-level window
        self.SetTopWindow(frame)
        # show it
        frame.Show(True)
        # return True to continue processing
        return True
# we instantiate our wxApp class
# NOTE(review): runs on import -- there is no __main__ guard.
mplapp = MplApp(False)
# and start the main loop
mplapp.MainLoop()
|
from django.shortcuts import render, render_to_response
from django.http.response import HttpResponseRedirect, HttpResponse
from django.template import loader
from django.template.context import RequestContext
# Create your views here.
def main(request):
    """Render the landing page (no context)."""
    return render_to_response('main.html')
def setos(request):
    """Store the chosen OS in the session, or show the selection form.

    GET with ?favorite_os=... saves the value and redirects to /showos;
    a plain GET renders the form.
    """
    #print('setos')
    if "favorite_os" in request.GET:
        request.session['f_os'] = request.GET['favorite_os']
        return HttpResponseRedirect('/showos')
    else:
        return render_to_response('setos.html')
def showos(request):
    """Show the OS previously stored in the session (Korean UI messages)."""
    context = {}
    if 'f_os' in request.session:
        context['f_os'] = request.session['f_os']
        context['message'] = '선택하신 운영체제는 %s'%request.session['f_os']
    else:
        context['f_os'] = None
        context['message'] = '운영체제를 선택하세요'
    template = loader.get_template('showos.html')
    data = RequestContext(request, context)
    # Session expires 5 seconds after this response.
    request.session.set_expiry(5)
    # NOTE(review): HttpResponse's second positional argument is content_type;
    # passing ``request`` there looks wrong -- probably meant
    # HttpResponse(template.render(context, request)). Confirm Django version.
    return HttpResponse(template.render(data), request)
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2012 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
class purchase_order_type(osv.osv):
    """Legacy OpenERP (6.x) model: a configurable Purchase Order Type that
    carries its own numbering sequence and belongs to one company."""
    _name = 'purchase.order.type'
    _description = 'Purchase Order Type'
    _columns = {
        'name':fields.char('Type', size=64, readonly=False, required=True, translate=True, select=True, help="Type of Purchase Order"),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
        'sequence_id': fields.many2one('ir.sequence', 'Sequence', readonly=False, required=True, help="Sequence to be used by the Purchase Order Type", domain=[("code", "=", "purchase.order.type")]),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id' : lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order.type', context=context),
    }
# Legacy OpenERP idiom: instantiating registers the model in the pool.
purchase_order_type()
|
# Generated by Django 2.2.8 on 2019-12-09 21:44
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename model artComments -> ArticleComments (auto-generated, schema-only)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('inventory', '0003_artcomments_articledetails'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='artComments',
            new_name='ArticleComments',
        ),
    ]
|
from dal import autocomplete
from .models import Location
from django.db.models import Q
class LocationAutocomplete(autocomplete.Select2QuerySetView):
    """django-autocomplete-light view: suggest Locations by city/state prefix."""
    def get_queryset(self):
        # NOTE(review): no authentication check -- all Locations are exposed.
        qs = Location.objects.all()
        if self.q:
            # Case-insensitive prefix match on either city or state.
            qs = qs.filter(Q(city__istartswith=self.q) | Q(state__istartswith=self.q))
        return qs
|
import cv2
import numpy as np
def contours_by_mat(src_bank):
    """Return the Canny edge map of *src_bank* (thresholds 100 / 200)."""
    edges = cv2.Canny(src_bank, 100, 200)
    return edges
def tresh_contours_by_length(contours, min_length, max_length):
    """Keep only contours whose point count lies in [min_length, max_length].

    Bug fix: the original iterated ``for i in contours`` (elements, not
    indices) but then indexed ``contours[i]``, which raises TypeError for
    any non-integer contour and indexes the wrong item otherwise.

    @param contours: iterable of sized contour objects
    @return: list of the contours that pass the length filter
    """
    threshed = []
    for contour in contours:
        if min_length <= len(contour) <= max_length:
            threshed.append(contour)
    return threshed
def rects_by_contours(contours):
    """Return one rect entry per contour.

    Bug fix: the original wrote ``for i in len(contours)``, which raises
    TypeError ('int' object is not iterable); the intended loop was over
    the contours themselves.
    NOTE(review): this still just passes each contour through -- presumably
    it should compute cv2.boundingRect(contour). Confirm intended behavior.
    """
    return list(contours)
def draw_rects(src_image, rects):
    """Draw each rectangle on a copy of *src_image* in a random color.

    Bug fixes: the original declaration ``def draw_rects(src_image,rects[])``
    is invalid Python syntax, drew into an empty list instead of an image,
    and used C++ OpenCV names (RNG, Scalar, rectangle, Rect.tl()/br()).

    @param src_image: BGR image (numpy array)
    @param rects: iterable of (x, y, w, h) tuples
                  # assumes cv2.boundingRect-style rects -- TODO confirm
    @return: a new image with the rectangles drawn (2 px, 8-connected)
    """
    rected = src_image.copy()
    rng = np.random.RandomState(12345)  # fixed seed mirrors the original RNG(12345)
    for x, y, w, h in rects:
        color = tuple(int(v) for v in rng.uniform(0, 255, 3))
        cv2.rectangle(rected, (x, y), (x + w, y + h), color, 2, 8, 0)
    return rected
def submatrixes_by_rects(src_bank,rects):
    """Extract the sub-matrices of *src_bank* covered by *rects*.

    NOTE(review): not implemented yet -- always returns an empty list.
    """
    subm=[]
    # TODO: implement (original comment was Russian for "finish writing")
    return subm
def detect_submatrixes(submatrixes=None):
    """Detect features in the given sub-matrices.

    Bug fixes: the original took no parameters yet the call site passes one
    argument, and it referenced an undefined name ``submatr`` (NameError).
    NOTE(review): detection logic is not implemented -- this currently
    passes the input through unchanged. TODO implement.

    @param submatrixes: list of sub-matrices (defaults to an empty list)
    @return: the (unmodified) list of sub-matrices
    """
    if submatrixes is None:
        submatrixes = []
    return submatrixes
# Main loop: grab frames from the default camera, run the (stub) detection
# pipeline on the grayscale frame, and display it until 'q' is pressed.
capture = cv2.VideoCapture(0)
while True:
    # Bug fix: read() returns (ret, frame); the original kept the tuple and
    # then used an undefined name ``frame``.
    ok, frame = capture.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Bug fix: contours_by_mat() was called with no argument; feed it the
    # grayscale frame and pass the original frame to the sub-matrix step.
    detect_submatrixes(
        submatrixes_by_rects(
            frame,
            rects_by_contours(contours_by_mat(gray)),
        )
    )
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Bug fix: the original released an undefined name ``cap``.
capture.release()
cv2.destroyAllWindows()
|
from . import sales_report_export
from . import sales_report_export_by_invoice
from . import stock_pick_item_report
from . import sales_report_outstock_item
from . import stock_picking_return
from . import customer_statement
from . import sales_delete_posted
|
import matplotlib.pyplot as plt
import numpy as np
import os
# Contains functions to plot/save information
def plot_perf(algs, save_dir=None, rm_mode=False):
    """Plot (or save) diagnostics for a list of finished bandit runs.

    Produces histograms of component sizes and action counts, mean and
    per-run regret curves, estimator values, V0 membership and (for DTS)
    the gamma prior/posterior densities.

    @param algs: list of finished algorithm objects (all the same class,
                 either 'DTS' or 'DUCB')
    @param save_dir: directory to save PNGs into; None shows interactively
    @param rm_mode: if True, allow overwriting existing files in save_dir
    """
    if save_dir is not None:
        try:
            os.mkdir(save_dir)
        except OSError:
            pass  # directory already exists
    def save_or_show(name, save_dir, rm_mode):
        # Save to <save_dir>/<name>.png, or show interactively if no dir.
        if save_dir is None:
            plt.show()
        else:
            path = save_dir + '/' + name + '.png'
            if not rm_mode and os.path.exists(path):
                raise ValueError('Directory already contains files, aborting')
            else:
                plt.savefig(path)
    plt.figure()
    # Histogram of number of connected components
    if algs[0].observe_full:
        hists = [np.histogram(alg.n_comps)[0] for alg in algs]
        bin_edges = [np.histogram(alg.n_comps)[1][:-1] for alg in algs]
        mean = np.mean(hists, axis=0)
        std = np.std(hists, axis=0)
        xs = np.mean(bin_edges, axis=0)
        plt.plot(xs, mean)
        # Shaded band: mean +/- 2 std across runs.
        plt.fill_between(xs, mean - 2 * std, mean + 2 * std, alpha=0.15)
        plt.xlabel('Size of the connected components')
        plt.ylabel('Number of times value is observed')
        plt.title('Histogram of number of connected components')
        save_or_show('component_sizes_histogram', save_dir, rm_mode)
    # Histogram of number of times action was taken
    Ns = np.array([alg.get_N() for alg in algs])
    mean = np.mean(Ns, axis=0)
    std = np.std(Ns, axis=0)
    xs = range(mean.shape[0])
    plt.xlabel('Action (node)')
    plt.ylabel('Number of times chosen')
    plt.plot(xs, mean)
    plt.fill_between(xs, mean - 2 * std, mean + 2 * std, alpha=0.15)
    plt.title('Histogram of number of times action was taken')
    save_or_show('N_histogram', save_dir, rm_mode)
    # Mean performances
    if algs[0].observe_full:
        f, axs = plt.subplots(2, 2)
        attr_list = ['degree_regret', 'alpha_degree_regret', 'real_regret', 'alpha_real_regret']
        names = ['degree', 'alpha_degree', 'real', 'alpha_real']
        ax_list = axs.flatten()
    else:
        f, ax_list = plt.subplots(1, 2)
        attr_list = ['degree_regret', 'alpha_degree_regret']
        names = ['degree', 'alpha_degree']
    data = [np.array([getattr(alg, attr) for alg in algs]) for attr in attr_list]
    for ax, dat, name in zip(ax_list, data, names):
        mean = np.mean(dat, axis=0)
        std = np.std(dat, axis=0)
        xs = range(mean.shape[0])
        ax.plot(xs, mean)
        ax.fill_between(xs, mean - 2 * std, mean + 2 * std, alpha=0.15)
        ax.set_xlabel('Time')
        ax.set_ylabel('Regret')
        ax.set_title(name)
    save_or_show('mean_performances', save_dir, rm_mode)
    # Each performances
    if algs[0].observe_full:
        f, axs = plt.subplots(2, 2)
        attr_list = ['degree_regret', 'alpha_degree_regret', 'real_regret', 'alpha_real_regret']
        names = ['degree', 'alpha_degree', 'real', 'alpha_real']
        ax_list = axs.flatten()
    else:
        f, ax_list = plt.subplots(1, 2)
        attr_list = ['degree_regret', 'alpha_degree_regret']
        names = ['degree', 'alpha_degree']
    data = [np.array([getattr(alg, attr) for alg in algs]) for attr in attr_list]
    for ax, dat, name in zip(ax_list, data, names):
        xs = range(dat.shape[1])
        # One curve per run, no averaging.
        for curv in dat:
            ax.plot(xs, curv)
        ax.set_xlabel('Time')
        ax.set_ylabel('Regret')
        ax.set_title(name)
    save_or_show('each_performances', save_dir, rm_mode)
    # Mean estimators:
    if type(algs[0]).__name__ == 'DTS':
        f, ax_list = plt.subplots(2, 1)
        attr_list = ['k', 'beta']
        names = ['K posterior', 'Beta posterior']
    elif type(algs[0]).__name__ == 'DUCB':
        f, ax_list = plt.subplots(1, 1)
        attr_list = ['mu']
        names = ['mu']
        ax_list = [ax_list]
    else:
        raise ValueError('Unknown class type: ' + type(algs[0]).__name__)
    data = np.zeros((len(attr_list), len(algs), algs[0].graph.n))
    count = np.zeros((algs[0].graph.n))
    # count[v] = number of runs whose V0 contains node v.
    for alg in algs:
        count[alg.V0] += 1
    for attr, dat in zip(attr_list, data):
        for i,alg in enumerate(algs):
            dat[i, alg.V0] = getattr(alg, attr)
    for ax, dat, name in zip(ax_list, data, names):
        # Mean over the runs that actually visited each node.
        mean = np.sum(dat, axis=0) / count
        std = np.std(dat, axis=0)
        xs = range(mean.shape[0])
        ax.plot(xs, mean)
        ax.fill_between(xs, mean - 2 * std, mean + 2 * std, alpha=0.15)
        ax.set_xlabel('Action (Node)')
        ax.set_ylabel('Value of estimator')
        ax.set_title(name)
    save_or_show('mean_estimators', save_dir, rm_mode)
    # Each estimators:
    if type(algs[0]).__name__ == 'DTS':
        f, ax_list = plt.subplots(2, 1)
        attr_list = ['k', 'beta']
        names = ['K posterior', 'Beta posterior']
    elif type(algs[0]).__name__ == 'DUCB':
        f, ax_list = plt.subplots(1, 1)
        attr_list = ['mu']
        names = ['mu']
        ax_list = [ax_list]
    else:
        raise ValueError('Unknown class type: ' + type(algs[0]).__name__)
    data = np.zeros((len(attr_list), len(algs), algs[0].graph.n))
    # data[:,:,:] = np.nan
    for attr, dat in zip(attr_list, data):
        for i,alg in enumerate(algs):
            dat[i, alg.V0] = getattr(alg, attr)
    for ax, dat, name in zip(ax_list, data, names):
        xs = range(dat.shape[1])
        for curve in dat:
            ax.plot(xs, curve, '.')
        ax.set_xlabel('Action (Node)')
        ax.set_ylabel('Value of estimator')
        ax.set_title(name)
    save_or_show('each_estimators', save_dir, rm_mode)
    # Each V0:
    f, axs = plt.subplots(1, 2)
    count = np.zeros((len(algs), algs[0].graph.n))
    for i,alg in enumerate(algs):
        count[i, alg.V0] = 1
    for cnt in count:
        axs[0].plot(range(count.shape[1]), cnt, '.')
    axs[0].set_title('Nodes in V0 for each rep')
    axs[0].set_xlabel('Action (Node)')
    axs[0].set_ylabel('1=in V0, 0=not in V0')
    axs[1].plot(range(count.shape[1]), count.sum(axis=0), '.')
    axs[1].set_xlabel('Action (Node)')
    axs[1].set_ylabel('Number of times node in V0')
    axs[1].set_title('Number of times node in V0 overall')
    save_or_show('count', save_dir, rm_mode)
    # Prior:
    if type(algs[0]).__name__ == 'DTS':
        from scipy.stats import gamma
        # Gamma(prior_k, rate=prior_beta) density around its mean k/b.
        k, b = algs[0].prior_k, algs[0].prior_beta
        xs = np.linspace(k/b-1, k/b+1, 1000)
        f, ax = plt.subplots(1,1)
        ax.plot(xs, gamma.pdf(xs, a=k, scale=1/b))
        ax.set_xlabel('Action (Node)')
        ax.set_ylabel('Prior density')
        save_or_show('prior', save_dir, rm_mode)
        k, b = algs[0].k, algs[0].beta
        xs = np.linspace(k / b - 1, k / b + 1, 1000)
        f, ax = plt.subplots(1, 1)
        ax.plot(xs, gamma.pdf(xs, a=k, scale=1 / b))
        ax.set_xlabel('Action (Node)')
        ax.set_ylabel('Posterior density')
        save_or_show('posterior', save_dir, rm_mode)
|
"""
https://www.python-lernen.de/kommentare.htm
Hier kommt auskommentierter Bereich
der über mehrere Zeilen sich zieht
"""
# hier kommt ein Kommentar |
# Demo input: 3 items, capacity 8.
n = 3
max_w = 8
w = [3, 4, 5]
v = [30, 50, 60]
"""
n, max_w = map(int, input().split())
w = []
v = []
for i in range(n):
    a, b = map(int, input().split())
    w.append(a)
    v.append(b)
"""
def knapsack(weights, values, capacity):
    """Solve the 0/1 knapsack problem by dynamic programming.

    dp[i][j] = best total value using the first i items with weight budget j.
    Fixes the original's off-by-one table allocation (n + 2 rows where
    n + 1 suffice) and returns dp[n][capacity] directly instead of
    scanning the last row with max().

    @param weights: item weights
    @param values: item values (same length as weights)
    @param capacity: maximum total weight
    @return: maximum achievable total value
    """
    count = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(count + 1)]
    for i in range(1, count + 1):
        for j in range(capacity + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i
            if j >= weights[i - 1]:
                # take item i if it fits and improves the value
                dp[i][j] = max(dp[i][j], dp[i - 1][j - weights[i - 1]] + values[i - 1])
    return dp[count][capacity]
print(knapsack(w, v, max_w))
import numpy as np
from scipy import ndimage as ndi
from skimage.transform import resize
from .utils import get_crop_pad_sequence
def resize_image(image, target_size):
    """Resize a channels-first image to the requested spatial size.

    Args:
        image (numpy.ndarray): Image of shape (C x H x W).
        target_size (tuple): Target spatial size (H, W).

    Returns:
        numpy.ndarray: Resized image of shape (C x target H x target W).
    """
    channels = image.shape[0]
    # The channel axis is kept as-is; only the spatial dimensions change.
    target_shape = (channels, target_size[0], target_size[1])
    return resize(image, target_shape, mode='constant')
def crop_image(image, target_size):
    """Symmetrically crop a channels-first image down to the target size.

    Args:
        image (numpy.ndarray): Image of shape (C x H x W).
        target_size (tuple): Target spatial size (H, W).

    Returns:
        numpy.ndarray: Cropped image of shape (C x target H x target W).
    """
    height_excess = image.shape[1] - target_size[0]
    width_excess = image.shape[2] - target_size[1]
    # get_crop_pad_sequence splits the excess between the two sides.
    top, right, bottom, left = get_crop_pad_sequence(height_excess, width_excess)
    rows = slice(top, image.shape[1] - bottom)
    cols = slice(left, image.shape[2] - right)
    return image[:, rows, cols]
def binarize(image, threshold, channel=1):
    """Threshold one channel of a channels-first image into a binary mask.

    Args:
        image (numpy.ndarray): Image of shape (C x H x W).
        threshold (float): Pixels strictly greater than this become 1.
        channel (int): Channel to binarize. Defaults to 1 (the original
            hard-coded channel), so existing callers are unaffected.

    Returns:
        numpy.ndarray: uint8 mask of shape (H x W) with values in {0, 1}.
    """
    image_binarized = (image[channel, :, :] > threshold).astype(np.uint8)
    return image_binarized
|
import unittest
import numpy as np
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import PCA
from PCAfold import PreProcessing
from PCAfold import KernelDensity
class TestManipulation(unittest.TestCase):
def test_center_scale_all_possible_C_and_D(self):
    """center_scale should accept every scaling option, with and without centering.

    Replaces 26 copy-pasted calls with a loop, and replaces the
    `except Exception: self.assertTrue(False)` pattern (which discarded the
    real failure cause) with `self.fail` carrying the exception message.
    """
    test_data_set = np.random.rand(100,20)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                try:
                    (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, scaling, nocenter=nocenter)
                except Exception as exc:
                    self.fail('center_scale(%r, nocenter=%r) raised: %s' % (scaling, nocenter, exc))
def test_center_scale_on_0D_variable(self):
    """center_scale should accept a flat (n,) variable for every scaling option.

    Loops over all scaling/centering combinations instead of 26 copy-pasted
    calls; uses `self.fail` so the actual exception message is reported.
    """
    test_0D_variable = np.random.rand(100,)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                try:
                    (X_cs, X_center, X_scale) = preprocess.center_scale(test_0D_variable, scaling, nocenter=nocenter)
                except Exception as exc:
                    self.fail('center_scale(%r, nocenter=%r) raised: %s' % (scaling, nocenter, exc))
def test_center_scale_on_1D_variable(self):
    """center_scale should accept an (n, 1) variable for every scaling option.

    Loops over all scaling/centering combinations instead of 26 copy-pasted
    calls; uses `self.fail` so the actual exception message is reported.
    """
    test_1D_variable = np.random.rand(100,1)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                try:
                    (X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, scaling, nocenter=nocenter)
                except Exception as exc:
                    self.fail('center_scale(%r, nocenter=%r) raised: %s' % (scaling, nocenter, exc))
def test_center_scale_MinusOneToOne(self):
    """'-1to1' scaling should map each variable's range onto [-1, 1].

    The original had six duplicated try blocks (min checks and max checks
    for three input shapes) that converted any error into a bare
    assertTrue(False); this loops over the shapes and checks both extremes
    of each variable, letting real exceptions surface with their traceback.
    """
    tolerance = 10**-10
    for shape in [(100, 10), (1000,), (2000, 1)]:
        with self.subTest(shape=shape):
            test_data_set = np.random.rand(*shape)
            n_variables = shape[1] if len(shape) == 2 else 1
            (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=False)
            for i in range(n_variables):
                # Each column must span exactly [-1, 1] up to round-off:
                self.assertTrue(abs(np.min(X_cs[:,i]) - (-1)) < tolerance)
                self.assertTrue(abs(np.max(X_cs[:,i]) - 1) < tolerance)
def test_center_scale_C_and_D_properties(self):
    """Known properties of the returned centers and scales hold.

    - 'std' with centering: centers equal the column means.
    - 'std' without centering: scales equal the column stds, centers are zero.
    - 'none': scales are all ones.

    The try/except-assertTrue(False) wrappers were removed: unittest already
    reports an unexpected exception as an error, with its full traceback.
    """
    test_data_set = np.random.rand(100,20)
    means = np.mean(test_data_set, axis=0)
    stds = np.std(test_data_set, axis=0)
    zeros = np.zeros((20,))
    ones = np.ones((20,))
    # Centering enabled: returned centers are exactly the column means.
    (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'std', nocenter=False)
    self.assertTrue((X_center == means).all())
    # Centering disabled: scales match the stds (to round-off), centers are zero.
    (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'std', nocenter=True)
    self.assertTrue((abs(X_scale - stds) < 10**(-14)).all())
    self.assertTrue((X_center == zeros).all())
    # 'none' scaling: scales are all ones.
    (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'none', nocenter=False)
    self.assertTrue((X_scale == ones).all())
def test_invert_center_scale(self):
    """invert_center_scale must reconstruct the original (200, 20) data.

    Every scaling option, with and without centering, must round-trip
    through center_scale -> invert_center_scale to within floating-point
    round-off. Replaces 26 copy-pasted five-line stanzas with a loop and
    lets real exceptions surface instead of masking them as
    assertTrue(False).
    """
    test_data_set = np.random.rand(200,20)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, scaling, nocenter=nocenter)
                X = preprocess.invert_center_scale(X_cs, X_center, X_scale)
                difference = abs(X - test_data_set)
                self.assertTrue((difference < 10**(-14)).all())
def test_invert_center_scale_on_0D_variable(self):
    """invert_center_scale must reconstruct a flat (200,) variable.

    Every scaling option, with and without centering, must round-trip to
    within floating-point round-off. The inverted result is raveled before
    comparison because invert_center_scale may return a column vector for
    a flat input. Replaces 26 copy-pasted stanzas with a loop.
    """
    test_data_set = np.random.rand(200,)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, scaling, nocenter=nocenter)
                X = preprocess.invert_center_scale(X_cs, X_center, X_scale)
                difference = abs(X.ravel() - test_data_set)
                self.assertTrue((difference < 10**(-14)).all())
def test_invert_center_scale_on_1D_variable(self):
    """invert_center_scale must reconstruct a (200, 1) variable.

    Every scaling option, with and without centering, must round-trip
    through center_scale -> invert_center_scale to within floating-point
    round-off. Replaces 26 copy-pasted stanzas with a loop and lets real
    exceptions surface with their traceback.
    """
    test_data_set = np.random.rand(200,1)
    scalings = ['none', 'auto', 'std', 'pareto', 'vast', 'range', '-1to1',
                'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
    for nocenter in (False, True):
        for scaling in scalings:
            with self.subTest(scaling=scaling, nocenter=nocenter):
                (X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, scaling, nocenter=nocenter)
                X = preprocess.invert_center_scale(X_cs, X_center, X_scale)
                difference = abs(X - test_data_set)
                self.assertTrue((difference < 10**(-14)).all())
def test_invert_center_scale_single_variable(self):
    """invert_center_scale accepts a single variable as a 1-D vector or a
    single-column 2-D array and applies X * scale + center elementwise."""
    # (input shape, center, scale, expected constant output value)
    cases = [
        ((200,), 0, 2, 2),
        ((200,), 1, 2, 3),
        ((200, 1), 0, 2, 2),
        ((200, 1), 1, 2, 3),
    ]
    for shape, center, scale, expected in cases:
        try:
            inverted = preprocess.invert_center_scale(np.ones(shape), center, scale)
            self.assertTrue((inverted == expected * np.ones((200,))).all())
        except Exception:
            self.assertTrue(False)
def test_remove_constant_vars(self):
    """remove_constant_vars should drop exactly-constant columns and columns
    that are constant up to machine precision around zero."""
    test_data_set = np.random.rand(100, 20)
    try:
        # Two exactly-constant columns injected at positions 3 and 16:
        augmented = np.hstack((test_data_set[:, 0:3], 2.4 * np.ones((100, 1)),
                               test_data_set[:, 3:15], -8.1 * np.ones((100, 1)),
                               test_data_set[:, 15::]))
        (X_removed, idx_removed, idx_retained) = preprocess.remove_constant_vars(augmented)
        self.assertTrue((X_removed == test_data_set).all())
        self.assertTrue(idx_removed == [3, 16])
        self.assertTrue(idx_retained == [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                         13, 14, 15, 17, 18, 19, 20, 21])
    except Exception:
        self.assertTrue(False)
    # A single near-zero "constant" column at position 3: first with
    # all-negative noise, then with mixed-sign noise.
    for mid_value, late_value in ((-10**(-13), -10**(-14)),
                                  (10**(-13), 10**(-14))):
        try:
            near_zero = -10**(-14)*np.ones((100, 1))
            near_zero[20:30, :] = mid_value
            near_zero[80:85, :] = late_value
            augmented = np.hstack((test_data_set[:, 0:3], near_zero,
                                   test_data_set[:, 3::]))
            (X_removed, idx_removed, idx_retained) = preprocess.remove_constant_vars(augmented)
            self.assertTrue((X_removed == test_data_set).all())
            self.assertTrue(idx_removed == [3])
            self.assertTrue(idx_retained == [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11,
                                             12, 13, 14, 15, 16, 17, 18, 19, 20])
        except Exception:
            self.assertTrue(False)
def test_PreProcessing(self):
    """PreProcessing should strip injected constant columns and expose the
    removed/retained indices and the centered-scaled matrix."""
    clean_data = np.random.rand(100, 20)
    # Inject two constant columns (at positions 3 and 16):
    data_with_constants = np.hstack((clean_data[:, 0:3], 2.4 * np.ones((100, 1)),
                                     clean_data[:, 3:15], -8.1 * np.ones((100, 1)),
                                     clean_data[:, 15::]))
    expected_removed = [3, 16]
    expected_retained = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17,
                         18, 19, 20, 21]
    try:
        preprocessed = PreProcessing(data_with_constants, scaling='none', nocenter=True)
        self.assertTrue((preprocessed.X_removed == clean_data).all())
        self.assertTrue(preprocessed.idx_removed == expected_removed)
        self.assertTrue(preprocessed.idx_retained == expected_retained)
        self.assertTrue(np.shape(preprocessed.X_cs) == (100, 20))
    except Exception:
        self.assertTrue(False)
def test_PreProcessing_not_allowed_attribute_setting(self):
    """Every public PreProcessing attribute is read-only; assignment must
    raise AttributeError."""
    pp = PreProcessing(np.random.rand(100, 20), scaling='auto')
    for attribute in ('X_cs', 'X_center', 'X_scale',
                      'X_removed', 'idx_removed', 'idx_retained'):
        with self.assertRaises(AttributeError):
            setattr(pp, attribute, 1)
def test_outliers_detection_allowed_calls(self):
    """Valid outlier_detection parameter combinations must succeed and never
    report a sample as both retained and outlying."""
    X = np.random.rand(100, 10)
    valid_calls = [
        dict(scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.6),
        dict(scaling='none', method='MULTIVARIATE TRIMMING', trimming_threshold=0.6),
        dict(scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.2),
        dict(scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.1),
        dict(scaling='auto', method='PC CLASSIFIER'),
        dict(scaling='range', method='PC CLASSIFIER'),
        dict(scaling='pareto', method='PC CLASSIFIER'),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=0),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=1),
        dict(scaling='auto', method='PC CLASSIFIER', quantile_threshold=0.9),
        dict(scaling='range', method='PC CLASSIFIER', quantile_threshold=0.99),
        dict(scaling='pareto', method='PC CLASSIFIER', quantile_threshold=0.8),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=0, quantile_threshold=0.5),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=1, quantile_threshold=0.9),
    ]
    for kwargs in valid_calls:
        try:
            (idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, **kwargs)
            # Retained and outlier index sets must be disjoint.
            self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
        except Exception:
            self.assertTrue(False)
def test_outliers_detection_not_allowed_calls(self):
    """Invalid outlier_detection parameter combinations must raise ValueError."""
    X = np.random.rand(100, 10)
    invalid_calls = [
        dict(scaling='scaling'),
        dict(scaling='none', method='method'),
        dict(scaling='none', verbose=1),
        dict(scaling='none', verbose=0),
        dict(scaling='none', trimming_threshold=-1),
        dict(scaling='none', trimming_threshold=1.1),
        dict(scaling='none', method='PC CLASSIFIER', quantile_threshold=1.1),
        dict(scaling='none', method='PC CLASSIFIER', quantile_threshold=-1),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=0.9, quantile_threshold=1.1),
        dict(scaling='none', method='PC CLASSIFIER', trimming_threshold=0.9, quantile_threshold=-1),
    ]
    for kwargs in invalid_calls:
        with self.assertRaises(ValueError):
            (idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, **kwargs)
def test_KernelDensity_allowed_calls(self):
    """KernelDensity accepts 1-D, multi-column and full-matrix conditioning
    variables and exposes X_weighted and weights."""
    X = np.random.rand(100, 20)
    for conditioning in (X[:, 1], X[:, 4:9], X[:, 0], X):
        try:
            kerneld = KernelDensity(X, conditioning)
        except Exception:
            self.assertTrue(False)
    # Attributes must be readable on the last constructed instance.
    try:
        kerneld.X_weighted
        kerneld.weights
    except Exception:
        self.assertTrue(False)
def test_KernelDensity_not_allowed_calls(self):
    """KernelDensity attributes are read-only, and conditioning variables with
    a mismatched number of observations must raise ValueError."""
    X = np.random.rand(100, 20)
    kerneld = KernelDensity(X, X[:, 1])
    for attribute in ('X_weighted', 'weights'):
        with self.assertRaises(AttributeError):
            setattr(kerneld, attribute, 1)
    for bad_conditioning in (X[20:30, 1], X[20:30, :]):
        with self.assertRaises(ValueError):
            kerneld = KernelDensity(X, bad_conditioning)
|
def includeme(config):
    """Pyramid `includeme` hook: register the application's static asset views.

    :param config: the Pyramid Configurator for this app.
    """
    # Package-relative static assets, cached by clients for one hour.
    config.add_static_view('static', 'static', cache_max_age=3600)
    # Built assets served from the build directory outside the package.
    config.add_static_view('dist', '../build/dist')
    config.add_static_view('build', 'build')
    # NOTE(review): 'app1/dist' points at the same directory as 'dist' above —
    # confirm the duplicate mapping is intentional.
    config.add_static_view('app1/dist', '../build/dist')
|
# PC served as a server
import socket
import ConnectWiFi
import MotorControl
import machine
import time
import jy901
from amg88xx import AMG88XX
from machine import Pin, I2C, UART
# Indicator Blue LED for successful connection
led = Pin(2, Pin.OUT)
led.on()
# Shared I2C bus for the thermal camera, IMU and proximity sensor.
i2c = I2C(scl=Pin(5), sda=Pin(4))
thermal = AMG88XX(i2c)  # 8x8 thermal camera driver on the I2C bus
pxmaddr = 0x52  # I2C address of the proximity sensor
port = 10000  # TCP port the PC-side server listens on
# Your PC ip
host = '192.168.1.2'
def imu():
    """Read the JY901 IMU, print acceleration/gyro/angle, and return the
    third angle component (index 2 — presumably yaw/heading; confirm against
    sensor mounting)."""
    head_gyro = jy901.Gyro(i2c)
    print(" Acc: " + repr(head_gyro.get_acc()) + "\n", "Gyro: " + repr(head_gyro.get_gyro()) + "\n",
          "Angle:" + repr(head_gyro.get_angle()) + "\n")
    # Brief pause between the debug read above and the value actually returned.
    time.sleep(0.5)
    ang = head_gyro.get_angle()[2]
    return ang
def proximity():
    """Trigger a proximity measurement over I2C and return the distance in mm.

    Writes the zero command/register byte to the sensor, waits for the
    measurement, then reads back a 2-byte big-endian distance value.
    """
    # Bug fix: the original wrote b'0x00', which is the four ASCII bytes
    # "0", "x", "0", "0" — not the single zero byte intended here.
    i2c.writeto(pxmaddr, b'\x00')
    time.sleep(0.5)  # allow the sensor time to complete the measurement
    pxd = i2c.readfrom(pxmaddr, 2)
    pxd = int.from_bytes(pxd, "big")
    print(str(pxd) + "mm")
    time.sleep(1)
    return pxd
def thermalcam():
    """Refresh the AMG88XX thermal camera and print its 8x8 frame as a grid
    of right-aligned integer readings."""
    thermal.refresh()
    for row in range(8):
        print()
        for col in range(8):
            # Each pixel printed in a fixed 4-character column, no newline.
            print('{:4d}'.format(thermal[row, col]), end='')
    print("\n")
    time.sleep(1)
# Connect to WiFi, then to the PC-side TCP server; the LED signals progress.
ConnectWiFi.connect()
rbSocket = socket.socket()
print("Finding...")
rbSocket.connect((host, port))
led.off()
print("TCP connected")
# Command loop: read one byte at a time from the PC and dispatch to the
# matching motor action; most commands echo an acknowledgement first.
while True:
    data = rbSocket.recv(1)
    if len(data) == 0:
        # Zero-length read means the peer closed the connection.
        rbSocket.sendall(b'No data received.')
        break
    led.on()
    decode_data = data.decode("utf8")
    if decode_data == 'w':
        rbSocket.send(b'Forward.')
        MotorControl.forward()
    elif decode_data == 'a':
        rbSocket.send(b'Turn left')
        MotorControl.turnleft()
    elif decode_data == 'd':
        # NOTE(review): 'd' and 's' send no acknowledgement, unlike the other
        # commands — confirm this asymmetry is intentional.
        MotorControl.turnright()
    elif decode_data == 's':
        MotorControl.backward()
    elif decode_data == 't':
        rbSocket.send(b'Stop')
        MotorControl.stop()
    elif decode_data == 'f':
        rbSocket.send(b'forward offset')
        MotorControl.forward_offset()
    elif decode_data == 'b':
        rbSocket.send(b'Backward offset')
        MotorControl.back_offset()
    elif decode_data == 'Successful Connection!':
        # NOTE(review): decode_data is a single character (recv(1)), so this
        # multi-character comparison can never match — confirm.
        rbSocket.send(b'Successful Connection!')
    elif decode_data == 'm':
        rbSocket.send(b'Meet enemy. Start attack!')
        MotorControl.arm()
    # else:
    #     rbSocket.sendall(b'Invalid input. Please enter again!')
|
def compute_bonus(sales):
    """Return the bonus for a sales amount: 15% at $1000 or more, else 10%."""
    rate = 0.15 if sales >= 1000 else 0.1
    return sales * rate


def main():
    """Prompt repeatedly for sales amounts and print the bonus; a negative
    amount ends the loop."""
    sales = float(input("Enter sales: $"))
    while sales >= 0:
        # Fixed grammar in the user-facing message: "You're" -> "Your".
        print("Your bonus is: {}".format(compute_bonus(sales)))
        sales = float(input("Enter sales: $"))


# Guarded so the module can be imported (and compute_bonus tested) without
# prompting; behavior when run as a script is unchanged.
if __name__ == '__main__':
    main()
from functions import *
if __name__ == '__main__':
    print("_" * 80)
    print("Bienvenid@ a DCCAirlines")
    try:
        # Main menu loop — the only loop allowed in this main per the
        # assignment constraints (see translated notes at the bottom).
        while True:
            print("_" * 80)
            print("Menú principal")
            print("-" * 80)
            print("¿Qué acción desea realizar?")
            foreach(lambda i: print(f"[{i[0]}] {i[1]}"), enumerate([
                "Abrir archivo de consultas", "Ingresar consulta",
                "Abrir historial", "Configurables"]))
            entrada = revisar_input(input(), (lambda i: True if i in
                                              list("0123") else False))
            if entrada == "0":
                # Option 0: load queries from a file and display selected ones.
                print("_" * 80)
                print(
                    "A continuación puede ingresar la dirección de consultas:")
                archivo_ruta = revisar_input(input("Ruta del archivo: "),
                                             (lambda i: True
                                              if i == "" or os.path.exists(i)
                                              else False), "Ruta del archivo: ")
                if archivo_ruta == "":
                    archivo_ruta = "queries.txt"
                entrada = list(obtener_inputs(archivo_ruta))
                entradas_mostrar = enumerate(entrada)
                foreach(lambda i: print(f"{formato_corchetes(i[0], 4)} {i[1]}"),
                        entradas_mostrar)
                print("-" * 80)
                print(
                    "A continuación puede seleccionar las consultas a visualizar")
                print(
                    "En el formato: consulta1, consulta2, consulta3,"
                    "... (indicando el numero respectivo)")
                print("[Enter] para volver :D")
                consultas = revisar_input(input(),
                                          lambda i: True if es_lista_de_numeros(
                                              i, len(entrada)) else False)
                lista_consultas = map(int, consultas.split(","))
                foreach(lambda i: imprimir_output(interpretar_input(entrada[i]),
                                                  f"Consulta {i}: "
                                                  f"{entrada[i]}"),
                        lista_consultas)
            elif entrada == "1":
                # Option 1: enter a single query interactively and store it.
                ruta = "./output.txt"
                print("A continuación puede ingresar una consulta")
                consulta = input("Consulta: ")
                if consulta == "":
                    pass
                else:
                    guardar_consulta(parse(consulta))
            elif entrada == "2":
                # Option 2: display the history and optionally delete entries.
                ruta = "./output.txt"
                datos = leer_output_txt(ruta)
                lista_datos = [imprimir_y_guardar(i, [], True) for i in datos]
                print("¿Desea eliminar parte del historial?")
                foreach(lambda i: print(f"[{i[0]}] {i[1]}"),
                        enumerate(["Borrar todo",
                                   "Borrar lista de datos",
                                   "o [Enter] Continuar"]))
                respuesta = revisar_input(input(),
                                          lambda i: True if i in ["0", "1", "2",
                                                                  ""] else False)
                if respuesta == "0":
                    print("¿Está seguro que desea vaciar todo el historial?")
                    foreach(lambda i: print(f"[{i[0]}] {i[1]}"),
                            enumerate(["Borrar todo",
                                       "No borrar"]))
                    si_no = revisar_input(input(),
                                          lambda i: True if i in ["0", "1",
                                                                  ""] else False)
                    if si_no == "0":
                        reiniciar_output_txt(ruta)
                    elif si_no in ["1", ""]:
                        print("Los datos no fueron borrados (:")
                elif respuesta == "1":
                    print("A continuación, puede ingresar una lista"
                          "de datos a borrar")
                    print("De la forma numero_consulta1, numero_consulta2, ...")
                    print("[Enter] para volver al Menú")
                    consultas = revisar_input(input(),
                                              lambda i: True if es_lista_de_numeros(
                                                  i, len(lista_datos)) else False)
                    lista_consultas = map(int, consultas.split(","))
                    # NOTE(review): popping while iterating shifts later
                    # indices; confirm multi-entry deletion behaves as intended.
                    foreach(lambda i: lista_datos.pop(i - 1), lista_consultas)
                    reiniciar_output_txt(ruta)
                    with open(ruta, encoding="UTF-8", mode="a") as archivo:
                        num = count(start=0)
                        foreach(lambda i: escribir_en_archivo(archivo, i,
                                                              next(num)),
                                lista_datos)
            # (translated from the original Spanish comments)
            # fill in this part with calls to your functions
            # the restriction on using for/while/etc anywhere inside this
            # main still applies, except for the loop above, so no further
            # loops may be added
            # a menu-exit option is not required
    # Bug fix: the original read `except KeyboardInterrupt():`, which
    # instantiates the exception — an except clause requires the exception
    # class itself, so Ctrl-C would have raised a TypeError instead of
    # exiting cleanly.
    except KeyboardInterrupt:
        exit()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
from lib.rest_framework_extend.common import gen_random_str
class User(AbstractUser):
    """Auth user extended with a per-user API secret."""
    # Secret token used to authenticate API requests; empty until generated.
    api_secret = models.CharField(
        u'Api Secret', max_length=64, null=True, blank=True)

    def generate_api_secret(self):
        """Create and persist an API secret if the user has none yet."""
        if not self.api_secret:
            self.api_secret = gen_random_str(32, ascii_uppercase=True)
            self.save()
|
import json
json_filename = '3'
csv_filename = '7'

# Load the irrigation readings exported as JSON.
with open(f'irrigation_data_as_json/{json_filename}.json', 'r') as f:
    datastore = json.load(f)

# Re-emit the readings as CSV. Each value under production.data is written
# through verbatim, so it is assumed to already be a comma-separated row
# matching the header below — TODO confirm against the JSON export format.
with open(f"data/{csv_filename}.csv", 'w') as f:
    f.write('time,soil_moisture,temperature,air_humidity' + '\n')
    # Iterate the dict's values directly instead of re-indexing by key
    # (same iteration order, one lookup per entry instead of two).
    for row in datastore['production']['data'].values():
        f.write(row + '\n')
|
"""
Read a length in metres and display it converted to other metric units
(km, hm, dam, dm, cm and mm).
"""
metro = float(input('Coloque o valor em metros: '))
# Divisions yield the larger units, multiplications the smaller ones.
km, hm, dam = metro / 1000, metro / 100, metro / 10
dm, cm, mm = metro * 10, metro * 100, metro * 1000
print(f'{metro} metros medirá:\n{km}km, {hm} hm , {dam} dam\n{dm} dm, {cm} cm e {mm} mm')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefine the choices on
    `Study.status` in the reviews app."""

    # Must be applied after the 0050 merge migration.
    dependencies = [
        ('reviews', '0050_merge'),
    ]

    operations = [
        migrations.AlterField(
            model_name='study',
            name='status',
            # Default 'U' = 'Não classificado'; choice labels mix Portuguese
            # and English as written in the model at the time of generation.
            field=models.CharField(default='U', max_length=1, choices=[('U', 'N\xe3o classificado'), ('R', 'Rejeitado'), ('A', 'Accepted'), ('D', 'Duplicated')]),
        ),
    ]
|
# Print a right-aligned triangle: row i (1..9) is zero-padded to width 9
# and ends with the digit i repeated i times.
for i in range(1, 10):
    print("0" * (9 - i) + str(i) * i)
|
import imageio
import numpy as np
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import accuracy_score
def assess_accuracy(gt_changed, gt_unchanged, changed_map, multi_class=False):
    """
    assess accuracy of changed map based on ground truth
    :param gt_changed: changed ground truth
    :param gt_unchanged: unchanged ground truth
    :param changed_map: changed map
    :param multi_class: if True, score per class ('soil'/'city'/'unchanged')
        from RGB colour-coded maps; if False, score a binary map where
        pixel value 255 marks the positive class
    :return: multi_class=True  -> (confusion matrix, overall accuracy,
             per-class accuracy, Cohen's kappa);
             multi_class=False -> (confusion matrix, overall accuracy,
             Cohen's kappa)
    """
    cm = []  # predicted labels, one per pixel
    gt = []  # ground-truth labels, one per pixel
    if multi_class:
        height, width, channel = gt_changed.shape
        for i in range(0, height):
            for j in range(0, width):
                # Colour coding: yellow (255,255,0)=soil, red (255,0,0)=city;
                # the water class is currently disabled.
                if (changed_map[i, j] == np.array([255, 255, 0])).all():
                    cm.append('soil')
                # elif (changed_map[i, j] == np.array([0, 0, 255])).all():
                #     cm.append('water')
                elif (changed_map[i, j] == np.array([255, 0, 0])).all():
                    cm.append('city')
                else:
                    cm.append('unchanged')
                if (gt_changed[i, j] == np.array([255, 255, 0])).all():
                    gt.append('soil')
                elif (gt_changed[i, j] == np.array([255, 0, 0])).all():
                    gt.append('city')
                # elif (gt_changed[i, j] == np.array([0, 0, 255])).all():
                #     gt.append('water')
                elif (gt_unchanged[i, j] == np.array([255, 255, 255])).all():
                    gt.append('unchanged')
                else:
                    # Pixel marked in neither ground-truth image.
                    gt.append('undefined')
        conf_mat = confusion_matrix(y_true=gt, y_pred=cm,
                                    labels=['soil', 'city', 'unchanged'])
        kappa_co = cohen_kappa_score(y1=gt, y2=cm,
                                     labels=['soil', 'city', 'unchanged'])
        # Per-class accuracy (diagonal over row sums) and overall accuracy.
        aa = conf_mat.diagonal() / np.sum(conf_mat, axis=1)
        oa = np.sum(conf_mat.diagonal()) / np.sum(conf_mat)
        return conf_mat, oa, aa, kappa_co
    else:
        height, width = changed_map.shape
        # Flatten everything to 1-D and encode classes numerically:
        # prediction: 2 = changed (pixel 255), 1 = unchanged;
        # ground truth additionally uses 0 = marked in neither image.
        changed_map = np.reshape(changed_map, (-1,))
        gt_changed = np.reshape(gt_changed, (-1,))
        gt_unchanged = np.reshape(gt_unchanged, (-1,))
        cm = np.ones((height * width,))
        cm[changed_map == 255] = 2
        gt = np.zeros((height * width,))
        gt[gt_changed == 255] = 2
        gt[gt_unchanged == 255] = 1
        conf_mat = confusion_matrix(y_true=gt, y_pred=cm,
                                    labels=[1, 2])  # ['soil', 'water', 'city', 'unchanged'])
        kappa_co = cohen_kappa_score(y1=gt, y2=cm,
                                     labels=[1, 2])  # ['soil', 'water', 'city', 'unchanged'])
        oa = np.sum(conf_mat.diagonal()) / np.sum(conf_mat)
        return conf_mat, oa, kappa_co
if __name__ == '__main__':
    # val_func()
    # Binary change-detection evaluation: red channel of the "changed" image
    # serves as the changed ground truth.
    ground_truth_changed = imageio.imread('./Adata/GF_2_2/change.bmp')[:, :, 0]
    ground_truth_unchanged = imageio.imread('./Adata/GF_2_2/unchanged.bmp')  # [:, :, 1]
    cm_path = 'PCANet/compare/SAE_binary.bmp'
    changed_map = imageio.imread(cm_path)
    conf_mat, oa, kappa_co = assess_accuracy(ground_truth_changed, ground_truth_unchanged, changed_map,
                                             multi_class=False)
    # Swap the diagonal and off-diagonal entries of the 2x2 confusion matrix
    # (reverses which label is treated as the positive class for reporting).
    conf_mat_2 = conf_mat.copy()
    conf_mat_2[1, 1] = conf_mat[0, 0]
    conf_mat_2[0, 0] = conf_mat[1, 1]
    conf_mat_2[1, 0] = conf_mat[0, 1]
    conf_mat_2[0, 1] = conf_mat[1, 0]
    print(conf_mat)
    # Total misclassified pixels (sum of off-diagonal entries).
    print(conf_mat_2[1, 0] + conf_mat_2[0, 1])
    print(oa)
    print(kappa_co)
|
from collections import defaultdict
class Solution(object):
    """Sparse matrix multiplication (LeetCode 311)."""

    def sparsify(self, mat):
        """Return {row: {col: value}} holding only the non-zero entries of mat."""
        sp_dict = defaultdict(dict)
        for i in range(len(mat)):
            for j in range(len(mat[0])):
                if mat[i][j] != 0:
                    sp_dict[i][j] = mat[i][j]
        return sp_dict

    def multiply(self, A, B):
        """
        :type A: List[List[int]]
        :type B: List[List[int]]
        :rtype: List[List[int]]

        Multiply A (m x n) by B (n x p), iterating only non-zero entries so
        the work is proportional to the number of non-zero term pairs.
        """
        # Degenerate/empty inputs yield an empty result, as in the original.
        if not A or not A[0] or not B or not B[0]:
            return [[]]
        rowa = len(A)
        colb = len(B[0])
        table_a = self.sparsify(A)
        table_b = self.sparsify(B)
        ret = [[0] * colb for i in range(rowa)]
        for i in table_a:
            for j in table_a[i]:
                if j not in table_b:
                    continue
                for k in table_b[j]:
                    # Bug fix: the original accumulated table_b[j][k] alone,
                    # dropping the A factor. The product term for C[i][k]
                    # is A[i][j] * B[j][k].
                    ret[i][k] += table_a[i][j] * table_b[j][k]
        return ret
if __name__ == "__main__":
A = [
[1, 0, 0],
[-1, 0, 3]
]
B = [
[7, 0, 0],
[0, 0, 0],
[0, 0, 1]
]
print(Solution().multiply(A, B))
|
#!/usr/bin/python
# NOTE: Python 2 only — the `commands` module and the print statement were
# removed in Python 3 (use `subprocess` there instead).
import commands
import os
# os.system runs the command and returns its exit status; the command's
# output goes straight to this process's stdout.
t2 = os.system('ps -ef')
# commands.getoutput captures the command's stdout as a string.
title = commands.getoutput('pwd')
# os.popen returns a file-like pipe object; printing it shows the object
# itself, not the directory listing (call .read() for the contents).
t3 = os.popen('ls')
print t3
|
#http://www.eeweb.com/project/mohd_kashif/controlling-of-stepper-motors-using-raspberry-pi17
# Python 2 stepper-motor driver for Raspberry Pi GPIO (half/full-step sequences).
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
#Define GPIO signals to use
Pins = 18,22,24,26
# NOTE(review): `Pins` above is never used and lists different numbers than
# `StepPins` below — confirm which set matches the actual wiring.
#GPIO24,GPIO25,GPIO8,GPIO7
StepPins = [17,22,23,24]
#Set all pins as output for pin in StepPins:
for pin in StepPins:
    print "Setup pins"
    GPIO.setup(pin,GPIO.OUT)
    GPIO.output(pin, False)
StepCounter = 0
WaitTime = 0.05  # delay between steps; smaller = faster rotation
#Define simple sequence
StepCount1 = 4
Seq1 = []
# Python 2 range() returns a list, so index assignment below is valid.
Seq1 = range(0, StepCount1)
Seq1[0] = [1,0,0,0]
Seq1[1] = [0,1,0,0]
Seq1[2] = [0,0,1,0]
Seq1[3] = [0,0,0,1]
#Define advanced sequence as shown in manufacturers datasheet
StepCount2 = 8
Seq2 = []
Seq2 = range(0, StepCount2)
Seq2[0] = [1,0,0,0]
Seq2[1] = [1,1,0,0]
Seq2[2] = [0,1,0,0]
Seq2[3] = [0,1,1,0]
Seq2[4] = [0,0,1,0]
Seq2[5] = [0,0,1,1]
Seq2[6] = [0,0,0,1]
Seq2[7] = [1,0,0,1]
#Choose a sequence to use
Seq = Seq2
StepCount = StepCount2
#Start main loop
while 1==1:
    # Drive each coil according to the current row of the step sequence.
    for pin in range(0, 4):
        xpin = StepPins[pin]
        if Seq[StepCounter][pin]!=0:
            print " Step %i Enable %i" %(StepCounter,xpin)
            GPIO.output(xpin, True)
        else:
            GPIO.output(xpin, False)
    StepCounter += 1
    # If we reach the end of the sequence
    # start again
    if (StepCounter==StepCount):
        StepCounter = 0
    # NOTE(review): StepCounter only ever increments or resets to 0 above,
    # so this negative check can never fire.
    if (StepCounter<0):
        StepCounter = StepCount
    # Wait before moving on
    time.sleep(WaitTime)
#The 4 step sequence is faster but the torque is lower.
|
import unittest
from codebase.practice.MaxOfThree import MaxOfThree
class MaxOfThreeUnitTest(unittest.TestCase):
    """Unit tests for MaxOfThree.find_max."""

    def test_find_max_between_three_numbers_when_three_number_is_different(self):
        """find_max returns the largest of three distinct numbers."""
        result = MaxOfThree().find_max(11, 5, 29)
        self.assertEqual(29, result)
|
# -*- coding: utf-8 -*-
__author__ = 'Peng'
from bs4 import BeautifulSoup,Comment
import urllib2
from urllib2 import urlopen,HTTPError
import MySQLdb
import json
import datetime
import logging
import sys
import re
import time
import random
def getSinaArticle(url):
    """Fetch the page at `url` with browser-like headers and print the
    prettified HTML (Python 2 / urllib2); HTTP errors are printed, not raised."""
    # Build request headers so the site serves the normal browser page
    headers={"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                          "(KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36",
             "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
             "Accept-Language":"zh-CN,zh;q=0.8,en;q=0.6",
             "Referer":"https://www.baidu.com/s?ie=UTF-8&wd=sina"}
    # Open the page and parse it with lxml
    try:
        request = urllib2.Request(url,headers=headers)
        html = urlopen(request)
        soup = BeautifulSoup(html.read(),"lxml")
        print soup.prettify()
    except HTTPError as e:
        print(e)


# Demo: fetch and print the Sina News front page.
url="http://news.sina.com.cn/"
getSinaArticle(url)
import datetime
import re
import json
from datetime import timedelta
from difflib import unified_diff
from pathlib import Path
from urllib.parse import urlencode
import mock
import pytest
from bs4 import BeautifulSoup
from django.utils import timezone
from capapi.tests.helpers import check_response, is_cached
from capdb.tasks import update_elasticsearch_from_queue, CaseAnalysis
from capweb.helpers import reverse
from test_data.test_fixtures.helpers import set_case_text
@pytest.mark.django_db(databases=['capdb'])
def test_home(client, django_assert_num_queries, reporter):
    """ Test / """
    # The home page must render within the expected query budget and
    # mention the fixture reporter by full name.
    with django_assert_num_queries(select=2):
        response = client.get(reverse('cite_home', host='cite'))
    check_response(response, content_includes=reporter.full_name)
@pytest.mark.django_db(databases=['capdb'])
def test_series(client, django_assert_num_queries, volume_metadata_factory):
    """ Test /series/ """
    # make sure we correctly handle multiple reporters with same slug
    volume_1, volume_2 = [volume_metadata_factory(
        reporter__short_name='Mass.',
        reporter__short_name_slug='mass',
    ) for _ in range(2)]
    response = client.get(reverse('series', args=['mass'], host='cite'))
    check_response(response)
    content = response.content.decode()
    # both volumes (and both reporters) should appear on the series page
    for vol in (volume_1, volume_2):
        assert vol.volume_number in content
        assert vol.reporter.full_name in content
    # make sure we redirect if series is not slugified
    response = client.get(reverse('series', args=['Mass.'], host='cite'))
    check_response(response, status_code=302)
    response = client.get(reverse('series', args=['mass'], host='cite'), follow=True)
    check_response(response, status_code=200)
    # make sure we get 404 if bad series input
    response = client.get(reverse('series', args=['*'], host='cite'))
    check_response(response, status_code=404)
@pytest.mark.django_db(databases=['capdb'])
def test_series_as_citation(client):
    """ Test /series/ redirect behavior when the series string is a citation """
    # if series looks like a full case citation, redirect to case
    response = client.get(reverse('series', args=['1 Mass. 1'], host='cite'))
    check_response(response, redirect_to=reverse('citation', args=['mass', '1', '1'], host='cite'))
    # if series looks like a full statutory citation, redirect to statute page
    response = client.get(reverse('series', args=['11 U.S.C. § 550'], host='cite'))
    check_response(response, redirect_to=reverse('citations', host='cite') + '?' + urlencode({'q': '11 U.S.C. § 550'}))
@pytest.mark.django_db(databases=['capdb'])
def test_volume(client, django_assert_num_queries, case_factory, elasticsearch):
    """ Test /series/volume/ """
    # three cases in distinct reporters that share short name and volume number
    cases = [case_factory(
        volume__reporter__full_name='Massachusetts%s' % i,
        volume__reporter__short_name='Mass.',
        volume__reporter__short_name_slug='mass',
        volume__volume_number='1',
        volume__volume_number_slug='1',
    ) for i in range(3)]
    with django_assert_num_queries(select=1):
        response = client.get(reverse('volume', args=['mass', '1'], host='cite'))
    check_response(response)
    content = response.content.decode()
    # every matching case should be listed with its reporter and citation
    for case in cases:
        assert case.reporter.full_name in content
        assert case.citations.first().cite in content
    # make sure we redirect if reporter name / series is not slugified
    response = client.get(reverse('volume', args=['Mass.', '1'], host='cite'))
    check_response(response, status_code=302)
    response = client.get(reverse('volume', args=['Mass.', '1'], host='cite'), follow=True)
    check_response(response, status_code=200)
    # make sure we get 404 if bad volume input
    response = client.get(reverse('volume', args=['Mass.', '*'], host='cite'))
    check_response(response, status_code=404)
@pytest.mark.django_db(databases=['capdb'])
def test_case_not_found(client, django_assert_num_queries, elasticsearch):
    """ Test /series/volume/case/ not found """
    # an unknown citation should render the fallback search prompt
    with django_assert_num_queries(select=1):
        response = client.get(reverse('citation', args=['fake', '123', '456'], host='cite'))
    check_response(response, content_includes='Search for "123 Fake 456" in other databases')
@pytest.mark.django_db(databases=['capdb'])
def test_cases_multiple(client, django_assert_num_queries, case_factory, elasticsearch):
    """ Test /series/volume/case/ with multiple matching cases """
    # three whitelisted cases sharing the same official citation
    cases = [case_factory(
        jurisdiction__whitelisted=True,
        citations__type='official',
        citations__cite='23 Ill. App. 19',
        citations__normalized_cite='23illapp19'
    ) for i in range(3)]
    first_case = cases[0]
    # disambiguation page should work even if cases wrongly end up with same frontend_url
    assert set(c.frontend_url for c in cases) == {'/ill-app/23/19/'}
    # disambiguation page includes all case short names
    check_response(
        client.get(reverse('citation', args=['ill-app', '23', '19'], host='cite'), follow=True),
        content_includes=['Multiple cases match']+[c.name_abbreviation for c in cases],
        content_excludes=first_case.name,
    )
    # single case pages work with ID appended, even if not matching frontend_url
    check_response(
        client.get(reverse('citation', args=['ill-app', '23', '19', first_case.id], host='cite')),
        content_includes=first_case.name,
    )
@pytest.mark.django_db(databases=['default', 'capdb', 'user_data'])
@pytest.mark.parametrize('response_type', ['html', 'pdf'])
def test_single_case(client, auth_client, token_auth_client, case_factory, elasticsearch, response_type, django_assert_num_queries, settings):
    """ Test /series/volume/case/ with one matching case """
    # set up for viewing html or pdf
    case_text = "Case HTML"
    unrestricted_case = case_factory(jurisdiction__whitelisted=True, body_cache__html=case_text, first_page_order=2, last_page_order=2)
    restricted_case = case_factory(jurisdiction__whitelisted=False, body_cache__html=case_text, first_page_order=2, last_page_order=2)
    if response_type == 'pdf':
        # "REMEMBERED" is text expected inside the generated PDF fixture
        case_text = "REMEMBERED"
        unrestricted_url = unrestricted_case.get_pdf_url()
        url = restricted_case.get_pdf_url()
        content_type = 'application/pdf'
    else:
        unrestricted_url = unrestricted_case.get_full_frontend_url()
        url = restricted_case.get_full_frontend_url()
        content_type = None
    ### can load whitelisted case
    with django_assert_num_queries(select=2):
        check_response(client.get(unrestricted_url), content_includes=case_text, content_type=content_type)
    ### can load blacklisted case while logged out, via redirect
    # first we get redirect to JS page
    check_response(client.get(url, follow=True), content_includes="Click here to continue")
    # POSTing will set our cookies and let the case load
    response = client.post(reverse('set_cookie'), {'not_a_bot': 'yes', 'next': url}, follow=True)
    check_response(response, content_includes=case_text, content_type=content_type)
    session = client.session
    assert session['case_allowance_remaining'] == settings.API_CASE_DAILY_ALLOWANCE - 1
    # we can now load directly
    response = client.get(url)
    check_response(response, content_includes=case_text, content_type=content_type)
    session = client.session
    assert session['case_allowance_remaining'] == settings.API_CASE_DAILY_ALLOWANCE - 2
    # can no longer load if quota used up
    session['case_allowance_remaining'] = 0
    session.save()
    response = client.get(url)
    if response_type == 'pdf':
        assert response.status_code == 302  # PDFs redirect back to HTML version if quota exhausted
    else:
        check_response(response)
        assert case_text not in response.content.decode()
    session = client.session
    assert session['case_allowance_remaining'] == 0
    # check daily quota reset
    # rewind the last-updated timestamp past the 24-hour window
    session['case_allowance_last_updated'] -= 60 * 60 * 24 + 1
    session.save()
    response = client.get(url)
    check_response(response, content_includes=case_text, content_type=content_type)
    session = client.session
    assert session['case_allowance_remaining'] == settings.API_CASE_DAILY_ALLOWANCE - 1
    ### can load normally as logged-in user
    # both session-auth and token-auth clients draw from the user's allowance
    for c in [auth_client, token_auth_client]:
        response = c.get(url)
        check_response(response, content_includes=case_text, content_type=content_type)
        previous_case_allowance = c.auth_user.case_allowance_remaining
        c.auth_user.refresh_from_db()
        assert c.auth_user.case_allowance_remaining == previous_case_allowance - 1
@pytest.mark.django_db(databases=['default', 'capdb', 'user_data'])
def test_single_case_fastcase(client, fastcase_case_factory, elasticsearch):
    """Fastcase-sourced cases render their body plus a Fastcase attribution notice."""
    expected_body = "Case HTML"
    case = fastcase_case_factory(
        body_cache__html=expected_body, first_page_order=2, last_page_order=2
    )
    response = client.get(case.get_full_frontend_url())
    check_response(
        response,
        content_includes=[expected_body, "Case text courtesy of Fastcase"],
    )
@pytest.mark.django_db(databases=['capdb'])
def test_case_series_name_redirect(client, unrestricted_case, elasticsearch):
    """ Test /series/volume/case/ with series redirect when not slugified"""
    cite = unrestricted_case.citations.first()
    # split "<volume> <series> <page>" into its three parts
    cite_parts = re.match(r'(\S+)\s+(.*?)\s+(\S+)$', cite.cite).groups()
    # series is not slugified, expect redirect
    response = client.get(
        reverse('citation', args=[cite_parts[1], cite_parts[0], cite_parts[2]], host='cite'))
    check_response(response, status_code=302)
    response = client.get(
        reverse('citation', args=[cite_parts[1], cite_parts[0], cite_parts[2]], host='cite'), follow=True)
    check_response(response)
    # series redirect works with case_id
    response = client.get(
        reverse('citation', args=[cite_parts[1], cite_parts[0], cite_parts[2], unrestricted_case.id], host='cite'))
    check_response(response, status_code=302)
    # BUG FIX: the follow-up request previously dropped the case id (a copy-paste of
    # the id-less request above), so the case_id redirect target was never exercised.
    response = client.get(
        reverse('citation', args=[cite_parts[1], cite_parts[0], cite_parts[2], unrestricted_case.id], host='cite'), follow=True)
    check_response(response)
def get_schema(response):
    """Extract and parse the single JSON-LD <script> tag from an HTML response."""
    page = BeautifulSoup(response.content.decode(), 'html.parser')
    ld_scripts = page.find_all('script', {'type': 'application/ld+json'})
    # exactly one structured-data block is expected per page
    assert len(ld_scripts) == 1
    return json.loads(ld_scripts[0].string)
@pytest.mark.django_db(databases=['default', 'capdb'])
def test_schema_in_case(client, restricted_case, unrestricted_case, fastcase_case, elasticsearch):
    """Case pages embed schema.org JSON-LD; restricted cases add paywall markup."""
    ### unrestricted case
    for open_case in (unrestricted_case, fastcase_case):
        response = client.get(open_case.get_full_frontend_url())
        check_response(response, content_includes=open_case.body_cache.html)
        schema = get_schema(response)
        assert schema["headline"] == open_case.name_abbreviation
        assert schema["author"]["name"] == open_case.court.name
        # if case is whitelisted, extra info about inaccessibility is not needed
        # https://developers.google.com/search/docs/data-types/paywalled-content
        assert "hasPart" not in schema
    ### blacklisted case
    response = client.post(
        reverse('set_cookie'),
        {'not_a_bot': 'yes', 'next': restricted_case.get_full_frontend_url()},
        follow=True,
    )
    check_response(response, content_includes=restricted_case.body_cache.html)
    schema = get_schema(response)
    assert schema["headline"] == restricted_case.name_abbreviation
    assert schema["author"]["name"] == restricted_case.court.name
    # if case is blacklisted, we include more data
    assert "hasPart" in schema
    assert schema["hasPart"]["isAccessibleForFree"] == 'False'
@pytest.mark.django_db(databases=['default', 'capdb'])
def test_schema_in_case_as_google_bot(client, restricted_case, elasticsearch):
    """Google's crawler gets full case text (uncached) even with quota exhausted."""
    # our bot has seen too many cases!
    session = client.session
    session['case_allowance_remaining'] = 0
    session.save()
    assert session['case_allowance_remaining'] == 0
    # impersonate googlebot for the duration of the request
    with mock.patch('cite.views.is_google_bot', lambda request: True):
        response = client.get(restricted_case.get_full_frontend_url(), follow=True)
    assert not is_cached(response)
    # show cases anyway
    check_response(response, content_includes=restricted_case.body_cache.html)
    schema = get_schema(response)
    assert schema["headline"] == restricted_case.name_abbreviation
    assert schema["author"]["name"] == restricted_case.court.name
    assert "hasPart" in schema
    assert schema["hasPart"]["isAccessibleForFree"] == 'False'
@pytest.mark.django_db(databases=['default', 'capdb', 'user_data'])
def test_no_index(auth_client, case_factory, elasticsearch):
    """A case flagged no_index renders a robots noindex meta tag."""
    no_index_case = case_factory(no_index=True)
    response = auth_client.get(no_index_case.get_full_frontend_url())
    check_response(response, content_includes='content="noindex"')
@pytest.mark.django_db(databases=['capdb'])
def test_robots(client, case):
    """robots.txt disallows a case only while robots_txt_until lies in the future."""
    disallow_line = "Disallow: %s" % case.frontend_url
    url = reverse('robots', host='cite')

    # default version is empty:
    check_response(
        client.get(url), content_type="text/plain",
        content_includes='User-agent: *', content_excludes=disallow_line)

    # case with robots_txt_until in future is included:
    case.no_index = True
    case.robots_txt_until = timezone.now() + timedelta(days=1)
    case.save()
    check_response(client.get(url), content_type="text/plain", content_includes=disallow_line)

    # case with robots_txt_until in past is excluded:
    case.robots_txt_until = timezone.now() - timedelta(days=1)
    case.save()
    check_response(
        client.get(url), content_type="text/plain",
        content_includes='User-agent: *', content_excludes=disallow_line)
@pytest.mark.django_db(databases=['capdb'])
def test_geolocation_log(client, unrestricted_case, elasticsearch, settings, caplog):
    """ Test state-level geolocation logging in case browser """
    if not Path(settings.GEOIP_PATH).exists():
        # only test geolocation if database file is available.
        # FIX: skip instead of silently returning, so a missing GeoIP database is
        # visible in the test report rather than counting as a pass.
        pytest.skip("GeoIP database file not available")
    settings.GEOLOCATION_FEATURE = True
    # 128.103.1.1 is a Harvard (Massachusetts) address; the view should log its state
    check_response(client.get(unrestricted_case.get_full_frontend_url(), HTTP_X_FORWARDED_FOR='128.103.1.1'))
    assert "Someone from Massachusetts, United States read a case" in caplog.text
### Extract single page image from a volume PDF with VolumeMetadata's extract_page_image ###
@pytest.mark.django_db(databases=['default', 'capdb'])
def test_retrieve_page_image(admin_client, auth_client, volume_metadata):
    """Admins can fetch a page image from a volume PDF; ordinary users are redirected."""
    volume_metadata.pdf_file = "fake_volume.pdf"
    volume_metadata.save()
    page_image_url = reverse('page_image', args=[volume_metadata.pk, '2'], host='cite')
    # admin receives a PNG payload
    admin_response = admin_client.get(page_image_url)
    check_response(admin_response, content_type="image/png")
    assert b'\x89PNG' in admin_response.content
    # non-admin authenticated user is redirected away
    check_response(auth_client.get(page_image_url), status_code=302)
@pytest.mark.django_db(databases=["default", "capdb"])
def test_case_editor(
    reset_sequences, admin_client, auth_client, unrestricted_case_factory
):
    """Admin case editor: access control, OCR text edits, metadata edits, and correction logging."""
    unrestricted_case = unrestricted_case_factory(first_page_order=1, last_page_order=3)
    url = reverse("case_editor", args=[unrestricted_case.pk], host="cite")
    # only admins may open the editor; regular authenticated users are redirected
    response = admin_client.get(url)
    check_response(response)
    response = auth_client.get(url)
    check_response(response, status_code=302)
    # make an edit
    unrestricted_case.sync_case_body_cache()
    body_cache = unrestricted_case.body_cache
    old_html = body_cache.html
    old_first_page = unrestricted_case.first_page
    description = "Made some edits"
    page = unrestricted_case.structure.pages.first()
    # POST the edit payload: each "metadata" entry is an [old_value, new_value] pair;
    # "edit_list" maps page id -> OCR block id -> index -> [old_text, new_text]
    # (presumably a word/line index within the block — confirm against the editor JS)
    response = admin_client.post(
        url,
        json.dumps(
            {
                "metadata": {
                    "name": [unrestricted_case.name, "new name"],
                    "decision_date_original": [
                        unrestricted_case.decision_date_original,
                        "2020-01-01",
                    ],
                    # first_page is not an editable field; the view should ignore this
                    "first_page": [old_first_page, "ignore this"],
                    "human_corrected": [False, True],
                },
                "description": description,
                "edit_list": {
                    page.id: {
                        "BL_81.3": {
                            3: ["Case text 0", "Replacement text"],
                        }
                    }
                },
            }
        ),
        content_type="application/json",
    )
    check_response(response)
    # check OCR edit
    body_cache.refresh_from_db()
    new_html = body_cache.html
    # slice [3:] drops the unified_diff header lines, leaving only the -/+ change lines
    assert list(unified_diff(old_html.splitlines(), new_html.splitlines(), n=0))[
        3:
    ] == [
        '- <h4 class="parties" id="b81-4" data-blocks=\'[["BL_81.3",0,[226,1320,752,926]]]\'>Case text 0</h4>',
        '+ <h4 class="parties" id="b81-4" data-blocks=\'[["BL_81.3",0,[226,1320,752,926]]]\'>Replacement text</h4>',
    ]
    # check metadata
    unrestricted_case.refresh_from_db()
    assert unrestricted_case.name == "new name"
    assert unrestricted_case.decision_date_original == "2020-01-01"
    # decision_date is derived from decision_date_original
    assert unrestricted_case.decision_date == datetime.date(year=2020, month=1, day=1)
    assert unrestricted_case.human_corrected is True
    assert unrestricted_case.first_page == old_first_page  # change ignored
    # check log
    log_entry = unrestricted_case.correction_logs.first()
    assert log_entry.description == description
    assert log_entry.user_id == admin_client.auth_user.id
@pytest.mark.django_db(databases=['capdb'])
def test_citations_page(client, case_factory, elasticsearch):
    """The citations page lists cases citing the target and omits non-citing ones."""
    dest_case = case_factory()
    dest_cite = dest_case.citations.first()
    # build two cases whose text cites dest_case, plus one unrelated case
    citing_cases = []
    for _ in range(2):
        citing_case = case_factory()
        set_case_text(citing_case, dest_cite.cite)
        citing_case.sync_case_body_cache()
        citing_cases.append(citing_case)
    non_citing_case = case_factory()
    update_elasticsearch_from_queue()
    response = client.get(reverse('citations', host='cite') + f'?q={dest_case.pk}')
    check_response(
        response,
        content_includes=[c.name_abbreviation for c in citing_cases],
        content_excludes=[non_citing_case.name_abbreviation],
    )
@pytest.mark.django_db(databases=['capdb'])
def test_random_case(client, case_factory, elasticsearch):
    """ Test random endpoint returns both cases eventually. """
    # set up two cases
    expected_urls = set()
    for _ in range(2):
        case = case_factory()
        CaseAnalysis(case=case, key='word_count', value=2000).save()
        expected_urls.add(case.get_full_frontend_url())
    update_elasticsearch_from_queue()
    # try 20 times to get both
    seen_urls = set()
    for _ in range(20):
        response = client.get(reverse('random', host='cite'))
        check_response(response, status_code=302)
        assert response.url in expected_urls
        seen_urls.add(response.url)
        if seen_urls == expected_urls:
            break
    else:
        # for-else: only reached if the loop never broke, i.e. one case was never served
        raise Exception(f'Failed to redirect to {expected_urls - seen_urls} after 20 tries.')
@pytest.mark.django_db(databases=['capdb', 'default', 'user_data'])
def test_redact_case_tool(admin_client, case, elasticsearch):
    """Admin redaction tool: redact one word, elide another, then verify the rendering."""
    case.sync_case_body_cache()
    update_elasticsearch_from_queue()
    redact_url = reverse('redact_case', args=[case.pk])
    check_response(admin_client.post(redact_url, {'kind': 'redact', 'text': 'Case'}))
    check_response(admin_client.post(redact_url, {'kind': 'elide', 'text': 'text'}))
    # both operations should be persisted on the case record
    case.refresh_from_db()
    assert case.no_index_redacted == {"Case": "redacted"}
    assert case.no_index_elided == {"text": "..."}
    response = admin_client.get(case.get_full_frontend_url())
    check_response(response, content_includes=[
        "[ redacted ]",
        "<span class='elided-text' role='button' tabindex='0' data-hidden-text='text'>...</span>",
    ])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.